Diffstat (limited to 'drivers')
-rw-r--r--drivers/accel/habanalabs/common/memory.c23
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2.c2
-rw-r--r--drivers/acpi/apei/einj-core.c17
-rw-r--r--drivers/acpi/ec.c10
-rw-r--r--drivers/acpi/pfr_update.c2
-rw-r--r--drivers/acpi/processor_perflib.c5
-rw-r--r--drivers/ata/ahci.c57
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ahci_xgene.c7
-rw-r--r--drivers/ata/ata_piix.c1
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-eh.c9
-rw-r--r--drivers/ata/libata-sata.c53
-rw-r--r--drivers/ata/libata-scsi.c49
-rw-r--r--drivers/ata/pata_macio.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c12
-rw-r--r--drivers/atm/atmtcp.c17
-rw-r--r--drivers/base/power/main.c4
-rw-r--r--drivers/base/regmap/regmap-irq.c30
-rw-r--r--drivers/bcma/driver_gpio.c2
-rw-r--r--drivers/block/drbd/drbd_int.h39
-rw-r--r--drivers/block/drbd/drbd_main.c59
-rw-r--r--drivers/block/drbd/drbd_receiver.c264
-rw-r--r--drivers/block/drbd/drbd_worker.c56
-rw-r--r--drivers/block/loop.c45
-rw-r--r--drivers/block/ublk_drv.c100
-rw-r--r--drivers/block/zloop.c3
-rw-r--r--drivers/bluetooth/btmtk.c7
-rw-r--r--drivers/bluetooth/btnxpuart.c8
-rw-r--r--drivers/cdx/controller/cdx_rpmsg.c3
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c8
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c4
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c59
-rw-r--r--drivers/comedi/comedi_fops.c5
-rw-r--r--drivers/comedi/drivers.c23
-rw-r--r--drivers/comedi/drivers/pcl726.c3
-rw-r--r--drivers/cpufreq/intel_pstate.c1
-rw-r--r--drivers/cpufreq/rcpufreq_dt.rs5
-rw-r--r--drivers/cpuidle/governors/menu.c50
-rw-r--r--drivers/cxl/acpi.c59
-rw-r--r--drivers/cxl/core/Makefile1
-rw-r--r--drivers/cxl/core/acpi.c11
-rw-r--r--drivers/cxl/core/cdat.c6
-rw-r--r--drivers/cxl/core/core.h36
-rw-r--r--drivers/cxl/core/edac.c55
-rw-r--r--drivers/cxl/core/hdm.c125
-rw-r--r--drivers/cxl/core/mbox.c37
-rw-r--r--drivers/cxl/core/mce.h2
-rw-r--r--drivers/cxl/core/memdev.c52
-rw-r--r--drivers/cxl/core/port.c29
-rw-r--r--drivers/cxl/core/region.c486
-rw-r--r--drivers/cxl/core/trace.h133
-rw-r--r--drivers/cxl/cxl.h17
-rw-r--r--drivers/cxl/cxlmem.h12
-rw-r--r--drivers/cxl/pci.c2
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/cv1800b-dmamux.c259
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c12
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c5
-rw-r--r--drivers/dma/fsl-qdma.c3
-rw-r--r--drivers/dma/idxd/init.c1
-rw-r--r--drivers/dma/idxd/registers.h60
-rw-r--r--drivers/dma/mmp_tdma.c2
-rw-r--r--drivers/dma/mv_xor.c21
-rw-r--r--drivers/dma/nbpfaxi.c13
-rw-r--r--drivers/dma/qcom/gpi.c11
-rw-r--r--drivers/dma/sh/Kconfig2
-rw-r--r--drivers/dma/stm32/stm32-dma.c12
-rw-r--r--drivers/dma/stm32/stm32-dma3.c10
-rw-r--r--drivers/dma/stm32/stm32-mdma.c8
-rw-r--r--drivers/dma/sun4i-dma.c46
-rw-r--r--drivers/dma/ti/Kconfig4
-rw-r--r--drivers/dpll/zl3073x/Kconfig10
-rw-r--r--drivers/firewire/core-card.c59
-rw-r--r--drivers/firewire/core-cdev.c3
-rw-r--r--drivers/firewire/core-device.c15
-rw-r--r--drivers/firewire/core-transaction.c98
-rw-r--r--drivers/firewire/net.c4
-rw-r--r--drivers/firewire/ohci.c162
-rw-r--r--drivers/firmware/efi/Kconfig8
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/efi.c8
-rw-r--r--drivers/firmware/efi/libstub/printk.c4
-rw-r--r--drivers/firmware/efi/ovmf-debug-log.c111
-rw-r--r--drivers/fpga/zynq-fpga.c8
-rw-r--r--drivers/gpio/gpio-74x164.c4
-rw-r--r--drivers/gpio/gpio-adnp.c2
-rw-r--r--drivers/gpio/gpio-adp5520.c2
-rw-r--r--drivers/gpio/gpio-adp5585.c2
-rw-r--r--drivers/gpio/gpio-aggregator.c4
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c2
-rw-r--r--drivers/gpio/gpio-altera.c2
-rw-r--r--drivers/gpio/gpio-amd-fch.c2
-rw-r--r--drivers/gpio/gpio-amd8111.c2
-rw-r--r--drivers/gpio/gpio-arizona.c2
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c2
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-bd71815.c2
-rw-r--r--drivers/gpio/gpio-bd71828.c2
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c2
-rw-r--r--drivers/gpio/gpio-bt8xx.c2
-rw-r--r--drivers/gpio/gpio-cgbc.c2
-rw-r--r--drivers/gpio/gpio-creg-snps.c2
-rw-r--r--drivers/gpio/gpio-cros-ec.c2
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-cs5535.c2
-rw-r--r--drivers/gpio/gpio-da9052.c2
-rw-r--r--drivers/gpio/gpio-da9055.c2
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-dln2.c2
-rw-r--r--drivers/gpio/gpio-eic-sprd.c2
-rw-r--r--drivers/gpio/gpio-em.c2
-rw-r--r--drivers/gpio/gpio-exar.c2
-rw-r--r--drivers/gpio/gpio-f7188x.c2
-rw-r--r--drivers/gpio/gpio-graniterapids.c2
-rw-r--r--drivers/gpio/gpio-gw-pld.c2
-rw-r--r--drivers/gpio/gpio-htc-egpio.c2
-rw-r--r--drivers/gpio/gpio-ich.c2
-rw-r--r--drivers/gpio/gpio-imx-scu.c2
-rw-r--r--drivers/gpio/gpio-it87.c2
-rw-r--r--drivers/gpio/gpio-janz-ttl.c2
-rw-r--r--drivers/gpio/gpio-kempld.c2
-rw-r--r--drivers/gpio/gpio-latch.c4
-rw-r--r--drivers/gpio/gpio-ljca.c2
-rw-r--r--drivers/gpio/gpio-logicvc.c2
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c2
-rw-r--r--drivers/gpio/gpio-loongson.c2
-rw-r--r--drivers/gpio/gpio-lp3943.c2
-rw-r--r--drivers/gpio/gpio-lp873x.c2
-rw-r--r--drivers/gpio/gpio-lp87565.c2
-rw-r--r--drivers/gpio/gpio-lpc18xx.c2
-rw-r--r--drivers/gpio/gpio-lpc32xx.c10
-rw-r--r--drivers/gpio/gpio-macsmc.c2
-rw-r--r--drivers/gpio/gpio-madera.c2
-rw-r--r--drivers/gpio/gpio-max730x.c2
-rw-r--r--drivers/gpio/gpio-max732x.c4
-rw-r--r--drivers/gpio/gpio-max77620.c2
-rw-r--r--drivers/gpio/gpio-max77650.c2
-rw-r--r--drivers/gpio/gpio-max77759.c2
-rw-r--r--drivers/gpio/gpio-mb86s7x.c2
-rw-r--r--drivers/gpio/gpio-mc33880.c2
-rw-r--r--drivers/gpio/gpio-ml-ioh.c2
-rw-r--r--drivers/gpio/gpio-mlxbf2.c2
-rw-r--r--drivers/gpio/gpio-mlxbf3.c54
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c2
-rw-r--r--drivers/gpio/gpio-mmio.c24
-rw-r--r--drivers/gpio/gpio-mockup.c4
-rw-r--r--drivers/gpio/gpio-moxtet.c2
-rw-r--r--drivers/gpio/gpio-mpc5200.c4
-rw-r--r--drivers/gpio/gpio-mpfs.c2
-rw-r--r--drivers/gpio/gpio-mpsse.c4
-rw-r--r--drivers/gpio/gpio-msc313.c2
-rw-r--r--drivers/gpio/gpio-mvebu.c2
-rw-r--r--drivers/gpio/gpio-nomadik.c2
-rw-r--r--drivers/gpio/gpio-npcm-sgpio.c4
-rw-r--r--drivers/gpio/gpio-octeon.c2
-rw-r--r--drivers/gpio/gpio-omap.c4
-rw-r--r--drivers/gpio/gpio-palmas.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-pca9570.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c4
-rw-r--r--drivers/gpio/gpio-pch.c2
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c10
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c2
-rw-r--r--drivers/gpio/gpio-rc5t583.c2
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-rdc321x.c2
-rw-r--r--drivers/gpio/gpio-reg.c6
-rw-r--r--drivers/gpio/gpio-regmap.c4
-rw-r--r--drivers/gpio/gpio-rockchip.c2
-rw-r--r--drivers/gpio/gpio-rtd.c2
-rw-r--r--drivers/gpio/gpio-sa1100.c2
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c2
-rw-r--r--drivers/gpio/gpio-sch.c2
-rw-r--r--drivers/gpio/gpio-sch311x.c2
-rw-r--r--drivers/gpio/gpio-sim.c4
-rw-r--r--drivers/gpio/gpio-siox.c2
-rw-r--r--drivers/gpio/gpio-spear-spics.c2
-rw-r--r--drivers/gpio/gpio-sprd.c2
-rw-r--r--drivers/gpio/gpio-stmpe.c2
-rw-r--r--drivers/gpio/gpio-stp-xway.c2
-rw-r--r--drivers/gpio/gpio-syscon.c4
-rw-r--r--drivers/gpio/gpio-tangier.c2
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpio-tegra.c2
-rw-r--r--drivers/gpio/gpio-tegra186.c2
-rw-r--r--drivers/gpio/gpio-thunderx.c4
-rw-r--r--drivers/gpio/gpio-timberdale.c2
-rw-r--r--drivers/gpio/gpio-tpic2810.c4
-rw-r--r--drivers/gpio/gpio-tps65086.c2
-rw-r--r--drivers/gpio/gpio-tps65218.c2
-rw-r--r--drivers/gpio/gpio-tps65219.c4
-rw-r--r--drivers/gpio/gpio-tps6586x.c2
-rw-r--r--drivers/gpio/gpio-tps65910.c2
-rw-r--r--drivers/gpio/gpio-tps65912.c2
-rw-r--r--drivers/gpio/gpio-tps68470.c2
-rw-r--r--drivers/gpio/gpio-tqmx86.c2
-rw-r--r--drivers/gpio/gpio-ts4900.c2
-rw-r--r--drivers/gpio/gpio-ts5500.c2
-rw-r--r--drivers/gpio/gpio-twl4030.c2
-rw-r--r--drivers/gpio/gpio-twl6040.c2
-rw-r--r--drivers/gpio/gpio-uniphier.c4
-rw-r--r--drivers/gpio/gpio-viperboard.c4
-rw-r--r--drivers/gpio/gpio-virtio.c2
-rw-r--r--drivers/gpio/gpio-vx855.c2
-rw-r--r--drivers/gpio/gpio-wcd934x.c2
-rw-r--r--drivers/gpio/gpio-wcove.c2
-rw-r--r--drivers/gpio/gpio-winbond.c2
-rw-r--r--drivers/gpio/gpio-wm831x.c2
-rw-r--r--drivers/gpio/gpio-wm8350.c2
-rw-r--r--drivers/gpio/gpio-wm8994.c2
-rw-r--r--drivers/gpio/gpio-xgene.c2
-rw-r--r--drivers/gpio/gpio-xilinx.c4
-rw-r--r--drivers/gpio/gpio-xlp.c2
-rw-r--r--drivers/gpio/gpio-xra1403.c2
-rw-r--r--drivers/gpio/gpio-xtensa.c2
-rw-r--r--drivers/gpio/gpio-zevio.c2
-rw-r--r--drivers/gpio/gpio-zynq.c2
-rw-r--r--drivers/gpio/gpio-zynqmp-modepin.c2
-rw-r--r--drivers/gpio/gpiolib-acpi-quirks.c14
-rw-r--r--drivers/gpio/gpiolib.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v12_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c61
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c20
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c28
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h20
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c3
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c30
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c4
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c2
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c2
-rw-r--r--drivers/gpu/drm/drm_bridge.c1
-rw-r--r--drivers/gpu/drm/drm_gpuvm.c82
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs26
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c14
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c22
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c5
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c93
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c20
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c21
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c47
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h38
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h19
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h34
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c59
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c11
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c13
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c72
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c60
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c20
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c16
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c10
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c2
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml14
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi.xml28
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c4
-rw-r--r--drivers/gpu/drm/nova/file.rs3
-rw-r--r--drivers/gpu/drm/nova/nova.rs2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c5
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c9
-rw-r--r--drivers/gpu/drm/tegra/gem.c2
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c8
-rw-r--r--drivers/gpu/drm/xe/Kconfig1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_bars.h1
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c8
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c3
-rw-r--r--drivers/gpu/drm/xe/xe_device.c9
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c10
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c57
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c6
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c28
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c29
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c7
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c44
-rw-r--r--drivers/gpu/drm/xe/xe_module.c8
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c29
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.c2
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c51
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c27
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c2
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c56
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h17
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h2
-rw-r--r--drivers/gpu/nova-core/driver.rs2
-rw-r--r--drivers/gpu/nova-core/firmware.rs5
-rw-r--r--drivers/gpu/nova-core/nova_core.rs2
-rw-r--r--drivers/gpu/nova-core/regs.rs2
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs2
-rw-r--r--drivers/gpu/nova-core/util.rs4
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c2
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/hid-asus.c8
-rw-r--r--drivers/hid/hid-cp2112.c2
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-elecom.c2
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-input-test.c10
-rw-r--r--drivers/hid/hid-input.c51
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-mcp2200.c4
-rw-r--r--drivers/hid/hid-mcp2221.c6
-rw-r--r--drivers/hid/hid-multitouch.c8
-rw-r--r--drivers/hid/hid-ntrig.c3
-rw-r--r--drivers/hid/hid-quirks.c3
-rw-r--r--drivers/hid/hid-steam.c35
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c3
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c3
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c3
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h3
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c1
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h2
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c4
-rw-r--r--drivers/hid/wacom_wac.c1
-rw-r--r--drivers/hwmon/ltc2992.c4
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c2
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c6
-rw-r--r--drivers/i2c/busses/i2c-rtl9300.c20
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c32
-rw-r--r--drivers/i2c/busses/i2c-tegra.c64
-rw-r--r--drivers/i2c/i2c-core-acpi.c1
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-mule.c3
-rw-r--r--drivers/i3c/device.c11
-rw-r--r--drivers/i3c/internals.h38
-rw-r--r--drivers/i3c/master.c38
-rw-r--r--drivers/i3c/master/Kconfig10
-rw-r--r--drivers/i3c/master/Makefile1
-rw-r--r--drivers/i3c/master/dw-i3c-master.c47
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c90
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c2
-rw-r--r--drivers/i3c/master/renesas-i3c.c1404
-rw-r--r--drivers/i3c/master/svc-i3c-master.c32
-rw-r--r--drivers/idle/intel_idle.c2
-rw-r--r--drivers/iio/accel/sca3300.c2
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ad4130.c2
-rw-r--r--drivers/iio/adc/ad4170-4.c2
-rw-r--r--drivers/iio/adc/ad7124.c14
-rw-r--r--drivers/iio/adc/ad7173.c87
-rw-r--r--drivers/iio/adc/ad7380.c1
-rw-r--r--drivers/iio/adc/ad7768-1.c2
-rw-r--r--drivers/iio/adc/rohm-bd79124.c4
-rw-r--r--drivers/iio/adc/rzg2l_adc.c33
-rw-r--r--drivers/iio/adc/ti-ads7950.c2
-rw-r--r--drivers/iio/addac/ad74115.c2
-rw-r--r--drivers/iio/addac/ad74413r.c4
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c6
-rw-r--r--drivers/iio/light/as73211.c2
-rw-r--r--drivers/iio/pressure/bmp280-core.c9
-rw-r--r--drivers/iio/proximity/isl29501.c16
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c26
-rw-r--r--drivers/infiniband/core/umem_odp.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c23
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c30
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c2
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c5
-rw-r--r--drivers/input/Makefile2
-rw-r--r--drivers/input/evdev.c8
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joystick/xpad.c64
-rw-r--r--drivers/input/keyboard/adp5588-keys.c7
-rw-r--r--drivers/input/keyboard/atkbd.c12
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c17
-rw-r--r--drivers/input/keyboard/samsung-keypad.c137
-rw-r--r--drivers/input/misc/Kconfig7
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/cs40l50-vibra.c1
-rw-r--r--drivers/input/misc/max77693-haptic.c41
-rw-r--r--drivers/input/misc/max8997_haptic.c96
-rw-r--r--drivers/input/misc/pcf50633-input.c113
-rw-r--r--drivers/input/rmi4/Kconfig15
-rw-r--r--drivers/input/rmi4/Makefile2
-rw-r--r--drivers/input/rmi4/rmi_bus.c6
-rw-r--r--drivers/input/rmi4/rmi_driver.h2
-rw-r--r--drivers/input/rmi4/rmi_f1a.c143
-rw-r--r--drivers/input/rmi4/rmi_f21.c179
-rw-r--r--drivers/input/touch-overlay.c277
-rw-r--r--drivers/input/touchscreen/ad7879.c9
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c26
-rw-r--r--drivers/input/touchscreen/goodix.c50
-rw-r--r--drivers/input/touchscreen/st1232.c35
-rw-r--r--drivers/iommu/amd/init.c4
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c8
-rw-r--r--drivers/iommu/intel/iommu.c2
-rw-r--r--drivers/iommu/iommufd/viommu.c4
-rw-r--r--drivers/iommu/riscv/iommu.c2
-rw-r--r--drivers/iommu/virtio-iommu.c15
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-gic-v5-its.c1
-rw-r--r--drivers/irqchip/irq-gic-v5-iwb.c11
-rw-r--r--drivers/irqchip/irq-msi-lib.c6
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c10
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c12
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c2
-rw-r--r--drivers/leds/leds-pca9532.c2
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/mailbox/Kconfig19
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/ast2700-mailbox.c235
-rw-r--r--drivers/mailbox/bcm74110-mailbox.c656
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c10
-rw-r--r--drivers/mailbox/pcc.c102
-rw-r--r--drivers/mailbox/qcom-ipcc.c3
-rw-r--r--drivers/md/dm-flakey.c9
-rw-r--r--drivers/md/dm-ima.c42
-rw-r--r--drivers/md/dm-path-selector.c8
-rw-r--r--drivers/md/dm-path-selector.h2
-rw-r--r--drivers/md/dm-ps-historical-service-time.c9
-rw-r--r--drivers/md/dm-ps-io-affinity.c5
-rw-r--r--drivers/md/dm-ps-queue-length.c9
-rw-r--r--drivers/md/dm-ps-round-robin.c9
-rw-r--r--drivers/md/dm-ps-service-time.c9
-rw-r--r--drivers/md/dm-raid.c49
-rw-r--r--drivers/md/dm-table.c10
-rw-r--r--drivers/md/dm-thin.c7
-rw-r--r--drivers/md/dm-vdo/funnel-workqueue.c3
-rw-r--r--drivers/md/dm-verity-fec.c4
-rw-r--r--drivers/md/dm-verity-target.c185
-rw-r--r--drivers/md/dm-verity.h22
-rw-r--r--drivers/md/dm-zone.c2
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c11
-rw-r--r--drivers/md/md-bitmap.c8
-rw-r--r--drivers/md/md-cluster.c16
-rw-r--r--drivers/md/md.c187
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid0.c6
-rw-r--r--drivers/md/raid1-10.c2
-rw-r--r--drivers/md/raid1.c94
-rw-r--r--drivers/md/raid1.h22
-rw-r--r--drivers/md/raid10.c16
-rw-r--r--drivers/md/raid5-ppl.c6
-rw-r--r--drivers/md/raid5.c30
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c2
-rw-r--r--drivers/media/i2c/alvium-csi2.c1
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c7
-rw-r--r--drivers/media/i2c/ds90ub913.c2
-rw-r--r--drivers/media/i2c/ds90ub953.c2
-rw-r--r--drivers/media/i2c/dw9768.c1
-rw-r--r--drivers/media/i2c/gc0308.c3
-rw-r--r--drivers/media/i2c/gc2145.c3
-rw-r--r--drivers/media/i2c/imx219.c2
-rw-r--r--drivers/media/i2c/imx283.c3
-rw-r--r--drivers/media/i2c/imx290.c3
-rw-r--r--drivers/media/i2c/imx296.c1
-rw-r--r--drivers/media/i2c/imx415.c1
-rw-r--r--drivers/media/i2c/max9286.c2
-rw-r--r--drivers/media/i2c/max96717.c2
-rw-r--r--drivers/media/i2c/mt9m114.c6
-rw-r--r--drivers/media/i2c/ov4689.c3
-rw-r--r--drivers/media/i2c/ov5640.c4
-rw-r--r--drivers/media/i2c/ov5645.c3
-rw-r--r--drivers/media/i2c/ov64a40.c7
-rw-r--r--drivers/media/i2c/ov8858.c2
-rw-r--r--drivers/media/i2c/st-mipid02.c2
-rw-r--r--drivers/media/i2c/tc358746.c5
-rw-r--r--drivers/media/i2c/thp7312.c4
-rw-r--r--drivers/media/i2c/vd55g1.c4
-rw-r--r--drivers/media/i2c/vd56g3.c4
-rw-r--r--drivers/media/i2c/video-i2c.c4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-gpio.c2
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c4
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c5
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/h264.c2
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.c1
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c12
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/pisp_be.c2
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.c17
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c1
-rw-r--r--drivers/media/rc/gpio-ir-recv.c4
-rw-r--r--drivers/memstick/core/memstick.c1
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c1
-rw-r--r--drivers/mfd/sm501.c2
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/ucb1x00-core.c2
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c83
-rw-r--r--drivers/misc/ti_fpc202.c2
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c33
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c37
-rw-r--r--drivers/mmc/host/sdhci_am654.c18
-rw-r--r--drivers/most/core.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c67
-rw-r--r--drivers/net/bonding/bond_options.c1
-rw-r--r--drivers/net/can/spi/mcp251x.c4
-rw-r--r--drivers/net/dsa/b53/b53_common.c2
-rw-r--r--drivers/net/dsa/microchip/ksz8.c20
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c7
-rw-r--r--drivers/net/dsa/mt7530.c2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c2
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c2
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c59
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c14
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c14
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c61
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c723
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h87
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.h1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c18
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c15
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h6
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c21
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h2
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c26
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c15
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c78
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c17
-rw-r--r--drivers/net/hyperv/netvsc_drv.c29
-rw-r--r--drivers/net/hyperv/rndis_filter.c23
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/ipa_sysfs.c6
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c5
-rw-r--r--drivers/net/netdevsim/netdev.c10
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/mdio_bus_provider.c3
-rw-r--r--drivers/net/phy/mscc/mscc.h16
-rw-r--r--drivers/net/phy/mscc/mscc_main.c10
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c84
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.h1
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c23
-rw-r--r--drivers/net/phy/qcom/qca807x.c2
-rw-r--r--drivers/net/phy/smsc.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c17
-rw-r--r--drivers/net/ppp/pptp.c18
-rw-r--r--drivers/net/pse-pd/pd692x0.c63
-rw-r--r--drivers/net/usb/asix_devices.c1
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c4
-rw-r--r--drivers/net/usb/usbnet.c6
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c2
-rw-r--r--drivers/nvme/host/auth.c4
-rw-r--r--drivers/nvme/host/core.c16
-rw-r--r--drivers/nvme/host/fc.c4
-rw-r--r--drivers/nvme/host/pci.c2
-rw-r--r--drivers/nvme/host/tcp.c2
-rw-r--r--drivers/nvme/target/core.c14
-rw-r--r--drivers/nvme/target/fc.c6
-rw-r--r--drivers/nvme/target/passthru.c2
-rw-r--r--drivers/nvme/target/rdma.c6
-rw-r--r--drivers/of/device.c4
-rw-r--r--drivers/of/dynamic.c9
-rw-r--r--drivers/of/of_reserved_mem.c17
-rw-r--r--drivers/pci/bus.c5
-rw-r--r--drivers/pci/controller/Kconfig11
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c2
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h20
-rw-r--r--drivers/pci/controller/dwc/Kconfig12
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c40
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-debugfs.c16
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c107
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c14
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h19
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c16
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c327
-rw-r--r--drivers/pci/controller/dwc/pcie-sophgo.c257
-rw-r--r--drivers/pci/controller/mobiveil/Kconfig1
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c48
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.h1
-rw-r--r--drivers/pci/controller/pci-aardvark.c59
-rw-r--r--drivers/pci/controller/pci-host-common.c5
-rw-r--r--drivers/pci/controller/pci-host-common.h2
-rw-r--r--drivers/pci/controller/pci-mvebu.c6
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c428
-rw-r--r--drivers/pci/controller/pci-xgene.c33
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c45
-rw-r--r--drivers/pci/controller/pcie-altera.c3
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c80
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c46
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c68
-rw-r--r--drivers/pci/controller/pcie-mediatek.c48
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c70
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c4
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c64
-rw-r--r--drivers/pci/controller/pcie-rockchip.h26
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c49
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c46
-rw-r--r--drivers/pci/controller/pcie-xilinx.c58
-rw-r--r--drivers/pci/controller/plda/Kconfig1
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c45
-rw-r--r--drivers/pci/controller/plda/pcie-plda.h1
-rw-r--r--drivers/pci/controller/plda/pcie-starfive.c2
-rw-r--r--drivers/pci/controller/vmd.c242
-rw-r--r--drivers/pci/endpoint/Kconfig8
-rw-r--r--drivers/pci/endpoint/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c130
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c144
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c1
-rw-r--r--drivers/pci/endpoint/pci-ep-msi.c100
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c40
-rw-r--r--drivers/pci/hotplug/TODO4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c2
-rw-r--r--drivers/pci/hotplug/pnv_php.c248
-rw-r--r--drivers/pci/iov.c153
-rw-r--r--drivers/pci/msi/msi.c2
-rw-r--r--drivers/pci/pci-acpi.c7
-rw-r--r--drivers/pci/pci-driver.c6
-rw-r--r--drivers/pci/pci.c30
-rw-r--r--drivers/pci/pci.h84
-rw-r--r--drivers/pci/pcie/aer.c7
-rw-r--r--drivers/pci/pcie/aspm.c11
-rw-r--r--drivers/pci/pcie/portdrv.c2
-rw-r--r--drivers/pci/pcie/ptm.c2
-rw-r--r--drivers/pci/probe.c12
-rw-r--r--drivers/pci/quirks.c6
-rw-r--r--drivers/pci/setup-bus.c3
-rw-r--r--drivers/pci/setup-res.c35
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c1
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c2
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c180
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c288
-rw-r--r--drivers/phy/marvell/phy-pxa-usb.c1
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c65
-rw-r--r--drivers/phy/phy-snps-eusb2.c46
-rw-r--r--drivers/phy/qualcomm/Kconfig16
-rw-r--r--drivers/phy/qualcomm/Makefile1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c89
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31-eusb2.c324
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c224
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c89
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h38
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h64
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h11
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h68
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c141
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c15
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c52
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c32
-rw-r--r--drivers/phy/st/phy-stih407-usb.c2
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c4
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c1
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/pinctrl/Makefile3
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c14
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm4908.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c8
-rw-r--r--drivers/pinctrl/berlin/berlin.c10
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c21
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c23
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c14
-rw-r--r--drivers/pinctrl/core.c13
-rw-r--r--drivers/pinctrl/core.h2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c18
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c4
-rw-r--r--drivers/pinctrl/mediatek/Kconfig12
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-airoha.c20
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8189.c1700
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8189.h2452
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c2
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c122
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c22
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.c4
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c10
-rw-r--r--drivers/pinctrl/pinctrl-amdisp.c4
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c4
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c2
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c21
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c10
-rw-r--r--drivers/pinctrl/pinctrl-at91.c4
-rw-r--r--drivers/pinctrl/pinctrl-aw9523.c22
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c4
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c2
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c4
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c12
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c6
-rw-r--r--drivers/pinctrl/pinctrl-eic7700.c704
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c9
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c2
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c7
-rw-r--r--drivers/pinctrl/pinctrl-k210.c2
-rw-r--r--drivers/pinctrl/pinctrl-k230.c13
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c17
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c9
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c16
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c2
-rw-r--r--drivers/pinctrl/pinctrl-mlxbf3.c2
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c2
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c4
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c6
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c10
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c2
-rw-r--r--drivers/pinctrl/pinctrl-rp1.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c4
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c2
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c4
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c2
-rw-r--r--drivers/pinctrl/pinctrl-xway.c16
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c2
-rw-r--r--drivers/pinctrl/pinmux.c45
-rw-r--r--drivers/pinctrl/pinmux.h10
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm8
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-milos.c1339
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/qcom/tlmm-test.c47
-rw-r--r--drivers/pinctrl/renesas/Kconfig249
-rw-r--r--drivers/pinctrl/renesas/gpio.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c5
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c5
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c51
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c103
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h4
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c9
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c8
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c5
-rw-r--r--drivers/pinctrl/stm32/Kconfig20
-rw-r--r--drivers/pinctrl/stm32/Makefile1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32-hdp.c720
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c146
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h22
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp257.c15
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c19
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c15
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c4
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c2
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c5
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c54
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c13
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c2
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c19
-rw-r--r--drivers/platform/x86/dell/dell-smbios-smm.c3
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c4
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h2
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c4
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c2
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c6
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c5
-rw-r--r--drivers/platform/x86/portwell-ec.c4
-rw-r--r--drivers/platform/x86/silicom-platform.c2
-rw-r--r--drivers/power/reset/Kconfig1
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c2
-rw-r--r--drivers/power/reset/qcom-pon.c30
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/bq2415x_charger.c2
-rw-r--r--drivers/power/supply/bq24190_charger.c16
-rw-r--r--drivers/power/supply/bq256xx_charger.c6
-rw-r--r--drivers/power/supply/bq25980_charger.c6
-rw-r--r--drivers/power/supply/cpcap-charger.c5
-rw-r--r--drivers/power/supply/max14577_charger.c4
-rw-r--r--drivers/power/supply/max1720x_battery.c13
-rw-r--r--drivers/power/supply/power_supply_core.c185
-rw-r--r--drivers/power/supply/qcom_battmgr.c25
-rw-r--r--drivers/power/supply/qcom_smbx.c (renamed from drivers/power/supply/qcom_pmi8998_charger.c)152
-rw-r--r--drivers/power/supply/twl4030_charger.c1
-rw-r--r--drivers/power/supply/ug3105_battery.c81
-rw-r--r--drivers/ptp/ptp_private.h5
-rw-r--r--drivers/ptp/ptp_vclock.c7
-rw-r--r--drivers/pwm/pwm-pca9685.c2
-rw-r--r--drivers/regulator/act8865-regulator.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/pca9450-regulator.c13
-rw-r--r--drivers/regulator/qcom-pm8008-regulator.c2
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c2
-rw-r--r--drivers/regulator/tps65219-regulator.c12
-rw-r--r--drivers/remoteproc/Kconfig11
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/pru_rproc.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c621
-rw-r--r--drivers/remoteproc/remoteproc_core.c2
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c2
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_common.c4
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c2
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c74
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c2
-rw-r--r--drivers/rtc/Kconfig21
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/lib.c40
-rw-r--r--drivers/rtc/rtc-ds1307.c30
-rw-r--r--drivers/rtc/rtc-ds1685.c4
-rw-r--r--drivers/rtc/rtc-hym8563.c15
-rw-r--r--drivers/rtc/rtc-m41t80.c25
-rw-r--r--drivers/rtc/rtc-max31335.c12
-rw-r--r--drivers/rtc/rtc-nct3018y.c15
-rw-r--r--drivers/rtc/rtc-pcf85063.c267
-rw-r--r--drivers/rtc/rtc-pcf8563.c15
-rw-r--r--drivers/rtc/rtc-rv3028.c15
-rw-r--r--drivers/rtc/rtc-rv3032.c21
-rw-r--r--drivers/rtc/rtc-s3c.c8
-rw-r--r--drivers/rtc/rtc-sh.c8
-rw-r--r--drivers/rtc/rtc-stm32.c2
-rw-r--r--drivers/rtc/sysfs.c64
-rw-r--r--drivers/rtc/test_rtc_lib.c (renamed from drivers/rtc/lib_test.c)0
-rw-r--r--drivers/s390/char/sclp.c11
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/scsi/aacraid/comminit.c3
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/libsas/sas_ata.c10
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_internal.h78
-rw-r--r--drivers/scsi/libsas/sas_phy.c6
-rw-r--r--drivers/scsi/libsas/sas_port.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_debug.c91
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c4
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c60
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/soc/fsl/qe/gpio.c4
-rw-r--r--drivers/soc/qcom/ubwc_config.c23
-rw-r--r--drivers/soc/renesas/pwc-rzv2m.c2
-rw-r--r--drivers/soc/tegra/pmc.c51
-rw-r--r--drivers/soundwire/amd_manager.c14
-rw-r--r--drivers/soundwire/bus.c6
-rw-r--r--drivers/soundwire/debugfs.c6
-rw-r--r--drivers/soundwire/intel_ace2x.c11
-rw-r--r--drivers/soundwire/intel_auxdevice.c1
-rw-r--r--drivers/soundwire/mipi_disco.c4
-rw-r--r--drivers/soundwire/qcom.c6
-rw-r--r--drivers/soundwire/stream.c2
-rw-r--r--drivers/spi/spi-cs42l43.c2
-rw-r--r--drivers/spi/spi-fsl-lpspi.c8
-rw-r--r--drivers/spi/spi-mem.c4
-rw-r--r--drivers/spi/spi-qpic-snand.c22
-rw-r--r--drivers/spi/spi-st-ssc4.c10
-rw-r--r--drivers/spi/spi-xcomm.c2
-rw-r--r--drivers/ssb/driver_gpio.c4
-rw-r--r--drivers/staging/greybus/gpio.c2
-rw-r--r--drivers/target/target_core_fabric_lib.c63
-rw-r--r--drivers/target/target_core_iblock.c33
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h4
-rw-r--r--drivers/target/target_core_pr.c18
-rw-r--r--drivers/tty/serial/8250/8250_rsa.c8
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/ufs/core/ufs-sysfs.c3
-rw-r--r--drivers/ufs/core/ufshcd.c193
-rw-r--r--drivers/ufs/host/ufs-mediatek.c330
-rw-r--r--drivers/ufs/host/ufs-mediatek.h32
-rw-r--r--drivers/ufs/host/ufs-qcom.c113
-rw-r--r--drivers/ufs/host/ufshcd-pci.c34
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c3
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c23
-rw-r--r--drivers/usb/core/hcd.c28
-rw-r--r--drivers/usb/core/quirks.c1
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/ep0.c20
-rw-r--r--drivers/usb/dwc3/gadget.c19
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c9
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-mem.c22
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c7
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci.c23
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/storage/realtek_cr.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h29
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c12
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c58
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h1
-rw-r--r--drivers/vdpa/mlx5/core/mr.c3
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c12
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c1
-rw-r--r--drivers/vfio/device_cdev.c38
-rw-r--r--drivers/vfio/group.c7
-rw-r--r--drivers/vfio/iommufd.c4
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c1
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c4
-rw-r--r--drivers/vfio/pci/mlx5/main.c1
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c2
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c2
-rw-r--r--drivers/vfio/pci/qat/main.c5
-rw-r--r--drivers/vfio/pci/vfio_pci.c1
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c24
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c3
-rw-r--r--drivers/vfio/pci/virtio/main.c3
-rw-r--r--drivers/vfio/vfio_iommu_type1.c7
-rw-r--r--drivers/vfio/vfio_main.c3
-rw-r--r--drivers/vhost/Kconfig18
-rw-r--r--drivers/vhost/net.c97
-rw-r--r--drivers/vhost/scsi.c24
-rw-r--r--drivers/vhost/vhost.c378
-rw-r--r--drivers/vhost/vhost.h30
-rw-r--r--drivers/vhost/vringh.c118
-rw-r--r--drivers/vhost/vsock.c15
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/core/Kconfig2
-rw-r--r--drivers/video/fbdev/core/fbcon.c86
-rw-r--r--drivers/video/fbdev/core/fbmem.c3
-rw-r--r--drivers/video/fbdev/core/svgalib.c95
-rw-r--r--drivers/video/fbdev/imxfb.c9
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c24
-rw-r--r--drivers/video/fbdev/nvidia/nv_local.h2
-rw-r--r--drivers/video/fbdev/simplefb.c17
-rw-r--r--drivers/video/fbdev/via/via-gpio.c2
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c27
-rw-r--r--drivers/virtio/virtio.c7
-rw-r--r--drivers/virtio/virtio_dma_buf.c2
-rw-r--r--drivers/virtio/virtio_input.c4
-rw-r--r--drivers/virtio/virtio_mmio.c52
-rw-r--r--drivers/virtio/virtio_pci_legacy_dev.c4
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c4
-rw-r--r--drivers/virtio/virtio_ring.c4
-rw-r--r--drivers/virtio/virtio_vdpa.c44
-rw-r--r--drivers/watchdog/dw_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c6
-rw-r--r--drivers/watchdog/it87_wdt.c4
-rw-r--r--drivers/watchdog/renesas_wdt.c8
-rw-r--r--drivers/watchdog/rti_wdt.c14
-rw-r--r--drivers/watchdog/sbsa_gwdt.c50
-rw-r--r--drivers/watchdog/watchdog_core.h8
-rw-r--r--drivers/watchdog/watchdog_pretimeout.c2
-rw-r--r--drivers/watchdog/ziirave_wdt.c3
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c23
1142 files changed, 24369 insertions, 8531 deletions
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 601fdbe70179..61472a381904 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
struct hl_ctx *ctx;
- if (!hl_dmabuf)
- return;
-
ctx = hl_dmabuf->ctx;
if (hl_dmabuf->memhash_hnode)
@@ -1859,7 +1856,12 @@ static int export_dmabuf(struct hl_ctx *ctx,
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct hl_device *hdev = ctx->hdev;
- int rc, fd;
+ CLASS(get_unused_fd, fd)(flags);
+
+ if (fd < 0) {
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
+ return fd;
+ }
exp_info.ops = &habanalabs_dmabuf_ops;
exp_info.size = total_size;
@@ -1872,13 +1874,6 @@ static int export_dmabuf(struct hl_ctx *ctx,
return PTR_ERR(hl_dmabuf->dmabuf);
}
- fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
- if (fd < 0) {
- dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
- rc = fd;
- goto err_dma_buf_put;
- }
-
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
atomic_inc(&ctx->hdev->dmabuf_export_cnt);
@@ -1890,13 +1885,9 @@ static int export_dmabuf(struct hl_ctx *ctx,
get_file(ctx->hpriv->file_priv->filp);
*dmabuf_fd = fd;
+ fd_install(take_fd(fd), hl_dmabuf->dmabuf->file);
return 0;
-
-err_dma_buf_put:
- hl_dmabuf->dmabuf->priv = NULL;
- dma_buf_put(hl_dmabuf->dmabuf);
- return rc;
}
static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index a38b88baadf2..5722e4128d3c 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -10437,7 +10437,7 @@ end:
(u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
WREG32(sob_addr, 0);
- kfree(lin_dma_pkts_arr);
+ kvfree(lin_dma_pkts_arr);
return rc;
}
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index bf8dc92a373a..2561b045acc7 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -315,7 +315,7 @@ static void __iomem *einj_get_parameter_address(void)
memcpy_fromio(&v5param, p, v5param_size);
acpi5 = 1;
check_vendor_extension(pa_v5, &v5param);
- if (available_error_type & ACPI65_EINJV2_SUPP) {
+ if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) {
len = v5param.einjv2_struct.length;
offset = offsetof(struct einjv2_extension_struct, component_arr);
max_nr_components = (len - offset) /
@@ -540,6 +540,9 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
struct set_error_type_with_address *v5param;
v5param = kmalloc(v5param_size, GFP_KERNEL);
+ if (!v5param)
+ return -ENOMEM;
+
memcpy_fromio(v5param, einj_param, v5param_size);
v5param->type = type;
if (type & ACPI5_VENDOR_BIT) {
@@ -1091,7 +1094,7 @@ err_put_table:
return rc;
}
-static void __exit einj_remove(struct faux_device *fdev)
+static void einj_remove(struct faux_device *fdev)
{
struct apei_exec_context ctx;
@@ -1114,15 +1117,9 @@ static void __exit einj_remove(struct faux_device *fdev)
}
static struct faux_device *einj_dev;
-/*
- * einj_remove() lives in .exit.text. For drivers registered via
- * platform_driver_probe() this is ok because they cannot get unbound at
- * runtime. So mark the driver struct with __refdata to prevent modpost
- * triggering a section mismatch warning.
- */
-static struct faux_device_ops einj_device_ops __refdata = {
+static struct faux_device_ops einj_device_ops = {
.probe = einj_probe,
- .remove = __exit_p(einj_remove),
+ .remove = einj_remove,
};
static int __init einj_init(void)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 75c7db8b156a..7855bbf752b1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2033,7 +2033,7 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
- if (!strstarts(ecdt_ptr->id, "\\")) {
+ if (!strlen(ecdt_ptr->id)) {
/*
* The ECDT table on some MSI notebooks contains invalid data, together
* with an empty ID string ("").
@@ -2042,9 +2042,13 @@ void __init acpi_ec_ecdt_probe(void)
* a "fully qualified reference to the (...) embedded controller device",
* so this string always has to start with a backslash.
*
- * By verifying this we can avoid such faulty ECDT tables in a safe way.
+ * However, some ThinkBook machines have an ECDT table with a valid EC
+ * description but an invalid ID string ("_SB.PC00.LPCB.EC0").
+ *
+ * Because of this, we only check whether the ID string is empty, so
+ * that only the obviously faulty tables are rejected.
*/
- pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
+ pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");
goto out;
}
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
index 318683744ed1..11b1c2828005 100644
--- a/drivers/acpi/pfr_update.c
+++ b/drivers/acpi/pfr_update.c
@@ -329,7 +329,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
if (type == PFRU_CODE_INJECT_TYPE)
return payload_hdr->rt_ver >= cap->code_rt_version;
- return payload_hdr->rt_ver >= cap->drv_rt_version;
+ return payload_hdr->svn_ver >= cap->drv_svn;
}
static void print_update_debug_info(struct pfru_updated_result *result,
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 755003bf3a45..8972446b7162 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -180,7 +180,7 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
- if (!pr || !pr->performance)
+ if (!pr)
continue;
/*
@@ -197,6 +197,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+ if (!pr->performance)
+ continue;
+
ret = acpi_processor_get_platform_limit(pr);
if (ret)
pr_err("Failed to update freq constraint for CPU%d (%d)\n",
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e1c24bbacf64..7a7f88b3fa2b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -689,40 +689,50 @@ MODULE_PARM_DESC(mask_port_map,
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");
-static void ahci_apply_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv, char *mask_s)
+static char *ahci_mask_port_ext;
+module_param_named(mask_port_ext, ahci_mask_port_ext, charp, 0444);
+MODULE_PARM_DESC(mask_port_ext,
+ "32-bits mask to ignore the external/hotplug capability of ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static u32 ahci_port_mask(struct device *dev, char *mask_s)
{
unsigned int mask;
if (kstrtouint(mask_s, 0, &mask)) {
dev_err(dev, "Invalid port map mask\n");
- return;
+ return 0;
}
- hpriv->mask_port_map = mask;
+ return mask;
}
-static void ahci_get_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv)
+static u32 ahci_get_port_mask(struct device *dev, char *mask_p)
{
char *param, *end, *str, *mask_s;
char *name;
+ u32 mask = 0;
- if (!strlen(ahci_mask_port_map))
- return;
+ if (!mask_p || !strlen(mask_p))
+ return 0;
- str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ str = kstrdup(mask_p, GFP_KERNEL);
if (!str)
- return;
+ return 0;
/* Handle single mask case */
if (!strchr(str, '=')) {
- ahci_apply_port_map_mask(dev, hpriv, str);
+ mask = ahci_port_mask(dev, str);
goto free;
}
/*
- * Mask list case: parse the parameter to apply the mask only if
+ * Mask list case: parse the parameter to get the mask only if
* the device name matches.
*/
param = str;
@@ -752,11 +762,13 @@ static void ahci_get_port_map_mask(struct device *dev,
param++;
}
- ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ mask = ahci_port_mask(dev, mask_s);
}
free:
kfree(str);
+
+ return mask;
}
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
@@ -782,8 +794,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
}
/* Handle port map masks passed as module parameter. */
- if (ahci_mask_port_map)
- ahci_get_port_map_mask(&pdev->dev, hpriv);
+ hpriv->mask_port_map =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_map);
+ hpriv->mask_port_ext =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_ext);
ahci_save_initial_config(&pdev->dev, hpriv);
}
@@ -1757,11 +1771,20 @@ static void ahci_mark_external_port(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
- /* mark external ports (hotplug-capable, eSATA) */
+ /*
+ * Mark external ports (hotplug-capable, eSATA), unless we were asked to
+ * ignore this feature.
+ */
tmp = readl(port_mmio + PORT_CMD);
if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
- (tmp & PORT_CMD_HPCP))
+ (tmp & PORT_CMD_HPCP)) {
+ if (hpriv->mask_port_ext & (1U << ap->port_no)) {
+ ata_port_info(ap,
+ "Ignoring external/hotplug capability\n");
+ return;
+ }
ap->pflags |= ATA_PFLAG_EXTERNAL;
+ }
}
static void ahci_update_initial_lpm_policy(struct ata_port *ap)
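The mask_port_map and mask_port_ext parameters share the "<pci_dev>=<mask>,..." syntax parsed above. A stripped-down sketch of that kind of parsing with strsep() and kstrtouint() follows (hypothetical helper, not the driver's implementation; the driver additionally accepts a single bare "<mask>" form):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* @str must be writable; returns 0 when @match is absent or the value is bad */
static u32 example_parse_mask_list(char *str, const char *match)
{
	char *entry, *name;
	u32 mask;

	while ((entry = strsep(&str, ",")) != NULL) {
		name = strsep(&entry, "=");
		if (!name || !entry)
			continue;
		if (strcmp(name, match))
			continue;
		if (kstrtouint(entry, 0, &mask))
			return 0;
		return mask;
	}

	return 0;
}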
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 2c10c8f440d1..293b7fb216b5 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -330,6 +330,7 @@ struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
u32 mask_port_map; /* Mask of valid ports */
+ u32 mask_port_ext; /* Mask of ports ext capability */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 5d5a51a77f5d..6b8844646fcd 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -450,7 +450,6 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
- u32 rc;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
@@ -463,9 +462,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
- rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
-
- return rc;
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
/**
@@ -500,7 +497,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
- u32 rc;
+ int rc;
port_fbs_save = readl(port_mmio + PORT_FBS);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 229429ba5027..495fa096dd65 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1089,6 +1089,7 @@ static struct ata_port_operations ich_pata_ops = {
};
static struct attribute *piix_sidpr_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
NULL
};
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index b335fb7e5cb4..c79abdfcd7a9 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -111,6 +111,7 @@ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
static struct attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
&dev_attr_em_message_type.attr,
&dev_attr_em_message.attr,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 97d9f0488cc1..ff53f5f029b4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4602,7 +4602,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
return AC_ERR_INVALID;
/* set up init dev params taskfile */
- ata_dev_dbg(dev, "init dev params \n");
+ ata_dev_dbg(dev, "init dev params\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_INIT_DEV_PARAMS;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 2946ae6d4b2c..2586e77ebf45 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2075,7 +2075,7 @@ out:
* Check if a link is established. This is a relaxed version of
* ata_phys_link_online() which accounts for the fact that this is potentially
* called after changing the link power management policy, which may not be
- * reflected immediately in the SSTAUS register (e.g., we may still be seeing
+ * reflected immediately in the SStatus register (e.g., we may still be seeing
 * the PHY in partial, slumber or devsleep power management state).
* So check that:
* - A device is still present, that is, DET is 1h (Device presence detected
@@ -2089,8 +2089,13 @@ static bool ata_eh_link_established(struct ata_link *link)
u32 sstatus;
u8 det, ipm;
+ /*
+ * For old IDE/PATA adapters that do not have a valid scr_read method,
+ * or if reading the SStatus register fails, assume that the device is
+ * present. Device probe will determine if that is really the case.
+ */
if (sata_scr_read(link, SCR_STATUS, &sstatus))
- return false;
+ return true;
det = sstatus & 0x0f;
ipm = (sstatus >> 8) & 0x0f;
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 4734465d3b1e..b2817a2995d6 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -900,14 +900,52 @@ static const char *ata_lpm_policy_names[] = {
[ATA_LPM_MIN_POWER] = "min_power",
};
+/*
+ * Check if a port supports link power management.
+ * Must be called with the port locked.
+ */
+static bool ata_scsi_lpm_supported(struct ata_port *ap)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+
+ if (ap->flags & ATA_FLAG_NO_LPM)
+ return false;
+
+ ata_for_each_link(link, ap, EDGE) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (dev->quirks & ATA_QUIRK_NOLPM)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static ssize_t ata_scsi_lpm_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ unsigned long flags;
+ bool supported;
+
+ spin_lock_irqsave(ap->lock, flags);
+ supported = ata_scsi_lpm_supported(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return sysfs_emit(buf, "%d\n", supported);
+}
+DEVICE_ATTR(link_power_management_supported, S_IRUGO,
+ ata_scsi_lpm_supported_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_supported);
+
static ssize_t ata_scsi_lpm_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(device);
struct ata_port *ap = ata_shost_to_port(shost);
- struct ata_link *link;
- struct ata_device *dev;
enum ata_lpm_policy policy;
unsigned long flags;
@@ -924,20 +962,11 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
spin_lock_irqsave(ap->lock, flags);
- if (ap->flags & ATA_FLAG_NO_LPM) {
+ if (!ata_scsi_lpm_supported(ap)) {
count = -EOPNOTSUPP;
goto out_unlock;
}
- ata_for_each_link(link, ap, EDGE) {
- ata_for_each_dev(dev, &ap->link, ENABLED) {
- if (dev->quirks & ATA_QUIRK_NOLPM) {
- count = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
- }
-
ap->target_lpm_policy = policy;
ata_port_schedule_eh(ap);
out_unlock:
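The new attribute lets tools check for LPM support directly instead of probing with a write and interpreting -EOPNOTSUPP. A small userspace sketch reading it (the host number and path are assumptions for the example):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/scsi_host/host0/link_power_management_supported";
	FILE *f = fopen(path, "r");
	int supported = 0;

	if (!f) {
		/* attribute absent: older kernel or non-ATA host */
		printf("unknown\n");
		return 1;
	}
	if (fscanf(f, "%d", &supported) != 1)
		supported = 0;
	fclose(f);

	printf("LPM %ssupported on host0\n", supported ? "" : "not ");
	return 0;
}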
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 27b15176db56..2ded5e476d6e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -859,18 +859,14 @@ static void ata_to_sense_error(u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
};
static const unsigned char stat_table[][4] = {
- /* Must be first because BUSY means no other bits valid */
- {0x80, ABORTED_COMMAND, 0x47, 0x00},
- // Busy, fake parity for now
- {0x40, ILLEGAL_REQUEST, 0x21, 0x04},
- // Device ready, unaligned write command
- {0x20, HARDWARE_ERROR, 0x44, 0x00},
- // Device fault, internal target failure
- {0x08, ABORTED_COMMAND, 0x47, 0x00},
- // Timed out in xfer, fake parity for now
- {0x04, RECOVERED_ERROR, 0x11, 0x00},
- // Recovered ECC error Medium error, recovered
- {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+ /* Busy: must be first because BUSY means no other bits valid */
+ { ATA_BUSY, ABORTED_COMMAND, 0x00, 0x00 },
+ /* Device fault: INTERNAL TARGET FAILURE */
+ { ATA_DF, HARDWARE_ERROR, 0x44, 0x00 },
+ /* Corrected data error */
+ { ATA_CORR, RECOVERED_ERROR, 0x00, 0x00 },
+
+ { 0xFF, 0xFF, 0xFF, 0xFF }, /* END mark */
};
/*
@@ -942,6 +938,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
"missing result TF: can't generate ATA PT sense data\n");
+ if (qc->err_mask)
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
@@ -996,8 +994,8 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
- "missing result TF: can't generate sense data\n");
- return;
+ "Missing result TF: reporting aborted command\n");
+ goto aborted;
}
/* Use ata_to_sense_error() to map status register bits
@@ -1008,13 +1006,15 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
ata_to_sense_error(tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
- } else {
- /* Could not decode error */
- ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
- tf->status, qc->err_mask);
- ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
+
+ /* Could not decode error */
+ ata_dev_warn(dev,
+ "Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n",
+ tf->error, tf->status, qc->err_mask);
+aborted:
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -3904,21 +3904,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
- /* Disable CDL if it is enabled */
- if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
- return 0;
+ /* Disable CDL */
ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
/*
- * Enable CDL if not already enabled. Since this is mutually
- * exclusive with NCQ priority, allow this only if NCQ priority
- * is disabled.
+ * Enable CDL. Since CDL is mutually exclusive with NCQ
+ * priority, allow this only if NCQ priority is disabled.
*/
- if (dev->flags & ATA_DFLAG_CDL_ENABLED)
- return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index f7a933eefe05..9eefdc5df5df 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -758,7 +758,7 @@ static void pata_macio_irq_clear(struct ata_port *ap)
static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
{
- dev_dbg(priv->dev, "Enabling & resetting... \n");
+ dev_dbg(priv->dev, "Enabling & resetting...\n");
if (priv->mediabay)
return;
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index d792ce6d97bf..ae914dcb0c83 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -295,7 +295,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
/* Set the PIO timing registers using value table for 133MHz */
- ata_port_dbg(ap, "Set pio regs... \n");
+ ata_port_dbg(ap, "Set PIO regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0xffff0000;
@@ -308,7 +308,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to pio mode[%u] \n", pio);
+ ata_port_dbg(ap, "Set to PIO mode[%u]\n", pio);
}
/**
@@ -341,7 +341,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
}
- ata_port_dbg(ap, "Set udma regs... \n");
+ ata_port_dbg(ap, "Set UDMA regs...\n");
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0xff000000;
@@ -350,14 +350,14 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to udma mode[%u] \n", udma_mode);
+ ata_port_dbg(ap, "Set to UDMA mode[%u]\n", udma_mode);
} else if ((dma_mode >= XFER_MW_DMA_0) &&
(dma_mode <= XFER_MW_DMA_2)) {
/* Set the MDMA timing registers with value table for 133MHz */
unsigned int mdma_mode = dma_mode & 0x07;
- ata_port_dbg(ap, "Set mdma regs... \n");
+ ata_port_dbg(ap, "Set MDMA regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0x0000ffff;
@@ -366,7 +366,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
- ata_port_dbg(ap, "Set to mdma mode[%u] \n", mdma_mode);
+ ata_port_dbg(ap, "Set to MDMA mode[%u]\n", mdma_mode);
} else {
ata_port_err(ap, "Unknown dma mode [%u] ignored\n", dma_mode);
}
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index eeae160c898d..fa3c76a2b49d 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
return NULL;
}
+static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ struct atmtcp_hdr *hdr;
+
+ if (skb->len < sizeof(struct atmtcp_hdr))
+ return -EINVAL;
+
+ hdr = (struct atmtcp_hdr *)skb->data;
+ if (hdr->length == ATMTCP_HDR_MAGIC)
+ return -EINVAL;
+
+ return 0;
+}
static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
@@ -288,9 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
- if (skb->len < sizeof(struct atmtcp_hdr))
- goto done;
-
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
@@ -347,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = {
static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
+ .pre_send = atmtcp_c_pre_send,
.send = atmtcp_c_send
};
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index dbf5456cd891..2ea6e05e6ec9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -675,7 +675,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
@@ -1330,7 +1330,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d1585f073776..6112d942499b 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -21,6 +21,7 @@
struct regmap_irq_chip_data {
struct mutex lock;
+ struct lock_class_key lock_key;
struct irq_chip irq_chip;
struct regmap *map;
@@ -801,7 +802,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc;
}
- mutex_init(&d->lock);
+ /*
+ * If one regmap-irq is the parent of another then we'll try to lock
+ * the child with the parent already locked; use an explicit
+ * lock_key so that lockdep can tell the two apart.
+ */
+ lockdep_register_key(&d->lock_key);
+ mutex_init_with_key(&d->lock, &d->lock_key);
for (i = 0; i < chip->num_irqs; i++)
d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
@@ -816,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->mask_buf[i],
chip->irq_drv_data);
if (ret)
- goto err_alloc;
+ goto err_mutex;
}
if (chip->mask_base && !chip->handle_mask_sync) {
@@ -827,7 +834,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -838,7 +845,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -855,7 +862,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -879,7 +886,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -901,7 +908,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -910,7 +917,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->status_is_level) {
ret = read_irq_data(d);
if (ret < 0)
- goto err_alloc;
+ goto err_mutex;
memcpy(d->prev_status_buf, d->status_buf,
array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
@@ -918,7 +925,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
if (ret)
- goto err_alloc;
+ goto err_mutex;
ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
irq_flags | IRQF_ONESHOT,
@@ -935,6 +942,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
err_domain:
/* Should really dispose of the domain but... */
+err_mutex:
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
err_alloc:
kfree(d->type_buf);
kfree(d->type_buf_def);
@@ -1027,6 +1037,8 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->config_buf[i]);
kfree(d->config_buf);
}
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
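For reference, a minimal sketch of the per-instance lockdep key pattern introduced above, with illustrative structure names; the teardown order mirrors the new error path (destroy the mutex before unregistering its key):

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct example_chip {
	struct mutex lock;
	struct lock_class_key lock_key;
};

static void example_chip_init(struct example_chip *c)
{
	/* give each dynamically allocated instance its own lock class */
	lockdep_register_key(&c->lock_key);
	mutex_init_with_key(&c->lock, &c->lock_key);
}

static void example_chip_destroy(struct example_chip *c)
{
	mutex_destroy(&c->lock);
	lockdep_unregister_key(&c->lock_key);
}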
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index f021e27644e0..658c7e2ac8bf 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -186,7 +186,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->request = bcma_gpio_request;
chip->free = bcma_gpio_free;
chip->get = bcma_gpio_get_value;
- chip->set_rv = bcma_gpio_set_value;
+ chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->parent = bus->dev;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e21492981f7d..f6d6276974ee 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -380,6 +380,9 @@ enum {
/* this is/was a write request */
__EE_WRITE,
+ /* hand back using mempool_free(e, drbd_buffer_page_pool) */
+ __EE_RELEASE_TO_MEMPOOL,
+
/* this is/was a write same request */
__EE_WRITE_SAME,
@@ -402,6 +405,7 @@ enum {
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
+#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL)
#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
@@ -858,7 +862,6 @@ struct drbd_device {
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
struct list_head done_ee; /* need to send P_WRITE_ACK */
struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
- struct list_head net_ee; /* zero-copy network send in progress */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
@@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;
-/* drbd's page pool, used to buffer data received from the peer,
- * or data requested by the peer.
- *
- * This does not have an emergency reserve.
- *
- * When allocating from this pool, it first takes pages from the pool.
- * Only if the pool is depleted will try to allocate from the system.
- *
- * The assumption is that pages taken from this pool will be processed,
- * and given back, "quickly", and then can be recycled, so we can avoid
- * frequent calls to alloc_page(), and still will be able to make progress even
- * under memory pressure.
- */
-extern struct page *drbd_pp_pool;
-extern spinlock_t drbd_pp_lock;
-extern int drbd_pp_vacant;
-extern wait_queue_head_t drbd_pp_wait;
-
/* We also need a standard (emergency-reserve backed) page pool
* for meta data IO (activity log, bitmap).
* We can keep it global, as long as it is used as "N pages at a time".
@@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait;
*/
#define DRBD_MIN_POOL_PAGES 128
extern mempool_t drbd_md_io_page_pool;
+extern mempool_t drbd_buffer_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
@@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *,
sector_t, unsigned int,
unsigned int,
gfp_t) __must_hold(local);
-extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
- int);
-#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
-#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
@@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page)
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
-{
- struct page *page = peer_req->pages;
- page_chain_for_each(page) {
- if (page_count(page) > 1)
- return 1;
- }
- return 0;
-}
-
static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
struct drbd_resource *resource = device->resource;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 52724b79be30..c73376886e7a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
+mempool_t drbd_buffer_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;
-/* I do not use a standard mempool, because:
- 1) I want to hand out the pre-allocated objects first.
- 2) I want to be able to interrupt sleeping allocation with a signal.
- Note: This is a single linked list, the next pointer is the private
- member of struct page.
- */
-struct page *drbd_pp_pool;
-DEFINE_SPINLOCK(drbd_pp_lock);
-int drbd_pp_vacant;
-wait_queue_head_t drbd_pp_wait;
-
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
@@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
+ bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
struct page *page = peer_req->pages;
unsigned len = peer_req->i.size;
int err;
@@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(peer_device, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0);
+ if (likely(use_sendpage))
+ err = _drbd_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+ else
+ err = _drbd_no_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+
if (err)
return err;
len -= l;
@@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
INIT_LIST_HEAD(&device->sync_ee);
INIT_LIST_HEAD(&device->done_ee);
INIT_LIST_HEAD(&device->read_ee);
- INIT_LIST_HEAD(&device->net_ee);
INIT_LIST_HEAD(&device->resync_reads);
INIT_LIST_HEAD(&device->resync_work.list);
INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
D_ASSERT(device, list_empty(&device->sync_ee));
D_ASSERT(device, list_empty(&device->done_ee));
D_ASSERT(device, list_empty(&device->read_ee));
- D_ASSERT(device, list_empty(&device->net_ee));
D_ASSERT(device, list_empty(&device->resync_reads));
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
static void drbd_destroy_mempools(void)
{
- struct page *page;
-
- while (drbd_pp_pool) {
- page = drbd_pp_pool;
- drbd_pp_pool = (struct page *)page_private(page);
- __free_page(page);
- drbd_pp_vacant--;
- }
-
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
bioset_exit(&drbd_io_bio_set);
bioset_exit(&drbd_md_io_bio_set);
+ mempool_exit(&drbd_buffer_page_pool);
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
@@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
- struct page *page;
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
- int i, ret;
+ int ret;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
+ ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
+ if (ret)
+ goto Enomem;
+
ret = mempool_init_slab_pool(&drbd_request_mempool, number,
drbd_request_cache);
if (ret)
@@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
- for (i = 0; i < number; i++) {
- page = alloc_page(GFP_HIGHUSER);
- if (!page)
- goto Enomem;
- set_page_private(page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = page;
- }
- drbd_pp_vacant = number;
-
return 0;
Enomem:
@@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
drbd_err(device, "%d EEs in done list found!\n", rr);
-
- rr = drbd_free_peer_reqs(device, &device->net_ee);
- if (rr)
- drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
@@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
return err;
}
- /*
- * allocate all necessary structs
- */
- init_waitqueue_head(&drbd_pp_wait);
-
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&drbd_devices);
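A hedged sketch of the page-mempool pattern the conversion switches to (names are illustrative): mempool_init_page_pool() pre-allocates a minimum number of order-0 pages, allocations fall back to that reserve under memory pressure, and pages go back with mempool_free() instead of being threaded onto a private chain:

#include <linux/gfp.h>
#include <linux/mempool.h>

static mempool_t example_page_pool;

static int example_pool_init(unsigned int min_pages)
{
	/* order 0: single pages */
	return mempool_init_page_pool(&example_page_pool, min_pages, 0);
}

static struct page *example_get_page(void)
{
	/* no __GFP_DIRECT_RECLAIM, so this never blocks waiting for reclaim */
	return mempool_alloc(&example_page_pool, __GFP_HIGHMEM | __GFP_NOWARN);
}

static void example_put_page(struct page *page)
{
	mempool_free(page, &example_page_pool);
}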
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 975024cf03c5..caaf2781136d 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -33,6 +33,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
+#include <linux/mempool.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
@@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-/*
- * some helper functions to deal with single linked page lists,
- * page->private being our "next" pointer.
- */
-
-/* If at least n pages are linked at head, get n pages off.
- * Otherwise, don't modify head, and return NULL.
- * Locking is the responsibility of the caller.
- */
-static struct page *page_chain_del(struct page **head, int n)
-{
- struct page *page;
- struct page *tmp;
-
- BUG_ON(!n);
- BUG_ON(!head);
-
- page = *head;
-
- if (!page)
- return NULL;
-
- while (page) {
- tmp = page_chain_next(page);
- if (--n == 0)
- break; /* found sufficient pages */
- if (tmp == NULL)
- /* insufficient pages, don't use any of them. */
- return NULL;
- page = tmp;
- }
-
- /* add end of list marker for the returned list */
- set_page_private(page, 0);
- /* actual return value, and adjustment of head */
- page = *head;
- *head = tmp;
- return page;
-}
-
-/* may be used outside of locks to find the tail of a (usually short)
- * "private" page chain, before adding it back to a global chain head
- * with page_chain_add() under a spinlock. */
-static struct page *page_chain_tail(struct page *page, int *len)
-{
- struct page *tmp;
- int i = 1;
- while ((tmp = page_chain_next(page))) {
- ++i;
- page = tmp;
- }
- if (len)
- *len = i;
- return page;
-}
-
-static int page_chain_free(struct page *page)
-{
- struct page *tmp;
- int i = 0;
- page_chain_for_each_safe(page, tmp) {
- put_page(page);
- ++i;
- }
- return i;
-}
-
-static void page_chain_add(struct page **head,
- struct page *chain_first, struct page *chain_last)
-{
-#if 1
- struct page *tmp;
- tmp = page_chain_tail(chain_first, NULL);
- BUG_ON(tmp != chain_last);
-#endif
-
- /* add chain to head */
- set_page_private(chain_last, (unsigned long)*head);
- *head = chain_first;
-}
-
-static struct page *__drbd_alloc_pages(struct drbd_device *device,
- unsigned int number)
+static struct page *__drbd_alloc_pages(unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
unsigned int i = 0;
- /* Yes, testing drbd_pp_vacant outside the lock is racy.
- * So what. It saves a spin_lock. */
- if (drbd_pp_vacant >= number) {
- spin_lock(&drbd_pp_lock);
- page = page_chain_del(&drbd_pp_pool, number);
- if (page)
- drbd_pp_vacant -= number;
- spin_unlock(&drbd_pp_lock);
- if (page)
- return page;
- }
-
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
for (i = 0; i < number; i++) {
- tmp = alloc_page(GFP_TRY);
+ tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);
if (!tmp)
- break;
+ goto fail;
set_page_private(tmp, (unsigned long)page);
page = tmp;
}
-
- if (i == number)
- return page;
-
- /* Not enough pages immediately available this time.
- * No need to jump around here, drbd_alloc_pages will retry this
- * function "soon". */
- if (page) {
- tmp = page_chain_tail(page, NULL);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
+ return page;
+fail:
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ mempool_free(page, &drbd_buffer_page_pool);
}
return NULL;
}
-static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
- struct list_head *to_be_freed)
-{
- struct drbd_peer_request *peer_req, *tmp;
-
- /* The EEs are always appended to the end of the list. Since
- they are sent in order over the wire, they have to finish
- in order. As soon as we see the first not finished we can
- stop to examine the list... */
-
- list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
- if (drbd_peer_req_has_active_page(peer_req))
- break;
- list_move(&peer_req->w.list, to_be_freed);
- }
-}
-
-static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
-{
- LIST_HEAD(reclaimed);
- struct drbd_peer_request *peer_req, *t;
-
- spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
- spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-}
-
-static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
-{
- struct drbd_peer_device *peer_device;
- int vnr;
-
- rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
- struct drbd_device *device = peer_device->device;
- if (!atomic_read(&device->pp_in_use_by_net))
- continue;
-
- kref_get(&device->kref);
- rcu_read_unlock();
- drbd_reclaim_net_peer_reqs(device);
- kref_put(&device->kref, drbd_destroy_device);
- rcu_read_lock();
- }
- rcu_read_unlock();
-}
-
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @peer_device: DRBD device.
@@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
bool retry)
{
struct drbd_device *device = peer_device->device;
- struct page *page = NULL;
+ struct page *page;
struct net_conf *nc;
- DEFINE_WAIT(wait);
unsigned int mxb;
rcu_read_lock();
@@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
- if (atomic_read(&device->pp_in_use) < mxb)
- page = __drbd_alloc_pages(device, number);
-
- /* Try to keep the fast path fast, but occasionally we need
- * to reclaim the pages we lended to the network stack. */
- if (page && atomic_read(&device->pp_in_use_by_net) > 512)
- drbd_reclaim_net_peer_reqs(device);
-
- while (page == NULL) {
- prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
-
- drbd_reclaim_net_peer_reqs(device);
-
- if (atomic_read(&device->pp_in_use) < mxb) {
- page = __drbd_alloc_pages(device, number);
- if (page)
- break;
- }
-
- if (!retry)
- break;
-
- if (signal_pending(current)) {
- drbd_warn(device, "drbd_alloc_pages interrupted!\n");
- break;
- }
-
- if (schedule_timeout(HZ/10) == 0)
- mxb = UINT_MAX;
- }
- finish_wait(&drbd_pp_wait, &wait);
+ if (atomic_read(&device->pp_in_use) >= mxb)
+ schedule_timeout_interruptible(HZ / 10);
+ page = __drbd_alloc_pages(number);
if (page)
atomic_add(number, &device->pp_in_use);
@@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_device *device, struct page *page)
{
- atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
- int i;
+ struct page *tmp;
+ int i = 0;
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
- i = page_chain_free(page);
- else {
- struct page *tmp;
- tmp = page_chain_tail(page, &i);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
- }
- i = atomic_sub_return(i, a);
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ if (page_count(page) == 1)
+ mempool_free(page, &drbd_buffer_page_pool);
+ else
+ put_page(page);
+ i++;
+ }
+ i = atomic_sub_return(i, &device->pp_in_use);
if (i < 0)
- drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
- is_net ? "pp_in_use_by_net" : "pp_in_use", i);
- wake_up(&drbd_pp_wait);
+ drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
}
/*
@@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
gfpflags_allow_blocking(gfp_mask));
if (!page)
goto fail;
+ if (!mempool_is_saturated(&drbd_buffer_page_pool))
+ peer_req->flags |= EE_RELEASE_TO_MEMPOOL;
}
memset(peer_req, 0, sizeof(*peer_req));
@@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
-void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
- int is_net)
+void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
might_sleep();
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
- drbd_free_pages(device, peer_req->pages, is_net);
+ drbd_free_pages(device, peer_req->pages);
D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
@@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
- int is_net = list == &device->net_ee;
spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
- __drbd_free_peer_req(device, peer_req, is_net);
+ drbd_free_peer_req(device, peer_req);
count++;
}
return count;
@@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
- LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
list_splice_init(&device->done_ee, &work_list);
spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
@@ -1975,7 +1787,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
data_size -= len;
}
kunmap(page);
- drbd_free_pages(peer_device->device, page, 0);
+ drbd_free_pages(peer_device->device, page);
return err;
}
@@ -5224,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
put_ldev(device);
}
- /* tcp_close and release of sendpage pages can be deferred. I don't
- * want to use SO_LINGER, because apparently it can be deferred for
- * more than 20 seconds (longest time I checked).
- *
- * Actually we don't care for exactly when the network stack does its
- * put_page(), but release our reference on these pages right here.
- */
- i = drbd_free_peer_reqs(device, &device->net_ee);
- if (i)
- drbd_info(device, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&device->pp_in_use_by_net);
if (i)
drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
@@ -5980,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi)
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
- conn_reclaim_net_peer_reqs(connection);
-
if (test_and_clear_bit(SEND_PING, &connection->flags)) {
if (drbd_send_ping(connection)) {
drbd_err(connection, "drbd_send_ping has failed\n");
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index a6ea737b3b71..dea3e79d044f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1030,22 +1030,6 @@ out:
return 1;
}
-/* helper */
-static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
-{
- if (drbd_peer_req_has_active_page(peer_req)) {
- /* This might happen if sendpage() has not finished */
- int i = PFN_UP(peer_req->i.size);
- atomic_add(i, &device->pp_in_use_by_net);
- atomic_sub(i, &device->pp_in_use);
- spin_lock_irq(&device->resource->req_lock);
- list_add_tail(&peer_req->w.list, &device->net_ee);
- spin_unlock_irq(&device->resource->req_lock);
- wake_up(&drbd_pp_wait);
- } else
- drbd_free_peer_req(device, peer_req);
-}
-
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
* @w: work object.
@@ -1059,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
@@ -1074,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1120,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev_if_state(device, D_FAILED)) {
@@ -1155,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
/* update resync data with failure */
drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
}
-
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1176,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev(device)) {
@@ -1220,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
if (drbd_ratelimit())
drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
-
- dec_unacked(device);
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block/ack() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1b6ee91f8eb9..053a086d547e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -137,20 +137,35 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
-static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
+static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
loff_t loopsize;
+ int ret;
+
+ if (S_ISBLK(file_inode(file)->i_mode)) {
+ loopsize = i_size_read(file->f_mapping->host);
+ } else {
+ struct kstat stat;
+
+ /*
+ * Get the accurate file size. This provides better results than
+ * cached inode data, particularly for network filesystems where
+ * metadata may be stale.
+ */
+ ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
+ if (ret)
+ return 0;
- /* Compute loopsize in bytes */
- loopsize = i_size_read(file->f_mapping->host);
- if (offset > 0)
- loopsize -= offset;
+ loopsize = stat.size;
+ }
+
+ if (lo->lo_offset > 0)
+ loopsize -= lo->lo_offset;
/* offset is beyond i_size, weird but possible */
if (loopsize < 0)
return 0;
-
- if (sizelimit > 0 && sizelimit < loopsize)
- loopsize = sizelimit;
+ if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
+ loopsize = lo->lo_sizelimit;
/*
* Unfortunately, if we want to do I/O on the device,
* the number of 512-byte sectors has to fit into a sector_t.
@@ -158,11 +173,6 @@ static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
return loopsize >> 9;
}
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
-{
- return get_size(lo->lo_offset, lo->lo_sizelimit, file);
-}
-
/*
* We support direct I/O only if lo_offset is aligned with the logical I/O size
* of backing device, and the logical block size of loop is bigger than that of
@@ -569,7 +579,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
error = -EINVAL;
/* size of the new backing store needs to be the same */
- if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
+ if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file))
goto out_err;
/*
@@ -1063,7 +1073,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
loop_update_dio(lo);
loop_sysfs_init(lo);
- size = get_loop_size(lo, file);
+ size = lo_calculate_size(lo, file);
loop_set_size(lo, size);
/* Order wrt reading lo_state in loop_validate_file(). */
@@ -1255,8 +1265,7 @@ out_unfreeze:
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
if (!err && size_changed) {
- loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
- lo->lo_backing_file);
+ loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file);
loop_set_size(lo, new_size);
}
out_unlock:
@@ -1399,7 +1408,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- size = get_loop_size(lo, lo->lo_backing_file);
+ size = lo_calculate_size(lo, lo->lo_backing_file);
loop_set_size(lo, size);
return 0;
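Condensed sketch of the size query introduced above (illustrative only, with error handling reduced to "treat as size 0"): block devices keep using the cached inode size, while regular files ask the filesystem via vfs_getattr_nosec() so that network filesystems return fresh metadata:

#include <linux/fs.h>
#include <linux/stat.h>

static loff_t example_backing_size(struct file *file)
{
	struct kstat stat;

	if (S_ISBLK(file_inode(file)->i_mode))
		return i_size_read(file->f_mapping->host);

	/* query the filesystem rather than possibly stale cached attributes */
	if (vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0))
		return 0;

	return stat.size;
}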
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6561d2a561fa..67d4a867aec4 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -235,10 +235,11 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
- unsigned int nr_privileged_daemon;
+ bool unprivileged_daemons;
struct mutex cancel_mutex;
bool canceling;
pid_t ublksrv_tgid;
+ struct delayed_work exit_work;
};
/* header of ublk_params */
@@ -1389,7 +1390,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
{
blk_status_t res;
- if (unlikely(ubq->fail_io))
+ if (unlikely(READ_ONCE(ubq->fail_io)))
return BLK_STS_TARGET;
/* With recovery feature enabled, force_abort is set in
@@ -1401,7 +1402,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
* Note: force_abort is guaranteed to be seen because it is set
* before request queue is unqiuesced.
*/
- if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
+ if (ublk_nosrv_should_queue_io(ubq) &&
+ unlikely(READ_ONCE(ubq->force_abort)))
return BLK_STS_IOERR;
if (check_cancel && unlikely(ubq->canceling))
@@ -1550,7 +1552,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
ub->nr_queues_ready = 0;
- ub->nr_privileged_daemon = 0;
+ ub->unprivileged_daemons = false;
ub->ublksrv_tgid = -1;
}
@@ -1594,13 +1596,63 @@ static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
ublk_get_queue(ub, i)->canceling = canceling;
}
-static int ublk_ch_release(struct inode *inode, struct file *filp)
+static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
{
- struct ublk_device *ub = filp->private_data;
+ int i, j;
+
+ if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG)))
+ return false;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ for (j = 0; j < ubq->q_depth; j++) {
+ struct ublk_io *io = &ubq->ios[j];
+ unsigned int refs = refcount_read(&io->ref) +
+ io->task_registered_buffers;
+
+ /*
+ * UBLK_REFCOUNT_INIT or zero means no active
+ * reference
+ */
+ if (refs != UBLK_REFCOUNT_INIT && refs != 0)
+ return true;
+
+ /* reset to zero if the io has no active references */
+ refcount_set(&io->ref, 0);
+ io->task_registered_buffers = 0;
+ }
+ }
+ return false;
+}
+
+static void ublk_ch_release_work_fn(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, exit_work.work);
struct gendisk *disk;
int i;
/*
+ * For zero-copy and auto buffer register modes, I/O references
+ * might not be dropped naturally when the daemon is killed, but
+ * io_uring guarantees that registered bvec kernel buffers are
+ * eventually unregistered when the io_uring context is freed, and
+ * the active references are dropped at that point.
+ *
+ * Wait until the active references are dropped to avoid use-after-free.
+ *
+ * Registered buffers may be unregistered in io_uring's release
+ * handler, so wait by scheduling a work function to avoid a release
+ * dependency between the two files.
+ */
+ if (ublk_check_and_reset_active_ref(ub)) {
+ schedule_delayed_work(&ub->exit_work, 1);
+ return;
+ }
+
+ /*
* disk isn't attached yet, either device isn't live, or it has
 * been removed already, so we need not do anything
*/
@@ -1644,7 +1696,6 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* Transition the device to the nosrv state. What exactly this
* means depends on the recovery flags
*/
- blk_mq_quiesce_queue(disk->queue);
if (ublk_nosrv_should_stop_dev(ub)) {
/*
* Allow any pending/future I/O to pass through quickly
@@ -1652,8 +1703,7 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* waits for all pending I/O to complete
*/
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_get_queue(ub, i)->force_abort = true;
- blk_mq_unquiesce_queue(disk->queue);
+ WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);
ublk_stop_dev_unlocked(ub);
} else {
@@ -1663,9 +1713,8 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
} else {
ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_get_queue(ub, i)->fail_io = true;
+ WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
}
- blk_mq_unquiesce_queue(disk->queue);
}
unlock:
mutex_unlock(&ub->mutex);
@@ -1675,6 +1724,23 @@ unlock:
ublk_reset_ch_dev(ub);
out:
clear_bit(UB_STATE_OPEN, &ub->state);
+
+ /* put the reference grabbed in ublk_ch_release() */
+ ublk_put_device(ub);
+}
+
+static int ublk_ch_release(struct inode *inode, struct file *filp)
+{
+ struct ublk_device *ub = filp->private_data;
+
+ /*
+ * Grab a ublk device reference, so the device won't go away until
+ * the release work function has actually run.
+ */
+ ublk_get_device(ub);
+
+ INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
+ schedule_delayed_work(&ub->exit_work, 0);
return 0;
}
@@ -1980,12 +2046,10 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
__must_hold(&ub->mutex)
{
ubq->nr_io_ready++;
- if (ublk_queue_ready(ubq)) {
+ if (ublk_queue_ready(ubq))
ub->nr_queues_ready++;
-
- if (capable(CAP_SYS_ADMIN))
- ub->nr_privileged_daemon++;
- }
+ if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
+ ub->unprivileged_daemons = true;
if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
/* now we are ready for handling ublk io request */
@@ -2880,8 +2944,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
ublk_apply_params(ub);
- /* don't probe partitions if any one ubq daemon is un-trusted */
- if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+ /* don't probe partitions if any daemon task is untrusted */
+ if (ub->unprivileged_daemons)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
ublk_get_device(ub);
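A generic sketch of the self-re-arming release work used above (structure and helper names are hypothetical): the work keeps rescheduling itself until the busy condition clears, so the file release path itself never has to block:

#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work exit_work;
};

static bool example_dev_busy(struct example_dev *d)
{
	return false;	/* stand-in for the real reference check */
}

static void example_dev_teardown(struct example_dev *d)
{
	/* final cleanup would go here */
}

static void example_exit_work_fn(struct work_struct *work)
{
	struct example_dev *d =
		container_of(work, struct example_dev, exit_work.work);

	if (example_dev_busy(d)) {
		/* still referenced: poll again shortly instead of blocking */
		schedule_delayed_work(&d->exit_work, 1);
		return;
	}

	example_dev_teardown(d);
}

static void example_dev_release(struct example_dev *d)
{
	INIT_DELAYED_WORK(&d->exit_work, example_exit_work_fn);
	schedule_delayed_work(&d->exit_work, 0);
}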
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
index 553b1a713ab9..a423228e201b 100644
--- a/drivers/block/zloop.c
+++ b/drivers/block/zloop.c
@@ -700,6 +700,8 @@ static void zloop_free_disk(struct gendisk *disk)
struct zloop_device *zlo = disk->private_data;
unsigned int i;
+ blk_mq_free_tag_set(&zlo->tag_set);
+
for (i = 0; i < zlo->nr_zones; i++) {
struct zloop_zone *zone = &zlo->zones[i];
@@ -1080,7 +1082,6 @@ static int zloop_ctl_remove(struct zloop_options *opts)
del_gendisk(zlo->disk);
put_disk(zlo->disk);
- blk_mq_free_tag_set(&zlo->tag_set);
pr_info("Removed device %d\n", opts->id);
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 4390fd571dbd..a8c520dc09e1 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -642,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
* WMT command.
*/
err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
- TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
- if (err == -EINTR) {
- bt_dev_err(hdev, "Execution of wmt command interrupted");
- clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
- goto err_free_wc;
- }
+ TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 73a4a325c867..76e7f857fb7d 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -543,10 +543,10 @@ static int ps_setup(struct hci_dev *hdev)
}
if (psdata->wakeup_source) {
- ret = devm_request_irq(&serdev->dev, psdata->irq_handler,
- ps_host_wakeup_irq_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
- dev_name(&serdev->dev), nxpdev);
+ ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler,
+ NULL, ps_host_wakeup_irq_handler,
+ IRQF_ONESHOT,
+ dev_name(&serdev->dev), nxpdev);
if (ret)
bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n");
disable_irq(psdata->irq_handler);
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
index 04b578a0be17..61f1a290ff08 100644
--- a/drivers/cdx/controller/cdx_rpmsg.c
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = rpdev->dst;
- strscpy(chinfo.name, cdx_rpmsg_id_table[0].name,
- strlen(cdx_rpmsg_id_table[0].name));
+ strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name));
cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo);
if (!cdx_mcdi->ept) {
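The strscpy() fix above is the usual pattern: the bound is the destination buffer size, not strlen() of the source, which leaves no room for the terminating NUL and silently drops the last character. A tiny illustrative example:

#include <linux/string.h>

struct example_info {
	char name[32];
};

static void example_set_name(struct example_info *info, const char *src)
{
	/* bounded by the destination, always NUL-terminated */
	strscpy(info->name, src, sizeof(info->name));
}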
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 064944ae9fdc..8e9050f99e9e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4607,10 +4607,10 @@ return_unspecified:
 * The NetFN and Command in the response are not even
* marginally correct.
*/
- dev_warn(intf->si_dev,
- "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
- (msg->data[0] >> 2) | 1, msg->data[1],
- msg->rsp[0] >> 2, msg->rsp[1]);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
goto return_unspecified;
}
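
A minimal sketch of the rate-limited warning used above: a misbehaving BMC can trigger this path on every response, so dev_warn_ratelimited() records the condition without flooding the log. The helper and its arguments are placeholders.

#include <linux/device.h>
#include <linux/dev_printk.h>
#include <linux/types.h>

static void report_bad_response(struct device *dev, u8 expected, u8 got)
{
	/* same message on every bad response, but emitted at a bounded rate */
	dev_warn_ratelimited(dev,
			     "unexpected response: expected %#x, got %#x\n",
			     expected, got);
}
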
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index bb42dfe1c6a8..8b5524069c15 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2108,7 +2108,6 @@ static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
static int __init init_ipmi_si(void)
{
struct smi_info *e, *e2;
- enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
@@ -2190,9 +2189,6 @@ static int __init init_ipmi_si(void)
initialized = true;
mutex_unlock(&smi_infos_lock);
- if (type)
- return 0;
-
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index ab759b492fdd..a013ddbf1466 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1146,14 +1146,8 @@ static struct ipmi_smi_watcher smi_watcher = {
.smi_gone = ipmi_smi_gone
};
-static int action_op(const char *inval, char *outval)
+static int action_op_set_val(const char *inval)
{
- if (outval)
- strcpy(outval, action);
-
- if (!inval)
- return 0;
-
if (strcmp(inval, "reset") == 0)
action_val = WDOG_TIMEOUT_RESET;
else if (strcmp(inval, "none") == 0)
@@ -1164,18 +1158,26 @@ static int action_op(const char *inval, char *outval)
action_val = WDOG_TIMEOUT_POWER_DOWN;
else
return -EINVAL;
- strcpy(action, inval);
return 0;
}
-static int preaction_op(const char *inval, char *outval)
+static int action_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preaction);
+ strcpy(outval, action);
if (!inval)
return 0;
+ rv = action_op_set_val(inval);
+ if (!rv)
+ strcpy(action, inval);
+ return rv;
+}
+static int preaction_op_set_val(const char *inval)
+{
if (strcmp(inval, "pre_none") == 0)
preaction_val = WDOG_PRETIMEOUT_NONE;
else if (strcmp(inval, "pre_smi") == 0)
@@ -1188,18 +1190,26 @@ static int preaction_op(const char *inval, char *outval)
preaction_val = WDOG_PRETIMEOUT_MSG_INT;
else
return -EINVAL;
- strcpy(preaction, inval);
return 0;
}
-static int preop_op(const char *inval, char *outval)
+static int preaction_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preop);
+ strcpy(outval, preaction);
if (!inval)
return 0;
+ rv = preaction_op_set_val(inval);
+ if (!rv)
+ strcpy(preaction, inval);
+ return rv;
+}
+static int preop_op_set_val(const char *inval)
+{
if (strcmp(inval, "preop_none") == 0)
preop_val = WDOG_PREOP_NONE;
else if (strcmp(inval, "preop_panic") == 0)
@@ -1208,7 +1218,22 @@ static int preop_op(const char *inval, char *outval)
preop_val = WDOG_PREOP_GIVE_DATA;
else
return -EINVAL;
- strcpy(preop, inval);
+ return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+ int rv;
+
+ if (outval)
+ strcpy(outval, preop);
+
+ if (!inval)
+ return 0;
+
+ rv = preop_op_set_val(inval);
+ if (!rv)
+ strcpy(preop, inval);
return 0;
}
@@ -1245,18 +1270,18 @@ static int __init ipmi_wdog_init(void)
{
int rv;
- if (action_op(action, NULL)) {
+ if (action_op_set_val(action)) {
action_op("reset", NULL);
pr_info("Unknown action '%s', defaulting to reset\n", action);
}
- if (preaction_op(preaction, NULL)) {
+ if (preaction_op_set_val(preaction)) {
preaction_op("pre_none", NULL);
pr_info("Unknown preaction '%s', defaulting to none\n",
preaction);
}
- if (preop_op(preop, NULL)) {
+ if (preop_op_set_val(preop)) {
preop_op("preop_none", NULL);
pr_info("Unknown preop '%s', defaulting to none\n", preop);
}
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 23b7178522ae..7e2f2b1a1c36 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -1587,6 +1587,9 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
memset(&data[n], 0, (MIN_SAMPLES - n) *
sizeof(unsigned int));
}
+ } else {
+ memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) *
+ sizeof(unsigned int));
}
ret = parse_insn(dev, insns + i, data, file);
if (ret < 0)
@@ -1670,6 +1673,8 @@ static int do_insn_ioctl(struct comedi_device *dev,
memset(&data[insn->n], 0,
(MIN_SAMPLES - insn->n) * sizeof(unsigned int));
}
+ } else {
+ memset(data, 0, n_data * sizeof(unsigned int));
}
ret = parse_insn(dev, insn, data, file);
if (ret < 0)
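
A minimal sketch of the zero-fill added above: when an instruction carries no user data, clear at least a minimum number of samples in the scratch buffer so the handler never parses stale kernel memory. MIN_SAMPLES_SKETCH is a placeholder for comedi's MIN_SAMPLES.

#include <linux/string.h>
#include <linux/minmax.h>

#define MIN_SAMPLES_SKETCH	16	/* placeholder for MIN_SAMPLES */

static void clear_unused_samples(unsigned int *data, unsigned int n)
{
	memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES_SKETCH) *
			sizeof(unsigned int));
}
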
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index f1dc854928c1..c9ebaadc5e82 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -620,11 +620,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int base_chan = (chan < 32) ? 0 : chan;
unsigned int _data[2];
+ unsigned int i;
int ret;
- if (insn->n == 0)
- return 0;
-
memset(_data, 0, sizeof(_data));
memset(&_insn, 0, sizeof(_insn));
_insn.insn = INSN_BITS;
@@ -635,18 +633,21 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- _data[0] = 1U << (chan - base_chan); /* mask */
- _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */
+ _data[0] = 1U << (chan - base_chan); /* mask */
}
+ for (i = 0; i < insn->n; i++) {
+ if (insn->insn == INSN_WRITE)
+ _data[1] = data[i] ? _data[0] : 0; /* bits */
- ret = s->insn_bits(dev, s, &_insn, _data);
- if (ret < 0)
- return ret;
+ ret = s->insn_bits(dev, s, &_insn, _data);
+ if (ret < 0)
+ return ret;
- if (insn->insn == INSN_READ)
- data[0] = (_data[1] >> (chan - base_chan)) & 1;
+ if (insn->insn == INSN_READ)
+ data[i] = (_data[1] >> (chan - base_chan)) & 1;
+ }
- return 1;
+ return insn->n;
}
static int __comedi_device_postconfig_async(struct comedi_device *dev,
diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c
index 0430630e6ebb..b542896fa0e4 100644
--- a/drivers/comedi/drivers/pcl726.c
+++ b/drivers/comedi/drivers/pcl726.c
@@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev,
* Hook up the external trigger source interrupt only if the
* user config option is valid and the board supports interrupts.
*/
- if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (board->irq_mask & (1U << it->options[1]))) {
ret = request_irq(it->options[1], pcl726_interrupt, 0,
dev->board_name, dev);
if (ret == 0) {
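
A minimal sketch of the IRQ option validation above: reject out-of-range values before shifting, so 1U << irq is never evaluated with irq >= 16 and the board's 16-bit irq_mask remains the only authority on valid lines.

#include <linux/types.h>

static bool irq_option_valid(int irq, unsigned int irq_mask)
{
	return irq > 0 && irq < 16 && (irq_mask & (1U << irq));
}
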
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 06a1c7dd081f..f366d35c5840 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2793,6 +2793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
{}
};
#endif
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
index 9ad85fe6fd05..7e1fbf9a091f 100644
--- a/drivers/cpufreq/rcpufreq_dt.rs
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -9,7 +9,6 @@ use kernel::{
cpumask::CpumaskVar,
device::{Core, Device},
error::code::*,
- fmt,
macros::vtable,
module_platform_driver, of, opp, platform,
prelude::*,
@@ -19,7 +18,7 @@ use kernel::{
/// Finds exact supply name from the OF node.
fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
- let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?;
+ let prop_name = CString::try_from_fmt(fmt!("{name}-supply")).ok()?;
dev.fwnode()?
.property_present(&prop_name)
.then(|| CString::try_from_fmt(fmt!("{name}")).ok())
@@ -221,7 +220,7 @@ impl platform::Driver for CPUFreqDTDriver {
module_platform_driver! {
type: CPUFreqDTDriver,
name: "cpufreq-dt",
- author: "Viresh Kumar <viresh.kumar@linaro.org>",
+ authors: ["Viresh Kumar <viresh.kumar@linaro.org>"],
description: "Generic CPUFreq DT driver",
license: "GPL v2",
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 52d5d26fc7c6..b2e3d0b0a116 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns)
static DEFINE_PER_CPU(struct menu_device, menu_devices);
+static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
+{
+ /* Update the repeating-pattern data. */
+ data->intervals[data->interval_ptr++] = interval_us;
+ if (data->interval_ptr >= INTERVALS)
+ data->interval_ptr = 0;
+}
+
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/*
@@ -222,6 +230,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (data->needs_update) {
menu_update(drv, dev);
data->needs_update = 0;
+ } else if (!dev->last_residency_ns) {
+ /*
+ * This happens when the driver rejects the previously selected
+ * idle state and returns an error, so update the recent
+ * intervals table to prevent invalid information from being
+ * used going forward.
+ */
+ menu_update_intervals(data, UINT_MAX);
}
/* Find the shortest expected idle interval. */
@@ -271,20 +287,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return 0;
}
- if (tick_nohz_tick_stopped()) {
- /*
- * If the tick is already stopped, the cost of possible short
- * idle duration misprediction is much higher, because the CPU
- * may be stuck in a shallow idle state for a long time as a
- * result of it. In that case say we might mispredict and use
- * the known time till the closest timer event for the idle
- * state selection.
- */
- if (predicted_ns < TICK_NSEC)
- predicted_ns = data->next_timer_ns;
- } else if (latency_req > predicted_ns) {
- latency_req = predicted_ns;
- }
+ /*
+ * If the tick is already stopped, the cost of possible short idle
+ * duration misprediction is much higher, because the CPU may be stuck
+ * in a shallow idle state for a long time as a result of it. In that
+ * case, say we might mispredict and use the known time till the closest
+ * timer event for the idle state selection.
+ */
+ if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)
+ predicted_ns = data->next_timer_ns;
/*
* Find the idle state with the lowest power while satisfying
@@ -300,13 +311,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx == -1)
idx = i; /* first enabled state */
+ if (s->exit_latency_ns > latency_req)
+ break;
+
if (s->target_residency_ns > predicted_ns) {
/*
* Use a physical idle state, not busy polling, unless
* a timer is going to trigger soon enough.
*/
if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->exit_latency_ns <= latency_req &&
s->target_residency_ns <= data->next_timer_ns) {
predicted_ns = s->target_residency_ns;
idx = i;
@@ -338,8 +351,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return idx;
}
- if (s->exit_latency_ns > latency_req)
- break;
idx = i;
}
@@ -482,10 +493,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
- /* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
- if (data->interval_ptr >= INTERVALS)
- data->interval_ptr = 0;
+ menu_update_intervals(data, ktime_to_us(measured_ns));
}
/**
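
A minimal sketch of the circular interval buffer factored into menu_update_intervals() above: the most recent idle intervals are kept in a fixed-size ring, and UINT_MAX is recorded when the driver rejected the previous state so that sample is treated as "no useful data". The structure is a placeholder, not the governor's struct menu_device.

#define INTERVALS_SKETCH	8	/* placeholder for INTERVALS */

struct intervals_ring {
	unsigned int intervals[INTERVALS_SKETCH];
	int ptr;
};

static void record_interval(struct intervals_ring *r, unsigned int interval_us)
{
	r->intervals[r->ptr++] = interval_us;
	if (r->ptr >= INTERVALS_SKETCH)
		r->ptr = 0;
}
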
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index a1a99ec3f12c..712624cba2b6 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -335,6 +335,63 @@ static int add_or_reset_cxl_resource(struct resource *parent, struct resource *r
return rc;
}
+static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
+{
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct range *hpa = &cxld->hpa_range;
+ resource_size_t size = range_len(hpa);
+ resource_size_t start = hpa->start;
+ resource_size_t cache_size;
+ struct resource res;
+ int nid, rc;
+
+ res = DEFINE_RES(start, size, 0);
+ nid = phys_to_target_node(start);
+
+ rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
+ if (rc)
+ return rc;
+
+ /*
+ * The cache range is expected to be within the CFMWS.
+ * Currently only cache_size == cxl_size is supported, where the
+ * CXL size is half of the total CFMWS window size.
+ */
+ size = size >> 1;
+ if (cache_size && size != cache_size) {
+ dev_warn(&cxld->dev,
+ "Extended Linear Cache size %pa != CXL size %pa. No Support!",
+ &cache_size, &size);
+ return -ENXIO;
+ }
+
+ cxlrd->cache_size = cache_size;
+
+ return 0;
+}
+
+static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
+{
+ int rc;
+
+ rc = cxl_acpi_set_cache_size(cxlrd);
+ if (!rc)
+ return;
+
+ if (rc != -EOPNOTSUPP) {
+ /*
+ * Failing to calculate the extended linear cache size does not
+ * prevent the region from functioning; it only causes 'cxl list'
+ * to show an incorrect region size.
+ */
+ dev_warn(cxlrd->cxlsd.cxld.dev.parent,
+ "Extended linear cache calculation failed rc:%d\n", rc);
+ }
+
+ /* Ignoring return code */
+ cxlrd->cache_size = 0;
+}
+
DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
@@ -394,6 +451,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
ig = CXL_DECODER_MIN_GRANULARITY;
cxld->interleave_granularity = ig;
+ cxl_setup_extended_linear_cache(cxlrd);
+
if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
if (ways != 1 && ways != 3) {
cxims_ctx = (struct cxl_cxims_context) {
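
A minimal sketch of the size check performed by cxl_acpi_set_cache_size() above: the only supported layout is cache_size == CXL size, i.e. the CXL-backed half of the CFMWS window, so anything else is rejected. The helper only shows the arithmetic; the cache size itself comes from the HMAT query.

#include <linux/range.h>
#include <linux/types.h>
#include <linux/errno.h>

static int check_extended_linear_cache(const struct range *hpa,
				       resource_size_t cache_size)
{
	resource_size_t cxl_size = range_len(hpa) / 2;

	if (cache_size && cache_size != cxl_size)
		return -ENXIO;
	return 0;
}
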
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 79e2ef81fde8..5ad8fef210b5 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -15,7 +15,6 @@ cxl_core-y += hdm.o
cxl_core-y += pmu.o
cxl_core-y += cdat.o
cxl_core-y += ras.o
-cxl_core-y += acpi.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
cxl_core-$(CONFIG_CXL_MCE) += mce.o
diff --git a/drivers/cxl/core/acpi.c b/drivers/cxl/core/acpi.c
deleted file mode 100644
index f13b4dae6ac5..000000000000
--- a/drivers/cxl/core/acpi.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
-#include <linux/acpi.h>
-#include "cxl.h"
-#include "core.h"
-
-int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
- int nid, resource_size_t *size)
-{
- return hmat_get_extended_linear_cache_size(backing_res, nid, size);
-}
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 0ccef2f2a26a..c0af645425f4 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -336,7 +336,7 @@ static int match_cxlrd_hb(struct device *dev, void *data)
cxlrd = to_cxl_root_decoder(dev);
cxlsd = &cxlrd->cxlsd;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
for (int i = 0; i < cxlsd->nr_targets; i++) {
if (host_bridge == cxlsd->target[i]->dport_dev)
return 1;
@@ -987,7 +987,7 @@ void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
bool is_root;
int rc;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
struct xarray *usp_xa __free(free_perf_xa) =
kzalloc(sizeof(*usp_xa), GFP_KERNEL);
@@ -1057,7 +1057,7 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
{
struct cxl_dpa_perf *perf;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
perf = cxled_get_dpa_perf(cxled);
if (IS_ERR(perf))
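
A minimal sketch of the scope-based locking these hunks convert to: guard(rwsem_read)() from <linux/cleanup.h>/<linux/rwsem.h> holds the semaphore for the rest of the scope and releases it on every return path, removing explicit up_read() calls. The semaphore and value are placeholders.

#include <linux/rwsem.h>
#include <linux/cleanup.h>

static DECLARE_RWSEM(example_sem);
static int example_value;

static int read_example_value(void)
{
	guard(rwsem_read)(&example_sem);	/* released at function exit */
	return example_value;
}
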
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 29b61828a847..2669f251d677 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -5,6 +5,7 @@
#define __CXL_CORE_H__
#include <cxl/mailbox.h>
+#include <linux/rwsem.h>
extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
@@ -12,6 +13,11 @@ extern const struct device_type cxl_pmu_type;
extern struct attribute_group cxl_base_attribute_group;
+enum cxl_detach_mode {
+ DETACH_ONLY,
+ DETACH_INVALIDATE,
+};
+
#ifdef CONFIG_CXL_REGION
extern struct device_attribute dev_attr_create_pmem_region;
extern struct device_attribute dev_attr_create_ram_region;
@@ -20,7 +26,11 @@ extern struct device_attribute dev_attr_region;
extern const struct device_type cxl_pmem_region_type;
extern const struct device_type cxl_dax_region_type;
extern const struct device_type cxl_region_type;
-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+
+int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode);
+
#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
#define CXL_REGION_TYPE(x) (&cxl_region_type)
#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
@@ -48,8 +58,11 @@ static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
return 0;
}
-static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+static inline int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ int pos, enum cxl_detach_mode mode)
{
+ return 0;
}
static inline int cxl_region_init(void)
{
@@ -80,6 +93,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr);
enum cxl_rcrb {
CXL_RCRB_DOWNSTREAM,
@@ -96,8 +110,20 @@ u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
#define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8)
#define PCI_CAP_EXP_SIZEOF 0x3c
-extern struct rw_semaphore cxl_dpa_rwsem;
-extern struct rw_semaphore cxl_region_rwsem;
+struct cxl_rwsem {
+ /*
+ * All changes to HPA (interleave configuration) occur with this
+ * lock held for write.
+ */
+ struct rw_semaphore region;
+ /*
+ * All changes to a device DPA space occur with this lock held
+ * for write.
+ */
+ struct rw_semaphore dpa;
+};
+
+extern struct cxl_rwsem cxl_rwsem;
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
@@ -120,8 +146,6 @@ int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
int cxl_ras_init(void);
void cxl_ras_exit(void);
int cxl_gpf_port_setup(struct cxl_dport *dport);
-int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
- int nid, resource_size_t *size);
#ifdef CONFIG_CXL_FEATURES
struct cxl_feat_entry *
diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c
index 623aaa4439c4..79994ca9bc9f 100644
--- a/drivers/cxl/core/edac.c
+++ b/drivers/cxl/core/edac.c
@@ -115,10 +115,9 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
flags, min_cycle);
}
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return ret;
cxlr = cxl_ps_ctx->cxlr;
p = &cxlr->params;
@@ -158,10 +157,9 @@ static int cxl_scrub_set_attrbs_region(struct device *dev,
struct cxl_region *cxlr;
int ret, i;
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return ret;
cxlr = cxl_ps_ctx->cxlr;
p = &cxlr->params;
@@ -697,7 +695,7 @@ static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config,
ECS_THRESHOLD_IDX_4096);
break;
default:
- dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n",
+ dev_dbg(dev, "Invalid CXL ECS threshold count(%u) to set\n",
val);
dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n",
ECS_THRESHOLD_256, ECS_THRESHOLD_1024,
@@ -1340,16 +1338,15 @@ cxl_mem_perform_sparing(struct device *dev,
struct cxl_memdev_sparing_in_payload sparing_pi;
struct cxl_event_dram *rec = NULL;
u16 validity_flags = 0;
+ int ret;
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return ret;
- struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_dpa_rwsem);
- if (!dpa_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return ret;
if (!cxl_sparing_ctx->cap_safe_when_in_use) {
/* Memory to repair must be offline */
@@ -1523,7 +1520,7 @@ static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa)
struct cxl_memdev *cxlmd = ctx->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa))
return -EINVAL;
ctx->dpa = dpa;
@@ -1787,16 +1784,15 @@ static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx)
struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
struct cxl_mem_repair_attrbs attrbs = { 0 };
+ int ret;
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return ret;
- struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_dpa_rwsem);
- if (!dpa_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return ret;
if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
/* Memory to repair must be offline */
@@ -1892,7 +1888,7 @@ static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa)
struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa))
return -EINVAL;
cxl_ppr_ctx->dpa = dpa;
@@ -1923,8 +1919,11 @@ static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data,
static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val)
{
struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
- if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR)
+ if (val != EDAC_DO_MEM_REPAIR ||
+ !cxl_resource_contains_addr(&cxlds->dpa_res, cxl_ppr_ctx->dpa))
return -EINVAL;
return cxl_mem_perform_ppr(cxl_ppr_ctx);
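
A minimal sketch of the ACQUIRE()/ACQUIRE_ERR() conversion used above, assuming the conditional-guard helpers this series relies on: the lock is taken interruptibly, the acquisition error is checked once, and the scope-based cleanup drops the lock on every return path. The semaphore and value are placeholders.

#include <linux/rwsem.h>
#include <linux/cleanup.h>

static DECLARE_RWSEM(example_sem);
static int example_value;

static int read_example_value_interruptible(void)
{
	int ret;

	ACQUIRE(rwsem_read_intr, lock)(&example_sem);
	if ((ret = ACQUIRE_ERR(rwsem_read_intr, &lock)))
		return ret;		/* e.g. -EINTR, lock not held */

	return example_value;		/* lock dropped on return */
}
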
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index ab1007495f6b..e9e1d555cec6 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -16,7 +16,10 @@
* for enumerating these registers and capabilities.
*/
-DECLARE_RWSEM(cxl_dpa_rwsem);
+struct cxl_rwsem cxl_rwsem = {
+ .region = __RWSEM_INITIALIZER(cxl_rwsem.region),
+ .dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
+};
static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
int *target_map)
@@ -214,7 +217,7 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
struct resource *p1, *p2;
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
__cxl_dpa_debug(file, p1, 0);
for (p2 = p1->child; p2; p2 = p2->sibling)
@@ -266,7 +269,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
struct resource *res = cxled->dpa_res;
resource_size_t skip_start;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
/* save @skip_start, before @res is released */
skip_start = res->start - cxled->skip;
@@ -281,7 +284,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
static void cxl_dpa_release(void *cxled)
{
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
__cxl_dpa_release(cxled);
}
@@ -293,7 +296,7 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
devm_remove_action(&port->dev, cxl_dpa_release, cxled);
__cxl_dpa_release(cxled);
}
@@ -361,7 +364,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct resource *res;
int rc;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
if (!len) {
dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
@@ -470,7 +473,7 @@ int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
{
struct device *dev = cxlds->dev;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxlds->nr_partitions)
return -EBUSY;
@@ -516,9 +519,8 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct cxl_port *port = cxled_to_port(cxled);
int rc;
- down_write(&cxl_dpa_rwsem);
- rc = __cxl_dpa_reserve(cxled, base, len, skipped);
- up_write(&cxl_dpa_rwsem);
+ scoped_guard(rwsem_write, &cxl_rwsem.dpa)
+ rc = __cxl_dpa_reserve(cxled, base, len, skipped);
if (rc)
return rc;
@@ -529,7 +531,7 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
if (cxled->dpa_res)
return resource_size(cxled->dpa_res);
@@ -540,19 +542,26 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
resource_size_t base = -1;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
if (cxled->dpa_res)
base = cxled->dpa_res->start;
return base;
}
+bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr)
+{
+ struct resource _addr = DEFINE_RES_MEM(addr, 1);
+
+ return resource_contains(res, &_addr);
+}
+
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
struct device *dev = &cxled->cxld.dev;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (!cxled->dpa_res)
return 0;
if (cxled->cxld.region) {
@@ -582,7 +591,7 @@ int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
struct device *dev = &cxled->cxld.dev;
int part;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
return -EBUSY;
@@ -614,7 +623,7 @@ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
struct resource *p, *last;
int part;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxled->cxld.region) {
dev_dbg(dev, "decoder attached to %s\n",
dev_name(&cxled->cxld.region->dev));
@@ -764,46 +773,12 @@ static int cxld_await_commit(void __iomem *hdm, int id)
return -ETIMEDOUT;
}
-static int cxl_decoder_commit(struct cxl_decoder *cxld)
+static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm)
{
- struct cxl_port *port = to_cxl_port(cxld->dev.parent);
- struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- int id = cxld->id, rc;
+ int id = cxld->id;
u64 base, size;
u32 ctrl;
- if (cxld->flags & CXL_DECODER_F_ENABLE)
- return 0;
-
- if (cxl_num_decoders_committed(port) != id) {
- dev_dbg(&port->dev,
- "%s: out of order commit, expected decoder%d.%d\n",
- dev_name(&cxld->dev), port->id,
- cxl_num_decoders_committed(port));
- return -EBUSY;
- }
-
- /*
- * For endpoint decoders hosted on CXL memory devices that
- * support the sanitize operation, make sure sanitize is not in-flight.
- */
- if (is_endpoint_decoder(&cxld->dev)) {
- struct cxl_endpoint_decoder *cxled =
- to_cxl_endpoint_decoder(&cxld->dev);
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_memdev_state *mds =
- to_cxl_memdev_state(cxlmd->cxlds);
-
- if (mds && mds->security.sanitize_active) {
- dev_dbg(&cxlmd->dev,
- "attempted to commit %s during sanitize\n",
- dev_name(&cxld->dev));
- return -EBUSY;
- }
- }
-
- down_read(&cxl_dpa_rwsem);
/* common decoder settings */
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
cxld_set_interleave(cxld, &ctrl);
@@ -837,7 +812,47 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
}
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id, rc;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ if (cxl_num_decoders_committed(port) != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id,
+ cxl_num_decoders_committed(port));
+ return -EBUSY;
+ }
+
+ /*
+ * For endpoint decoders hosted on CXL memory devices that
+ * support the sanitize operation, make sure sanitize is not in-flight.
+ */
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_memdev_state *mds =
+ to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (mds && mds->security.sanitize_active) {
+ dev_dbg(&cxlmd->dev,
+ "attempted to commit %s during sanitize\n",
+ dev_name(&cxld->dev));
+ return -EBUSY;
+ }
+ }
+
+ scoped_guard(rwsem_read, &cxl_rwsem.dpa)
+ setup_hw_decoder(cxld, hdm);
port->commit_end++;
rc = cxld_await_commit(hdm, cxld->id);
@@ -875,7 +890,7 @@ void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
/*
* Once the highest committed decoder is disabled, free any other
@@ -907,7 +922,6 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld)
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
- down_read(&cxl_dpa_rwsem);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
@@ -916,7 +930,6 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld)
writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
cxld->flags &= ~CXL_DECODER_F_ENABLE;
@@ -1025,7 +1038,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
else
cxld->target_type = CXL_DECODER_DEVMEM;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
if (cxld->id != cxl_num_decoders_committed(port)) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 2689e6453c5a..fa6dd0c94656 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -899,6 +899,10 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
return;
}
+ if (event_type == CXL_CPER_EVENT_MEM_SPARING) {
+ trace_cxl_memory_sparing(cxlmd, type, &evt->mem_sparing);
+ return;
+ }
if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
@@ -909,8 +913,8 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
* translations. Take topology mutation locks and lookup
* { HPA, REGION } from { DPA, MEMDEV } in the event record.
*/
- guard(rwsem_read)(&cxl_region_rwsem);
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
@@ -926,12 +930,30 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");
+ if (evt->gen_media.media_hdr.descriptor &
+ CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ WARN_ON_ONCE((evt->gen_media.media_hdr.type &
+ CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+ !get_unaligned_le24(evt->gen_media.cme_count));
+ else
+ WARN_ON_ONCE(evt->gen_media.media_hdr.type &
+ CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
+
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
hpa_alias, &evt->gen_media);
} else if (event_type == CXL_CPER_EVENT_DRAM) {
if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");
+ if (evt->dram.media_hdr.descriptor &
+ CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ WARN_ON_ONCE((evt->dram.media_hdr.type &
+ CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+ !get_unaligned_le24(evt->dram.cvme_count));
+ else
+ WARN_ON_ONCE(evt->dram.media_hdr.type &
+ CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
+
trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
&evt->dram);
}
@@ -952,6 +974,8 @@ static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
ev_type = CXL_CPER_EVENT_DRAM;
else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
ev_type = CXL_CPER_EVENT_MEM_MODULE;
+ else if (uuid_equal(uuid, &CXL_EVENT_MEM_SPARING_UUID))
+ ev_type = CXL_CPER_EVENT_MEM_SPARING;
cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}
@@ -1265,7 +1289,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
/* synchronize with cxl_mem_probe() and decoder write operations */
guard(device)(&cxlmd->dev);
endpoint = cxlmd->endpoint;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
/*
* Require an endpoint to be safe otherwise the driver can not
* be sure that the device is unmapped.
@@ -1401,8 +1425,8 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
int nr_records = 0;
int rc;
- rc = mutex_lock_interruptible(&mds->poison.lock);
- if (rc)
+ ACQUIRE(mutex_intr, lock)(&mds->poison.mutex);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
return rc;
po = mds->poison.list_out;
@@ -1437,7 +1461,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
}
} while (po->flags & CXL_POISON_FLAG_MORE);
- mutex_unlock(&mds->poison.lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
@@ -1473,7 +1496,7 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds)
return rc;
}
- mutex_init(&mds->poison.lock);
+ mutex_init(&mds->poison.mutex);
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");
diff --git a/drivers/cxl/core/mce.h b/drivers/cxl/core/mce.h
index ace73424eeb6..ca272e8db6c7 100644
--- a/drivers/cxl/core/mce.h
+++ b/drivers/cxl/core/mce.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_CXL_MCE
int devm_cxl_register_mce_notifier(struct device *dev,
- struct notifier_block *mce_notifer);
+ struct notifier_block *mce_notifier);
#else
static inline int
devm_cxl_register_mce_notifier(struct device *dev,
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index f88a13adf7fa..c569e00a511f 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -232,15 +232,13 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
if (!port || !is_cxl_endpoint(port))
return -EINVAL;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
return rc;
- }
if (cxl_num_decoders_committed(port) == 0) {
/* No regions mapped to this memdev */
@@ -249,8 +247,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
/* Regions mapped, collect poison by endpoint */
rc = cxl_get_poison_by_endpoint(port);
}
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
return rc;
}
@@ -267,7 +263,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
dev_dbg(cxlds->dev, "device has no dpa resource\n");
return -EINVAL;
}
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) {
dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
dpa, &cxlds->dpa_res);
return -EINVAL;
@@ -292,19 +288,17 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
return rc;
- }
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
- goto out;
+ return rc;
inject.address = cpu_to_le64(dpa);
mbox_cmd = (struct cxl_mbox_cmd) {
@@ -314,7 +308,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
};
rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
- goto out;
+ return rc;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
@@ -327,11 +321,8 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
.length = cpu_to_le32(1),
};
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
-out:
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
@@ -347,19 +338,17 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
return rc;
- }
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
- goto out;
+ return rc;
/*
* In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
@@ -378,7 +367,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
- goto out;
+ return rc;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
@@ -391,11 +380,8 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
.length = cpu_to_le32(1),
};
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
-out:
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index eb46c6764d20..29197376b18e 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -30,18 +30,12 @@
* instantiated by the core.
*/
-/*
- * All changes to the interleave configuration occur with this lock held
- * for write.
- */
-DECLARE_RWSEM(cxl_region_rwsem);
-
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
int cxl_num_decoders_committed(struct cxl_port *port)
{
- lockdep_assert_held(&cxl_region_rwsem);
+ lockdep_assert_held(&cxl_rwsem.region);
return port->commit_end + 1;
}
@@ -176,7 +170,7 @@ static ssize_t target_list_show(struct device *dev,
ssize_t offset;
int rc;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
rc = emit_target_list(cxlsd, buf);
if (rc < 0)
return rc;
@@ -196,7 +190,7 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- /* without @cxl_dpa_rwsem, make sure @part is not reloaded */
+ /* without @cxl_rwsem.dpa, make sure @part is not reloaded */
int part = READ_ONCE(cxled->part);
const char *desc;
@@ -235,7 +229,7 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);
@@ -560,7 +554,7 @@ static ssize_t decoders_committed_show(struct device *dev,
{
struct cxl_port *port = to_cxl_port(dev);
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
}
@@ -1722,7 +1716,7 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
if (xa_empty(&port->dports))
return -EINVAL;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
struct cxl_dport *dport = find_dport(port, target_map[i]);
@@ -2001,12 +1995,9 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL");
static void cxld_unregister(void *dev)
{
- struct cxl_endpoint_decoder *cxled;
-
- if (is_endpoint_decoder(dev)) {
- cxled = to_cxl_endpoint_decoder(dev);
- cxl_decoder_kill_region(cxled);
- }
+ if (is_endpoint_decoder(dev))
+ cxl_decoder_detach(NULL, to_cxl_endpoint_decoder(dev), -1,
+ DETACH_INVALIDATE);
device_unregister(dev);
}
@@ -2293,7 +2284,7 @@ static const struct attribute_group *cxl_bus_attribute_groups[] = {
NULL,
};
-struct bus_type cxl_bus_type = {
+const struct bus_type cxl_bus_type = {
.name = "cxl",
.uevent = cxl_bus_uevent,
.match = cxl_bus_match,
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index ba42259c3701..71cc42d05248 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -141,16 +141,12 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
struct cxl_region_params *p = &cxlr->params;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
if (cxlr->mode != CXL_PARTMODE_PMEM)
- rc = sysfs_emit(buf, "\n");
- else
- rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%pUb\n", &p->uuid);
}
static int is_dup(struct device *match, void *data)
@@ -162,7 +158,7 @@ static int is_dup(struct device *match, void *data)
if (!is_cxl_region(match))
return 0;
- lockdep_assert_held(&cxl_region_rwsem);
+ lockdep_assert_held(&cxl_rwsem.region);
cxlr = to_cxl_region(match);
p = &cxlr->params;
@@ -192,27 +188,22 @@ static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
if (uuid_is_null(&temp))
return -EINVAL;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &region_rwsem)))
return rc;
if (uuid_equal(&p->uuid, &temp))
- goto out;
+ return len;
- rc = -EBUSY;
if (p->state >= CXL_CONFIG_ACTIVE)
- goto out;
+ return -EBUSY;
rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
if (rc < 0)
- goto out;
+ return rc;
uuid_copy(&p->uuid, &temp);
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
- return rc;
return len;
}
static DEVICE_ATTR_RW(uuid);
@@ -349,33 +340,40 @@ err:
return rc;
}
-static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
+static int queue_reset(struct cxl_region *cxlr)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- bool commit;
- ssize_t rc;
+ int rc;
- rc = kstrtobool(buf, &commit);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ /* Already in the requested state? */
+ if (p->state < CXL_CONFIG_COMMIT)
+ return 0;
+
+ p->state = CXL_CONFIG_RESET_PENDING;
+
+ return 0;
+}
+
+static int __commit(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
/* Already in the requested state? */
- if (commit && p->state >= CXL_CONFIG_COMMIT)
- goto out;
- if (!commit && p->state < CXL_CONFIG_COMMIT)
- goto out;
+ if (p->state >= CXL_CONFIG_COMMIT)
+ return 0;
/* Not ready to commit? */
- if (commit && p->state < CXL_CONFIG_ACTIVE) {
- rc = -ENXIO;
- goto out;
- }
+ if (p->state < CXL_CONFIG_ACTIVE)
+ return -ENXIO;
/*
* Invalidate caches before region setup to drop any speculative
@@ -383,33 +381,61 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
*/
rc = cxl_region_invalidate_memregion(cxlr);
if (rc)
- goto out;
+ return rc;
- if (commit) {
- rc = cxl_region_decode_commit(cxlr);
- if (rc == 0)
- p->state = CXL_CONFIG_COMMIT;
- } else {
- p->state = CXL_CONFIG_RESET_PENDING;
- up_write(&cxl_region_rwsem);
- device_release_driver(&cxlr->dev);
- down_write(&cxl_region_rwsem);
+ rc = cxl_region_decode_commit(cxlr);
+ if (rc)
+ return rc;
- /*
- * The lock was dropped, so need to revalidate that the reset is
- * still pending.
- */
- if (p->state == CXL_CONFIG_RESET_PENDING) {
- cxl_region_decode_reset(cxlr, p->interleave_ways);
- p->state = CXL_CONFIG_ACTIVE;
- }
- }
+ p->state = CXL_CONFIG_COMMIT;
-out:
- up_write(&cxl_region_rwsem);
+ return 0;
+}
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool commit;
+ ssize_t rc;
+
+ rc = kstrtobool(buf, &commit);
if (rc)
return rc;
+
+ if (commit) {
+ rc = __commit(cxlr);
+ if (rc)
+ return rc;
+ return len;
+ }
+
+ rc = queue_reset(cxlr);
+ if (rc)
+ return rc;
+
+ /*
+ * Unmap the region and depend on the reset-pending state to ensure
+ * it does not go active again until after the reset
+ */
+ device_release_driver(&cxlr->dev);
+
+ /*
+ * With the reset pending, take cxl_rwsem.region unconditionally
+ * to ensure the reset gets handled before returning.
+ */
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ /*
+ * Revalidate that the reset is still pending in case another
+ * thread already handled this reset.
+ */
+ if (p->state == CXL_CONFIG_RESET_PENDING) {
+ cxl_region_decode_reset(cxlr, p->interleave_ways);
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
return len;
}
@@ -420,13 +446,10 @@ static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
struct cxl_region_params *p = &cxlr->params;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
}
static DEVICE_ATTR_RW(commit);
@@ -450,15 +473,12 @@ static ssize_t interleave_ways_show(struct device *dev,
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->interleave_ways);
}
static const struct attribute_group *get_cxl_region_target_group(void);
@@ -493,23 +513,21 @@ static ssize_t interleave_ways_store(struct device *dev,
return -EINVAL;
}
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
- rc = -EBUSY;
- goto out;
- }
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
save = p->interleave_ways;
p->interleave_ways = val;
rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
- if (rc)
+ if (rc) {
p->interleave_ways = save;
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
return rc;
+ }
+
return len;
}
static DEVICE_ATTR_RW(interleave_ways);
@@ -520,15 +538,12 @@ static ssize_t interleave_granularity_show(struct device *dev,
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->interleave_granularity);
}
static ssize_t interleave_granularity_store(struct device *dev,
@@ -561,19 +576,15 @@ static ssize_t interleave_granularity_store(struct device *dev,
if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
return -EINVAL;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
- rc = -EBUSY;
- goto out;
- }
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
p->interleave_granularity = val;
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
- return rc;
+
return len;
}
static DEVICE_ATTR_RW(interleave_granularity);
@@ -584,17 +595,15 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
u64 resource = -1ULL;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
+
if (p->res)
resource = p->res->start;
- rc = sysfs_emit(buf, "%#llx\n", resource);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%#llx\n", resource);
}
static DEVICE_ATTR_RO(resource);
@@ -622,7 +631,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
struct resource *res;
u64 remainder = 0;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
/* Nothing to do... */
if (p->res && resource_size(p->res) == size)
@@ -664,7 +673,7 @@ static void cxl_region_iomem_release(struct cxl_region *cxlr)
struct cxl_region_params *p = &cxlr->params;
if (device_is_registered(&cxlr->dev))
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
if (p->res) {
/*
* Autodiscovered regions may not have been able to insert their
@@ -681,7 +690,7 @@ static int free_hpa(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
if (!p->res)
return 0;
@@ -705,15 +714,14 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr,
if (rc)
return rc;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
if (val)
rc = alloc_hpa(cxlr, val);
else
rc = free_hpa(cxlr);
- up_write(&cxl_region_rwsem);
if (rc)
return rc;
@@ -729,15 +737,12 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
u64 size = 0;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (p->res)
size = resource_size(p->res);
- rc = sysfs_emit(buf, "%#llx\n", size);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%#llx\n", size);
}
static DEVICE_ATTR_RW(size);
@@ -763,26 +768,20 @@ static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
struct cxl_endpoint_decoder *cxled;
int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (pos >= p->interleave_ways) {
dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
p->interleave_ways);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
cxled = p->targets[pos];
if (!cxled)
- rc = sysfs_emit(buf, "\n");
- else
- rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
-out:
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
}
static int check_commit_order(struct device *dev, void *data)
@@ -897,7 +896,7 @@ cxl_port_pick_region_decoder(struct cxl_port *port,
/*
* This decoder is pinned registered as long as the endpoint decoder is
* registered, and endpoint decoder unregistration holds the
- * cxl_region_rwsem over unregister events, so no need to hold on to
+ * cxl_rwsem.region over unregister events, so no need to hold on to
* this extra reference.
*/
put_device(dev);
@@ -1088,7 +1087,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
unsigned long index;
int rc = -EBUSY;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
cxl_rr = cxl_rr_load(port, cxlr);
if (cxl_rr) {
@@ -1198,7 +1197,7 @@ static void cxl_port_detach_region(struct cxl_port *port,
struct cxl_region_ref *cxl_rr;
struct cxl_ep *ep = NULL;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
cxl_rr = cxl_rr_load(port, cxlr);
if (!cxl_rr)
@@ -2094,27 +2093,43 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return 0;
}
-static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+static struct cxl_region *
+__cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode)
{
- struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
- struct cxl_region *cxlr = cxled->cxld.region;
struct cxl_region_params *p;
- int rc = 0;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
- if (!cxlr)
- return 0;
+ if (!cxled) {
+ p = &cxlr->params;
- p = &cxlr->params;
- get_device(&cxlr->dev);
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n",
+ pos, p->interleave_ways);
+ return NULL;
+ }
+
+ if (!p->targets[pos])
+ return NULL;
+ cxled = p->targets[pos];
+ } else {
+ cxlr = cxled->cxld.region;
+ if (!cxlr)
+ return NULL;
+ p = &cxlr->params;
+ }
+
+ if (mode == DETACH_INVALIDATE)
+ cxled->part = -1;
if (p->state > CXL_CONFIG_ACTIVE) {
cxl_region_decode_reset(cxlr, p->interleave_ways);
p->state = CXL_CONFIG_ACTIVE;
}
- for (iter = ep_port; !is_cxl_root(iter);
+ for (struct cxl_port *iter = cxled_to_port(cxled); !is_cxl_root(iter);
iter = to_cxl_port(iter->dev.parent))
cxl_port_detach_region(iter, cxlr, cxled);
@@ -2125,7 +2140,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
cxled->pos);
- goto out;
+ return NULL;
}
if (p->state == CXL_CONFIG_ACTIVE) {
@@ -2139,74 +2154,79 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
.end = -1,
};
- /* notify the region driver that one of its targets has departed */
- up_write(&cxl_region_rwsem);
- device_release_driver(&cxlr->dev);
- down_write(&cxl_region_rwsem);
-out:
- put_device(&cxlr->dev);
- return rc;
+ get_device(&cxlr->dev);
+ return cxlr;
}
-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+/*
+ * Clean up a decoder's interest in a region. There are 2 cases to
+ * handle: removing an unknown @cxled from a known position in a region
+ * (detach_target()) or removing a known @cxled from an unknown @cxlr
+ * (cxld_unregister()).
+ *
+ * When the detachment finds a region, release the region driver.
+ */
+int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode)
{
- down_write(&cxl_region_rwsem);
- cxled->part = -1;
- cxl_region_detach(cxled);
- up_write(&cxl_region_rwsem);
+ struct cxl_region *detach;
+
+ /* when the decoder is being destroyed, lock unconditionally */
+ if (mode == DETACH_INVALIDATE) {
+ guard(rwsem_write)(&cxl_rwsem.region);
+ detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+ } else {
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+ return rc;
+ detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+ }
+
+ if (detach) {
+ device_release_driver(&detach->dev);
+ put_device(&detach->dev);
+ }
+ return 0;
+}
+
+static int __attach_target(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ unsigned int state)
+{
+ int rc;
+
+ if (state == TASK_INTERRUPTIBLE) {
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+ return rc;
+ guard(rwsem_read)(&cxl_rwsem.dpa);
+ return cxl_region_attach(cxlr, cxled, pos);
+ }
+ guard(rwsem_write)(&cxl_rwsem.region);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
+ return cxl_region_attach(cxlr, cxled, pos);
}
static int attach_target(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled, int pos,
unsigned int state)
{
- int rc = 0;
+ int rc = __attach_target(cxlr, cxled, pos, state);
- if (state == TASK_INTERRUPTIBLE)
- rc = down_write_killable(&cxl_region_rwsem);
- else
- down_write(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- down_read(&cxl_dpa_rwsem);
- rc = cxl_region_attach(cxlr, cxled, pos);
- up_read(&cxl_dpa_rwsem);
- up_write(&cxl_region_rwsem);
-
- if (rc)
- dev_warn(cxled->cxld.dev.parent,
- "failed to attach %s to %s: %d\n",
- dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
+ if (rc == 0)
+ return 0;
+ dev_warn(cxled->cxld.dev.parent, "failed to attach %s to %s: %d\n",
+ dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
return rc;
}
static int detach_target(struct cxl_region *cxlr, int pos)
{
- struct cxl_region_params *p = &cxlr->params;
- int rc;
-
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- if (pos >= p->interleave_ways) {
- dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
- p->interleave_ways);
- rc = -ENXIO;
- goto out;
- }
-
- if (!p->targets[pos]) {
- rc = 0;
- goto out;
- }
-
- rc = cxl_region_detach(p->targets[pos]);
-out:
- up_write(&cxl_region_rwsem);
- return rc;
+ return cxl_decoder_detach(cxlr, NULL, pos, DETACH_ONLY);
}
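For readers tracking the locking conversion in this hunk: the open-coded down_write_killable()/up_write() pairs give way to scope-based guards, so every return path drops cxl_rwsem.region automatically. A minimal sketch of the pattern, assuming ACQUIRE()/ACQUIRE_ERR() and guard() behave as used above; the function and semaphore names below are illustrative and not part of the patch:

/* Sketch of the conditional-lock pattern used by __cxl_decoder_detach() callers. */
static int example_killable_update(struct rw_semaphore *sem)
{
	int rc;

	ACQUIRE(rwsem_write_kill, lock)(sem);
	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
		return rc;	/* interrupted before the lock was taken */

	/* ... mutate state protected by @sem; released on scope exit ... */
	return 0;
}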
static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
@@ -2460,7 +2480,7 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
return NOTIFY_DONE;
/*
- * No need to hold cxl_region_rwsem; region parameters are stable
+ * No need to hold cxl_rwsem.region; region parameters are stable
* within the cxl_region driver.
*/
region_nid = phys_to_target_node(cxlr->params.res->start);
@@ -2483,7 +2503,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
int region_nid;
/*
- * No need to hold cxl_region_rwsem; region parameters are stable
+ * No need to hold cxl_rwsem.region; region parameters are stable
* within the cxl_region driver.
*/
region_nid = phys_to_target_node(cxlr->params.res->start);
@@ -2632,17 +2652,13 @@ static ssize_t region_show(struct device *dev, struct device_attribute *attr,
struct cxl_decoder *cxld = to_cxl_decoder(dev);
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (cxld->region)
- rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
- else
- rc = sysfs_emit(buf, "\n");
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+ return sysfs_emit(buf, "\n");
}
DEVICE_ATTR_RO(region);
@@ -2847,7 +2863,7 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
return 0;
- if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+ if (!cxl_resource_contains_addr(cxled->dpa_res, dpa))
return 0;
/*
@@ -2959,7 +2975,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
if (cxlrd->hpa_to_spa)
hpa = cxlrd->hpa_to_spa(cxlrd, hpa);
- if (hpa < p->res->start || hpa > p->res->end) {
+ if (!cxl_resource_contains_addr(p->res, hpa)) {
dev_dbg(&cxlr->dev,
"Addr trans fail: hpa 0x%llx not in region\n", hpa);
return ULLONG_MAX;
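The open-coded range checks in the hunks above are folded into cxl_resource_contains_addr(). Its definition is not part of this diff; the assumed semantics are an inclusive bounds test along these lines:

/* Assumed shape of the helper used above (not shown in this diff). */
static inline bool cxl_resource_contains_addr(const struct resource *res, u64 addr)
{
	return addr >= res->start && addr <= res->end;
}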
@@ -2981,7 +2997,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
struct device *dev;
int i;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->state != CXL_CONFIG_COMMIT)
return -ENXIO;
@@ -2993,7 +3009,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
cxlr_pmem->hpa_range.start = p->res->start;
cxlr_pmem->hpa_range.end = p->res->end;
- /* Snapshot the region configuration underneath the cxl_region_rwsem */
+ /* Snapshot the region configuration underneath the cxl_rwsem.region */
cxlr_pmem->nr_mappings = p->nr_targets;
for (i = 0; i < p->nr_targets; i++) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
@@ -3070,7 +3086,7 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
struct cxl_dax_region *cxlr_dax;
struct device *dev;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->state != CXL_CONFIG_COMMIT)
return ERR_PTR(-ENXIO);
@@ -3270,7 +3286,7 @@ static int match_region_by_range(struct device *dev, const void *data)
cxlr = to_cxl_region(dev);
p = &cxlr->params;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->res && p->res->start == r->start && p->res->end == r->end)
return 1;
@@ -3282,15 +3298,10 @@ static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_region_params *p = &cxlr->params;
- int nid = phys_to_target_node(res->start);
resource_size_t size = resource_size(res);
resource_size_t cache_size, start;
- int rc;
-
- rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size);
- if (rc)
- return rc;
+ cache_size = cxlrd->cache_size;
if (!cache_size)
return 0;
@@ -3330,7 +3341,7 @@ static int __construct_region(struct cxl_region *cxlr,
struct resource *res;
int rc;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
p = &cxlr->params;
if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_err(cxlmd->dev.parent,
@@ -3466,10 +3477,10 @@ int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
- down_read(&cxl_region_rwsem);
- p = &cxlr->params;
- attach = p->state == CXL_CONFIG_COMMIT;
- up_read(&cxl_region_rwsem);
+ scoped_guard(rwsem_read, &cxl_rwsem.region) {
+ p = &cxlr->params;
+ attach = p->state == CXL_CONFIG_COMMIT;
+ }
if (attach) {
/*
@@ -3494,12 +3505,12 @@ u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa)
if (!endpoint)
return ~0ULL;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
xa_for_each(&endpoint->regions, index, iter) {
struct cxl_region_params *p = &iter->region->params;
- if (p->res->start <= spa && spa <= p->res->end) {
+ if (cxl_resource_contains_addr(p->res, spa)) {
if (!p->cache_size)
return ~0ULL;
@@ -3531,40 +3542,45 @@ static void shutdown_notifiers(void *_cxlr)
unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
}
-static int cxl_region_probe(struct device *dev)
+static int cxl_region_can_probe(struct cxl_region *cxlr)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc) {
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) {
dev_dbg(&cxlr->dev, "probe interrupted\n");
return rc;
}
if (p->state < CXL_CONFIG_COMMIT) {
dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
dev_err(&cxlr->dev,
"failed to activate, re-commit region and retry\n");
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
+ return 0;
+}
+
+static int cxl_region_probe(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ rc = cxl_region_can_probe(cxlr);
+ if (rc)
+ return rc;
+
/*
* From this point on any path that changes the region's state away from
* CXL_CONFIG_COMMIT is also responsible for releasing the driver.
*/
-out:
- up_read(&cxl_region_rwsem);
-
- if (rc)
- return rc;
cxlr->node_notifier.notifier_call = cxl_region_perf_attrs_callback;
cxlr->node_notifier.priority = CXL_CALLBACK_PRI;
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 25ebfbc1616c..a53ec4798b12 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -214,12 +214,16 @@ TRACE_EVENT(cxl_overflow,
#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4)
#define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5)
#define CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID BIT(6)
+#define CXL_EVENT_RECORD_FLAG_LD_ID_VALID BIT(7)
+#define CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID BIT(8)
#define show_hdr_flags(flags) __print_flags(flags, " | ", \
{ CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
{ CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
{ CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" }, \
- { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" } \
+ { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" }, \
+ { CXL_EVENT_RECORD_FLAG_LD_ID_VALID, "LD_ID_VALID" }, \
+ { CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID, "HEAD_ID_VALID" } \
)
/*
@@ -247,7 +251,9 @@ TRACE_EVENT(cxl_overflow,
__field(u64, hdr_timestamp) \
__field(u8, hdr_length) \
__field(u8, hdr_maint_op_class) \
- __field(u8, hdr_maint_op_sub_class)
+ __field(u8, hdr_maint_op_sub_class) \
+ __field(u16, hdr_ld_id) \
+ __field(u8, hdr_head_id)
#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \
__assign_str(memdev); \
@@ -260,18 +266,22 @@ TRACE_EVENT(cxl_overflow,
__entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \
__entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \
__entry->hdr_maint_op_class = (hdr).maint_op_class; \
- __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class
+ __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class; \
+ __entry->hdr_ld_id = le16_to_cpu((hdr).ld_id); \
+ __entry->hdr_head_id = (hdr).head_id
#define CXL_EVT_TP_printk(fmt, ...) \
TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
"len=%d flags='%s' handle=%x related_handle=%x " \
- "maint_op_class=%u maint_op_sub_class=%u : " fmt, \
+ "maint_op_class=%u maint_op_sub_class=%u " \
+ "ld_id=%x head_id=%x : " fmt, \
__get_str(memdev), __get_str(host), __entry->serial, \
cxl_event_log_type_str(__entry->log), \
__entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\
show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
__entry->hdr_related_handle, __entry->hdr_maint_op_class, \
__entry->hdr_maint_op_sub_class, \
+ __entry->hdr_ld_id, __entry->hdr_head_id, \
##__VA_ARGS__)
TRACE_EVENT(cxl_generic_event,
@@ -496,7 +506,10 @@ TRACE_EVENT(cxl_general_media,
uuid_copy(&__entry->region_uuid, &uuid_null);
}
__entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
- __entry->cme_count = get_unaligned_le24(rec->cme_count);
+ if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ __entry->cme_count = get_unaligned_le24(rec->cme_count);
+ else
+ __entry->cme_count = 0;
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
@@ -648,7 +661,10 @@ TRACE_EVENT(cxl_dram,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->sub_channel = rec->sub_channel;
__entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
- __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
+ if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
+ else
+ __entry->cvme_count = 0;
),
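In the two hunks above, the 24-bit corrected-error counters are decoded only when the Threshold Event bit is set in the media event descriptor, and report 0 otherwise. For reference, get_unaligned_le24() assembles the three little-endian bytes as below; this standalone helper is illustrative, not part of the patch:

/* b[0] is the least significant byte, matching get_unaligned_le24(). */
static inline u32 le24_example(const u8 b[3])
{
	return b[0] | (b[1] << 8) | ((u32)b[2] << 16);
}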
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' sub_type='%s' " \
@@ -871,6 +887,111 @@ TRACE_EVENT(cxl_memory_module,
)
);
+/*
+ * Memory Sparing Event Record - MSER
+ *
+ * CXL rev 3.2 section 8.2.10.2.1.4; Table 8-60
+ */
+#define CXL_MSER_QUERY_RESOURCE_FLAG BIT(0)
+#define CXL_MSER_HARD_SPARING_FLAG BIT(1)
+#define CXL_MSER_DEV_INITED_FLAG BIT(2)
+#define show_mem_sparing_flags(flags) __print_flags(flags, "|", \
+ { CXL_MSER_QUERY_RESOURCE_FLAG, "Query Resources" }, \
+ { CXL_MSER_HARD_SPARING_FLAG, "Hard Sparing" }, \
+ { CXL_MSER_DEV_INITED_FLAG, "Device Initiated Sparing" } \
+)
+
+#define CXL_MSER_VALID_CHANNEL BIT(0)
+#define CXL_MSER_VALID_RANK BIT(1)
+#define CXL_MSER_VALID_NIBBLE BIT(2)
+#define CXL_MSER_VALID_BANK_GROUP BIT(3)
+#define CXL_MSER_VALID_BANK BIT(4)
+#define CXL_MSER_VALID_ROW BIT(5)
+#define CXL_MSER_VALID_COLUMN BIT(6)
+#define CXL_MSER_VALID_COMPONENT_ID BIT(7)
+#define CXL_MSER_VALID_COMPONENT_ID_FORMAT BIT(8)
+#define CXL_MSER_VALID_SUB_CHANNEL BIT(9)
+#define show_mem_sparing_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_MSER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_MSER_VALID_RANK, "RANK" }, \
+ { CXL_MSER_VALID_NIBBLE, "NIBBLE" }, \
+ { CXL_MSER_VALID_BANK_GROUP, "BANK GROUP" }, \
+ { CXL_MSER_VALID_BANK, "BANK" }, \
+ { CXL_MSER_VALID_ROW, "ROW" }, \
+ { CXL_MSER_VALID_COLUMN, "COLUMN" }, \
+ { CXL_MSER_VALID_COMPONENT_ID, "COMPONENT ID" }, \
+ { CXL_MSER_VALID_COMPONENT_ID_FORMAT, "COMPONENT ID PLDM FORMAT" }, \
+ { CXL_MSER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \
+)
+
+TRACE_EVENT(cxl_memory_sparing,
+
+ TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
+ struct cxl_event_mem_sparing *rec),
+
+ TP_ARGS(cxlmd, log, rec),
+
+ TP_STRUCT__entry(
+ CXL_EVT_TP_entry
+
+ /* Memory Sparing Event */
+ __field(u8, flags)
+ __field(u8, result)
+ __field(u16, validity_flags)
+ __field(u16, res_avail)
+ __field(u8, channel)
+ __field(u8, rank)
+ __field(u32, nibble_mask)
+ __field(u8, bank_group)
+ __field(u8, bank)
+ __field(u32, row)
+ __field(u16, column)
+ __field(u8, sub_channel)
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ ),
+
+ TP_fast_assign(
+ CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_MEM_SPARING_UUID;
+
+ /* Memory Sparing Event */
+ __entry->flags = rec->flags;
+ __entry->result = rec->result;
+ __entry->validity_flags = le16_to_cpu(rec->validity_flags);
+ __entry->res_avail = le16_to_cpu(rec->res_avail);
+ __entry->channel = rec->channel;
+ __entry->rank = rec->rank;
+ __entry->nibble_mask = get_unaligned_le24(rec->nibble_mask);
+ __entry->bank_group = rec->bank_group;
+ __entry->bank = rec->bank;
+ __entry->row = get_unaligned_le24(rec->row);
+ __entry->column = le16_to_cpu(rec->column);
+ __entry->sub_channel = rec->sub_channel;
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
+ ),
+
+ CXL_EVT_TP_printk("flags='%s' result=%u validity_flags='%s' " \
+ "spare resource avail=%u channel=%u rank=%u " \
+ "nibble_mask=%x bank_group=%u bank=%u " \
+ "row=%u column=%u sub_channel=%u " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s",
+ show_mem_sparing_flags(__entry->flags),
+ __entry->result,
+ show_mem_sparing_valid_flags(__entry->validity_flags),
+ __entry->res_avail, __entry->channel, __entry->rank,
+ __entry->nibble_mask, __entry->bank_group, __entry->bank,
+ __entry->row, __entry->column, __entry->sub_channel,
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID,
+ CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID,
+ CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id)
+ )
+);
+
#define show_poison_trace_type(type) \
__print_symbolic(type, \
{ CXL_POISON_TRACE_LIST, "List" }, \
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index ad863572ddb7..847e37be42c4 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -424,6 +424,7 @@ typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
/**
* struct cxl_root_decoder - Static platform CXL address decoder
* @res: host / parent resource for region allocations
+ * @cache_size: extended linear cache size if it exists, otherwise zero.
* @region_id: region id for next region provisioning event
* @hpa_to_spa: translate CXL host-physical-address to Platform system-physical-address
* @platform_data: platform specific configuration data
@@ -433,6 +434,7 @@ typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
*/
struct cxl_root_decoder {
struct resource *res;
+ resource_size_t cache_size;
atomic_t region_id;
cxl_hpa_to_spa_fn hpa_to_spa;
void *platform_data;
@@ -470,7 +472,7 @@ enum cxl_config_state {
* @nr_targets: number of targets
* @cache_size: extended linear cache size if it exists, otherwise zero.
*
- * State transitions are protected by the cxl_region_rwsem
+ * State transitions are protected by cxl_rwsem.region
*/
struct cxl_region_params {
enum cxl_config_state state;
@@ -816,7 +818,7 @@ int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
bool is_cxl_region(struct device *dev);
-extern struct bus_type cxl_bus_type;
+extern const struct bus_type cxl_bus_type;
struct cxl_driver {
const char *name;
@@ -913,15 +915,4 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#endif
u16 cxl_gpf_get_dvsec(struct device *dev);
-
-static inline struct rw_semaphore *rwsem_read_intr_acquire(struct rw_semaphore *rwsem)
-{
- if (down_read_interruptible(rwsem))
- return NULL;
-
- return rwsem;
-}
-
-DEFINE_FREE(rwsem_read_release, struct rw_semaphore *, if (_T) up_read(_T))
-
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 551b0ba2caa1..751478dfc410 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -254,7 +254,7 @@ enum security_cmd_enabled_bits {
* @max_errors: Maximum media error records held in device cache
* @enabled_cmds: All poison commands enabled in the CEL
* @list_out: The poison list payload returned by device
- * @lock: Protect reads of the poison list
+ * @mutex: Protect reads of the poison list
*
* Reads of the poison list are synchronized to ensure that a reader
* does not get an incomplete list because their request overlapped
@@ -265,7 +265,7 @@ struct cxl_poison_state {
u32 max_errors;
DECLARE_BITMAP(enabled_cmds, CXL_POISON_ENABLED_MAX);
struct cxl_mbox_poison_out *list_out;
- struct mutex lock; /* Protect reads of poison list */
+ struct mutex mutex; /* Protect reads of poison list */
};
/*
@@ -634,6 +634,14 @@ struct cxl_mbox_identify {
0x13, 0xb7, 0x74)
/*
+ * Memory Sparing Event Record UUID
+ * CXL rev 3.2 section 8.2.10.2.1.4: Table 8-60
+ */
+#define CXL_EVENT_MEM_SPARING_UUID \
+ UUID_INIT(0xe71f3a40, 0x2d29, 0x4092, 0x8a, 0x39, 0x4d, 0x1c, 0x96, \
+ 0x6c, 0x7c, 0x65)
+
+/*
* Get Event Records output payload
* CXL rev 3.0 section 8.2.9.2.2; Table 8-50
*/
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 785aa2af5eaa..bd100ac31672 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -379,7 +379,7 @@ static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox,
{
int rc;
- mutex_lock_io(&cxl_mbox->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
mutex_unlock(&cxl_mbox->mbox_mutex);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index db87dd2a07f7..05c7c7d9e5a4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,7 +89,6 @@ config APPLE_ADMAC
tristate "Apple ADMAC support"
depends on ARCH_APPLE || COMPILE_TEST
select DMA_ENGINE
- default ARCH_APPLE
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
@@ -111,7 +110,7 @@ config AT_HDMAC
config AT_XDMAC
tristate "Atmel XDMA support"
- depends on ARCH_AT91
+ depends on ARCH_MICROCHIP
select DMA_ENGINE
help
Support the Atmel XDMA controller.
@@ -572,6 +571,15 @@ config PLX_DMA
These are exposed via extra functions on the switch's
upstream port. Each function exposes one DMA channel.
+config SOPHGO_CV1800B_DMAMUX
+ tristate "Sophgo CV1800/SG2000 series SoC DMA multiplexer support"
+ depends on MFD_SYSCON
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Support for the DMA multiplexer on Sophgo CV1800/SG2000
+ series SoCs.
+ Say Y here if your board has this SoC.
+
config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index ba9732644752..a54d7688392b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
+obj-$(CONFIG_SOPHGO_CV1800B_DMAMUX) += cv1800b-dmamux.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
diff --git a/drivers/dma/cv1800b-dmamux.c b/drivers/dma/cv1800b-dmamux.c
new file mode 100644
index 000000000000..e900d6595617
--- /dev/null
+++ b/drivers/dma/cv1800b-dmamux.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/llist.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+
+#define REG_DMA_CHANNEL_REMAP0 0x154
+#define REG_DMA_CHANNEL_REMAP1 0x158
+#define REG_DMA_INT_MUX 0x298
+
+#define DMAMUX_NCELLS 2
+#define MAX_DMA_MAPPING_ID 42
+#define MAX_DMA_CPU_ID 2
+#define MAX_DMA_CH_ID 7
+
+#define DMAMUX_INTMUX_REGISTER_LEN 4
+#define DMAMUX_NR_CH_PER_REGISTER 4
+#define DMAMUX_BIT_PER_CH 8
+#define DMAMUX_CH_MASk GENMASK(5, 0)
+#define DMAMUX_INT_BIT_PER_CPU 10
+#define DMAMUX_CH_UPDATE_BIT BIT(31)
+
+#define DMAMUX_CH_REGPOS(chid) \
+ ((chid) / DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REGOFF(chid) \
+ ((chid) % DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REG(chid) \
+ ((DMAMUX_CH_REGPOS(chid) * sizeof(u32)) + \
+ REG_DMA_CHANNEL_REMAP0)
+#define DMAMUX_CH_SET(chid, val) \
+ (((val) << (DMAMUX_CH_REGOFF(chid) * DMAMUX_BIT_PER_CH)) | \
+ DMAMUX_CH_UPDATE_BIT)
+#define DMAMUX_CH_MASK(chid) \
+ DMAMUX_CH_SET(chid, DMAMUX_CH_MASk)
+
+#define DMAMUX_INT_BIT(chid, cpuid) \
+ BIT((cpuid) * DMAMUX_INT_BIT_PER_CPU + (chid))
+#define DMAMUX_INTEN_BIT(cpuid) \
+ DMAMUX_INT_BIT(8, cpuid)
+#define DMAMUX_INT_CH_BIT(chid, cpuid) \
+ (DMAMUX_INT_BIT(chid, cpuid) | DMAMUX_INTEN_BIT(cpuid))
+#define DMAMUX_INT_MASK(chid) \
+ (DMAMUX_INT_BIT(chid, 0) | \
+ DMAMUX_INT_BIT(chid, 1) | \
+ DMAMUX_INT_BIT(chid, 2))
+#define DMAMUX_INT_CH_MASK(chid, cpuid) \
+ (DMAMUX_INT_MASK(chid) | DMAMUX_INTEN_BIT(cpuid))
+
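A worked example of the register math above, with illustrative values: routing peripheral request 17 to channel 5 selects REG_DMA_CHANNEL_REMAP1 and writes the channel's byte together with the update bit.

/* chid = 5, devid = 17 (illustrative):                                      */
/* DMAMUX_CH_REG(5)     = (5 / 4) * sizeof(u32) + 0x154     = 0x158 (REMAP1) */
/* DMAMUX_CH_SET(5, 17) = (17 << ((5 % 4) * 8)) | BIT(31)   = 0x80001100     */
/* DMAMUX_CH_MASK(5)    = (0x3f << ((5 % 4) * 8)) | BIT(31) = 0x80003f00     */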
+struct cv1800_dmamux_data {
+ struct dma_router dmarouter;
+ struct regmap *regmap;
+ spinlock_t lock;
+ struct llist_head free_maps;
+ struct llist_head reserve_maps;
+ DECLARE_BITMAP(mapped_peripherals, MAX_DMA_MAPPING_ID);
+};
+
+struct cv1800_dmamux_map {
+ struct llist_node node;
+ unsigned int channel;
+ unsigned int peripheral;
+ unsigned int cpu;
+};
+
+static void cv1800_dmamux_free(struct device *dev, void *route_data)
+{
+ struct cv1800_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct cv1800_dmamux_map *map = route_data;
+
+ guard(spinlock_irqsave)(&dmamux->lock);
+
+ regmap_update_bits(dmamux->regmap,
+ DMAMUX_CH_REG(map->channel),
+ DMAMUX_CH_MASK(map->channel),
+ DMAMUX_CH_UPDATE_BIT);
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(map->channel, map->cpu),
+ DMAMUX_INTEN_BIT(map->cpu));
+
+ dev_dbg(dev, "free channel %u for req %u (cpu %u)\n",
+ map->channel, map->peripheral, map->cpu);
+}
+
+static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct cv1800_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ struct cv1800_dmamux_map *map;
+ struct llist_node *node;
+ unsigned long flags;
+ unsigned int chid, devid, cpuid;
+ int ret;
+
+ if (dma_spec->args_count != DMAMUX_NCELLS) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ devid = dma_spec->args[0];
+ cpuid = dma_spec->args[1];
+ dma_spec->args_count = 1;
+
+ if (devid > MAX_DMA_MAPPING_ID) {
+ dev_err(&pdev->dev, "invalid device id: %u\n", devid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cpuid > MAX_DMA_CPU_ID) {
+ dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+
+ if (test_bit(devid, dmamux->mapped_peripherals)) {
+ llist_for_each_entry(map, dmamux->reserve_maps.first, node) {
+ if (map->peripheral == devid && map->cpu == cpuid)
+ goto found;
+ }
+
+ ret = -EINVAL;
+ goto failed;
+ } else {
+ node = llist_del_first(&dmamux->free_maps);
+ if (!node) {
+ ret = -ENODEV;
+ goto failed;
+ }
+
+ map = llist_entry(node, struct cv1800_dmamux_map, node);
+ llist_add(&map->node, &dmamux->reserve_maps);
+ set_bit(devid, dmamux->mapped_peripherals);
+ }
+
+found:
+ chid = map->channel;
+ map->peripheral = devid;
+ map->cpu = cpuid;
+
+ regmap_set_bits(dmamux->regmap,
+ DMAMUX_CH_REG(chid),
+ DMAMUX_CH_SET(chid, devid));
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(chid, cpuid),
+ DMAMUX_INT_CH_BIT(chid, cpuid));
+
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dma_spec->args[0] = chid;
+
+ dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
+ chid, devid, cpuid);
+
+ return map;
+
+failed:
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ of_node_put(dma_spec->np);
+ dev_err(&pdev->dev, "errno %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static int cv1800_dmamux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mux_node = dev->of_node;
+ struct cv1800_dmamux_data *data;
+ struct cv1800_dmamux_map *tmp;
+ struct device *parent = dev->parent;
+ struct regmap *regmap = NULL;
+ unsigned int i;
+
+ if (!parent)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(parent->of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->lock);
+ init_llist_head(&data->free_maps);
+ init_llist_head(&data->reserve_maps);
+
+ for (i = 0; i <= MAX_DMA_CH_ID; i++) {
+ tmp = devm_kmalloc(dev, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ /* It is OK not to allocate every channel */
+ dev_warn(dev, "can not allocate channel %u\n", i);
+ continue;
+ }
+
+ init_llist_node(&tmp->node);
+ tmp->channel = i;
+ llist_add(&tmp->node, &data->free_maps);
+ }
+
+ /* if no channel is allocated, the probe must fail */
+ if (llist_empty(&data->free_maps))
+ return -ENOMEM;
+
+ data->regmap = regmap;
+ data->dmarouter.dev = dev;
+ data->dmarouter.route_free = cv1800_dmamux_free;
+
+ platform_set_drvdata(pdev, data);
+
+ return of_dma_router_register(mux_node,
+ cv1800_dmamux_route_allocate,
+ &data->dmarouter);
+}
+
+static void cv1800_dmamux_remove(struct platform_device *pdev)
+{
+ of_dma_controller_free(pdev->dev.of_node);
+}
+
+static const struct of_device_id cv1800_dmamux_ids[] = {
+ { .compatible = "sophgo,cv1800b-dmamux", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800_dmamux_ids);
+
+static struct platform_driver cv1800_dmamux_driver = {
+ .probe = cv1800_dmamux_probe,
+ .remove = cv1800_dmamux_remove,
+ .driver = {
+ .name = "cv1800-dmamux",
+ .of_match_table = cv1800_dmamux_ids,
+ },
+};
+module_platform_driver(cv1800_dmamux_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo CV1800/SG2000 Series SoC DMAMUX driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index c2b88cc99e5d..b43255f914f3 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -24,18 +24,6 @@
#include "../virt-dma.h"
static inline
-struct device *dchan2dev(struct dma_chan *dchan)
-{
- return &dchan->dev->device;
-}
-
-static inline
-struct device *chan2dev(struct dw_edma_chan *chan)
-{
- return &chan->vc.chan.dev->device;
-}
-
-static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct dw_edma_desc, vd);
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index b4323d243d6d..4be81db24a19 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -48,11 +48,6 @@ struct dpdmai_cmd_destroy {
__le32 dpdmai_id;
} __packed;
-static inline u64 mc_enc(int lsoffset, int width, u64 val)
-{
- return (val & MAKE_UMASK64(width)) << lsoffset;
-}
-
/**
* dpdmai_open() - Open a control session for the specified object
* @mc_io: Pointer to MC portal's I/O object
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 823f5c6bc2e1..21e13f1207cb 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -148,6 +148,9 @@
* @__reserved1: Reserved field.
* @cfg8b_w1: Compound descriptor command queue origin produced
* by qDMA and dynamic debug field.
+ * @__reserved2: Reserved field.
+ * @cmd: Command for QDMA (see FSL_QDMA_CMD_RWTTYPE and
+ * others).
* @data: Pointer to the memory 40-bit address, describes DMA
* source information and DMA destination information.
*/
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 80355d03004d..35bdefd3728b 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -1036,7 +1036,6 @@ static void idxd_reset_prepare(struct pci_dev *pdev)
const char *idxd_name;
int rc;
- dev = &idxd->pdev->dev;
idxd_name = dev_name(idxd_confdev(idxd));
struct idxd_saved_states *idxd_saved __free(kfree) =
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 006ba206ab1b..9c1c546fe443 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -45,7 +45,7 @@ union gen_cap_reg {
u64 rsvd3:32;
};
u64 bits;
-} __packed;
+};
#define IDXD_GENCAP_OFFSET 0x10
union wq_cap_reg {
@@ -65,7 +65,7 @@ union wq_cap_reg {
u64 rsvd4:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_WQCAP_OFFSET 0x20
#define IDXD_WQCFG_MIN 5
@@ -79,7 +79,7 @@ union group_cap_reg {
u64 rsvd:45;
};
u64 bits;
-} __packed;
+};
#define IDXD_GRPCAP_OFFSET 0x30
union engine_cap_reg {
@@ -88,7 +88,7 @@ union engine_cap_reg {
u64 rsvd:56;
};
u64 bits;
-} __packed;
+};
#define IDXD_ENGCAP_OFFSET 0x38
@@ -114,7 +114,7 @@ union offsets_reg {
u64 rsvd:48;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_TABLE_MULT 0x100
@@ -128,7 +128,7 @@ union gencfg_reg {
u32 rsvd2:18;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENCTRL_OFFSET 0x88
union genctrl_reg {
@@ -139,7 +139,7 @@ union genctrl_reg {
u32 rsvd:29;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENSTATS_OFFSET 0x90
union gensts_reg {
@@ -149,7 +149,7 @@ union gensts_reg {
u32 rsvd:28;
};
u32 bits;
-} __packed;
+};
enum idxd_device_status_state {
IDXD_DEVICE_STATE_DISABLED = 0,
@@ -183,7 +183,7 @@ union idxd_command_reg {
u32 int_req:1;
};
u32 bits;
-} __packed;
+};
enum idxd_cmd {
IDXD_CMD_ENABLE_DEVICE = 1,
@@ -213,7 +213,7 @@ union cmdsts_reg {
u8 active:1;
};
u32 bits;
-} __packed;
+};
#define IDXD_CMDSTS_ACTIVE 0x80000000
#define IDXD_CMDSTS_ERR_MASK 0xff
#define IDXD_CMDSTS_RES_SHIFT 8
@@ -284,7 +284,7 @@ union sw_err_reg {
u64 rsvd5;
};
u64 bits[4];
-} __packed;
+};
union iaa_cap_reg {
struct {
@@ -303,7 +303,7 @@ union iaa_cap_reg {
u64 rsvd:52;
};
u64 bits;
-} __packed;
+};
#define IDXD_IAACAP_OFFSET 0x180
@@ -320,7 +320,7 @@ union evlcfg_reg {
u64 rsvd2:28;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_EVL_SIZE_MIN 0x0040
#define IDXD_EVL_SIZE_MAX 0xffff
@@ -334,7 +334,7 @@ union msix_perm {
u32 pasid:20;
};
u32 bits;
-} __packed;
+};
union group_flags {
struct {
@@ -352,13 +352,13 @@ union group_flags {
u64 rsvd5:26;
};
u64 bits;
-} __packed;
+};
struct grpcfg {
u64 wqs[4];
u64 engines;
union group_flags flags;
-} __packed;
+};
union wqcfg {
struct {
@@ -410,7 +410,7 @@ union wqcfg {
u64 op_config[4];
};
u32 bits[16];
-} __packed;
+};
#define WQCFG_PASID_IDX 2
#define WQCFG_PRIVL_IDX 2
@@ -474,7 +474,7 @@ union idxd_perfcap {
u64 rsvd3:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_EVNTCAP_OFFSET 0x80
union idxd_evntcap {
@@ -483,7 +483,7 @@ union idxd_evntcap {
u64 rsvd:36;
};
u64 bits;
-} __packed;
+};
struct idxd_event {
union {
@@ -493,7 +493,7 @@ struct idxd_event {
};
u32 val;
};
-} __packed;
+};
#define IDXD_CNTRCAP_OFFSET 0x800
struct idxd_cntrcap {
@@ -506,7 +506,7 @@ struct idxd_cntrcap {
u32 val;
};
struct idxd_event events[];
-} __packed;
+};
#define IDXD_PERFRST_OFFSET 0x10
union idxd_perfrst {
@@ -516,7 +516,7 @@ union idxd_perfrst {
u32 rsvd:30;
};
u32 val;
-} __packed;
+};
#define IDXD_OVFSTATUS_OFFSET 0x30
#define IDXD_PERFFRZ_OFFSET 0x20
@@ -533,7 +533,7 @@ union idxd_cntrcfg {
u64 rsvd3:4;
};
u64 val;
-} __packed;
+};
#define IDXD_FLTCFG_OFFSET 0x300
@@ -543,7 +543,7 @@ union idxd_cntrdata {
u64 event_count_value;
};
u64 val;
-} __packed;
+};
union event_cfg {
struct {
@@ -551,7 +551,7 @@ union event_cfg {
u64 event_enc:28;
};
u64 val;
-} __packed;
+};
union filter_cfg {
struct {
@@ -562,7 +562,7 @@ union filter_cfg {
u64 eng:8;
};
u64 val;
-} __packed;
+};
#define IDXD_EVLSTATUS_OFFSET 0xf0
@@ -580,7 +580,7 @@ union evl_status_reg {
u32 bits_upper32;
};
u64 bits;
-} __packed;
+};
#define IDXD_MAX_BATCH_IDENT 256
@@ -620,17 +620,17 @@ struct __evl_entry {
};
u64 fault_addr;
u64 rsvd5;
-} __packed;
+};
struct dsa_evl_entry {
struct __evl_entry e;
struct dsa_completion_record cr;
-} __packed;
+};
struct iax_evl_entry {
struct __evl_entry e;
u64 rsvd[4];
struct iax_completion_record cr;
-} __packed;
+};
#endif
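The __packed removals above do not change any register layout: each union pairs a bitfield struct with a full-width member of the same size, so the structure is already exactly sized and __packed only forced byte-wise access (this rationale is inferred, not stated in the hunks). A minimal illustration:

/* Naturally 8 bytes with or without __packed; only the generated accesses differ. */
union example_cap_reg {
	struct {
		u64 feature:1;
		u64 rsvd:63;
	};
	u64 bits;
};
static_assert(sizeof(union example_cap_reg) == sizeof(u64));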
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index c8dc504510f1..b7fb843c67a6 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -641,7 +641,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
int chan_num = TDMA_CHANNEL_NUM;
struct gen_pool *pool = NULL;
- type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev);
+ type = (kernel_ulong_t)device_get_match_data(&pdev->dev);
/* always have couple channels */
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa6e4646fdc2..1fdcb0f5c9e7 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
*/
mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
+ return ERR_PTR(-ENOMEM);
+
mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
+ ret = -ENOMEM;
+ goto err_unmap_src;
+ }
+
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
@@ -1071,8 +1079,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->dma_desc_pool_virt =
dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
GFP_KERNEL);
- if (!mv_chan->dma_desc_pool_virt)
- return ERR_PTR(-ENOMEM);
+ if (!mv_chan->dma_desc_pool_virt) {
+ ret = -ENOMEM;
+ goto err_unmap_dst;
+ }
/* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
@@ -1155,6 +1165,13 @@ err_free_irq:
err_free_dma:
dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+err_unmap_dst:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+err_unmap_src:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+
return ERR_PTR(ret);
}
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 7a2488a0d6a3..765462303de0 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -711,6 +711,9 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
list_add_tail(&ldesc->node, &lhead);
ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dchan->device->dev,
+ ldesc->hwdesc_dma_addr))
+ goto unmap_error;
dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
hwdesc, &ldesc->hwdesc_dma_addr);
@@ -737,6 +740,16 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
spin_unlock_irq(&chan->lock);
return ARRAY_SIZE(dpage->desc);
+
+unmap_error:
+ while (i--) {
+ ldesc--; hwdesc--;
+
+ dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
+ sizeof(*hwdesc), DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
static void nbpf_desc_put(struct nbpf_desc *desc)
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index b1f0001cc99c..8e87738086b2 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -569,17 +569,6 @@ static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
writel_relaxed(val, addr);
}
-/* gpi_write_reg_field - write to specific bit field */
-static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
- u32 mask, u32 shift, u32 val)
-{
- u32 tmp = gpi_read_reg(gpii, addr);
-
- tmp &= ~mask;
- val = tmp | ((val << shift) & mask);
- gpi_write_reg(gpii, addr, val);
-}
-
static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 6ea5a880b433..8184d475a49a 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -16,7 +16,7 @@ config SH_DMAE_BASE
depends on SUPERH || COMPILE_TEST
depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
- default y
+ default SUPERH || SH_DMA
select RENESAS_DMA
help
Enable support for the Renesas SuperH DMA controllers.
diff --git a/drivers/dma/stm32/stm32-dma.c b/drivers/dma/stm32/stm32-dma.c
index 917f8e922373..04389936c8a6 100644
--- a/drivers/dma/stm32/stm32-dma.c
+++ b/drivers/dma/stm32/stm32-dma.c
@@ -613,7 +613,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
reg->dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -676,7 +676,7 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
chan->status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
}
static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
@@ -728,7 +728,7 @@ static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: reconfigured after pause/resume\n", &chan->vchan);
}
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
@@ -744,7 +744,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
stm32_dma_post_resume_reconfigure(chan);
- else if (scr & STM32_DMA_SCR_DBM)
+ else if (scr & STM32_DMA_SCR_DBM && chan->desc->num_sgs > 2)
stm32_dma_configure_next_sg(chan);
} else {
chan->busy = false;
@@ -820,7 +820,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma_start_transfer(chan);
}
@@ -922,7 +922,7 @@ static int stm32_dma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c
index 0c6c4258b195..50e7106c5cb7 100644
--- a/drivers/dma/stm32/stm32-dma3.c
+++ b/drivers/dma/stm32/stm32-dma3.c
@@ -801,7 +801,7 @@ static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp)
@@ -1452,7 +1452,7 @@ static int stm32_dma3_pause(struct dma_chan *c)
chan->dma_status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
return 0;
}
@@ -1465,7 +1465,7 @@ static int stm32_dma3_resume(struct dma_chan *c)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
@@ -1490,7 +1490,7 @@ static int stm32_dma3_terminate_all(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
vchan_dma_desc_free_list(&chan->vchan, &head);
- dev_dbg(chan2dev(chan), "vchan %pK: terminated\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: terminated\n", &chan->vchan);
return 0;
}
@@ -1543,7 +1543,7 @@ static void stm32_dma3_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma3_chan_start(chan);
}
diff --git a/drivers/dma/stm32/stm32-mdma.c b/drivers/dma/stm32/stm32-mdma.c
index e6d525901de7..080c1c725216 100644
--- a/drivers/dma/stm32/stm32-mdma.c
+++ b/drivers/dma/stm32/stm32-mdma.c
@@ -1187,7 +1187,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
chan->busy = true;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_mdma_issue_pending(struct dma_chan *c)
@@ -1200,7 +1200,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c)
if (!vchan_issue_pending(&chan->vchan))
goto end;
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
if (!chan->desc && !chan->busy)
stm32_mdma_start_transfer(chan);
@@ -1220,7 +1220,7 @@ static int stm32_mdma_pause(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
if (!ret)
- dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
return ret;
}
@@ -1261,7 +1261,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 24796aaaddfa..00d2fd38d17f 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1249,11 +1249,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (priv->irq < 0)
return priv->irq;
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "No clock specified\n");
- return PTR_ERR(priv->clk);
- }
+ priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "Couldn't start the clock\n");
if (priv->cfg->has_reset) {
priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
@@ -1328,12 +1327,6 @@ static int sun4i_dma_probe(struct platform_device *pdev)
vchan_init(&vchan->vc, &priv->slave);
}
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't enable the clock\n");
- return ret;
- }
-
/*
* Make sure the IRQs are all disabled and accounted for. The bootloader
* likes to leave these dirty
@@ -1343,33 +1336,23 @@ static int sun4i_dma_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
0, dev_name(&pdev->dev), priv);
- if (ret) {
- dev_err(&pdev->dev, "Cannot request IRQ\n");
- goto err_clk_disable;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n");
- ret = dma_async_device_register(&priv->slave);
- if (ret) {
- dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
- goto err_clk_disable;
- }
+ ret = dmaenginem_async_device_register(&priv->slave);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register DMA engine device\n");
ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
priv);
- if (ret) {
- dev_err(&pdev->dev, "of_dma_controller_register failed\n");
- goto err_dma_unregister;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register translation function\n");
dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
return 0;
-
-err_dma_unregister:
- dma_async_device_unregister(&priv->slave);
-err_clk_disable:
- clk_disable_unprepare(priv->clk);
- return ret;
}
static void sun4i_dma_remove(struct platform_device *pdev)
@@ -1380,9 +1363,6 @@ static void sun4i_dma_remove(struct platform_device *pdev)
disable_irq(priv->irq);
of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&priv->slave);
-
- clk_disable_unprepare(priv->clk);
}
static struct sun4i_dma_config sun4i_a10_dma_cfg = {
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index 2adc2cca10e9..dbf168146d35 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -17,7 +17,7 @@ config TI_EDMA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
- default y
+ default ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
help
Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
@@ -29,7 +29,7 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
- default y
+ default ARCH_OMAP
help
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.
diff --git a/drivers/dpll/zl3073x/Kconfig b/drivers/dpll/zl3073x/Kconfig
index 7db262ab8458..5bbca1400581 100644
--- a/drivers/dpll/zl3073x/Kconfig
+++ b/drivers/dpll/zl3073x/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config ZL3073X
- tristate "Microchip Azurite DPLL/PTP/SyncE devices"
+ tristate "Microchip Azurite DPLL/PTP/SyncE devices" if COMPILE_TEST
depends on NET
select DPLL
select NET_DEVLINK
@@ -16,9 +16,9 @@ config ZL3073X
config ZL3073X_I2C
tristate "I2C bus implementation for Microchip Azurite devices"
- depends on I2C && ZL3073X
+ depends on I2C && NET
select REGMAP_I2C
- default m
+ select ZL3073X
help
This is I2C bus implementation for Microchip Azurite DPLL/PTP/SyncE
devices.
@@ -28,9 +28,9 @@ config ZL3073X_I2C
config ZL3073X_SPI
tristate "SPI bus implementation for Microchip Azurite devices"
- depends on SPI && ZL3073X
+ depends on NET && SPI
select REGMAP_SPI
- default m
+ select ZL3073X
help
This is SPI bus implementation for Microchip Azurite DPLL/PTP/SyncE
devices.
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 01354b9de8b2..aae774e7a5c3 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(fw_schedule_bus_reset);
static void br_work(struct work_struct *work)
{
- struct fw_card *card = container_of(work, struct fw_card, br_work.work);
+ struct fw_card *card = from_work(card, work, br_work.work);
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
@@ -273,10 +273,6 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
fw_device_set_broadcast_channel);
}
-static const char gap_count_table[] = {
- 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
-};
-
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
fw_card_get(card);
@@ -286,7 +282,10 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
static void bm_work(struct work_struct *work)
{
- struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
+ static const char gap_count_table[] = {
+ 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ };
+ struct fw_card *card = from_work(card, work, bm_work.work);
struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
int root_id, new_root_id, irm_id, bm_id, local_id;
@@ -574,7 +573,6 @@ EXPORT_SYMBOL(fw_card_initialize);
int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
unsigned int supported_isoc_contexts)
{
- struct workqueue_struct *isoc_wq;
int ret;
// This workqueue should be:
@@ -589,29 +587,48 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
// * == WQ_SYSFS Parameters are available via sysfs.
// * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous
// contexts if they are scheduled to the same cycle.
- isoc_wq = alloc_workqueue("firewire-isoc-card%u",
- WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
- supported_isoc_contexts, card->index);
- if (!isoc_wq)
+ card->isoc_wq = alloc_workqueue("firewire-isoc-card%u",
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ supported_isoc_contexts, card->index);
+ if (!card->isoc_wq)
return -ENOMEM;
+ // This workqueue should be:
+ // * != WQ_BH Sleepable.
+ // * == WQ_UNBOUND Any core can process data for the asynchronous contexts.
+ // * == WQ_MEM_RECLAIM Used for any backend of a block device.
+ // * == WQ_FREEZABLE The target device would not be available when frozen.
+ // * == WQ_HIGHPRI High priority to process semi-realtime timestamped data.
+ // * == WQ_SYSFS Parameters are available via sysfs.
+ // * max_active == 4 A hardIRQ could notify events for the request and
+ // response AR/AT contexts.
+ card->async_wq = alloc_workqueue("firewire-async-card%u",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ 4, card->index);
+ if (!card->async_wq) {
+ ret = -ENOMEM;
+ goto err_isoc;
+ }
+
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
- guard(mutex)(&card_mutex);
+ scoped_guard(mutex, &card_mutex) {
+ generate_config_rom(card, tmp_config_rom);
+ ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
+ if (ret < 0)
+ goto err_async;
- generate_config_rom(card, tmp_config_rom);
- ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
- if (ret < 0) {
- destroy_workqueue(isoc_wq);
- return ret;
+ list_add_tail(&card->link, &card_list);
}
- card->isoc_wq = isoc_wq;
- list_add_tail(&card->link, &card_list);
-
return 0;
+err_async:
+ destroy_workqueue(card->async_wq);
+err_isoc:
+ destroy_workqueue(card->isoc_wq);
+ return ret;
}
EXPORT_SYMBOL(fw_card_add);
@@ -744,6 +761,7 @@ void fw_core_remove_card(struct fw_card *card)
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
drain_workqueue(card->isoc_wq);
+ drain_workqueue(card->async_wq);
scoped_guard(spinlock_irqsave, &card->lock)
fw_destroy_nodes(card);
@@ -753,6 +771,7 @@ void fw_core_remove_card(struct fw_card *card)
wait_for_completion(&card->done);
destroy_workqueue(card->isoc_wq);
+ destroy_workqueue(card->async_wq);
WARN_ON(!list_empty(&card->transaction_list));
}
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index bd04980009a4..78b10c6ef7fe 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1313,8 +1313,7 @@ static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
static void iso_resource_work(struct work_struct *work)
{
struct iso_resource_event *e;
- struct iso_resource *r =
- container_of(work, struct iso_resource, work.work);
+ struct iso_resource *r = from_work(r, work, work.work);
struct client *client = r->client;
unsigned long index = r->resource.handle;
int generation, channel, bandwidth, todo;
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index ec3e21ad2025..aeacd4cfd694 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -853,8 +853,7 @@ static void fw_schedule_device_work(struct fw_device *device,
static void fw_device_shutdown(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
if (time_before64(get_jiffies_64(),
device->card->reset_jiffies + SHUTDOWN_DELAY)
@@ -921,8 +920,7 @@ static int update_unit(struct device *dev, void *data)
static void fw_device_update(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
@@ -1002,8 +1000,7 @@ static int compare_configuration_rom(struct device *dev, const void *data)
static void fw_device_init(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
struct fw_card *card = device->card;
struct device *found;
u32 minor;
@@ -1184,8 +1181,7 @@ static int reread_config_rom(struct fw_device *device, int generation,
static void fw_device_refresh(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
struct fw_card *card = device->card;
int ret, node_id = device->node_id;
bool changed;
@@ -1251,8 +1247,7 @@ static void fw_device_refresh(struct work_struct *work)
static void fw_device_workfn(struct work_struct *work)
{
- struct fw_device *device = container_of(to_delayed_work(work),
- struct fw_device, work);
+ struct fw_device *device = from_work(device, to_delayed_work(work), work);
device->workfn(work);
}
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 2bd5deb9054e..1d1c2d8f85ae 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -550,6 +550,23 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
+static void complete_address_handler(struct kref *kref)
+{
+ struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);
+
+ complete(&handler->done);
+}
+
+static void get_address_handler(struct fw_address_handler *handler)
+{
+ kref_get(&handler->kref);
+}
+
+static int put_address_handler(struct fw_address_handler *handler)
+{
+ return kref_put(&handler->kref, complete_address_handler);
+}
+
/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
@@ -557,9 +574,10 @@ const struct fw_address_region fw_unit_space_region =
*
* region->start, ->end, and handler->length have to be quadlet-aligned.
*
- * When a request is received that falls within the specified address range,
- * the specified callback is invoked. The parameters passed to the callback
- * give the details of the particular request.
+ * When a request is received that falls within the specified address range, the specified callback
+ * is invoked. The parameters passed to the callback give the details of the particular request.
+ * The callback is invoked in the workqueue context in most cases. However, if the request is
+ * initiated by the local node, the callback is invoked in the initiator's context.
*
* To be called in process context.
* Return value: 0 on success, non-zero otherwise.
@@ -595,6 +613,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
if (other != NULL) {
handler->offset += other->length;
} else {
+ init_completion(&handler->done);
+ kref_init(&handler->kref);
list_add_tail_rcu(&handler->link, &address_handler_list);
ret = 0;
break;
@@ -620,6 +640,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
list_del_rcu(&handler->link);
synchronize_rcu();
+
+ if (!put_address_handler(handler))
+ wait_for_completion(&handler->done);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
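
The helpers added above implement a common teardown idiom: each in-flight callback pins the handler with a kref, and fw_core_remove_address_handler() drops its own reference and, if callbacks are still running, sleeps on the completion that the final kref_put() signals. A generic sketch of that idiom, with illustrative names:

#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/container_of.h>

struct demo_handler {
	struct kref kref;		/* one reference per in-flight callback, plus the owner's */
	struct completion done;		/* signalled when the last reference is dropped */
};

static void demo_handler_release(struct kref *kref)
{
	struct demo_handler *h = container_of(kref, struct demo_handler, kref);

	complete(&h->done);
}

static void demo_handler_remove(struct demo_handler *h)
{
	/* kref_put() returns nonzero only when it released the last reference. */
	if (!kref_put(&h->kref, demo_handler_release))
		wait_for_completion(&h->done);
}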
@@ -913,22 +936,31 @@ static void handle_exclusive_region_request(struct fw_card *card,
handler = lookup_enclosing_address_handler(&address_handler_list, offset,
request->length);
if (handler)
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ get_address_handler(handler);
}
- if (!handler)
+ if (!handler) {
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ return;
+ }
+
+ // Invoke the callback outside the RCU read-side critical section, without the spinlock,
+ // but with a reference count held.
+ handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
+ request->data, request->length, handler->callback_data);
+ put_address_handler(handler);
}
+// To use the kmalloc allocator efficiently, this should be a power of two.
+#define BUFFER_ON_KERNEL_STACK_SIZE 4
+
static void handle_fcp_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
- struct fw_address_handler *handler;
- int tcode, destination, source;
+ struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
+ struct fw_address_handler *handler, **handlers;
+ int tcode, destination, source, i, count, buffer_size;
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
@@ -949,15 +981,55 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
+ count = 0;
+ handlers = buffer_on_kernel_stack;
+ buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
scoped_guard(rcu) {
list_for_each_entry_rcu(handler, &address_handler_list, link) {
- if (is_enclosing_handler(handler, offset, request->length))
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ if (is_enclosing_handler(handler, offset, request->length)) {
+ if (count >= buffer_size) {
+ int next_size = buffer_size * 2;
+ struct fw_address_handler **buffer_on_kernel_heap;
+
+ if (handlers == buffer_on_kernel_stack)
+ buffer_on_kernel_heap = NULL;
+ else
+ buffer_on_kernel_heap = handlers;
+
+ buffer_on_kernel_heap =
+ krealloc_array(buffer_on_kernel_heap, next_size,
+ sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
+ // FCP is used for purposes unrelated to significant system
+ // resources (e.g. storage or networking), so allocation
+ // failures are not considered particularly critical.
+ if (!buffer_on_kernel_heap)
+ break;
+
+ if (handlers == buffer_on_kernel_stack) {
+ memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
+ sizeof(buffer_on_kernel_stack));
+ }
+
+ handlers = buffer_on_kernel_heap;
+ buffer_size = next_size;
+ }
+ get_address_handler(handler);
+ handlers[count++] = handler;
+ }
}
}
+ for (i = 0; i < count; ++i) {
+ handler = handlers[i];
+ handler->address_callback(card, request, tcode, destination, source,
+ p->generation, offset, request->data,
+ request->length, handler->callback_data);
+ put_address_handler(handler);
+ }
+
+ if (handlers != buffer_on_kernel_stack)
+ kfree(handlers);
+
fw_send_response(card, request, RCODE_COMPLETE);
}
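
The FCP path above collects matching handlers into a small on-stack array and falls back to the heap only when more than four handlers match; passing NULL to krealloc_array() on the first growth makes it behave like kmalloc_array(), after which the stack contents are copied over once. A standalone sketch of that growth pattern, with illustrative names:

#include <linux/slab.h>
#include <linux/string.h>

#define STACK_SLOTS 4	/* small power of two to match kmalloc size buckets */

static void demo_collect(void *items[], int nitems)
{
	void *stack_buf[STACK_SLOTS];
	void **buf = stack_buf;
	int capacity = STACK_SLOTS, count = 0, i;

	for (i = 0; i < nitems; i++) {
		if (count >= capacity) {
			int next = capacity * 2;
			void **heap = (buf == stack_buf) ? NULL : buf;

			heap = krealloc_array(heap, next, sizeof(*heap), GFP_ATOMIC);
			if (!heap)
				break;	/* tolerate the failure, as the FCP path does */
			if (buf == stack_buf)
				memcpy(heap, stack_buf, sizeof(stack_buf));

			buf = heap;
			capacity = next;
		}
		buf[count++] = items[i];
	}

	/* ... consume buf[0..count) ... */

	if (buf != stack_buf)
		kfree(buf);
}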
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 1bf0e15c1540..6d6446713539 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1007,7 +1007,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
- /* If the AT tasklet already ran, we may be last user. */
+ /* If the AT work item already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
@@ -1026,7 +1026,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
- /* If the AT tasklet already ran, we may be last user. */
+ /* If the AT work item already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index edaedd156a6d..5d8301b0f3aa 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -101,7 +101,7 @@ struct ar_context {
void *pointer;
unsigned int last_buffer_index;
u32 regs;
- struct tasklet_struct tasklet;
+ struct work_struct work;
};
struct context;
@@ -128,7 +128,6 @@ struct context {
int total_allocation;
u32 current_bus;
bool running;
- bool flushing;
/*
* List of page-sized buffers for storing DMA descriptors.
@@ -157,8 +156,12 @@ struct context {
int prev_z;
descriptor_callback_t callback;
+};
- struct tasklet_struct tasklet;
+struct at_context {
+ struct context context;
+ struct work_struct work;
+ bool flushing;
};
struct iso_context {
@@ -204,8 +207,8 @@ struct fw_ohci {
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
- struct context at_request_ctx;
- struct context at_response_ctx;
+ struct at_context at_request_ctx;
+ struct at_context at_response_ctx;
u32 it_context_support;
u32 it_context_mask; /* unoccupied IT contexts */
@@ -1016,9 +1019,9 @@ static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
}
}
-static void ar_context_tasklet(unsigned long data)
+static void ohci_ar_context_work(struct work_struct *work)
{
- struct ar_context *ctx = (struct ar_context *)data;
+ struct ar_context *ctx = from_work(ctx, work, work);
unsigned int end_buffer_index, end_buffer_offset;
void *p, *end;
@@ -1026,23 +1029,19 @@ static void ar_context_tasklet(unsigned long data)
if (!p)
return;
- end_buffer_index = ar_search_last_active_buffer(ctx,
- &end_buffer_offset);
+ end_buffer_index = ar_search_last_active_buffer(ctx, &end_buffer_offset);
ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
if (end_buffer_index < ar_first_buffer_index(ctx)) {
- /*
- * The filled part of the overall buffer wraps around; handle
- * all packets up to the buffer end here. If the last packet
- * wraps around, its tail will be visible after the buffer end
- * because the buffer start pages are mapped there again.
- */
+ // The filled part of the overall buffer wraps around; handle all packets up to the
+ // buffer end here. If the last packet wraps around, its tail will be visible after
+ // the buffer end because the buffer start pages are mapped there again.
void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
p = handle_ar_packets(ctx, p, buffer_end);
if (p < buffer_end)
goto error;
- /* adjust p to point back into the actual buffer */
+ // adjust p to point back into the actual buffer
p -= AR_BUFFERS * PAGE_SIZE;
}
@@ -1057,7 +1056,6 @@ static void ar_context_tasklet(unsigned long data)
ar_recycle_buffers(ctx, end_buffer_index);
return;
-
error:
ctx->pointer = NULL;
}
@@ -1073,7 +1071,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
ctx->regs = regs;
ctx->ohci = ohci;
- tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
+ INIT_WORK(&ctx->work, ohci_ar_context_work);
for (i = 0; i < AR_BUFFERS; i++) {
ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
@@ -1181,16 +1179,16 @@ static void context_retire_descriptors(struct context *ctx)
}
}
-static void context_tasklet(unsigned long data)
+static void ohci_at_context_work(struct work_struct *work)
{
- struct context *ctx = (struct context *) data;
+ struct at_context *ctx = from_work(ctx, work, work);
- context_retire_descriptors(ctx);
+ context_retire_descriptors(&ctx->context);
}
static void ohci_isoc_context_work(struct work_struct *work)
{
- struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
+ struct fw_iso_context *base = from_work(base, work, work);
struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
context_retire_descriptors(&isoc_ctx->context);
@@ -1248,7 +1246,6 @@ static int context_init(struct context *ctx, struct fw_ohci *ohci,
ctx->buffer_tail = list_entry(ctx->buffer_list.next,
struct descriptor_buffer, list);
- tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
ctx->callback = callback;
/*
@@ -1388,17 +1385,17 @@ struct driver_data {
* Must always be called with the ohci->lock held to ensure proper
* generation handling and locking around packet queue manipulation.
*/
-static int at_context_queue_packet(struct context *ctx,
- struct fw_packet *packet)
+static int at_context_queue_packet(struct at_context *ctx, struct fw_packet *packet)
{
- struct fw_ohci *ohci = ctx->ohci;
+ struct context *context = &ctx->context;
+ struct fw_ohci *ohci = context->ohci;
dma_addr_t d_bus, payload_bus;
struct driver_data *driver_data;
struct descriptor *d, *last;
__le32 *header;
int z, tcode;
- d = context_get_descriptors(ctx, 4, &d_bus);
+ d = context_get_descriptors(context, 4, &d_bus);
if (d == NULL) {
packet->ack = RCODE_SEND_ERROR;
return -1;
@@ -1428,7 +1425,7 @@ static int at_context_queue_packet(struct context *ctx,
ohci1394_at_data_set_destination_id(header,
async_header_get_destination(packet->header));
- if (ctx == &ctx->ohci->at_response_ctx) {
+ if (ctx == &ohci->at_response_ctx) {
ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
} else {
ohci1394_at_data_set_destination_offset(header,
@@ -1517,37 +1514,42 @@ static int at_context_queue_packet(struct context *ctx,
return -1;
}
- context_append(ctx, d, z, 4 - z);
+ context_append(context, d, z, 4 - z);
- if (ctx->running)
- reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+ if (context->running)
+ reg_write(ohci, CONTROL_SET(context->regs), CONTEXT_WAKE);
else
- context_run(ctx, 0);
+ context_run(context, 0);
return 0;
}
-static void at_context_flush(struct context *ctx)
+static void at_context_flush(struct at_context *ctx)
{
- tasklet_disable(&ctx->tasklet);
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return;
- ctx->flushing = true;
- context_tasklet((unsigned long)ctx);
- ctx->flushing = false;
+ disable_work_sync(&ctx->work);
- tasklet_enable(&ctx->tasklet);
+ WRITE_ONCE(ctx->flushing, true);
+ ohci_at_context_work(&ctx->work);
+ WRITE_ONCE(ctx->flushing, false);
+
+ enable_work(&ctx->work);
}
static int handle_at_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
+ struct at_context *ctx = container_of(context, struct at_context, context);
+ struct fw_ohci *ohci = ctx->context.ohci;
struct driver_data *driver_data;
struct fw_packet *packet;
- struct fw_ohci *ohci = context->ohci;
int evt;
- if (last->transfer_status == 0 && !context->flushing)
+ if (last->transfer_status == 0 && !READ_ONCE(ctx->flushing))
/* This descriptor isn't done yet, stop iteration. */
return 0;
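
at_context_flush() above replaces tasklet_disable()/tasklet_enable() with disable_work_sync()/enable_work(); since disable_work_sync() waits for a running instance of the work item, calling it from that very work item would deadlock, which is what the current_work() guard catches. A sketch of the shape of that flush, assuming a hypothetical context structure:

#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/bug.h>

struct demo_ctx {
	struct work_struct work;
	bool flushing;
};

static void demo_work(struct work_struct *work)
{
	/* ... retire completed descriptors ... */
}

static void demo_flush(struct demo_ctx *ctx)
{
	/* disable_work_sync() flushes the running instance; from inside it, that never returns. */
	if (WARN_ON_ONCE(current_work() == &ctx->work))
		return;

	disable_work_sync(&ctx->work);		/* no new runs until enable_work() */

	WRITE_ONCE(ctx->flushing, true);
	demo_work(&ctx->work);			/* drain synchronously in the caller's context */
	WRITE_ONCE(ctx->flushing, false);

	enable_work(&ctx->work);
}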
@@ -1581,7 +1583,7 @@ static int handle_at_packet(struct context *context,
break;
case OHCI1394_evt_missing_ack:
- if (context->flushing)
+ if (READ_ONCE(ctx->flushing))
packet->ack = RCODE_GENERATION;
else {
/*
@@ -1603,7 +1605,7 @@ static int handle_at_packet(struct context *context,
break;
case OHCI1394_evt_no_status:
- if (context->flushing) {
+ if (READ_ONCE(ctx->flushing)) {
packet->ack = RCODE_GENERATION;
break;
}
@@ -1700,13 +1702,14 @@ static void handle_local_lock(struct fw_ohci *ohci,
fw_core_handle_response(&ohci->card, &response);
}
-static void handle_local_request(struct context *ctx, struct fw_packet *packet)
+static void handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
+ struct fw_ohci *ohci = ctx->context.ohci;
u64 offset, csr;
- if (ctx == &ctx->ohci->at_request_ctx) {
+ if (ctx == &ohci->at_request_ctx) {
packet->ack = ACK_PENDING;
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
offset = async_header_get_offset(packet->header);
@@ -1714,54 +1717,55 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
/* Handle config rom reads. */
if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
- handle_local_rom(ctx->ohci, packet, csr);
+ handle_local_rom(ohci, packet, csr);
else switch (csr) {
case CSR_BUS_MANAGER_ID:
case CSR_BANDWIDTH_AVAILABLE:
case CSR_CHANNELS_AVAILABLE_HI:
case CSR_CHANNELS_AVAILABLE_LO:
- handle_local_lock(ctx->ohci, packet, csr);
+ handle_local_lock(ohci, packet, csr);
break;
default:
- if (ctx == &ctx->ohci->at_request_ctx)
- fw_core_handle_request(&ctx->ohci->card, packet);
+ if (ctx == &ohci->at_request_ctx)
+ fw_core_handle_request(&ohci->card, packet);
else
- fw_core_handle_response(&ctx->ohci->card, packet);
+ fw_core_handle_response(&ohci->card, packet);
break;
}
- if (ctx == &ctx->ohci->at_response_ctx) {
+ if (ctx == &ohci->at_response_ctx) {
packet->ack = ACK_COMPLETE;
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
}
-static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
+static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
+ struct fw_ohci *ohci = ctx->context.ohci;
unsigned long flags;
int ret;
- spin_lock_irqsave(&ctx->ohci->lock, flags);
+ spin_lock_irqsave(&ohci->lock, flags);
- if (async_header_get_destination(packet->header) == ctx->ohci->node_id &&
- ctx->ohci->generation == packet->generation) {
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+ if (async_header_get_destination(packet->header) == ohci->node_id &&
+ ohci->generation == packet->generation) {
+ spin_unlock_irqrestore(&ohci->lock, flags);
// Timestamping on behalf of the hardware.
- packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
handle_local_request(ctx, packet);
return;
}
ret = at_context_queue_packet(ctx, packet);
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+ spin_unlock_irqrestore(&ohci->lock, flags);
if (ret < 0) {
// Timestamping on behalf of the hardware.
- packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
}
@@ -2028,8 +2032,7 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
static void bus_reset_work(struct work_struct *work)
{
- struct fw_ohci *ohci =
- container_of(work, struct fw_ohci, bus_reset_work);
+ struct fw_ohci *ohci = from_work(ohci, work, bus_reset_work);
int self_id_count, generation, new_generation, i, j;
u32 reg, quadlet;
void *free_rom = NULL;
@@ -2141,8 +2144,8 @@ static void bus_reset_work(struct work_struct *work)
// FIXME: Document how the locking works.
scoped_guard(spinlock_irq, &ohci->lock) {
ohci->generation = -1; // prevent AT packet queueing
- context_stop(&ohci->at_request_ctx);
- context_stop(&ohci->at_response_ctx);
+ context_stop(&ohci->at_request_ctx.context);
+ context_stop(&ohci->at_response_ctx.context);
}
/*
@@ -2239,16 +2242,16 @@ static irqreturn_t irq_handler(int irq, void *data)
}
if (event & OHCI1394_RQPkt)
- tasklet_schedule(&ohci->ar_request_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
if (event & OHCI1394_RSPkt)
- tasklet_schedule(&ohci->ar_response_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work);
if (event & OHCI1394_reqTxComplete)
- tasklet_schedule(&ohci->at_request_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->at_request_ctx.work);
if (event & OHCI1394_respTxComplete)
- tasklet_schedule(&ohci->at_response_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->at_response_ctx.work);
if (event & OHCI1394_isochRx) {
iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
@@ -2528,7 +2531,7 @@ static int ohci_enable(struct fw_card *card,
* They shouldn't do that in this initial case where the link
* isn't enabled. This means we have to use the same
* workaround here, setting the bus header to 0 and then write
- * the right values in the bus reset tasklet.
+ * the right values in the bus reset work item.
*/
if (config_rom) {
@@ -2617,7 +2620,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* during the atomic update, even on little endian
* architectures. The workaround we use is to put a 0 in the
* header quadlet; 0 is endian agnostic and means that the
- * config rom isn't ready yet. In the bus reset tasklet we
+ * config rom isn't ready yet. In the bus reset work item we
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
@@ -2659,7 +2662,7 @@ static int ohci_set_config_rom(struct fw_card *card,
/*
* Now initiate a bus reset to have the changes take
* effect. We clean up the old config rom memory and DMA
- * mappings in the bus reset tasklet, since the OHCI
+ * mappings in the bus reset work item, since the OHCI
* controller could need to access it before the bus reset
* takes effect.
*/
@@ -2686,11 +2689,14 @@ static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct context *ctx = &ohci->at_request_ctx;
+ struct at_context *ctx = &ohci->at_request_ctx;
struct driver_data *driver_data = packet->driver_data;
int ret = -ENOENT;
- tasklet_disable_in_atomic(&ctx->tasklet);
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return 0;
+ disable_work_sync(&ctx->work);
if (packet->ack != 0)
goto out;
@@ -2709,7 +2715,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
packet->callback(packet, &ohci->card, packet->ack);
ret = 0;
out:
- tasklet_enable(&ctx->tasklet);
+ enable_work(&ctx->work);
return ret;
}
@@ -3767,15 +3773,17 @@ static int pci_probe(struct pci_dev *dev,
if (err < 0)
return err;
- err = context_init(&ohci->at_request_ctx, ohci,
+ err = context_init(&ohci->at_request_ctx.context, ohci,
OHCI1394_AsReqTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
+ INIT_WORK(&ohci->at_request_ctx.work, ohci_at_context_work);
- err = context_init(&ohci->at_response_ctx, ohci,
+ err = context_init(&ohci->at_response_ctx.context, ohci,
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
+ INIT_WORK(&ohci->at_response_ctx.work, ohci_at_context_work);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 16baa038d412..d528c94c5859 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -263,6 +263,14 @@ config EFI_COCO_SECRET
virt/coco/efi_secret module to access the secrets, which in turn
allows userspace programs to access the injected secrets.
+config OVMF_DEBUG_LOG
+ bool "Expose OVMF firmware debug log via sysfs"
+ depends on EFI
+ help
+ Recent OVMF versions (edk2-stable202508 and newer) can write
+ their debug log to a memory buffer. This driver exposes the
+ log content via sysfs (/sys/firmware/efi/ovmf_debug_log).
+
config UNACCEPTED_MEMORY
bool
depends on EFI_STUB
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index a2d0009560d0..8efbcf699e4f 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o
+obj-$(CONFIG_OVMF_DEBUG_LOG) += ovmf-debug-log.o
obj-$(CONFIG_SYSFB) += sysfb_efi.o
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e57bff702b5f..1ce428e2ac8a 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -45,6 +45,7 @@ struct efi __read_mostly efi = {
.esrt = EFI_INVALID_TABLE_ADDR,
.tpm_log = EFI_INVALID_TABLE_ADDR,
.tpm_final_log = EFI_INVALID_TABLE_ADDR,
+ .ovmf_debug_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
.mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
@@ -473,6 +474,10 @@ static int __init efisubsys_init(void)
platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif
+ if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) &&
+ efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR)
+ ovmf_log_probe(efi.ovmf_debug_log);
+
return 0;
err_remove_group:
@@ -617,6 +622,9 @@ static const efi_config_table_type_t common_tables[] __initconst = {
{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
+#ifdef CONFIG_OVMF_DEBUG_LOG
+ {OVMF_MEMORY_LOG_TABLE_GUID, &efi.ovmf_debug_log, "OvmfDebugLog" },
+#endif
#ifdef CONFIG_EFI_RCI2_TABLE
{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
diff --git a/drivers/firmware/efi/libstub/printk.c b/drivers/firmware/efi/libstub/printk.c
index 3a67a2cea7bd..bc599212c05d 100644
--- a/drivers/firmware/efi/libstub/printk.c
+++ b/drivers/firmware/efi/libstub/printk.c
@@ -5,13 +5,13 @@
#include <linux/ctype.h>
#include <linux/efi.h>
#include <linux/kernel.h>
-#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */
+#include <linux/kern_levels.h>
#include <asm/efi.h>
#include <asm/setup.h>
#include "efistub.h"
-int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+int efi_loglevel = LOGLEVEL_NOTICE;
/**
* efi_char16_puts() - Write a UCS-2 encoded string to the console
diff --git a/drivers/firmware/efi/ovmf-debug-log.c b/drivers/firmware/efi/ovmf-debug-log.c
new file mode 100644
index 000000000000..5b2471ffaeed
--- /dev/null
+++ b/drivers/firmware/efi/ovmf-debug-log.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#define OVMF_DEBUG_LOG_MAGIC1 0x3167646d666d766f // "ovmfmdg1"
+#define OVMF_DEBUG_LOG_MAGIC2 0x3267646d666d766f // "ovmfmdg2"
+
+struct ovmf_debug_log_header {
+ u64 magic1;
+ u64 magic2;
+ u64 hdr_size;
+ u64 log_size;
+ u64 lock; // edk2 spinlock
+ u64 head_off;
+ u64 tail_off;
+ u64 truncated;
+ u8 fw_version[128];
+};
+
+static struct ovmf_debug_log_header *hdr;
+static u8 *logbuf;
+static u64 logbufsize;
+
+static ssize_t ovmf_log_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ u64 start, end;
+
+ start = hdr->head_off + offset;
+ if (hdr->head_off > hdr->tail_off && start >= hdr->log_size)
+ start -= hdr->log_size;
+
+ end = start + count;
+ if (start > hdr->tail_off) {
+ if (end > hdr->log_size)
+ end = hdr->log_size;
+ } else {
+ if (end > hdr->tail_off)
+ end = hdr->tail_off;
+ }
+
+ if (start > logbufsize || end > logbufsize)
+ return 0;
+ if (start >= end)
+ return 0;
+
+ memcpy(buf, logbuf + start, end - start);
+ return end - start;
+}
+
+static struct bin_attribute ovmf_log_bin_attr = {
+ .attr = {
+ .name = "ovmf_debug_log",
+ .mode = 0444,
+ },
+ .read = ovmf_log_read,
+};
+
+int __init ovmf_log_probe(unsigned long ovmf_debug_log_table)
+{
+ int ret = -EINVAL;
+ u64 size;
+
+ /* map + verify header */
+ hdr = memremap(ovmf_debug_log_table, sizeof(*hdr), MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: header map failed\n");
+ return -EINVAL;
+ }
+
+ if (hdr->magic1 != OVMF_DEBUG_LOG_MAGIC1 ||
+ hdr->magic2 != OVMF_DEBUG_LOG_MAGIC2) {
+ pr_err("OVMF debug log: magic mismatch\n");
+ goto err_unmap;
+ }
+
+ size = hdr->hdr_size + hdr->log_size;
+ pr_info("OVMF debug log: firmware version: \"%s\"\n", hdr->fw_version);
+ pr_info("OVMF debug log: buffer size: %lluk\n", size / 1024);
+
+ /* map complete log buffer */
+ memunmap(hdr);
+ hdr = memremap(ovmf_debug_log_table, size, MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: buffer map failed\n");
+ return -EINVAL;
+ }
+ logbuf = (void *)hdr + hdr->hdr_size;
+ logbufsize = hdr->log_size;
+
+ ovmf_log_bin_attr.size = size;
+ ret = sysfs_create_bin_file(efi_kobj, &ovmf_log_bin_attr);
+ if (ret != 0) {
+ pr_err("OVMF debug log: sysfs register failed\n");
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ memunmap(hdr);
+ return ret;
+}
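
Once sysfs_create_bin_file() succeeds, the buffer is readable from userspace at /sys/firmware/efi/ovmf_debug_log. A minimal userspace sketch for dumping it (no kernel API involved):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/efi/ovmf_debug_log", "r");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}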
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 0be0d569589d..b7629a0e4813 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
}
}
- priv->dma_nelms =
- dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
- if (priv->dma_nelms == 0) {
+ err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
+ if (err) {
dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
- return -ENOMEM;
+ return err;
}
+ priv->dma_nelms = sgt->nents;
/* enable clock */
err = clk_enable(priv->clk);
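
The zynq-fpga fix above follows the dma_map_sgtable() convention: the function returns 0 or a negative errno, and the number of mapped entries lands in sgt->nents rather than in the return value (unlike the older dma_map_sg()). A short sketch of that calling convention, assuming a device and an already populated sg_table:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int demo_map_for_device(struct device *dev, struct sg_table *sgt)
{
	int err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);

	if (err)
		return err;	/* already a -errno; no need to invent -ENOMEM */

	dev_dbg(dev, "mapped %u DMA entries\n", sgt->nents);
	return 0;
}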
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 4dd5c2c330bb..c226524efeba 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -141,8 +141,8 @@ static int gen_74x164_probe(struct spi_device *spi)
chip->gpio_chip.label = spi->modalias;
chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
- chip->gpio_chip.set_rv = gen_74x164_set_value;
- chip->gpio_chip.set_multiple_rv = gen_74x164_set_multiple;
+ chip->gpio_chip.set = gen_74x164_set_value;
+ chip->gpio_chip.set_multiple = gen_74x164_set_multiple;
chip->gpio_chip.base = -1;
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
chip->gpio_chip.can_sleep = true;
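
This and the following GPIO hunks rename the transitional .set_rv/.set_multiple_rv hooks to .set/.set_multiple, which now carry the int-returning prototypes. A hedged sketch of a setter with that signature, with illustrative names and a trivial in-memory backend:

#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/errno.h>

struct demo_gpio {
	struct gpio_chip gc;
	unsigned long out_state;
};

/* Setters wired to .set now return int so I/O errors can propagate to gpiolib. */
static int demo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct demo_gpio *chip = gpiochip_get_data(gc);

	if (offset >= gc->ngpio)
		return -EINVAL;
	__assign_bit(offset, &chip->out_state, value);
	return 0;
}

static void demo_gpio_setup(struct demo_gpio *chip)
{
	chip->gc.set = demo_gpio_set;	/* was .set_rv while both prototypes coexisted */
}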
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index dc2b941c3726..e5ac2d211013 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -430,7 +430,7 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
chip->direction_input = adnp_gpio_direction_input;
chip->direction_output = adnp_gpio_direction_output;
chip->get = adnp_gpio_get;
- chip->set_rv = adnp_gpio_set;
+ chip->set = adnp_gpio_set;
chip->can_sleep = true;
if (IS_ENABLED(CONFIG_DEBUG_FS))
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 57d12c10cbda..6305c8b7dc05 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -122,7 +122,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5520_gpio_direction_input;
gc->direction_output = adp5520_gpio_direction_output;
gc->get = adp5520_gpio_get_value;
- gc->set_rv = adp5520_gpio_set_value;
+ gc->set = adp5520_gpio_set_value;
gc->can_sleep = true;
gc->base = pdata->gpio_start;
diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c
index b2c8836c5f84..0fd3cc26d017 100644
--- a/drivers/gpio/gpio-adp5585.c
+++ b/drivers/gpio/gpio-adp5585.c
@@ -428,7 +428,7 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5585_gpio_direction_input;
gc->direction_output = adp5585_gpio_direction_output;
gc->get = adp5585_gpio_get_value;
- gc->set_rv = adp5585_gpio_set_value;
+ gc->set = adp5585_gpio_set_value;
gc->set_config = adp5585_gpio_set_config;
gc->request = adp5585_gpio_request;
gc->free = adp5585_gpio_free;
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 6f941db02c04..af9d8b3a711d 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -534,8 +534,8 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
chip->direction_output = gpio_fwd_direction_output;
chip->get = gpio_fwd_get;
chip->get_multiple = gpio_fwd_get_multiple_locked;
- chip->set_rv = gpio_fwd_set;
- chip->set_multiple_rv = gpio_fwd_set_multiple_locked;
+ chip->set = gpio_fwd_set;
+ chip->set_multiple = gpio_fwd_set_multiple_locked;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 77a674cf99e4..4524c18a87e7 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -69,7 +69,7 @@ static const struct gpio_chip altr_a10sr_gc = {
.label = "altr_a10sr_gpio",
.owner = THIS_MODULE,
.get = altr_a10sr_gpio_get,
- .set_rv = altr_a10sr_gpio_set,
+ .set = altr_a10sr_gpio_set,
.direction_input = altr_a10sr_gpio_direction_input,
.direction_output = altr_a10sr_gpio_direction_output,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 1b28525726d7..9508d764cce4 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -259,7 +259,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->gc.direction_input = altera_gpio_direction_input;
altera_gc->gc.direction_output = altera_gpio_direction_output;
altera_gc->gc.get = altera_gpio_get;
- altera_gc->gc.set_rv = altera_gpio_set;
+ altera_gc->gc.set = altera_gpio_set;
altera_gc->gc.owner = THIS_MODULE;
altera_gc->gc.parent = &pdev->dev;
altera_gc->gc.base = -1;
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index f8d0cea46049..e6c6c3ec7656 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -165,7 +165,7 @@ static int amd_fch_gpio_probe(struct platform_device *pdev)
priv->gc.direction_output = amd_fch_gpio_direction_output;
priv->gc.get_direction = amd_fch_gpio_get_direction;
priv->gc.get = amd_fch_gpio_get;
- priv->gc.set_rv = amd_fch_gpio_set;
+ priv->gc.set = amd_fch_gpio_set;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 425d8472f744..15fd5e210d74 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -165,7 +165,7 @@ static struct amd_gpio gp = {
.ngpio = 32,
.request = amd_gpio_request,
.free = amd_gpio_free,
- .set_rv = amd_gpio_set,
+ .set = amd_gpio_set,
.get = amd_gpio_get,
.direction_output = amd_gpio_dirout,
.direction_input = amd_gpio_dirin,
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 89ffde693019..a7e98d395d8e 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -138,7 +138,7 @@ static const struct gpio_chip template_chip = {
.direction_input = arizona_gpio_direction_in,
.get = arizona_gpio_get,
.direction_output = arizona_gpio_direction_out,
- .set_rv = arizona_gpio_set,
+ .set = arizona_gpio_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 00b31497ecff..7622f9e9f54a 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -596,7 +596,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
gpio->chip.request = NULL;
gpio->chip.free = NULL;
gpio->chip.get = aspeed_sgpio_get;
- gpio->chip.set_rv = aspeed_sgpio_set;
+ gpio->chip.set = aspeed_sgpio_set;
gpio->chip.set_config = aspeed_sgpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 2d340a343a17..7953a9c4e36d 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1352,7 +1352,7 @@ static int aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.request = aspeed_gpio_request;
gpio->chip.free = aspeed_gpio_free;
gpio->chip.get = aspeed_gpio_get;
- gpio->chip.set_rv = aspeed_gpio_set;
+ gpio->chip.set = aspeed_gpio_set;
gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 8f22cb36004d..208b71c59d58 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -339,7 +339,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bcm_kona_gpio_direction_input,
.get = bcm_kona_gpio_get,
.direction_output = bcm_kona_gpio_direction_output,
- .set_rv = bcm_kona_gpio_set,
+ .set = bcm_kona_gpio_set,
.set_config = bcm_kona_gpio_set_config,
.to_irq = bcm_kona_gpio_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-bd71815.c b/drivers/gpio/gpio-bd71815.c
index 36701500925e..afb18a5a9d79 100644
--- a/drivers/gpio/gpio-bd71815.c
+++ b/drivers/gpio/gpio-bd71815.c
@@ -85,7 +85,7 @@ static const struct gpio_chip bd71815gpo_chip = {
.owner = THIS_MODULE,
.get = bd71815gpo_get,
.get_direction = bd71815gpo_direction_get,
- .set_rv = bd71815gpo_set,
+ .set = bd71815gpo_set,
.set_config = bd71815_gpio_set_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index 4ba151e5cf25..e439dbfffc62 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -109,7 +109,7 @@ static int bd71828_probe(struct platform_device *pdev)
bdgpio->gpio.set_config = bd71828_gpio_set_config;
bdgpio->gpio.can_sleep = true;
bdgpio->gpio.get = bd71828_gpio_get;
- bdgpio->gpio.set_rv = bd71828_gpio_set;
+ bdgpio->gpio.set = bd71828_gpio_set;
bdgpio->gpio.base = -1;
/*
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index 8df1361e3e84..7c95bb36511e 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -88,7 +88,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bd9571mwv_gpio_direction_input,
.direction_output = bd9571mwv_gpio_direction_output,
.get = bd9571mwv_gpio_get,
- .set_rv = bd9571mwv_gpio_set,
+ .set = bd9571mwv_gpio_set,
.base = -1,
.ngpio = 2,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 7c9e81fea37a..05401da03ca3 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -145,7 +145,7 @@ static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
c->direction_input = bt8xxgpio_gpio_direction_input;
c->get = bt8xxgpio_gpio_get;
c->direction_output = bt8xxgpio_gpio_direction_output;
- c->set_rv = bt8xxgpio_gpio_set;
+ c->set = bt8xxgpio_gpio_set;
c->dbg_show = NULL;
c->base = modparam_gpiobase;
c->ngpio = BT8XXGPIO_NR_GPIOS;
diff --git a/drivers/gpio/gpio-cgbc.c b/drivers/gpio/gpio-cgbc.c
index 1495bec62456..0efa1b61001a 100644
--- a/drivers/gpio/gpio-cgbc.c
+++ b/drivers/gpio/gpio-cgbc.c
@@ -171,7 +171,7 @@ static int cgbc_gpio_probe(struct platform_device *pdev)
chip->direction_output = cgbc_gpio_direction_output;
chip->get_direction = cgbc_gpio_get_direction;
chip->get = cgbc_gpio_get;
- chip->set_rv = cgbc_gpio_set;
+ chip->set = cgbc_gpio_set;
chip->ngpio = CGBC_GPIO_NGPIO;
ret = devm_mutex_init(dev, &gpio->lock);
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 8b49f02c7896..f8ea961fa1de 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -167,7 +167,7 @@ static int creg_gpio_probe(struct platform_device *pdev)
hcg->gc.label = dev_name(dev);
hcg->gc.base = -1;
hcg->gc.ngpio = ngpios;
- hcg->gc.set_rv = creg_gpio_set;
+ hcg->gc.set = creg_gpio_set;
hcg->gc.direction_output = creg_gpio_dir_out;
ret = devm_gpiochip_add_data(dev, &hcg->gc, hcg);
diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c
index 53cd5ff6247b..435483826c6e 100644
--- a/drivers/gpio/gpio-cros-ec.c
+++ b/drivers/gpio/gpio-cros-ec.c
@@ -188,7 +188,7 @@ static int cros_ec_gpio_probe(struct platform_device *pdev)
gc->can_sleep = true;
gc->label = dev_name(dev);
gc->base = -1;
- gc->set_rv = cros_ec_gpio_set;
+ gc->set = cros_ec_gpio_set;
gc->get = cros_ec_gpio_get;
gc->get_direction = cros_ec_gpio_get_direction;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 8db7cca3a060..0fb5c06d0886 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -349,7 +349,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
cg->chip.direction_input = crystalcove_gpio_dir_in;
cg->chip.direction_output = crystalcove_gpio_dir_out;
cg->chip.get = crystalcove_gpio_get;
- cg->chip.set_rv = crystalcove_gpio_set;
+ cg->chip.set = crystalcove_gpio_set;
cg->chip.base = -1;
cg->chip.ngpio = CRYSTALCOVE_VGPIO_NUM;
cg->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 143d1f4173a6..8affe4e9f90e 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -296,7 +296,7 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = {
.request = chip_gpio_request,
.get = chip_gpio_get,
- .set_rv = chip_gpio_set,
+ .set = chip_gpio_set,
.direction_input = chip_direction_input,
.direction_output = chip_direction_output,
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 6482c5b267db..495f0ee58505 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -172,7 +172,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9052-gpio",
.owner = THIS_MODULE,
.get = da9052_gpio_get,
- .set_rv = da9052_gpio_set,
+ .set = da9052_gpio_set,
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 3d9d0c700100..a09bd6eb93cf 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -116,7 +116,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9055-gpio",
.owner = THIS_MODULE,
.get = da9055_gpio_get,
- .set_rv = da9055_gpio_set,
+ .set = da9055_gpio_set,
.direction_input = da9055_gpio_direction_input,
.direction_output = da9055_gpio_direction_output,
.to_irq = da9055_gpio_to_irq,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 8f3a36d0191d..538f27209ce7 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -202,7 +202,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.direction_input = davinci_direction_in;
chips->chip.get = davinci_gpio_get;
chips->chip.direction_output = davinci_direction_out;
- chips->chip.set_rv = davinci_gpio_set;
+ chips->chip.set = davinci_gpio_set;
chips->chip.ngpio = ngpio;
chips->chip.base = -1;
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 4bd3c47eaf93..4670ffd7ea7f 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -469,7 +469,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.base = -1;
dln2->gpio.ngpio = pins;
dln2->gpio.can_sleep = true;
- dln2->gpio.set_rv = dln2_gpio_set;
+ dln2->gpio.set = dln2_gpio_set;
dln2->gpio.get = dln2_gpio_get;
dln2->gpio.request = dln2_gpio_request;
dln2->gpio.free = dln2_gpio_free;
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index f2973d0b7138..50fafeda8d7e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -663,7 +663,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->chip.request = sprd_eic_request;
sprd_eic->chip.free = sprd_eic_free;
sprd_eic->chip.set_config = sprd_eic_set_config;
- sprd_eic->chip.set_rv = sprd_eic_set;
+ sprd_eic->chip.set = sprd_eic_set;
fallthrough;
case SPRD_EIC_ASYNC:
case SPRD_EIC_SYNC:
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 015f1ac32dd9..a214b0672726 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -306,7 +306,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
- gpio_chip->set_rv = em_gio_set;
+ gpio_chip->set = em_gio_set;
gpio_chip->to_irq = em_gio_to_irq;
gpio_chip->request = pinctrl_gpio_request;
gpio_chip->free = em_gio_free;
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index beb98286d13e..9053662f1817 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -211,7 +211,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
exar_gpio->gpio_chip.get = exar_get_value;
- exar_gpio->gpio_chip.set_rv = exar_set_value;
+ exar_gpio->gpio_chip.set = exar_set_value;
exar_gpio->gpio_chip.base = -1;
exar_gpio->gpio_chip.ngpio = ngpios;
exar_gpio->index = index;
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index dfcd3634f279..4d5b927ad70f 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -173,7 +173,7 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
.direction_input = f7188x_gpio_direction_in, \
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
- .set_rv = f7188x_gpio_set, \
+ .set = f7188x_gpio_set, \
.set_config = f7188x_gpio_set_config, \
.base = -1, \
.ngpio = _ngpio, \
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index f25283e5239d..121bf29a27f5 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -159,7 +159,7 @@ static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
.request = gnr_gpio_request,
.get = gnr_gpio_get,
- .set_rv = gnr_gpio_set,
+ .set = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
.direction_input = gnr_gpio_direction_input,
.direction_output = gnr_gpio_direction_output,
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index a40ba99a3aea..2e5d97b7363f 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -86,7 +86,7 @@ static int gw_pld_probe(struct i2c_client *client)
gw->chip.direction_input = gw_pld_input8;
gw->chip.get = gw_pld_get8;
gw->chip.direction_output = gw_pld_output8;
- gw->chip.set_rv = gw_pld_set8;
+ gw->chip.set = gw_pld_set8;
gw->client = client;
/*
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index b1844a676c7c..2eaed83214d8 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -324,7 +324,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
- chip->set_rv = egpio_set;
+ chip->set = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
chip->get_direction = egpio_get_direction;
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 67089b2423d8..1802c9116ffe 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -273,7 +273,7 @@ static void ichx_gpiolib_setup(struct gpio_chip *chip)
chip->get = ichx_priv.desc->get ?
ichx_priv.desc->get : ichx_gpio_get;
- chip->set_rv = ichx_gpio_set;
+ chip->set = ichx_gpio_set;
chip->get_direction = ichx_gpio_get_direction;
chip->direction_input = ichx_gpio_direction_input;
chip->direction_output = ichx_gpio_direction_output;
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index 1693dbf1b777..0a75afecf9f8 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -102,7 +102,7 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
gc->ngpio = ARRAY_SIZE(scu_rsrc_arr);
gc->label = dev_name(dev);
gc->get = imx_scu_gpio_get;
- gc->set_rv = imx_scu_gpio_set;
+ gc->set = imx_scu_gpio_set;
gc->get_direction = imx_scu_gpio_get_direction;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index d8184b527bac..5d677bcfccf2 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -267,7 +267,7 @@ static const struct gpio_chip it87_template_chip = {
.request = it87_gpio_request,
.get = it87_gpio_get,
.direction_input = it87_gpio_direction_in,
- .set_rv = it87_gpio_set,
+ .set = it87_gpio_set,
.direction_output = it87_gpio_direction_out,
.base = -1
};
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 9f548eda3888..b0c4a3346e7d 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -171,7 +171,7 @@ static int ttl_probe(struct platform_device *pdev)
gpio->parent = &pdev->dev;
gpio->label = pdev->name;
gpio->get = ttl_get_value;
- gpio->set_rv = ttl_set_value;
+ gpio->set = ttl_set_value;
gpio->owner = THIS_MODULE;
/* request dynamic allocation */
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index e38e604baa22..923aad3ab4d4 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -169,7 +169,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
chip->direction_output = kempld_gpio_direction_output;
chip->get_direction = kempld_gpio_get_direction;
chip->get = kempld_gpio_get;
- chip->set_rv = kempld_gpio_set;
+ chip->set = kempld_gpio_set;
chip->ngpio = kempld_gpio_pincount(pld);
if (chip->ngpio == 0) {
dev_err(dev, "No GPIO pins detected\n");
diff --git a/drivers/gpio/gpio-latch.c b/drivers/gpio/gpio-latch.c
index 3d0ff09284fb..c64aaa896766 100644
--- a/drivers/gpio/gpio-latch.c
+++ b/drivers/gpio/gpio-latch.c
@@ -166,11 +166,11 @@ static int gpio_latch_probe(struct platform_device *pdev)
if (gpio_latch_can_sleep(priv, n_latches)) {
priv->gc.can_sleep = true;
- priv->gc.set_rv = gpio_latch_set_can_sleep;
+ priv->gc.set = gpio_latch_set_can_sleep;
mutex_init(&priv->mutex);
} else {
priv->gc.can_sleep = false;
- priv->gc.set_rv = gpio_latch_set;
+ priv->gc.set = gpio_latch_set;
spin_lock_init(&priv->spinlock);
}
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 61524a9ba765..3b4f8830c741 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -437,7 +437,7 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
ljca_gpio->gc.get = ljca_gpio_get_value;
- ljca_gpio->gc.set_rv = ljca_gpio_set_value;
+ ljca_gpio->gc.set = ljca_gpio_set_value;
ljca_gpio->gc.set_config = ljca_gpio_set_config;
ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask;
ljca_gpio->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 19cd2847467c..cb9dbcc290ad 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -134,7 +134,7 @@ static int logicvc_gpio_probe(struct platform_device *pdev)
logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
LOGICVC_POWER_CTRL_GPIO_BITS;
logicvc->chip.get = logicvc_gpio_get;
- logicvc->chip.set_rv = logicvc_gpio_set;
+ logicvc->chip.set = logicvc_gpio_set;
logicvc->chip.direction_output = logicvc_gpio_direction_output;
return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index add09971d26a..818c606fbc51 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -157,7 +157,7 @@ static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgp
lgpio->chip.get = loongson_gpio_get;
lgpio->chip.get_direction = loongson_gpio_get_direction;
lgpio->chip.direction_output = loongson_gpio_direction_output;
- lgpio->chip.set_rv = loongson_gpio_set;
+ lgpio->chip.set = loongson_gpio_set;
lgpio->chip.parent = dev;
spin_lock_init(&lgpio->lock);
}
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index 8f3668169ebf..f3e0559f969d 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -106,7 +106,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
gc->base = 0;
gc->ngpio = LOONGSON_N_GPIO;
gc->get = loongson_gpio_get_value;
- gc->set_rv = loongson_gpio_set_value;
+ gc->set = loongson_gpio_set_value;
gc->direction_input = loongson_gpio_direction_input;
gc->direction_output = loongson_gpio_direction_output;
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 52ab3ac4844c..e8e00daff7df 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -184,7 +184,7 @@ static const struct gpio_chip lp3943_gpio_chip = {
.direction_input = lp3943_gpio_direction_input,
.get = lp3943_gpio_get,
.direction_output = lp3943_gpio_direction_output,
- .set_rv = lp3943_gpio_set,
+ .set = lp3943_gpio_set,
.base = -1,
.ngpio = LP3943_MAX_GPIO,
.can_sleep = 1,
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 1908ed302e92..5376708a81bf 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -124,7 +124,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp873x_gpio_direction_input,
.direction_output = lp873x_gpio_direction_output,
.get = lp873x_gpio_get,
- .set_rv = lp873x_gpio_set,
+ .set = lp873x_gpio_set,
.set_config = lp873x_gpio_set_config,
.base = -1,
.ngpio = 2,
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index 8ea687d5d028..0f337c1283b2 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -139,7 +139,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp87565_gpio_direction_input,
.direction_output = lp87565_gpio_direction_output,
.get = lp87565_gpio_get,
- .set_rv = lp87565_gpio_set,
+ .set = lp87565_gpio_set,
.set_config = lp87565_gpio_set_config,
.base = -1,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index 2dbfbf90176c..37a2342eb2e6 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -327,7 +327,7 @@ static const struct gpio_chip lpc18xx_chip = {
.free = gpiochip_generic_free,
.direction_input = lpc18xx_gpio_direction_input,
.direction_output = lpc18xx_gpio_direction_output,
- .set_rv = lpc18xx_gpio_set,
+ .set = lpc18xx_gpio_set,
.get = lpc18xx_gpio_get,
.ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT,
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 6668b8bd9f1e..37fc54fc7385 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -407,7 +407,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P0_GRP,
@@ -423,7 +423,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P1_GRP,
@@ -439,7 +439,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPIO_P2_GRP,
.ngpio = LPC32XX_GPIO_P2_MAX,
@@ -454,7 +454,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p3,
.get = lpc32xx_gpio_get_value_p3,
.direction_output = lpc32xx_gpio_dir_output_p3,
- .set_rv = lpc32xx_gpio_set_value_p3,
+ .set = lpc32xx_gpio_set_value_p3,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_gpio_p3,
.base = LPC32XX_GPIO_P3_GRP,
@@ -482,7 +482,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.chip = {
.label = "gpo_p3",
.direction_output = lpc32xx_gpio_dir_out_always,
- .set_rv = lpc32xx_gpo_set_value,
+ .set = lpc32xx_gpo_set_value,
.get = lpc32xx_gpo_get_value,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPO_P3_GRP,
diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c
index 7570d9e89adf..30ef258e7655 100644
--- a/drivers/gpio/gpio-macsmc.c
+++ b/drivers/gpio/gpio-macsmc.c
@@ -261,7 +261,7 @@ static int macsmc_gpio_probe(struct platform_device *pdev)
smcgp->gc.label = "macsmc-pmu-gpio";
smcgp->gc.owner = THIS_MODULE;
smcgp->gc.get = macsmc_gpio_get;
- smcgp->gc.set_rv = macsmc_gpio_set;
+ smcgp->gc.set = macsmc_gpio_set;
smcgp->gc.get_direction = macsmc_gpio_get_direction;
smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask;
smcgp->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index e73e72d62bc8..551faf9655b2 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -109,7 +109,7 @@ static const struct gpio_chip madera_gpio_chip = {
.direction_input = madera_gpio_direction_in,
.get = madera_gpio_get,
.direction_output = madera_gpio_direction_out,
- .set_rv = madera_gpio_set,
+ .set = madera_gpio_set,
.set_config = gpiochip_generic_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 75d414d8c992..84c7c2dca822 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -188,7 +188,7 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.direction_input = max7301_direction_input;
ts->chip.get = max7301_get;
ts->chip.direction_output = max7301_direction_output;
- ts->chip.set_rv = max7301_set;
+ ts->chip.set = max7301_set;
ts->chip.ngpio = PIN_NUMBER;
ts->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index d5ffedb086af..a61d670ceeda 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -585,8 +585,8 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->direction_input = max732x_gpio_direction_input;
if (chip->dir_output) {
gc->direction_output = max732x_gpio_direction_output;
- gc->set_rv = max732x_gpio_set_value;
- gc->set_multiple_rv = max732x_gpio_set_multiple;
+ gc->set = max732x_gpio_set_value;
+ gc->set_multiple = max732x_gpio_set_multiple;
}
gc->get = max732x_gpio_get_value;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index af7af8e40afe..02eca400b307 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -311,7 +311,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
- mgpio->gpio_chip.set_rv = max77620_gpio_set;
+ mgpio->gpio_chip.set = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
index a553e141059f..4540da4c1418 100644
--- a/drivers/gpio/gpio-max77650.c
+++ b/drivers/gpio/gpio-max77650.c
@@ -166,7 +166,7 @@ static int max77650_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77650_gpio_direction_input;
chip->gc.direction_output = max77650_gpio_direction_output;
- chip->gc.set_rv = max77650_gpio_set_value;
+ chip->gc.set = max77650_gpio_set_value;
chip->gc.get = max77650_gpio_get_value;
chip->gc.get_direction = max77650_gpio_get_direction;
chip->gc.set_config = max77650_gpio_set_config;
diff --git a/drivers/gpio/gpio-max77759.c b/drivers/gpio/gpio-max77759.c
index 7fe8e6f697d0..5e48eb03e7b3 100644
--- a/drivers/gpio/gpio-max77759.c
+++ b/drivers/gpio/gpio-max77759.c
@@ -469,7 +469,7 @@ static int max77759_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77759_gpio_direction_input;
chip->gc.direction_output = max77759_gpio_direction_output;
chip->gc.get = max77759_gpio_get_value;
- chip->gc.set_rv = max77759_gpio_set_value;
+ chip->gc.set = max77759_gpio_set_value;
girq = &chip->gc.irq;
gpio_irq_chip_set_chip(girq, &max77759_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 5ee2991ecdfd..581a71872eab 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -180,7 +180,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.request = mb86s70_gpio_request;
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
- gchip->gc.set_rv = mb86s70_gpio_set;
+ gchip->gc.set = mb86s70_gpio_set;
gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index e68956104161..9a40e9579e95 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -103,7 +103,7 @@ static int mc33880_probe(struct spi_device *spi)
mc->spi = spi;
mc->chip.label = DRIVER_NAME;
- mc->chip.set_rv = mc33880_set;
+ mc->chip.set = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
mc->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 12cf36f9ca63..f6af81bf2b13 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -224,7 +224,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
gpio->direction_input = ioh_gpio_direction_input;
gpio->get = ioh_gpio_get;
gpio->direction_output = ioh_gpio_direction_output;
- gpio->set_rv = ioh_gpio_set;
+ gpio->set = ioh_gpio_set;
gpio->dbg_show = NULL;
gpio->base = -1;
gpio->ngpio = num_port;
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 6f3dda6b635f..390f2e74a9d8 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -397,7 +397,7 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
gc->ngpio = npins;
gc->owner = THIS_MODULE;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
girq = &gs->gc.irq;
gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index 9875e34bde72..ed29b07d16c1 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
struct gpio_chip *gc;
- char *colon_ptr;
int ret, irq;
- long num;
gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
if (!gs)
@@ -229,39 +227,25 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
- colon_ptr = strchr(dev_name(dev), ':');
- if (!colon_ptr) {
- dev_err(dev, "invalid device name format\n");
- return -EINVAL;
- }
-
- ret = kstrtol(++colon_ptr, 16, &num);
- if (ret) {
- dev_err(dev, "invalid device instance\n");
- return ret;
- }
-
- if (!num) {
- irq = platform_get_irq(pdev, 0);
- if (irq >= 0) {
- girq = &gs->gc.irq;
- gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
- girq->default_type = IRQ_TYPE_NONE;
- /* This will let us handle the parent IRQ in the driver */
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->parent_handler = NULL;
- girq->handler = handle_bad_irq;
-
- /*
- * Directly request the irq here instead of passing
- * a flow-handler because the irq is shared.
- */
- ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
- IRQF_SHARED, dev_name(dev), gs);
- if (ret)
- return dev_err_probe(dev, ret, "failed to request IRQ");
- }
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq >= 0) {
+ girq = &gs->gc.irq;
+ gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
+ girq->default_type = IRQ_TYPE_NONE;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->parent_handler = NULL;
+ girq->handler = handle_bad_irq;
+
+ /*
+ * Directly request the irq here instead of passing
+ * a flow-handler because the irq is shared.
+ */
+ ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
+ IRQF_SHARED, dev_name(dev), gs);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ");
}
platform_set_drvdata(pdev, gs);
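Both mlxbf hunks above move to platform_get_irq_optional(), which behaves like platform_get_irq() except that it does not log an error when no interrupt is described for the device; the caller simply gets a negative errno back (typically -ENXIO) and can treat the IRQ as optional, which is what the "if (irq >= 0)" guard relies on. A minimal sketch of that pattern, with hypothetical foo_* names that are not part of this series:

	irq = platform_get_irq_optional(pdev, 0);
	if (irq >= 0) {
		/* Interrupt present: request it, fail the probe on error. */
		ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler,
				       IRQF_SHARED, dev_name(&pdev->dev), priv);
		if (ret)
			return dev_err_probe(&pdev->dev, ret,
					     "failed to request IRQ\n");
	}
	/* irq < 0: no interrupt wired up, continue without one. */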
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index 897a1e004681..8f1405733d98 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -111,7 +111,7 @@ static int ltq_mm_probe(struct platform_device *pdev)
chip->mmchip.gc.ngpio = 16;
chip->mmchip.gc.direction_output = ltq_mm_dir_out;
- chip->mmchip.gc.set_rv = ltq_mm_set;
+ chip->mmchip.gc.set = ltq_mm_set;
chip->mmchip.save_regs = ltq_mm_save_regs;
/* store the shadow value if one was passed by the devicetree */
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index cf878c2ea6bf..021ad62778c2 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -367,7 +367,7 @@ static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- gc->set_rv(gc, gpio, val);
+ gc->set(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
@@ -432,14 +432,14 @@ static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
int val)
{
bgpio_dir_out(gc, gpio, val);
- gc->set_rv(gc, gpio, val);
+ gc->set(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- gc->set_rv(gc, gpio, val);
+ gc->set(gc, gpio, val);
bgpio_dir_out(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
@@ -528,18 +528,18 @@ static int bgpio_setup_io(struct gpio_chip *gc,
if (set && clr) {
gc->reg_set = set;
gc->reg_clr = clr;
- gc->set_rv = bgpio_set_with_clear;
- gc->set_multiple_rv = bgpio_set_multiple_with_clear;
+ gc->set = bgpio_set_with_clear;
+ gc->set_multiple = bgpio_set_multiple_with_clear;
} else if (set && !clr) {
gc->reg_set = set;
- gc->set_rv = bgpio_set_set;
- gc->set_multiple_rv = bgpio_set_multiple_set;
+ gc->set = bgpio_set_set;
+ gc->set_multiple = bgpio_set_multiple_set;
} else if (flags & BGPIOF_NO_OUTPUT) {
- gc->set_rv = bgpio_set_none;
- gc->set_multiple_rv = NULL;
+ gc->set = bgpio_set_none;
+ gc->set_multiple = NULL;
} else {
- gc->set_rv = bgpio_set;
- gc->set_multiple_rv = bgpio_set_multiple;
+ gc->set = bgpio_set;
+ gc->set_multiple = bgpio_set_multiple;
}
if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
@@ -676,7 +676,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
}
gc->bgpio_data = gc->read_reg(gc->reg_dat);
- if (gc->set_rv == bgpio_set_set &&
+ if (gc->set == bgpio_set_set &&
!(flags & BGPIOF_UNREADABLE_REG_SET))
gc->bgpio_data = gc->read_reg(gc->reg_set);
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 266c0953d914..a7d69f3835c1 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -449,9 +449,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->parent = dev;
gc->get = gpio_mockup_get;
- gc->set_rv = gpio_mockup_set;
+ gc->set = gpio_mockup_set;
gc->get_multiple = gpio_mockup_get_multiple;
- gc->set_multiple_rv = gpio_mockup_set_multiple;
+ gc->set_multiple = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
index 27dd9c3e7b77..4eb9f1a2779b 100644
--- a/drivers/gpio/gpio-moxtet.c
+++ b/drivers/gpio/gpio-moxtet.c
@@ -140,7 +140,7 @@ static int moxtet_gpio_probe(struct device *dev)
chip->gpio_chip.direction_input = moxtet_gpio_direction_input;
chip->gpio_chip.direction_output = moxtet_gpio_direction_output;
chip->gpio_chip.get = moxtet_gpio_get_value;
- chip->gpio_chip.set_rv = moxtet_gpio_set_value;
+ chip->gpio_chip.set = moxtet_gpio_set_value;
chip->gpio_chip.base = -1;
chip->gpio_chip.ngpio = MOXTET_GPIO_NGPIOS;
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 40d587176a75..dad0eca1ca2e 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -153,7 +153,7 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
gc->direction_input = mpc52xx_wkup_gpio_dir_in;
gc->direction_output = mpc52xx_wkup_gpio_dir_out;
gc->get = mpc52xx_wkup_gpio_get;
- gc->set_rv = mpc52xx_wkup_gpio_set;
+ gc->set = mpc52xx_wkup_gpio_set;
ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
if (ret)
@@ -315,7 +315,7 @@ static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
gc->direction_input = mpc52xx_simple_gpio_dir_in;
gc->direction_output = mpc52xx_simple_gpio_dir_out;
gc->get = mpc52xx_simple_gpio_get;
- gc->set_rv = mpc52xx_simple_gpio_set;
+ gc->set = mpc52xx_simple_gpio_set;
ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
if (ret)
diff --git a/drivers/gpio/gpio-mpfs.c b/drivers/gpio/gpio-mpfs.c
index 3415cb7ebb0f..82d557a7e5d8 100644
--- a/drivers/gpio/gpio-mpfs.c
+++ b/drivers/gpio/gpio-mpfs.c
@@ -150,7 +150,7 @@ static int mpfs_gpio_probe(struct platform_device *pdev)
mpfs_gpio->gc.direction_output = mpfs_gpio_direction_output;
mpfs_gpio->gc.get_direction = mpfs_gpio_get_direction;
mpfs_gpio->gc.get = mpfs_gpio_get;
- mpfs_gpio->gc.set_rv = mpfs_gpio_set;
+ mpfs_gpio->gc.set = mpfs_gpio_set;
mpfs_gpio->gc.base = -1;
mpfs_gpio->gc.ngpio = ngpios;
mpfs_gpio->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index b17de08e9e03..9f42bb30b4ec 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -448,9 +448,9 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.direction_input = gpio_mpsse_direction_input;
priv->gpio.direction_output = gpio_mpsse_direction_output;
priv->gpio.get = gpio_mpsse_gpio_get;
- priv->gpio.set_rv = gpio_mpsse_gpio_set;
+ priv->gpio.set = gpio_mpsse_gpio_set;
priv->gpio.get_multiple = gpio_mpsse_get_multiple;
- priv->gpio.set_multiple_rv = gpio_mpsse_set_multiple;
+ priv->gpio.set_multiple = gpio_mpsse_set_multiple;
priv->gpio.base = -1;
priv->gpio.ngpio = 16;
priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 992339a89d19..b0cccd856840 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -658,7 +658,7 @@ static int msc313_gpio_probe(struct platform_device *pdev)
gpiochip->direction_input = msc313_gpio_direction_input;
gpiochip->direction_output = msc313_gpio_direction_output;
gpiochip->get = msc313_gpio_get;
- gpiochip->set_rv = msc313_gpio_set;
+ gpiochip->set = msc313_gpio_set;
gpiochip->base = -1;
gpiochip->ngpio = gpio->gpio_data->num;
gpiochip->names = gpio->gpio_data->names;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 24792b8eb083..5e3f54cb8bc4 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1168,7 +1168,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.direction_input = mvebu_gpio_direction_input;
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
- mvchip->chip.set_rv = mvebu_gpio_set;
+ mvchip->chip.set = mvebu_gpio_set;
if (have_irqs)
mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 296d13845b30..bcf4b07dd458 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -674,7 +674,7 @@ static int nmk_gpio_probe(struct platform_device *pdev)
chip->direction_input = nmk_gpio_make_input;
chip->get = nmk_gpio_get_input;
chip->direction_output = nmk_gpio_make_output;
- chip->set_rv = nmk_gpio_set_output;
+ chip->set = nmk_gpio_set_output;
chip->dbg_show = nmk_gpio_dbg_show;
chip->can_sleep = false;
chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
index 25b203a89e38..83c77a2c0623 100644
--- a/drivers/gpio/gpio-npcm-sgpio.c
+++ b/drivers/gpio/gpio-npcm-sgpio.c
@@ -211,7 +211,7 @@ static int npcm_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
static int npcm_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
- return gc->set_rv(gc, offset, val);
+ return gc->set(gc, offset, val);
}
static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -546,7 +546,7 @@ static int npcm_sgpio_probe(struct platform_device *pdev)
gpio->chip.direction_output = npcm_sgpio_dir_out;
gpio->chip.get_direction = npcm_sgpio_get_direction;
gpio->chip.get = npcm_sgpio_get;
- gpio->chip.set_rv = npcm_sgpio_set;
+ gpio->chip.set = npcm_sgpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 24966161742a..777e20c608dc 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -108,7 +108,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
chip->direction_input = octeon_gpio_dir_in;
chip->get = octeon_gpio_get;
chip->direction_output = octeon_gpio_dir_out;
- chip->set_rv = octeon_gpio_set;
+ chip->set = octeon_gpio_set;
err = devm_gpiochip_add_data(&pdev->dev, chip, gpio);
if (err)
return err;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index ed5c88a5c520..a268c76bdca6 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1046,8 +1046,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
bank->chip.get_multiple = omap_gpio_get_multiple;
bank->chip.direction_output = omap_gpio_output;
bank->chip.set_config = omap_gpio_set_config;
- bank->chip.set_rv = omap_gpio_set;
- bank->chip.set_multiple_rv = omap_gpio_set_multiple;
+ bank->chip.set = omap_gpio_set;
+ bank->chip.set_multiple = omap_gpio_set_multiple;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 9329d8ce8f59..e377f6dd4ccf 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -166,7 +166,7 @@ static int palmas_gpio_probe(struct platform_device *pdev)
palmas_gpio->gpio_chip.direction_input = palmas_gpio_input;
palmas_gpio->gpio_chip.direction_output = palmas_gpio_output;
palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq;
- palmas_gpio->gpio_chip.set_rv = palmas_gpio_set;
+ palmas_gpio->gpio_chip.set = palmas_gpio_set;
palmas_gpio->gpio_chip.get = palmas_gpio_get;
palmas_gpio->gpio_chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 69906a9af7e6..b46927f55038 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -789,10 +789,10 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->direction_input = pca953x_gpio_direction_input;
gc->direction_output = pca953x_gpio_direction_output;
gc->get = pca953x_gpio_get_value;
- gc->set_rv = pca953x_gpio_set_value;
+ gc->set = pca953x_gpio_set_value;
gc->get_direction = pca953x_gpio_get_direction;
gc->get_multiple = pca953x_gpio_get_multiple;
- gc->set_multiple_rv = pca953x_gpio_set_multiple;
+ gc->set_multiple = pca953x_gpio_set_multiple;
gc->set_config = pca953x_gpio_set_config;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index a33246f20fd8..c5a1287079a0 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -126,7 +126,7 @@ static int pca9570_probe(struct i2c_client *client)
gpio->chip.owner = THIS_MODULE;
gpio->chip.get_direction = pca9570_get_direction;
gpio->chip.get = pca9570_get;
- gpio->chip.set_rv = pca9570_set;
+ gpio->chip.set = pca9570_set;
gpio->chip.base = -1;
gpio->chip_data = device_get_match_data(&client->dev);
gpio->chip.ngpio = gpio->chip_data->ngpio;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index a04203680333..3b9de8c3d924 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -295,8 +295,8 @@ static int pcf857x_probe(struct i2c_client *client)
gpio->chip.owner = THIS_MODULE;
gpio->chip.get = pcf857x_get;
gpio->chip.get_multiple = pcf857x_get_multiple;
- gpio->chip.set_rv = pcf857x_set;
- gpio->chip.set_multiple_rv = pcf857x_set_multiple;
+ gpio->chip.set = pcf857x_set;
+ gpio->chip.set_multiple = pcf857x_set_multiple;
gpio->chip.direction_input = pcf857x_input;
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client);
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index c6f313342ba0..9925687e05fb 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -219,7 +219,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
gpio->direction_input = pch_gpio_direction_input;
gpio->get = pch_gpio_get;
gpio->direction_output = pch_gpio_direction_output;
- gpio->set_rv = pch_gpio_set;
+ gpio->set = pch_gpio_set;
gpio->base = -1;
gpio->ngpio = gpio_pins[chip->ioh];
gpio->can_sleep = false;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 98cfac4eac85..02e4ffcf5a6f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -330,7 +330,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
pl061->gc.direction_input = pl061_direction_input;
pl061->gc.direction_output = pl061_direction_output;
pl061->gc.get = pl061_get_value;
- pl061->gc.set_rv = pl061_set_value;
+ pl061->gc.set = pl061_set_value;
pl061->gc.ngpio = PL061_GPIO_NR;
pl061->gc.label = dev_name(dev);
pl061->gc.parent = dev;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 13f7da2a9486..fa22f3faa163 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -355,7 +355,7 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio, void __iom
pchip->chip.direction_input = pxa_gpio_direction_input;
pchip->chip.direction_output = pxa_gpio_direction_output;
pchip->chip.get = pxa_gpio_get;
- pchip->chip.set_rv = pxa_gpio_set;
+ pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
pchip->chip.request = gpiochip_generic_request;
@@ -499,8 +499,6 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio);
writel_relaxed(grer, base + GRER_OFFSET);
writel_relaxed(gfer, base + GFER_OFFSET);
-
- gpiochip_disable_irq(&pchip->chip, gpio);
}
static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
@@ -520,21 +518,17 @@ static void pxa_unmask_muxed_gpio(struct irq_data *d)
unsigned int gpio = irqd_to_hwirq(d);
struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);
- gpiochip_enable_irq(&pchip->chip, gpio);
-
c->irq_mask |= GPIO_bit(gpio);
update_edge_detect(c);
}
-static const struct irq_chip pxa_muxed_gpio_chip = {
+static struct irq_chip pxa_muxed_gpio_chip = {
.name = "GPIO",
.irq_ack = pxa_ack_muxed_gpio,
.irq_mask = pxa_mask_muxed_gpio,
.irq_unmask = pxa_unmask_muxed_gpio,
.irq_set_type = pxa_gpio_irq_type,
.irq_set_wake = pxa_gpio_set_wake,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int pxa_gpio_nums(struct platform_device *pdev)
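The pxa hunks above drop the immutable irq_chip pieces as a set, which is how they have to be removed: the const qualifier, the IRQCHIP_IMMUTABLE flag, GPIOCHIP_IRQ_RESOURCE_HELPERS and the gpiochip_enable_irq()/gpiochip_disable_irq() calls in the unmask/mask paths only make sense together. For reference, a minimal sketch of the immutable form being removed here (hypothetical bar_* names, not this driver's code):

	static void bar_gpio_irq_mask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		/* hardware-specific mask write goes here */
		gpiochip_disable_irq(gc, irqd_to_hwirq(d));
	}

	static void bar_gpio_irq_unmask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		gpiochip_enable_irq(gc, irqd_to_hwirq(d));
		/* hardware-specific unmask write goes here */
	}

	static const struct irq_chip bar_gpio_irq_chip = {
		.name		= "bar-gpio",
		.irq_mask	= bar_gpio_irq_mask,
		.irq_unmask	= bar_gpio_irq_unmask,
		.flags		= IRQCHIP_IMMUTABLE,
		GPIOCHIP_IRQ_RESOURCE_HELPERS,
	};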
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index b4b607515a04..40413e06b69c 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -232,7 +232,7 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev)
rpi_gpio->gc.direction_output = rpi_exp_gpio_dir_out;
rpi_gpio->gc.get_direction = rpi_exp_gpio_get_direction;
rpi_gpio->gc.get = rpi_exp_gpio_get;
- rpi_gpio->gc.set_rv = rpi_exp_gpio_set;
+ rpi_gpio->gc.set = rpi_exp_gpio_set;
rpi_gpio->gc.can_sleep = true;
return devm_gpiochip_add_data(dev, &rpi_gpio->gc, rpi_gpio);
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index cf3e91d235df..5a69e4534591 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -118,7 +118,7 @@ static int rc5t583_gpio_probe(struct platform_device *pdev)
rc5t583_gpio->gpio_chip.free = rc5t583_gpio_free,
rc5t583_gpio->gpio_chip.direction_input = rc5t583_gpio_dir_input,
rc5t583_gpio->gpio_chip.direction_output = rc5t583_gpio_dir_output,
- rc5t583_gpio->gpio_chip.set_rv = rc5t583_gpio_set,
+ rc5t583_gpio->gpio_chip.set = rc5t583_gpio_set,
rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get,
rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq,
rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index cd31580effa9..86777e097fd8 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -535,8 +535,8 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->get = gpio_rcar_get;
gpio_chip->get_multiple = gpio_rcar_get_multiple;
gpio_chip->direction_output = gpio_rcar_direction_output;
- gpio_chip->set_rv = gpio_rcar_set;
- gpio_chip->set_multiple_rv = gpio_rcar_set_multiple;
+ gpio_chip->set = gpio_rcar_set;
+ gpio_chip->set_multiple = gpio_rcar_set_multiple;
gpio_chip->label = name;
gpio_chip->parent = dev;
gpio_chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index a75ed8021de5..ba62b81aa8ae 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -159,7 +159,7 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
- rdc321x_gpio_dev->chip.set_rv = rdc_gpio_set_value;
+ rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
rdc321x_gpio_dev->chip.base = 0;
rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index d8da99f97385..f2238196faf1 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -46,7 +46,7 @@ static int gpio_reg_direction_output(struct gpio_chip *gc, unsigned offset,
if (r->direction & BIT(offset))
return -ENOTSUPP;
- gc->set_rv(gc, offset, value);
+ gc->set(gc, offset, value);
return 0;
}
@@ -161,9 +161,9 @@ struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
r->gc.get_direction = gpio_reg_get_direction;
r->gc.direction_input = gpio_reg_direction_input;
r->gc.direction_output = gpio_reg_direction_output;
- r->gc.set_rv = gpio_reg_set;
+ r->gc.set = gpio_reg_set;
r->gc.get = gpio_reg_get;
- r->gc.set_multiple_rv = gpio_reg_set_multiple;
+ r->gc.set_multiple = gpio_reg_set_multiple;
if (irqs)
r->gc.to_irq = gpio_reg_to_irq;
r->gc.base = base;
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 87c4225784cf..e8a32dfebdcb 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -260,9 +260,9 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
- chip->set_rv = gpio_regmap_set_with_clear;
+ chip->set = gpio_regmap_set_with_clear;
else if (gpio->reg_set_base)
- chip->set_rv = gpio_regmap_set;
+ chip->set = gpio_regmap_set;
chip->get_direction = gpio_regmap_get_direction;
if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index ecd60ff9e1dd..bcfc323a8315 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -327,7 +327,7 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
static const struct gpio_chip rockchip_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set_rv = rockchip_gpio_set,
+ .set = rockchip_gpio_set,
.get = rockchip_gpio_get,
.get_direction = rockchip_gpio_get_direction,
.direction_input = rockchip_gpio_direction_input,
diff --git a/drivers/gpio/gpio-rtd.c b/drivers/gpio/gpio-rtd.c
index 25bbd749b019..d46b40dd5283 100644
--- a/drivers/gpio/gpio-rtd.c
+++ b/drivers/gpio/gpio-rtd.c
@@ -565,7 +565,7 @@ static int rtd_gpio_probe(struct platform_device *pdev)
data->gpio_chip.get_direction = rtd_gpio_get_direction;
data->gpio_chip.direction_input = rtd_gpio_direction_input;
data->gpio_chip.direction_output = rtd_gpio_direction_output;
- data->gpio_chip.set_rv = rtd_gpio_set;
+ data->gpio_chip.set = rtd_gpio_set;
data->gpio_chip.get = rtd_gpio_get;
data->gpio_chip.set_config = rtd_gpio_set_config;
data->gpio_chip.parent = dev;
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index e9d054d78ccb..7f6a62f5d1ee 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -99,7 +99,7 @@ static struct sa1100_gpio_chip sa1100_gpio_chip = {
.get_direction = sa1100_get_direction,
.direction_input = sa1100_direction_input,
.direction_output = sa1100_direction_output,
- .set_rv = sa1100_gpio_set,
+ .set = sa1100_gpio_set,
.get = sa1100_gpio_get,
.to_irq = sa1100_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index c31244cf5e89..5005688f6e67 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -196,7 +196,7 @@ static int sama5d2_piobu_probe(struct platform_device *pdev)
piobu->chip.direction_input = sama5d2_piobu_direction_input;
piobu->chip.direction_output = sama5d2_piobu_direction_output;
piobu->chip.get = sama5d2_piobu_get;
- piobu->chip.set_rv = sama5d2_piobu_set;
+ piobu->chip.set = sama5d2_piobu_set;
piobu->chip.base = -1;
piobu->chip.ngpio = PIOBU_NUM;
piobu->chip.can_sleep = 0;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 833ffdd98d74..966d16a6d515 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -167,7 +167,7 @@ static const struct gpio_chip sch_gpio_chip = {
.direction_input = sch_gpio_direction_in,
.get = sch_gpio_get,
.direction_output = sch_gpio_direction_out,
- .set_rv = sch_gpio_set,
+ .set = sch_gpio_set,
.get_direction = sch_gpio_get_direction,
};
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index 44fb5fc21fb8..f95566998d30 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -297,7 +297,7 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
block->chip.get_direction = sch311x_gpio_get_direction;
block->chip.set_config = sch311x_gpio_set_config;
block->chip.get = sch311x_gpio_get;
- block->chip.set_rv = sch311x_gpio_set;
+ block->chip.set = sch311x_gpio_set;
block->chip.ngpio = 8;
block->chip.parent = &pdev->dev;
block->chip.base = sch311x_gpio_blocks[i].base;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 9503296422fd..050092583f79 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -486,9 +486,9 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->parent = dev;
gc->fwnode = swnode;
gc->get = gpio_sim_get;
- gc->set_rv = gpio_sim_set;
+ gc->set = gpio_sim_set;
gc->get_multiple = gpio_sim_get_multiple;
- gc->set_multiple_rv = gpio_sim_set_multiple;
+ gc->set_multiple = gpio_sim_set_multiple;
gc->direction_output = gpio_sim_direction_output;
gc->direction_input = gpio_sim_direction_input;
gc->get_direction = gpio_sim_get_direction;
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 95355dda621b..958034b9f3f3 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -237,7 +237,7 @@ static int gpio_siox_probe(struct siox_device *sdevice)
gc->parent = dev;
gc->owner = THIS_MODULE;
gc->get = gpio_siox_get;
- gc->set_rv = gpio_siox_set;
+ gc->set = gpio_siox_set;
gc->direction_input = gpio_siox_direction_input;
gc->direction_output = gpio_siox_direction_output;
gc->get_direction = gpio_siox_get_direction;
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 55f0e8afa291..96a0e1211500 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -140,7 +140,7 @@ static int spics_gpio_probe(struct platform_device *pdev)
spics->chip.request = spics_request;
spics->chip.free = spics_free;
spics->chip.direction_output = spics_direction_output;
- spics->chip.set_rv = spics_set_value;
+ spics->chip.set = spics_set_value;
spics->chip.label = dev_name(&pdev->dev);
spics->chip.parent = &pdev->dev;
spics->chip.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index bbd5bf51c088..413bcd0a4240 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -245,7 +245,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
sprd_gpio->chip.request = sprd_gpio_request;
sprd_gpio->chip.free = sprd_gpio_free;
sprd_gpio->chip.get = sprd_gpio_get;
- sprd_gpio->chip.set_rv = sprd_gpio_set;
+ sprd_gpio->chip.set = sprd_gpio_set;
sprd_gpio->chip.direction_input = sprd_gpio_direction_input;
sprd_gpio->chip.direction_output = sprd_gpio_direction_output;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 0a270156e0be..5dd4c21a8e60 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -136,7 +136,7 @@ static const struct gpio_chip template_chip = {
.direction_input = stmpe_gpio_direction_input,
.get = stmpe_gpio_get,
.direction_output = stmpe_gpio_direction_output,
- .set_rv = stmpe_gpio_set,
+ .set = stmpe_gpio_set,
.request = stmpe_gpio_request,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index fdda8de6ca36..493c027afdd6 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -249,7 +249,7 @@ static int xway_stp_probe(struct platform_device *pdev)
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
chip->gc.get = xway_stp_get;
- chip->gc.set_rv = xway_stp_set;
+ chip->gc.set = xway_stp_set;
chip->gc.request = xway_stp_request;
chip->gc.base = -1;
chip->gc.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index f86f78655c24..40064d4cf47f 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -115,7 +115,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS));
}
- return chip->set_rv(chip, offset, val);
+ return chip->set(chip, offset, val);
}
static const struct syscon_gpio_data clps711x_mctrl_gpio = {
@@ -251,7 +251,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
if (priv->data->flags & GPIO_SYSCON_FEAT_IN)
priv->chip.direction_input = syscon_gpio_dir_in;
if (priv->data->flags & GPIO_SYSCON_FEAT_OUT) {
- priv->chip.set_rv = priv->data->set ? : syscon_gpio_set;
+ priv->chip.set = priv->data->set ? : syscon_gpio_set;
priv->chip.direction_output = syscon_gpio_dir_out;
}
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index ce17b98e0623..ba5a8ede8912 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -430,7 +430,7 @@ int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio)
gpio->chip.direction_input = tng_gpio_direction_input;
gpio->chip.direction_output = tng_gpio_direction_output;
gpio->chip.get = tng_gpio_get;
- gpio->chip.set_rv = tng_gpio_set;
+ gpio->chip.set = tng_gpio_set;
gpio->chip.get_direction = tng_gpio_get_direction;
gpio->chip.set_config = tng_gpio_set_config;
gpio->chip.base = info->base;
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 0bd32809fd68..90d048f9da08 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -149,7 +149,7 @@ static const struct gpio_chip template_chip = {
.label = "tc3589x",
.owner = THIS_MODULE,
.get = tc3589x_gpio_get,
- .set_rv = tc3589x_gpio_set,
+ .set = tc3589x_gpio_set,
.direction_output = tc3589x_gpio_direction_output,
.direction_input = tc3589x_gpio_direction_input,
.get_direction = tc3589x_gpio_get_direction,
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 126fd12550aa..15a5762a82c2 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -720,7 +720,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tgi->gc.direction_input = tegra_gpio_direction_input;
tgi->gc.get = tegra_gpio_get;
tgi->gc.direction_output = tegra_gpio_direction_output;
- tgi->gc.set_rv = tegra_gpio_set;
+ tgi->gc.set = tegra_gpio_set;
tgi->gc.get_direction = tegra_gpio_get_direction;
tgi->gc.base = 0;
tgi->gc.ngpio = tgi->bank_count * 32;
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index f902da15c419..5fd3ec3e2c53 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -891,7 +891,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.direction_input = tegra186_gpio_direction_input;
gpio->gpio.direction_output = tegra186_gpio_direction_output;
gpio->gpio.get = tegra186_gpio_get;
- gpio->gpio.set_rv = tegra186_gpio_set;
+ gpio->gpio.set = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
gpio->gpio.init_valid_mask = tegra186_init_valid_mask;
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index eb6a1f0279c0..be96853063ba 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -533,8 +533,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
chip->direction_input = thunderx_gpio_dir_in;
chip->get = thunderx_gpio_get;
chip->direction_output = thunderx_gpio_dir_out;
- chip->set_rv = thunderx_gpio_set;
- chip->set_multiple_rv = thunderx_gpio_set_multiple;
+ chip->set = thunderx_gpio_set;
+ chip->set_multiple = thunderx_gpio_set_multiple;
chip->set_config = thunderx_gpio_set_config;
girq = &chip->irq;
gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index fbb883089189..679e27f00ff6 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -253,7 +253,7 @@ static int timbgpio_probe(struct platform_device *pdev)
gc->direction_input = timbgpio_gpio_direction_input;
gc->get = timbgpio_gpio_get;
gc->direction_output = timbgpio_gpio_direction_output;
- gc->set_rv = timbgpio_gpio_set;
+ gc->set = timbgpio_gpio_set;
gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
gc->dbg_show = NULL;
gc->base = pdata->gpio_base;
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index d5b8568ab061..866ff2d436d5 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -80,8 +80,8 @@ static const struct gpio_chip template_chip = {
.owner = THIS_MODULE,
.get_direction = tpic2810_get_direction,
.direction_output = tpic2810_direction_output,
- .set_rv = tpic2810_set,
- .set_multiple_rv = tpic2810_set_multiple,
+ .set = tpic2810_set,
+ .set_multiple = tpic2810_set_multiple,
.base = -1,
.ngpio = 8,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index 08fa061b73ef..84b17b83476f 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -69,7 +69,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65086_gpio_direction_input,
.direction_output = tps65086_gpio_direction_output,
.get = tps65086_gpio_get,
- .set_rv = tps65086_gpio_set,
+ .set = tps65086_gpio_set,
.base = -1,
.ngpio = 4,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 49cd7754ed05..3b4c41f5ef55 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -169,7 +169,7 @@ static const struct gpio_chip template_chip = {
.request = tps65218_gpio_request,
.direction_output = tps65218_gpio_output,
.get = tps65218_gpio_get,
- .set_rv = tps65218_gpio_set,
+ .set = tps65218_gpio_set,
.set_config = tps65218_gpio_set_config,
.can_sleep = true,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index c0177088c54c..158f63bcf10c 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -203,7 +203,7 @@ static const struct gpio_chip tps65214_template_chip = {
.direction_input = tps65219_gpio_direction_input,
.direction_output = tps65219_gpio_direction_output,
.get = tps65219_gpio_get,
- .set_rv = tps65219_gpio_set,
+ .set = tps65219_gpio_set,
.base = -1,
.ngpio = 2,
.can_sleep = true,
@@ -216,7 +216,7 @@ static const struct gpio_chip tps65219_template_chip = {
.direction_input = tps65219_gpio_direction_input,
.direction_output = tps65219_gpio_direction_output,
.get = tps65219_gpio_get,
- .set_rv = tps65219_gpio_set,
+ .set = tps65219_gpio_set,
.base = -1,
.ngpio = 3,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index f1ced092f38a..aaacbb54bf5d 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -98,7 +98,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
/* FIXME: add handling of GPIOs as dedicated inputs */
tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
- tps6586x_gpio->gpio_chip.set_rv = tps6586x_gpio_set;
+ tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 3204f55394cf..25e9f41efe78 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -139,7 +139,7 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
tps65910_gpio->gpio_chip.can_sleep = true;
tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
- tps65910_gpio->gpio_chip.set_rv = tps65910_gpio_set;
+ tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
tps65910_gpio->gpio_chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index d586ccfbfc56..7a2c5685c2fd 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -92,7 +92,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65912_gpio_direction_input,
.direction_output = tps65912_gpio_direction_output,
.get = tps65912_gpio_get,
- .set_rv = tps65912_gpio_set,
+ .set = tps65912_gpio_set,
.base = -1,
.ngpio = 5,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index 3b8805c854f7..d4fbdf90e190 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -142,7 +142,7 @@ static int tps68470_gpio_probe(struct platform_device *pdev)
tps68470_gpio->gc.direction_output = tps68470_gpio_output;
tps68470_gpio->gc.get = tps68470_gpio_get;
tps68470_gpio->gc.get_direction = tps68470_gpio_get_direction;
- tps68470_gpio->gc.set_rv = tps68470_gpio_set;
+ tps68470_gpio->gc.set = tps68470_gpio_set;
tps68470_gpio->gc.can_sleep = true;
tps68470_gpio->gc.names = tps68470_names;
tps68470_gpio->gc.ngpio = TPS68470_N_GPIO;
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 056799ecce6a..27dd09273292 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -370,7 +370,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
chip->direction_output = tqmx86_gpio_direction_output;
chip->get_direction = tqmx86_gpio_get_direction;
chip->get = tqmx86_gpio_get;
- chip->set_rv = tqmx86_gpio_set;
+ chip->set = tqmx86_gpio_set;
chip->ngpio = TQMX86_NGPIO;
chip->parent = pdev->dev.parent;
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 35dd2d09b4d4..d9ee8fc77ccd 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -119,7 +119,7 @@ static const struct gpio_chip template_chip = {
.direction_input = ts4900_gpio_direction_input,
.direction_output = ts4900_gpio_direction_output,
.get = ts4900_gpio_get,
- .set_rv = ts4900_gpio_set,
+ .set = ts4900_gpio_set,
.base = -1,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index bb432ed73698..3c7f2efe10fd 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -340,7 +340,7 @@ static int ts5500_dio_probe(struct platform_device *pdev)
priv->gpio_chip.direction_input = ts5500_gpio_input;
priv->gpio_chip.direction_output = ts5500_gpio_output;
priv->gpio_chip.get = ts5500_gpio_get;
- priv->gpio_chip.set_rv = ts5500_gpio_set;
+ priv->gpio_chip.set = ts5500_gpio_set;
priv->gpio_chip.to_irq = ts5500_gpio_to_irq;
priv->gpio_chip.base = -1;
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index e39e39e3ef85..a33dc7c7e7a0 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -419,7 +419,7 @@ static const struct gpio_chip template_chip = {
.direction_output = twl_direction_out,
.get_direction = twl_get_direction,
.get = twl_get,
- .set_rv = twl_set,
+ .set = twl_set,
.to_irq = twl_to_irq,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index b2196b62b528..4ec9bcd40439 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -69,7 +69,7 @@ static struct gpio_chip twl6040gpo_chip = {
.get = twl6040gpo_get,
.direction_output = twl6040gpo_direction_out,
.get_direction = twl6040gpo_get_direction,
- .set_rv = twl6040gpo_set,
+ .set = twl6040gpo_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 8939556f42b6..197bb1d22b3c 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -386,8 +386,8 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
chip->direction_input = uniphier_gpio_direction_input;
chip->direction_output = uniphier_gpio_direction_output;
chip->get = uniphier_gpio_get;
- chip->set_rv = uniphier_gpio_set;
- chip->set_multiple_rv = uniphier_gpio_set_multiple;
+ chip->set = uniphier_gpio_set;
+ chip->set_multiple = uniphier_gpio_set_multiple;
chip->to_irq = uniphier_gpio_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index e8e906b54d51..15e495c109d2 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -408,7 +408,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpioa.base = -1;
vb_gpio->gpioa.ngpio = 16;
vb_gpio->gpioa.can_sleep = true;
- vb_gpio->gpioa.set_rv = vprbrd_gpioa_set;
+ vb_gpio->gpioa.set = vprbrd_gpioa_set;
vb_gpio->gpioa.get = vprbrd_gpioa_get;
vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
@@ -424,7 +424,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpiob.base = -1;
vb_gpio->gpiob.ngpio = 16;
vb_gpio->gpiob.can_sleep = true;
- vb_gpio->gpiob.set_rv = vprbrd_gpiob_set;
+ vb_gpio->gpiob.set = vprbrd_gpiob_set;
vb_gpio->gpiob.get = vprbrd_gpiob_get;
vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 07552611da98..17e040991e46 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -567,7 +567,7 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
vgpio->gc.direction_input = virtio_gpio_direction_input;
vgpio->gc.direction_output = virtio_gpio_direction_output;
vgpio->gc.get = virtio_gpio_get;
- vgpio->gc.set_rv = virtio_gpio_set;
+ vgpio->gc.set = virtio_gpio_set;
vgpio->gc.ngpio = ngpio;
vgpio->gc.base = -1; /* Allocate base dynamically */
vgpio->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index a3bceac7854c..84b3a973a503 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -216,7 +216,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->direction_input = vx855gpio_direction_input;
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
- c->set_rv = vx855gpio_set;
+ c->set = vx855gpio_set;
c->set_config = vx855gpio_set_config;
c->dbg_show = NULL;
c->base = 0;
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index c89da9a22016..4af504c23e6f 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -98,7 +98,7 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->direction_output = wcd_gpio_direction_output;
chip->get_direction = wcd_gpio_get_direction;
chip->get = wcd_gpio_get;
- chip->set_rv = wcd_gpio_set;
+ chip->set = wcd_gpio_set;
chip->parent = dev;
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index f7df3d5fc71c..4a5e20e936a9 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -439,7 +439,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
wg->chip.direction_output = wcove_gpio_dir_out;
wg->chip.get_direction = wcove_gpio_get_direction;
wg->chip.get = wcove_gpio_get;
- wg->chip.set_rv = wcove_gpio_set;
+ wg->chip.set = wcove_gpio_set;
wg->chip.set_config = wcove_gpio_set_config;
wg->chip.base = -1;
wg->chip.ngpio = WCOVE_VGPIO_NUM;
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
index 421655b5d4c2..dcfda738fd69 100644
--- a/drivers/gpio/gpio-winbond.c
+++ b/drivers/gpio/gpio-winbond.c
@@ -494,7 +494,7 @@ static struct gpio_chip winbond_gpio_chip = {
.can_sleep = true,
.get = winbond_gpio_get,
.direction_input = winbond_gpio_direction_in,
- .set_rv = winbond_gpio_set,
+ .set = winbond_gpio_set,
.direction_output = winbond_gpio_direction_out,
};
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index ab58aa7c0b99..f03c0e808fab 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -253,7 +253,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm831x_gpio_direction_in,
.get = wm831x_gpio_get,
.direction_output = wm831x_gpio_direction_out,
- .set_rv = wm831x_gpio_set,
+ .set = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
.set_config = wm831x_set_config,
.dbg_show = wm831x_gpio_dbg_show,
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 9a7677f841fc..46923b23a72e 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -93,7 +93,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm8350_gpio_direction_in,
.get = wm8350_gpio_get,
.direction_output = wm8350_gpio_direction_out,
- .set_rv = wm8350_gpio_set,
+ .set = wm8350_gpio_set,
.to_irq = wm8350_gpio_to_irq,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index ccc005628dd2..df47a27f508d 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -256,7 +256,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm8994_gpio_direction_in,
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
- .set_rv = wm8994_gpio_set,
+ .set = wm8994_gpio_set,
.set_config = wm8994_gpio_set_config,
.to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 28f794e5eb26..4f627de3f56c 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -178,7 +178,7 @@ static int xgene_gpio_probe(struct platform_device *pdev)
gpio->chip.direction_input = xgene_gpio_dir_in;
gpio->chip.direction_output = xgene_gpio_dir_out;
gpio->chip.get = xgene_gpio_get;
- gpio->chip.set_rv = xgene_gpio_set;
+ gpio->chip.set = xgene_gpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 36d91cacc2d9..83675ac81077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -604,10 +604,10 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
chip->gc.get = xgpio_get;
- chip->gc.set_rv = xgpio_set;
+ chip->gc.set = xgpio_set;
chip->gc.request = xgpio_request;
chip->gc.free = xgpio_free;
- chip->gc.set_multiple_rv = xgpio_set_multiple;
+ chip->gc.set_multiple = xgpio_set_multiple;
chip->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index bcd2dfec462d..aede6324387f 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -274,7 +274,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
gc->ngpio = 70;
gc->direction_output = xlp_gpio_dir_output;
gc->direction_input = xlp_gpio_dir_input;
- gc->set_rv = xlp_gpio_set;
+ gc->set = xlp_gpio_set;
gc->get = xlp_gpio_get;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index 70402c6b5407..faadcb4b0b2d 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -164,7 +164,7 @@ static int xra1403_probe(struct spi_device *spi)
xra->chip.direction_output = xra1403_direction_output;
xra->chip.get_direction = xra1403_get_direction;
xra->chip.get = xra1403_get;
- xra->chip.set_rv = xra1403_set;
+ xra->chip.set = xra1403_set;
xra->chip.dbg_show = xra1403_dbg_show;
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index e7ff3c60324d..4418947a10e5 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -132,7 +132,7 @@ static struct gpio_chip expstate_chip = {
.ngpio = 32,
.get_direction = xtensa_expstate_get_direction,
.get = xtensa_expstate_get_value,
- .set_rv = xtensa_expstate_set_value,
+ .set = xtensa_expstate_set_value,
};
static int xtensa_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index 0799f7976710..29375bea2289 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -161,7 +161,7 @@ static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
static const struct gpio_chip zevio_gpio_chip = {
.direction_input = zevio_gpio_direction_input,
.direction_output = zevio_gpio_direction_output,
- .set_rv = zevio_gpio_set,
+ .set = zevio_gpio_set,
.get = zevio_gpio_get,
.to_irq = zevio_gpio_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index b22b4e25c68d..0ffd76e8951f 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -932,7 +932,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
chip->owner = THIS_MODULE;
chip->parent = &pdev->dev;
chip->get = zynq_gpio_get_value;
- chip->set_rv = zynq_gpio_set_value;
+ chip->set = zynq_gpio_set_value;
chip->request = zynq_gpio_request;
chip->free = zynq_gpio_free;
chip->direction_input = zynq_gpio_dir_in;
diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
index 6dc5d7acb89c..5e651482e985 100644
--- a/drivers/gpio/gpio-zynqmp-modepin.c
+++ b/drivers/gpio/gpio-zynqmp-modepin.c
@@ -130,7 +130,7 @@ static int modepin_gpio_probe(struct platform_device *pdev)
chip->owner = THIS_MODULE;
chip->parent = &pdev->dev;
chip->get = modepin_gpio_get_value;
- chip->set_rv = modepin_gpio_set_value;
+ chip->set = modepin_gpio_set_value;
chip->direction_input = modepin_gpio_dir_in;
chip->direction_output = modepin_gpio_dir_out;
chip->label = dev_name(&pdev->dev);
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
index c13545dce349..bfb04e67c4bc 100644
--- a/drivers/gpio/gpiolib-acpi-quirks.c
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -344,6 +344,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "AMDI0030:00@8",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 5.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/4482
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt PX13"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ASCP1A00:00@8",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a93d2a9355e2..0d2b470a252e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1037,11 +1037,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
int base = 0;
int ret;
- /* Only allow one set() and one set_multiple(). */
- if ((gc->set && gc->set_rv) ||
- (gc->set_multiple && gc->set_multiple_rv))
- return -EINVAL;
-
/*
* First: allocate and populate the internal stat container, and
* set up the struct device.
@@ -2891,19 +2886,14 @@ static int gpiochip_set(struct gpio_chip *gc, unsigned int offset, int value)
lockdep_assert_held(&gc->gpiodev->srcu);
- if (WARN_ON(unlikely(!gc->set && !gc->set_rv)))
+ if (WARN_ON(unlikely(!gc->set)))
return -EOPNOTSUPP;
- if (gc->set_rv) {
- ret = gc->set_rv(gc, offset, value);
- if (ret > 0)
- ret = -EBADE;
-
- return ret;
- }
+ ret = gc->set(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
- gc->set(gc, offset, value);
- return 0;
+ return ret;
}
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
@@ -2919,7 +2909,7 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* output-only, but if there is then not even a .set() operation it
* is pretty tricky to drive the output line.
*/
- if (!guard.gc->set && !guard.gc->set_rv && !guard.gc->direction_output) {
+ if (!guard.gc->set && !guard.gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() and direction_output() operations\n",
__func__);
@@ -3665,19 +3655,14 @@ static int gpiochip_set_multiple(struct gpio_chip *gc,
lockdep_assert_held(&gc->gpiodev->srcu);
- if (gc->set_multiple_rv) {
- ret = gc->set_multiple_rv(gc, mask, bits);
+ if (gc->set_multiple) {
+ ret = gc->set_multiple(gc, mask, bits);
if (ret > 0)
ret = -EBADE;
return ret;
}
- if (gc->set_multiple) {
- gc->set_multiple(gc, mask, bits);
- return 0;
- }
-
/* set outputs if the corresponding mask bit is set */
for_each_set_bit(i, mask, gc->ngpio) {
ret = gpiochip_set(gc, i, test_bit(i, bits));
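The gpiolib.c hunks above retire the transitional set_rv()/set_multiple_rv() callbacks: set() and set_multiple() now return int themselves, and gpiolib normalizes any positive return value to -EBADE. That is the contract every gpio-*.c driver hunk in this series is switching its assignments over to. A minimal driver-side sketch, assuming a regmap-backed device (foo_* names and FOO_OUT_REG are illustrative only, not from this series):

	static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
	{
		struct foo_gpio *priv = gpiochip_get_data(gc);

		/* Bus errors now propagate to the caller instead of being dropped. */
		return regmap_update_bits(priv->map, FOO_OUT_REG, BIT(offset),
					  value ? BIT(offset) : 0);
	}

	/* in probe: */
	gc->set = foo_gpio_set;		/* previously assigned to gc->set_rv */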
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a1737556a77e..ef3af170dda4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -883,6 +883,7 @@ struct amdgpu_mqd_prop {
uint64_t csa_addr;
uint64_t fence_address;
bool tmz_queue;
+ bool kernel_queue;
};
struct amdgpu_mqd {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 15dde1f50328..25252231a68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -459,7 +459,7 @@ calc:
void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
{
- u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+ u64 pos, wptr_old, rptr;
int rec_cnt_dw = count >> 2;
u32 chunk, ent_sz;
u8 *s = (u8 *)src;
@@ -472,9 +472,11 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
return;
}
+ mutex_lock(&ring->adev->cper.ring_lock);
+
wptr_old = ring->wptr;
+ rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
- mutex_lock(&ring->adev->cper.ring_lock);
while (count) {
ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
chunk = umin(ent_sz, count);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a2adaacf6adb..d3f220be2ef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1139,6 +1139,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}
+ if (!amdgpu_vm_ready(vm))
+ return -EINVAL;
+
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6f93473436be..01d234cf8156 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2570,9 +2570,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin)
- return 0;
-
switch (adev->asic_type) {
default:
return 0;
@@ -2594,6 +2591,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
+ if (adev->mman.discovery_bin)
+ return 0;
chip_name = "navi12";
break;
}
@@ -3271,6 +3270,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LEGACY:
case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 81b3443c8d7f..efe0058b48ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -276,7 +276,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
u32 msg;
if (!amdgpu_sriov_vf(adev)) {
- /* It can take up to a second for IFWI init to complete on some dGPUs,
+ /* It can take up to two seconds for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged via e.g., USB4, we need to
@@ -284,7 +284,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* continue.
*/
- for (i = 0; i < 1000; i++) {
+ for (i = 0; i < 2000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
@@ -2555,40 +2555,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_RAVEN:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- /* this is not fatal. We have a fallback below
- * if the new firmwares are not present. some of
- * this will be overridden below to keep things
- * consistent with the current behavior.
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
*/
- r = amdgpu_discovery_reg_base_init(adev);
- if (!r) {
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- }
- break;
- default:
- r = amdgpu_discovery_reg_base_init(adev);
- if (r) {
- drm_err(&adev->ddev, "discovery failed: %d\n", r);
- return r;
- }
-
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- break;
- }
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2611,6 +2582,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2633,6 +2609,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
adev->vcn.num_vcn_inst = 1;
@@ -2674,6 +2655,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
}
break;
case CHIP_VEGA20:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 8;
@@ -2697,6 +2683,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
adev->vcn.num_vcn_inst = 2;
@@ -2725,6 +2716,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
adev->vcn.num_vcn_inst = 2;
@@ -2751,6 +2747,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r) {
+ drm_err(&adev->ddev, "discovery failed: %d\n", r);
+ return r;
+ }
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ff98c87b2e0b..ce27cb5bb05e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -285,6 +285,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
+static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int ret;
+
+ /*
+ * Pin to keep buffer in place while it's vmap'ed. The actual
+ * domain is not that important as long as it's mapable. Using
+ * GTT and VRAM should be compatible with most use cases.
+ */
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
+ if (ret)
+ return ret;
+ ret = drm_gem_dmabuf_vmap(dma_buf, map);
+ if (ret)
+ amdgpu_bo_unpin(bo);
+
+ return ret;
+}
+
+static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ drm_gem_dmabuf_vunmap(dma_buf, map);
+ amdgpu_bo_unpin(bo);
+}
+
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
.pin = amdgpu_dma_buf_pin,
@@ -294,8 +324,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
+ .vmap = amdgpu_dma_buf_vmap,
+ .vunmap = amdgpu_dma_buf_vunmap,
};
/**
@@ -514,7 +544,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
return false;
if (drm_gem_is_imported(obj)) {
- struct dma_buf *dma_buf = obj->dma_buf;
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
/* No XGMI with non AMD GPUs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 6626a6e64ff5..d1ccbfcf21fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -317,7 +317,8 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
*/
if (!vm->is_compute_context || !vm->process_info)
return 0;
- if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf))
+ if (!drm_gem_is_imported(obj) ||
+ !dma_buf_is_dynamic(obj->import_attach->dmabuf))
return 0;
mutex_lock_nested(&vm->process_info->lock, 1);
if (!WARN_ON(!vm->process_info->eviction_fence)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index e6061d45f142..9b1c55115921 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -365,13 +365,6 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
- /*
- * The VM structure might be released after the VMID is
- * assigned, we had multiple problems with people trying to use
- * the VM pointer so better set it to NULL.
- */
- if (!fence)
- job->vm = NULL;
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index e56ba93a8df6..a974265837f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -55,7 +55,8 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev) || !adev->asic_funcs->get_pcie_replay_count ||
+ if (amdgpu_sriov_vf(adev) || !adev->asic_funcs ||
+ !adev->asic_funcs->get_pcie_replay_count ||
(!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 0bd51a04be79..23484317a5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1039,15 +1039,28 @@ int psp_update_fw_reservation(struct psp_context *psp)
{
int ret;
uint64_t reserv_addr, reserv_addr_ext;
- uint32_t reserv_size, reserv_size_ext;
+ uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
struct amdgpu_device *adev = psp->adev;
+ mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
+
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) &&
- (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3)))
+ switch (mp0_ip_ver) {
+ case IP_VERSION(14, 0, 2):
+ if (adev->psp.sos.fw_version < 0x3b0e0d)
+ return 0;
+ break;
+
+ case IP_VERSION(14, 0, 3):
+ if (adev->psp.sos.fw_version < 0x3a0e14)
+ return 0;
+ break;
+
+ default:
return 0;
+ }
ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a5c3f64cbce6..6379bb25bf5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -719,6 +719,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
prop->eop_gpu_addr = ring->eop_gpu_addr;
prop->use_doorbell = ring->use_doorbell;
prop->doorbell_index = ring->doorbell_index;
+ prop->kernel_queue = true;
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index a0b50a8ac9c4..e96f24e9ad57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -32,6 +32,7 @@
static const struct kicker_device kicker_device_list[] = {
{0x744B, 0x00},
+ {0x7551, 0xC8}
};
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index c3ace8030530..8190c24a649a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -471,6 +471,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
kfree(queue);
+ r = -EINVAL;
goto unlock;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d5c0637d7392..c39bb06ebda1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -654,11 +654,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if VM is not evicting.
+ * True if the VM is not evicting and none of its entities are stopped.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- bool empty;
bool ret;
amdgpu_vm_eviction_lock(vm);
@@ -666,10 +665,18 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
amdgpu_vm_eviction_unlock(vm);
spin_lock(&vm->status_lock);
- empty = list_empty(&vm->evicted);
+ ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
- return ret && empty;
+ spin_lock(&vm->immediate.lock);
+ ret &= !vm->immediate.stopped;
+ spin_unlock(&vm->immediate.lock);
+
+ spin_lock(&vm->delayed.lock);
+ ret &= !vm->delayed.stopped;
+ spin_unlock(&vm->delayed.lock);
+
+ return ret;
}
/**
@@ -1276,7 +1283,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct drm_gem_object *obj = &bo->tbo.base;
if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
- struct dma_buf *dma_buf = obj->dma_buf;
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -2414,13 +2421,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
- true, timeout);
+ timeout = drm_sched_entity_flush(&vm->immediate, timeout);
if (timeout <= 0)
return timeout;
- return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
+ return drm_sched_entity_flush(&vm->delayed, timeout);
}
static void amdgpu_vm_destroy_task_info(struct kref *kref)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 07c936e90d8e..78f9e86ccc09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
- amdgpu_vram_mgr_do_reserve(man);
-
drm_buddy_free_list(mm, &vres->blocks, vres->flags);
+ amdgpu_vram_mgr_do_reserve(man);
mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 914cf4bfb033..811124ff88a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -227,6 +227,7 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
@@ -250,12 +251,14 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcp = 4;
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
- if (amdgpu_sriov_vf(adev))
+ if (gc_ver == IP_VERSION(9, 5, 0))
*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c01c241a1b06..c85de8c8f6f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1612,9 +1612,9 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if (!adev->gfx.disable_uq &&
- adev->gfx.me_fw_version >= 2390 &&
- adev->gfx.pfp_fw_version >= 2530 &&
- adev->gfx.mec_fw_version >= 2600 &&
+ adev->gfx.me_fw_version >= 2420 &&
+ adev->gfx.pfp_fw_version >= 2580 &&
+ adev->gfx.mec_fw_version >= 2650 &&
adev->mes.fw_version[0] >= 120) {
adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
@@ -4129,6 +4129,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4281,8 +4283,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 09bf72237d1d..fd44d5503e28 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -79,6 +79,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin");
static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
@@ -586,7 +587,7 @@ out:
static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -613,9 +614,14 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
if (!amdgpu_sriov_vf(adev)) {
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
- AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_rlc.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -3020,6 +3026,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -3169,8 +3177,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
index df898dbb746e..58cd87db8061 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
@@ -34,12 +34,13 @@
MODULE_FIRMWARE("amdgpu/gc_12_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu_kicker.bin");
#define TRANSFER_RAM_MASK 0x001c0000
static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct imu_firmware_header_v1_0 *imu_hdr;
struct amdgpu_firmware_info *info = NULL;
@@ -47,8 +48,12 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
@@ -362,7 +367,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev,
static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev)
{
u32 reg_data, size = 0;
- const u32 *data;
+ const u32 *data = NULL;
int r = -EINVAL;
WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 134c4ec10887..910337dc28d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -36,40 +36,47 @@
static const char *mmhub_client_ids_v3_0_1[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
- [8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [8][0] = "MPM",
+ [12][0] = "ISPTNR",
+ [14][0] = "ISPCRD0",
+ [15][0] = "ISPCRD1",
+ [16][0] = "ISPCRD2",
+ [22][0] = "HDP",
+ [23][0] = "LSDMA",
+ [24][0] = "JPEG",
+ [27][0] = "VSCH",
+ [28][0] = "VCNU",
+ [29][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
[5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
- [8][1] = "MPIO",
- [10][1] = "DBGU0",
- [11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
- [14][1] = "XDP",
- [15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [8][1] = "MPM",
+ [10][1] = "ISPMWR0",
+ [11][1] = "ISPMWR1",
+ [12][1] = "ISPTNR",
+ [13][1] = "ISPSWR",
+ [14][1] = "ISPCWR0",
+ [15][1] = "ISPCWR1",
+ [16][1] = "ISPCWR2",
+ [17][1] = "ISPCWR3",
+ [18][1] = "XDP",
+ [21][1] = "OSSSYS",
+ [22][1] = "HDP",
+ [23][1] = "LSDMA",
+ [24][1] = "JPEG",
+ [27][1] = "VSCH",
+ [28][1] = "VCNU",
+ [29][1] = "VCN",
};
static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index bc3d6c2fc87a..f6fc9778bc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -40,30 +40,129 @@
static const char *mmhub_client_ids_v3_3[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPM",
+ [9][0] = "ISPPDPRD",
+ [10][0] = "ISPCSTATRD",
+ [11][0] = "ISPBYRPRD",
+ [12][0] = "ISPRGBPRD",
+ [13][0] = "ISPMCFPRD",
+ [14][0] = "ISPMCFPRD1",
+ [15][0] = "ISPYUVPRD",
+ [16][0] = "ISPMCSCRD",
+ [17][0] = "ISPGDCRD",
+ [18][0] = "ISPLMERD",
+ [22][0] = "ISPXT1",
+ [23][0] = "ISPIXT1",
[24][0] = "HDP",
[25][0] = "LSDMA",
[26][0] = "JPEG",
[27][0] = "VPE",
+ [28][0] = "VSCH",
[29][0] = "VCNU",
[30][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
+ [5][1] = "ISPCSISWR",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPM",
+ [9][1] = "ISPPDPWR",
+ [10][1] = "ISPCSTATWR",
+ [11][1] = "ISPBYRPWR",
+ [12][1] = "ISPRGBPWR",
+ [13][1] = "ISPMCFPWR",
+ [14][1] = "ISPMWR0",
+ [15][1] = "ISPYUVPWR",
+ [16][1] = "ISPMCSCWR",
+ [17][1] = "ISPGDCWR",
+ [18][1] = "ISPLMEWR",
+ [20][1] = "ISPMWR2",
[21][1] = "OSSSYS",
+ [22][1] = "ISPXT1",
+ [23][1] = "ISPIXT1",
[24][1] = "HDP",
[25][1] = "LSDMA",
[26][1] = "JPEG",
[27][1] = "VPE",
+ [28][1] = "VSCH",
[29][1] = "VCNU",
[30][1] = "VCN",
};
+static const char *mmhub_client_ids_v3_3_1[][2] = {
+ [0][0] = "VMC",
+ [4][0] = "DCEDMC",
+ [6][0] = "MP0",
+ [7][0] = "MP1",
+ [8][0] = "MPM",
+ [24][0] = "HDP",
+ [25][0] = "LSDMA",
+ [26][0] = "JPEG0",
+ [27][0] = "VPE0",
+ [28][0] = "VSCH",
+ [29][0] = "VCNU0",
+ [30][0] = "VCN0",
+ [32+1][0] = "ISPXT",
+ [32+2][0] = "ISPIXT",
+ [32+9][0] = "ISPPDPRD",
+ [32+10][0] = "ISPCSTATRD",
+ [32+11][0] = "ISPBYRPRD",
+ [32+12][0] = "ISPRGBPRD",
+ [32+13][0] = "ISPMCFPRD",
+ [32+14][0] = "ISPMCFPRD1",
+ [32+15][0] = "ISPYUVPRD",
+ [32+16][0] = "ISPMCSCRD",
+ [32+17][0] = "ISPGDCRD",
+ [32+18][0] = "ISPLMERD",
+ [32+22][0] = "ISPXT1",
+ [32+23][0] = "ISPIXT1",
+ [32+26][0] = "JPEG1",
+ [32+27][0] = "VPE1",
+ [32+29][0] = "VCNU1",
+ [32+30][0] = "VCN1",
+ [3][1] = "DCEDWB",
+ [4][1] = "DCEDMC",
+ [6][1] = "MP0",
+ [7][1] = "MP1",
+ [8][1] = "MPM",
+ [21][1] = "OSSSYS",
+ [24][1] = "HDP",
+ [25][1] = "LSDMA",
+ [26][1] = "JPEG0",
+ [27][1] = "VPE0",
+ [28][1] = "VSCH",
+ [29][1] = "VCNU0",
+ [30][1] = "VCN0",
+ [32+1][1] = "ISPXT",
+ [32+2][1] = "ISPIXT",
+ [32+5][1] = "ISPCSISWR",
+ [32+9][1] = "ISPPDPWR",
+ [32+10][1] = "ISPCSTATWR",
+ [32+11][1] = "ISPBYRPWR",
+ [32+12][1] = "ISPRGBPWR",
+ [32+13][1] = "ISPMCFPWR",
+ [32+14][1] = "ISPMWR0",
+ [32+15][1] = "ISPYUVPWR",
+ [32+16][1] = "ISPMCSCWR",
+ [32+17][1] = "ISPGDCWR",
+ [32+18][1] = "ISPLMEWR",
+ [32+19][1] = "ISPMWR1",
+ [32+20][1] = "ISPMWR2",
+ [32+22][1] = "ISPXT1",
+ [32+23][1] = "ISPIXT1",
+ [32+26][1] = "JPEG1",
+ [32+27][1] = "VPE1",
+ [32+29][1] = "VCNU1",
+ [32+30][1] = "VCN1",
+};
+
static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
@@ -102,12 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
- case IP_VERSION(3, 3, 1):
case IP_VERSION(3, 3, 2):
mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
mmhub_client_ids_v3_3[cid][rw] :
cid == 0x140 ? "UMSCH" : NULL;
break;
+ case IP_VERSION(3, 3, 1):
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ?
+ mmhub_client_ids_v3_3_1[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
+ break;
default:
mmhub_cid = NULL;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
index f2ab5001b492..951998454b25 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
@@ -37,39 +37,31 @@
static const char *mmhub_client_ids_v4_1_0[][2] = {
[0][0] = "VMC",
[4][0] = "DCEDMC",
- [5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [16][0] = "LSDMA",
+ [17][0] = "JPEG",
+ [19][0] = "VCNU",
+ [22][0] = "VSCH",
+ [23][0] = "HDP",
+ [32+23][0] = "VCNRD",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
- [5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPIO",
[10][1] = "DBGU0",
[11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
+ [12][1] = "DBGUNBIO",
[14][1] = "XDP",
[15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [16][1] = "LSDMA",
+ [17][1] = "JPEG",
+ [18][1] = "VCNWR",
+ [19][1] = "VCNU",
+ [22][1] = "VSCH",
+ [23][1] = "HDP",
};
static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 36ef4a72ad1d..38dfc5c19f2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -34,7 +34,9 @@
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index b8b06d4c5882..326ecc8d37d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1353,7 +1353,7 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
- if ((adev->sdma.instance[0].fw_version >= 7836028) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c457be3a3c56..9e74c9822e62 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1218,6 +1218,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
AMD_PG_SUPPORT_JPEG;
/*TODO: need a new external_rev_id for GC 9.4.4? */
adev->external_rev_id = adev->rev_id + 0x46;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->external_rev_id = adev->rev_id + 0x50;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 2d91027e2a74..6c5c7c1bf5ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2725,7 +2725,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
- *mqd_size = mqd_mgr->mqd_size;
+ *mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask);
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index aee2212e52f6..33aa23450b3f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -78,8 +78,8 @@ err_ioctl:
static void kfd_exit(void)
{
kfd_cleanup_processes();
- kfd_debugfs_fini();
kfd_process_destroy_wq();
+ kfd_debugfs_fini();
kfd_procfs_shutdown();
kfd_topology_shutdown();
kfd_chardev_exit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 97933d2a3803..f2dee320fada 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -373,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac
{
struct v9_mqd *m = get_mqd(mqd);
- *ctl_stack_size = m->cp_hqd_cntl_stack_size;
+ *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
@@ -388,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi
memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}
+static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm,
+ void *mqd,
+ void *mqd_dst,
+ void *ctl_stack_dst)
+{
+ struct v9_mqd *m;
+ int xcc;
+ uint64_t size = get_mqd(mqd)->cp_mqd_stride_size;
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ m = get_mqd(mqd + size * xcc);
+
+ checkpoint_mqd(mm, m,
+ (uint8_t *)mqd_dst + sizeof(*m) * xcc,
+ (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc);
+ }
+}
+
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
@@ -764,13 +782,35 @@ static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
const void *mqd_src,
const void *ctl_stack_src, u32 ctl_stack_size)
{
- restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size);
- if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) {
- struct v9_mqd *m;
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ u32 mqd_ctl_stack_size;
+ struct v9_mqd *m;
+ u32 num_xcc;
+ int xcc;
- m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
- m->cp_hqd_pq_doorbell_control |= 1 <<
- CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
+ uint64_t offset = mm->mqd_stride(mm, qp);
+
+ mm->dev->dqm->current_logical_xcc_start++;
+
+ num_xcc = NUM_XCC(mm->dev->xcc_mask);
+ mqd_ctl_stack_size = ctl_stack_size / num_xcc;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = mqd_mem_obj->cpu_ptr;
+ if (gart_addr)
+ *gart_addr = mqd_mem_obj->gpu_addr;
+
+ for (xcc = 0; xcc < num_xcc; xcc++) {
+ get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc);
+ restore_mqd(mm, (void **)&m,
+ &xcc_mqd_mem_obj,
+ NULL,
+ qp,
+ (uint8_t *)mqd_src + xcc * sizeof(*m),
+ (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size,
+ mqd_ctl_stack_size);
}
}
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
@@ -906,7 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->free_mqd = kfd_free_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_checkpoint_info = get_checkpoint_info;
- mqd->checkpoint_mqd = checkpoint_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
@@ -918,16 +957,18 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
- mqd->restore_mqd = restore_mqd_v9_4_3;
mqd->destroy_mqd = destroy_mqd_v9_4_3;
mqd->get_wave_state = get_wave_state_v9_4_3;
+ mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3;
+ mqd->restore_mqd = restore_mqd_v9_4_3;
} else {
mqd->init_mqd = init_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->restore_mqd = restore_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->get_wave_state = get_wave_state;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
}
break;
case KFD_MQD_TYPE_HIQ:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index c643e0ccec52..7fbb5c274ccc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -914,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
- /* data stored in this order: priv_data, mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
q_data->mqd_size = mqd_size;
q_data->ctl_stack_size = ctl_stack_size;
@@ -963,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p,
}
static void set_queue_properties_from_criu(struct queue_properties *qp,
- struct kfd_criu_queue_priv_data *q_data)
+ struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc)
{
qp->is_interop = false;
qp->queue_percent = q_data->q_percent;
@@ -976,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
- qp->ctl_stack_size = q_data->ctl_stack_size;
+ if (q_data->type == KFD_QUEUE_TYPE_COMPUTE)
+ qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc;
+ else
+ qp->ctl_stack_size = q_data->ctl_stack_size;
+
qp->type = q_data->type;
qp->format = q_data->format;
}
@@ -1036,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
goto exit;
}
- /* data stored in this order: mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
memset(&qp, 0, sizeof(qp));
- set_queue_properties_from_criu(&qp, q_data);
+ set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask));
print_queue_properties(&qp);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 096b23ad4845..a0ca3b2c6bd8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3398,8 +3398,10 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
r = dm_dmub_hw_init(adev);
- if (r)
+ if (r) {
drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
+ return r;
+ }
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -4754,16 +4756,16 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
-/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */
+/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
static inline u32 scale_input_to_fw(int min, int max, u64 input)
{
- return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min);
+ return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
}
-/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */
+/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
static inline u32 scale_fw_to_input(int min, int max, u64 input)
{
- return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL);
+ return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
}
static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
@@ -4983,9 +4985,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
caps = &dm->backlight_caps[aconnector->bl_idx];
if (get_brightness_range(caps, &min, &max)) {
if (power_supply_is_system_supplied() > 0)
- props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->ac_level, 100);
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100);
else
- props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->dc_level, 100);
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100);
/* min is zero, so max needs to be adjusted */
props.max_brightness = max - min;
drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
@@ -5410,7 +5412,8 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
- drm_atomic_private_obj_fini(&dm->atomic_obj);
+ if (dm->atomic_obj.state)
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
}
/******************************************************************************
@@ -7789,6 +7792,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
int ret;
+ if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
+ return -EINVAL;
+
trace_amdgpu_dm_connector_atomic_check(new_con_state);
if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 2551823382f8..45feb404b097 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -299,6 +299,25 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
if (enable) {
+ struct dc *dc = adev->dm.dc;
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+ struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+ bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
+ pr->config.replay_supported;
+
+ /*
+ * IPS and self-refresh can cause vblank counter resets between
+ * vblank disable and enable.
+ * This may cause the system to get stuck waiting for the vblank counter.
+ * Call this function to estimate the missed vblanks from timestamps and
+ * update the vblank counter in DRM.
+ */
+ if (dc->caps.ips_support &&
+ dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
+ sr_supported && vblank->config.disable_immediate)
+ drm_crtc_vblank_restore(crtc);
+
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_crtc_vrr_active(acrtc_state))
rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
@@ -661,6 +680,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
+ struct drm_plane_state *primary_state;
+
+ /* Pull in primary plane for correct VRR handling */
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index c7d13e743e6c..b726bcd18e29 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3988,7 +3988,7 @@ static int capabilities_show(struct seq_file *m, void *unused)
struct hubbub *hubbub = dc->res_pool->hubbub;
- if (hubbub->funcs->get_mall_en)
+ if (hubbub && hubbub->funcs->get_mall_en)
hubbub->funcs->get_mall_en(hubbub, &mall_in_use);
if (dc->cap_funcs.get_subvp_en)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f984cb0cb889..ff7b867ae98b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -119,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
- if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
- return false;
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
+ return false;
+ }
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 67f08495b7e6..154fd2c18e88 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
return object_id;
}
- if (tbl->ucNumberOfObjects <= i) {
- dm_error("Can't find connector id %d in connector table of size %d.\n",
- i, tbl->ucNumberOfObjects);
+ if (tbl->ucNumberOfObjects <= i)
return object_id;
- }
id = le16_to_cpu(tbl->asObjects[i].usObjectID);
object_id = object_id_from_bios_object_id(id);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 2bcae0643e61..58e88778da7f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3(
allocation.sPCLKInput.usFbDiv =
cpu_to_le16((uint16_t)bp_params->feedback_divider);
allocation.sPCLKInput.ucFracFbDiv =
- (uint8_t)bp_params->fractional_feedback_divider;
+ (uint8_t)(bp_params->fractional_feedback_divider / 100000);
allocation.sPCLKInput.ucPostDiv =
(uint8_t)bp_params->pixel_clock_post_divider;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 33b9d36619ff..4071851f9e86 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return NULL;
}
dce60_clk_mgr_construct(ctx, clk_mgr);
- dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26feefbb8990..dbd6ef1b60a0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } };
int dentist_get_divider_from_did(int did)
{
@@ -245,6 +245,11 @@ int dce_set_clock(
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ /* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */
+ if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 ||
+ clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4)
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0;
+
if (clk_mgr_dce->dfs_bypass_active)
pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
@@ -386,8 +391,6 @@ static void dce_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -400,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index f8409453434c..13cf415e38e5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -120,9 +120,15 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg)
{
+ struct dc *dc = context->clk_mgr->ctx->dc;
int j;
int num_cfgs = 0;
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+ pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+ pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator;
+
for (j = 0; j < context->stream_count; j++) {
int k;
@@ -164,6 +170,23 @@ void dce110_fill_display_configs(
cfg->v_refresh /= stream->timing.h_total;
cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
/ stream->timing.v_total;
+
+ /* Find first CRTC index and calculate its line time.
+ * This is necessary for DPM on SI GPUs.
+ */
+ if (cfg->pipe_idx < pp_display_cfg->crtc_index) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index = cfg->pipe_idx;
+ pp_display_cfg->line_time_in_us =
+ timing->h_total * 10000 / timing->pix_clk_100hz;
+ }
+ }
+
+ if (!num_cfgs) {
+ pp_display_cfg->crtc_index = 0;
+ pp_display_cfg->line_time_in_us = 0;
}
pp_display_cfg->display_count = num_cfgs;
@@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
- pp_display_cfg->avail_mclk_switch_time_us =
- dce110_get_min_vblank_time_us(context);
- /* TODO: dce11.2*/
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
- pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
-
dce110_fill_display_configs(context, pp_display_cfg);
- /* TODO: is this still applicable?*/
- if (pp_display_cfg->display_count == 1) {
- const struct dc_crtc_timing *timing =
- &context->streams[0]->timing;
-
- pp_display_cfg->crtc_index =
- pp_display_cfg->disp_configs[0].pipe_idx;
- pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
- }
-
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
index 0267644717b2..a39641a0ff09 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
@@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- int dprefclk_wdivider;
- int dp_ref_clk_khz;
- int target_div;
+ struct dc_context *ctx = clk_mgr_base->ctx;
+ int dp_ref_clk_khz = 0;
- /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
-
- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+ if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev))
+ dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency;
+ else
+ dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
@@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c31f7f8e409f..dcc48b5238e5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -217,11 +217,24 @@ static bool create_links(
connectors_num,
num_virtual_links);
- // condition loop on link_count to allow skipping invalid indices
+ /* When getting the number of connectors, the VBIOS reports the number of valid indices,
+ * but it doesn't say which indices are valid, and not every index has an actual connector.
+ * So, if we don't find a connector on an index, that is not an error.
+ *
+ * - There is no guarantee that the first N indices will be valid
+ * - VBIOS may report a higher number of valid indices than there are actual connectors
+ * - Some VBIOS have valid configurations for more connectors than there actually are
+ * on the card. This may be because the manufacturer used the same VBIOS for different
+ * variants of the same card.
+ */
for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
+ struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
struct link_init_data link_init_params = {0};
struct dc_link *link;
+ if (connector_id.id == CONNECTOR_ID_UNKNOWN)
+ continue;
+
DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
link_init_params.ctx = dc->ctx;
@@ -938,17 +951,18 @@ static void dc_destruct(struct dc *dc)
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
- if (dc->ctx->gpio_service)
- dal_gpio_service_destroy(&dc->ctx->gpio_service);
+ if (dc->ctx) {
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
- if (dc->ctx->created_bios)
- dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ kfree(dc->ctx->logger);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
- kfree(dc->ctx->logger);
- dc_perf_trace_destroy(&dc->ctx->perf_trace);
-
- kfree(dc->ctx);
- dc->ctx = NULL;
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+ }
kfree(dc->bw_vbios);
dc->bw_vbios = NULL;
@@ -5443,7 +5457,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
else
ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
- if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
+ if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_01))
clear_update_flags(srf_updates, surface_count, stream);
return ret;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 4e06468a6284..0421b267a0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -377,10 +377,16 @@ static bool setup_engine(
}
/**
+ * cntl_stuck_hw_workaround - Workaround for I2C engine stuck state
+ * @dce_i2c_hw: Pointer to dce_i2c_hw structure
+ *
* If we boot without an HDMI display, the I2C engine does not get initialized
* correctly. One of its symptoms is that SW_USE_I2C does not get cleared after
- * acquire, so that after setting SW_DONE_USING_I2C on release, the engine gets
+ * acquire. After setting SW_DONE_USING_I2C on release, the engine gets
* immediately reacquired by SW, preventing DMUB from using it.
+ *
+ * This function checks the I2C arbitration status and applies a release
+ * workaround if necessary.
*/
static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 4a9d07c31bc5..0c50fe266c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -896,13 +896,13 @@ void dce110_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
@@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index e7a318e26d38..fcd3d86ad517 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -4,7 +4,6 @@
#include "dc.h"
#include "dc_dmub_srv.h"
-#include "dc_dp_types.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
#include "dmub_replay.h"
@@ -44,45 +43,21 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s
/*
* Enable/Disable Replay.
*/
-static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst,
- struct dc_link *link)
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
uint32_t retry_count;
enum replay_state state = REPLAY_STATE_0;
- struct pipe_ctx *pipe_ctx = NULL;
- struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
- uint8_t i;
memset(&cmd, 0, sizeof(cmd));
cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
cmd.replay_enable.data.panel_inst = panel_inst;
cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
- if (enable) {
+ if (enable)
cmd.replay_enable.data.enable = REPLAY_ENABLE;
- // hpo stream/link encoder assignments are not static, need to update everytime we try to enable replay
- if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
- for (i = 0; i < MAX_PIPES; i++) {
- if (res_ctx &&
- res_ctx->pipe_ctx[i].stream &&
- res_ctx->pipe_ctx[i].stream->link &&
- res_ctx->pipe_ctx[i].stream->link == link &&
- res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
- pipe_ctx = &res_ctx->pipe_ctx[i];
- //TODO: refactor for multi edp support
- break;
- }
- }
-
- if (!pipe_ctx)
- return;
-
- cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
- }
- } else
+ else
cmd.replay_enable.data.enable = REPLAY_DISABLE;
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
@@ -174,17 +149,6 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->digbe_inst = replay_context->digbe_inst;
copy_settings_data->digfe_inst = replay_context->digfe_inst;
- if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
- if (pipe_ctx->stream_res.hpo_dp_stream_enc)
- copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- else
- copy_settings_data->hpo_stream_enc_inst = 0;
- if (pipe_ctx->link_res.hpo_dp_link_enc)
- copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
- else
- copy_settings_data->hpo_link_enc_inst = 0;
- }
-
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -247,7 +211,6 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
- pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index ccbe385e132c..e6346c0ffc0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -19,7 +19,7 @@ struct dmub_replay_funcs {
void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
uint8_t panel_inst);
void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
- uint8_t panel_inst, struct dc_link *link);
+ uint8_t panel_inst);
bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
struct replay_context *replay_context, uint8_t panel_inst);
void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index a454d16e6586..1f53a9f0c0ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -152,7 +152,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
}
/* Forward Declarations */
-static unsigned int get_min_slice_count_for_odm(
+static unsigned int get_min_dsc_slice_count_for_odm(
const struct display_stream_compressor *dsc,
const struct dsc_enc_caps *dsc_enc_caps,
const struct dc_crtc_timing *timing);
@@ -466,7 +466,7 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
- unsigned int min_slice_count;
+ unsigned int min_dsc_slice_count;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config = {0};
@@ -478,14 +478,14 @@ bool dc_dsc_compute_bandwidth_range(
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
- min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- &options, link_encoding, min_slice_count, &config);
+ &options, link_encoding, min_dsc_slice_count, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
@@ -593,14 +593,12 @@ static void build_dsc_enc_caps(
struct dc *dc;
- memset(&single_dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
-
if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps)
return;
dc = dsc->ctx->dc;
- if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool)
+ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool || dc->debug.disable_dsc)
return;
/* get max DSCCLK from clk_mgr */
@@ -634,7 +632,7 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
return (value + 9) / 10;
}
-static unsigned int get_min_slice_count_for_odm(
+static unsigned int get_min_dsc_slice_count_for_odm(
const struct display_stream_compressor *dsc,
const struct dsc_enc_caps *dsc_enc_caps,
const struct dc_crtc_timing *timing)
@@ -651,6 +649,10 @@ static unsigned int get_min_slice_count_for_odm(
}
}
+ /* validate parameters */
+ if (max_dispclk_khz == 0 || dsc_enc_caps->max_slice_width == 0)
+ return 1;
+
/* consider minimum odm slices required due to
* 1) display pipe throughput (dispclk)
* 2) max image width per slice
@@ -669,13 +671,12 @@ static void get_dsc_enc_caps(
{
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
- if (!dsc)
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || dsc->ctx->dc->debug.disable_dsc)
return;
/* check if reported cap global or only for a single DCN DSC enc */
if (dsc->funcs->dsc_get_enc_caps) {
- if (!dsc->ctx->dc->debug.disable_dsc)
- dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+ dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
} else {
build_dsc_enc_caps(dsc, dsc_enc_caps);
}
@@ -1295,10 +1296,10 @@ bool dc_dsc_compute_config(
{
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
- unsigned int min_slice_count;
+ unsigned int min_dsc_slice_count;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
- min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
@@ -1306,7 +1307,7 @@ bool dc_dsc_compute_config(
timing,
options,
link_encoding,
- min_slice_count,
+ min_dsc_slice_count,
dsc_cfg);
return is_dsc_possible;
}
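The renamed get_min_dsc_slice_count_for_odm() now bails out with a single slice when either the maximum DISPCLK or the encoder's maximum slice width reads back as zero, which can happen with DSC disabled via debug options (also newly checked in build_dsc_enc_caps() and get_dsc_enc_caps() above) and would otherwise divide by zero. The underlying computation is two ceiling divisions; a simplified sketch under the assumption that these are the only inputs (the real function also folds in ODM combine factors):

    /* Sketch: minimum slice count limited by per-pipe throughput and slice width. */
    static unsigned int min_dsc_slices(unsigned int pix_clk_100hz,
                                       unsigned int max_dispclk_khz,
                                       unsigned int max_slice_width,
                                       unsigned int h_addressable)
    {
            unsigned int by_throughput, by_width;

            if (max_dispclk_khz == 0 || max_slice_width == 0)
                    return 1;    /* invalid caps: fall back to one slice */

            /* pixel clock is in 100Hz units, so divide by 10 (rounded up) for kHz */
            by_throughput = DIV_ROUND_UP(DIV_ROUND_UP(pix_clk_100hz, 10),
                                         max_dispclk_khz);
            by_width = DIV_ROUND_UP(h_addressable, max_slice_width);

            return max(by_throughput, by_width);
    }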
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index e7927b8f5ba3..98ec9b5a559c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -944,7 +944,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
// TODO: Handle mux change case if force_static is set
// If force_static is set, just change the replay_allow_active state directly
if (replay != NULL && link->replay_settings.replay_feature_enabled)
- replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link);
+ replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
link->replay_settings.replay_allow_active = *allow_active;
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 58b59d52dc9d..53b60044653f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -373,7 +373,7 @@ static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
.num_stream_encoder = 6,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 6,
};
@@ -389,7 +389,7 @@ static const struct resource_caps res_cap_64 = {
.num_timing_generator = 2,
.num_audio = 2,
.num_stream_encoder = 2,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 2,
};
@@ -973,21 +973,24 @@ static bool dce60_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
@@ -1365,21 +1368,24 @@ static bool dce64_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
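With num_pll raised to 3, PLL0 is reserved for the DP clock source and both DCE 6.0 and 6.4 now expose PLL1 and PLL2 as the two general-purpose clock sources whether or not an external DP reference exists. A condensed view of the resulting layout (has_external_dp_clock stands in for the bp->fw_info check; error handling omitted):

    /* Sketch: DCE6 clock source layout after the change. */
    if (has_external_dp_clock)
            pool->base.dp_clock_source =
                    dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
    else
            pool->base.dp_clock_source =
                    dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);

    /* PLL0 is DP-only on DCE 6.0/6.4, so it is never handed out as a generic source */
    pool->base.clock_sources[0] =
            dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
    pool->base.clock_sources[1] =
            dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
    pool->base.clk_src_count = 2;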
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index de708fdc1e80..663c49cce4aa 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -926,6 +926,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.seamless_boot_odm_combine = true,
.enable_legacy_fast_update = true,
.using_dml2 = false,
+ .disable_dsc_power_gate = true,
};
static const struct dc_panel_config panel_config_defaults = {
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c587b3441e07..6a69a788abe8 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -4048,14 +4048,6 @@ struct dmub_cmd_replay_copy_settings_data {
*/
uint8_t digbe_inst;
/**
- * @hpo_stream_enc_inst: HPO stream encoder instance
- */
- uint8_t hpo_stream_enc_inst;
- /**
- * @hpo_link_enc_inst: HPO link encoder instance
- */
- uint8_t hpo_link_enc_inst;
- /**
* AUX HW instance.
*/
uint8_t aux_inst;
@@ -4159,18 +4151,6 @@ struct dmub_rb_cmd_replay_enable_data {
* This does not support HDMI/DP2 for now.
*/
uint8_t phy_rate;
- /**
- * @hpo_stream_enc_inst: HPO stream encoder instance
- */
- uint8_t hpo_stream_enc_inst;
- /**
- * @hpo_link_enc_inst: HPO link encoder instance
- */
- uint8_t hpo_link_enc_inst;
- /**
- * @pad: Align structure to 4 byte boundary.
- */
- uint8_t pad[2];
};
/**
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index e58e7b93810b..6b7db8ec9a53 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_FAILURE;
}
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
mutex_lock(&psp->hdcp_context.mutex);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 4b64851fdb42..5fbfe7333b54 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3458,14 +3458,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
- if (((adev->family == AMDGPU_FAMILY_SI) ||
- ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
- (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
- return 0;
+ if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
+ if (adev->family == AMDGPU_FAMILY_SI ||
+ ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
+ (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
+ (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
+ return 0;
+ }
/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
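The rewrite groups the four power1_cap* attributes first and then lists the platforms that must hide them, which keeps the new SR-IOV GC 11.0.3 exclusion readable. It relies on the standard sysfs visibility hook, where returning 0 suppresses creation of the attribute; a stripped-down sketch (power_cap_unsupported() is a hypothetical stand-in for the platform checks):

    /* Sketch: hide an attribute by returning 0 from the group's .is_visible hook. */
    static umode_t hwmon_attributes_visible(struct kobject *kobj,
                                            struct attribute *attr, int index)
    {
            struct device *dev = kobj_to_dev(kobj);
            struct amdgpu_device *adev = dev_get_drvdata(dev);

            if ((attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
                 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr) &&
                power_cap_unsupported(adev))
                    return 0;            /* attribute is not created at all */

            return attr->mode;           /* otherwise keep the declared mode */
    }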
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 756afe78a6e5..b47cb4a5f488 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -77,6 +77,9 @@ static void smu_power_profile_mode_get(struct smu_context *smu,
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
+static int smu_od_edit_dpm_table(void *handle,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -2195,6 +2198,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
int ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
@@ -2226,6 +2230,18 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
adev->pm.dpm_enabled = true;
+ if (smu->current_power_limit) {
+ ret = smu_set_power_limit(smu, smu->current_power_limit);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
dev_info(adev->dev, "SMU is resumed successfully!\n");
return 0;
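The added tail of smu_resume() restores user-visible state that is lost across suspend: a previously programmed power limit is re-applied (tolerating -EOPNOTSUPP on SKUs that refuse runtime changes), and a manually committed overdrive table is re-committed. Condensed from the hunk, error handling elided:

    /* Sketch: re-apply cached user settings once DPM is back up after resume. */
    adev->pm.dpm_enabled = true;

    if (smu->current_power_limit)
            smu_set_power_limit(smu, smu->current_power_limit);

    if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
            smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);

The new forward declaration of smu_od_edit_dpm_table() near the top of the file is only needed because the function is defined later in the file than smu_resume().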
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 76c1adda83db..f9b0938c57ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32};
MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
+MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin");
#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
int smu_v14_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
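The kicker variant reuses the same request path and only swaps the firmware file name, keyed off amdgpu_is_kicker_fw(); the ucode_prefix buffer is also widened from 15 to 30 bytes, presumably for headroom with longer decoded prefixes. The two branches in the hunk compress to:

    /* Sketch: pick the kicker firmware image when the board requires it. */
    err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
                               amdgpu_is_kicker_fw(adev) ? "amdgpu/%s_kicker.bin"
                                                         : "amdgpu/%s.bin",
                               ucode_prefix);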
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 3aea32baea3d..f32474af90b3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1697,9 +1697,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_14_0_2_powerplay_table *powerplay_table =
+ table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
- uint32_t power_limit;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v14_0_get_current_power_limit(smu, &power_limit))
@@ -1712,11 +1714,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit)
- *max_power_limit = msg_limit;
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt;
+ od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+ } else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+ }
+ }
+
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
+
+ if (max_power_limit) {
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
- if (min_power_limit)
- *min_power_limit = 0;
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 + od_percent_lower);
+ *min_power_limit /= 100;
+ }
return 0;
}
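Instead of reporting a fixed [0, msg_limit] range, the minimum and maximum limits are now widened by the overdrive percentages read from the SKU table, using plain integer percentage math. For example, with a 200 W message limit and OverDriveLimitsBasicMax.Ppt = 10 the ceiling becomes 220 W:

    /* Sketch: integer percentage scaling used for the OD power-limit bounds. */
    static uint32_t scale_limit(uint32_t base, int od_percent)
    {
            /* 200 with +10% overdrive headroom -> 220 */
            return base * (100 + od_percent) / 100;
    }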
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index ed35e567d117..efe534977d12 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1474,8 +1474,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
&analogix_dp_bridge_funcs);
- if (!dp)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(dp))
+ return ERR_CAST(dp);
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
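devm_drm_bridge_alloc() reports failure with an ERR_PTR(), not NULL, so the old `!dp` test could never fire and a failed allocation would have been dereferenced. The fix checks IS_ERR() and forwards the encoded errno with ERR_CAST(), which keeps the error value while retyping the pointer for the function's return type:

    /* Sketch: ERR_PTR-returning allocators are checked with IS_ERR(), not !ptr. */
    dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
                               &analogix_dp_bridge_funcs);
    if (IS_ERR(dp))
            return ERR_CAST(dp);

If the caller returned an int instead of a pointer, the equivalent would be return PTR_ERR(dp).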
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index b63304d3a80f..b3e4cdff61d6 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -18,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ of_node_put(dev->of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
@@ -65,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent)
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
return ret;
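Both added of_node_put() calls balance the reference the registration path takes on the parent's of_node when it populates adev->dev.of_node (the added puts imply an of_node_get() there): one in the normal release callback, one on the auxiliary_device_init() failure path. A reduced sketch of the pairing, with hypothetical toy_* wrappers:

    /* Sketch: keep the of_node refcount balanced across register/release. */
    static int toy_register(struct device *parent, struct auxiliary_device *adev)
    {
            int ret;

            adev->dev.of_node = of_node_get(parent->of_node);    /* +1 */

            ret = auxiliary_device_init(adev);
            if (ret) {
                    of_node_put(adev->dev.of_node);              /* -1 on error */
                    return ret;
            }
            return 0;
    }

    static void toy_release(struct device *dev)
    {
            struct auxiliary_device *adev = to_auxiliary_dev(dev);

            of_node_put(dev->of_node);                           /* -1 on teardown */
            kfree(adev);
    }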
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index e3a8c0c0c945..464390372b34 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1836,7 +1836,7 @@ static int ti_sn_gpio_probe(struct auxiliary_device *adev,
pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input;
pdata->gchip.direction_output = ti_sn_bridge_gpio_direction_output;
pdata->gchip.get = ti_sn_bridge_gpio_get;
- pdata->gchip.set_rv = ti_sn_bridge_gpio_set;
+ pdata->gchip.set = ti_sn_bridge_gpio_set;
pdata->gchip.can_sleep = true;
pdata->gchip.names = ti_sn_bridge_gpio_names;
pdata->gchip.ngpio = SN_NUM_GPIOS;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index dd45d9b504d8..4bde00083047 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -1227,6 +1227,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
/**
* drm_bridge_detect - check if anything is attached to the bridge output
* @bridge: bridge control structure
+ * @connector: attached connector
*
* If the bridge supports output detection, as reported by the
* DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index bbc7fecb6f4a..db9b089ef62c 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -40,7 +40,7 @@
* mapping's backing &drm_gem_object buffers.
*
* &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
- * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
+ * all existing GPU VA mappings using this &drm_gem_object as backing buffer.
*
* GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
* keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
@@ -72,7 +72,7 @@
* but it can also be a 'dummy' object, which can be allocated with
* drm_gpuvm_resv_object_alloc().
*
- * In order to connect a struct drm_gpuva its backing &drm_gem_object each
+ * In order to connect a struct drm_gpuva to its backing &drm_gem_object each
* &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
* &drm_gpuvm_bo contains a list of &drm_gpuva structures.
*
@@ -81,7 +81,7 @@
* This is ensured by the API through drm_gpuvm_bo_obtain() and
* drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
* &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
- * particular combination. If not existent a new instance is created and linked
+ * particular combination. If not present, a new instance is created and linked
* to the &drm_gem_object.
*
* &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
@@ -108,7 +108,7 @@
* sequence of operations to satisfy a given map or unmap request.
*
* Therefore the DRM GPU VA manager provides an algorithm implementing splitting
- * and merging of existent GPU VA mappings with the ones that are requested to
+ * and merging of existing GPU VA mappings with the ones that are requested to
* be mapped or unmapped. This feature is required by the Vulkan API to
* implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this
* as VM BIND.
@@ -119,7 +119,7 @@
* execute in order to integrate the new mapping cleanly into the current state
* of the GPU VA space.
*
- * Depending on how the new GPU VA mapping intersects with the existent mappings
+ * Depending on how the new GPU VA mapping intersects with the existing mappings
* of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
* of unmap operations, a maximum of two remap operations and a single map
* operation. The caller might receive no callback at all if no operation is
@@ -139,16 +139,16 @@
* one unmap operation and one or two map operations, such that drivers can
* derive the page table update delta accordingly.
*
- * Note that there can't be more than two existent mappings to split up, one at
+ * Note that there can't be more than two existing mappings to split up, one at
* the beginning and one at the end of the new mapping, hence there is a
* maximum of two remap operations.
*
* Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
* call back into the driver in order to unmap a range of GPU VA space. The
- * logic behind this function is way simpler though: For all existent mappings
+ * logic behind this function is way simpler though: For all existing mappings
* enclosed by the given range unmap operations are created. For mappings which
- * are only partically located within the given range, remap operations are
- * created such that those mappings are split up and re-mapped partically.
+ * are only partially located within the given range, remap operations are
+ * created such that those mappings are split up and re-mapped partially.
*
* As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
* drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
@@ -168,7 +168,7 @@
* provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
* drm_gpuva_unmap() instead.
*
- * The following diagram depicts the basic relationships of existent GPU VA
+ * The following diagram depicts the basic relationships of existing GPU VA
* mappings, a newly requested mapping and the resulting mappings as implemented
* by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
*
@@ -218,7 +218,7 @@
*
*
* 4) Existent mapping is a left aligned subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -236,9 +236,9 @@
* and/or non-contiguous BO offset.
*
*
- * 5) Requested mapping's range is a left aligned subset of the existent one,
+ * 5) Requested mapping's range is a left aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
- * the existent one adjusting its BO offset.
+ * the existing one adjusting its BO offset.
*
* ::
*
@@ -271,9 +271,9 @@
* new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
*
*
- * 7) Requested mapping's range is a right aligned subset of the existent one,
+ * 7) Requested mapping's range is a right aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
- * the existent one, without adjusting the BO offset.
+ * the existing one, without adjusting the BO offset.
*
* ::
*
@@ -304,7 +304,7 @@
*
* 9) Existent mapping is overlapped at the end by the requested mapping backed
* by a different BO. Hence, map the requested mapping and split up the
- * existent one, without adjusting the BO offset.
+ * existing one, without adjusting the BO offset.
*
* ::
*
@@ -334,9 +334,9 @@
* new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
*
*
- * 11) Requested mapping's range is a centered subset of the existent one
+ * 11) Requested mapping's range is a centered subset of the existing one
* having a different backing BO. Hence, map the requested mapping and split
- * up the existent one in two mappings, adjusting the BO offset of the right
+ * up the existing one in two mappings, adjusting the BO offset of the right
* one accordingly.
*
* ::
@@ -351,7 +351,7 @@
* new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
*
*
- * 12) Requested mapping is a contiguous subset of the existent one. Split it
+ * 12) Requested mapping is a contiguous subset of the existing one. Split it
* up, but indicate that the backing PTEs could be kept.
*
* ::
@@ -367,7 +367,7 @@
*
*
* 13) Existent mapping is a right aligned subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -386,7 +386,7 @@
*
*
* 14) Existent mapping is a centered subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -406,7 +406,7 @@
*
* 15) Existent mappings is overlapped at the beginning by the requested mapping
* backed by a different BO. Hence, map the requested mapping and split up
- * the existent one, adjusting its BO offset accordingly.
+ * the existing one, adjusting its BO offset accordingly.
*
* ::
*
@@ -469,8 +469,8 @@
* make use of them.
*
* The below code is strictly limited to illustrate the generic usage pattern.
- * To maintain simplicitly, it doesn't make use of any abstractions for common
- * code, different (asyncronous) stages with fence signalling critical paths,
+ * To maintain simplicity, it doesn't make use of any abstractions for common
+ * code, different (asynchronous) stages with fence signalling critical paths,
* any other helpers or error handling in terms of freeing memory and dropping
* previously taken locks.
*
@@ -479,7 +479,7 @@
* // Allocates a new &drm_gpuva.
* struct drm_gpuva * driver_gpuva_alloc(void);
*
- * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
@@ -582,7 +582,7 @@
* .sm_step_unmap = driver_gpuva_unmap,
* };
*
- * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
@@ -680,7 +680,7 @@
*
* This helper is here to provide lockless list iteration. Lockless as in, the
* iterator releases the lock immediately after picking the first element from
- * the list, so list insertion deletion can happen concurrently.
+ * the list, so list insertion and deletion can happen concurrently.
*
* Elements popped from the original list are kept in a local list, so removal
* and is_empty checks can still happen while we're iterating the list.
@@ -1160,7 +1160,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
}
/**
- * drm_gpuvm_prepare_objects() - prepare all assoiciated BOs
+ * drm_gpuvm_prepare_objects() - prepare all associated BOs
* @gpuvm: the &drm_gpuvm
* @exec: the &drm_exec locking context
* @num_fences: the amount of &dma_fences to reserve
@@ -1230,13 +1230,13 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
/**
- * drm_gpuvm_exec_lock() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
*
* Acquires all dma-resv locks of all &drm_gem_objects the given
* &drm_gpuvm contains mappings of.
*
- * Addionally, when calling this function with struct drm_gpuvm_exec::extra
+ * Additionally, when calling this function with struct drm_gpuvm_exec::extra
* being set the driver receives the given @fn callback to lock additional
* dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
* would call drm_exec_prepare_obj() from within this callback.
@@ -1293,7 +1293,7 @@ fn_lock_array(struct drm_gpuvm_exec *vm_exec)
}
/**
- * drm_gpuvm_exec_lock_array() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
* @objs: additional &drm_gem_objects to lock
* @num_objs: the number of additional &drm_gem_objects to lock
@@ -1588,7 +1588,7 @@ drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
/**
- * drm_gpuvm_bo_obtain() - obtains and instance of the &drm_gpuvm_bo for the
+ * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
* given &drm_gpuvm and &drm_gem_object
* @gpuvm: The &drm_gpuvm the @obj is mapped in.
* @obj: The &drm_gem_object being mapped in the @gpuvm.
@@ -1624,7 +1624,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
/**
- * drm_gpuvm_bo_obtain_prealloc() - obtains and instance of the &drm_gpuvm_bo
+ * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
* for the given &drm_gpuvm and &drm_gem_object
* @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
*
@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
* @vm_bo: the &drm_gpuvm_bo to add or remove
* @evict: indicates whether the object is evicted
*
- * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvms evicted list.
+ * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
*/
void
drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
@@ -1790,7 +1790,7 @@ __drm_gpuva_remove(struct drm_gpuva *va)
* drm_gpuva_remove() - remove a &drm_gpuva
* @va: the &drm_gpuva to remove
*
- * This removes the given &va from the underlaying tree.
+ * This removes the given &va from the underlying tree.
*
* It is safe to use this function using the safe versions of iterating the GPU
* VA space, such as drm_gpuvm_for_each_va_safe() and
@@ -2358,7 +2358,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the operations to
- * unmap and, if required, split existent mappings.
+ * unmap and, if required, split existing mappings.
*
* Drivers may use these callbacks to update the GPU VA space right away within
* the callback. In case the driver decides to copy and store the operations for
@@ -2430,7 +2430,9 @@ static const struct drm_gpuvm_ops lock_ops = {
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
* will be newly mapped.
*
- * The expected usage is:
+ * The expected usage is::
+ *
+ * .. code-block:: c
*
* vm_bind {
* struct drm_exec exec;
@@ -2473,7 +2475,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* required without the earlier DRIVER_OP_MAP. This is safe because we've
* already locked the GEM object in the earlier DRIVER_OP_MAP step.
*
- * Returns: 0 on success or a negative error codec
+ * Returns: 0 on success or a negative error code
*/
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
@@ -2617,12 +2619,12 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
* @req_offset: the offset within the &drm_gem_object
*
* This function creates a list of operations to perform splitting and merging
- * of existent mapping(s) with the newly requested one.
+ * of existing mapping(s) with the newly requested one.
*
* The list can be iterated with &drm_gpuva_for_each_op and must be processed
* in the given order. It can contain map, unmap and remap operations, but it
* also can be empty if no operation is required, e.g. if the requested mapping
- * already exists is the exact same way.
+ * already exists in the exact same way.
*
* There can be an arbitrary amount of unmap operations, a maximum of two remap
* operations and a single map operation. The latter one represents the original
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index 18492daae4b3..50c286c5cee8 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -381,6 +381,26 @@ struct DecFifo {
len: usize,
}
+// On the arm32 architecture, dividing a `u64` by a constant will generate a call
+// to `__aeabi_uldivmod` which is not present in the kernel.
+// So use the multiply by inverse method for this architecture.
+fn div10(val: u64) -> u64 {
+ if cfg!(target_arch = "arm") {
+ let val_h = val >> 32;
+ let val_l = val & 0xFFFFFFFF;
+ let b_h: u64 = 0x66666666;
+ let b_l: u64 = 0x66666667;
+
+ let tmp1 = val_h * b_l + ((val_l * b_l) >> 32);
+ let tmp2 = val_l * b_h + (tmp1 & 0xffffffff);
+ let tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32);
+
+ tmp3 >> 2
+ } else {
+ val / 10
+ }
+}
+
impl DecFifo {
fn push(&mut self, data: u64, len: usize) {
let mut chunk = data;
@@ -389,7 +409,7 @@ impl DecFifo {
}
for i in 0..len {
self.decimals[i] = (chunk % 10) as u8;
- chunk /= 10;
+ chunk = div10(chunk);
}
self.len += len;
}
@@ -404,7 +424,7 @@ impl DecFifo {
let mut out = 0;
let mut exp = 1;
for i in 0..poplen {
- out += self.decimals[self.len + i] as u16 * exp;
+ out += u16::from(self.decimals[self.len + i]) * exp;
exp *= 10;
}
Some((out, NUM_CHARS_BITS[poplen]))
@@ -425,7 +445,7 @@ impl Iterator for SegmentIterator<'_> {
match self.segment {
Segment::Binary(data) => {
if self.offset < data.len() {
- let byte = data[self.offset] as u16;
+ let byte = u16::from(data[self.offset]);
self.offset += 1;
Some((byte, 8))
} else {
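div10() avoids a 64-bit division (which would emit a call to the missing __aeabi_uldivmod helper on arm32) by multiplying with a fixed-point reciprocal of 10: 0x6666666666666667 is ceil(2^66 / 10), so the upper 64 bits of the 128-bit product shifted right by two approximate val / 10, and the tmp1/tmp2/tmp3 lines are just the schoolbook 64x64->128 multiply built from 32-bit halves. A host-side spot check of the identity, in C rather than Rust and purely illustrative (requires a compiler with unsigned __int128):

    /* Spot-check: high64(val * 0x6666666666666667) >> 2 against val / 10. */
    #include <assert.h>
    #include <stdint.h>

    static uint64_t div10_recip(uint64_t val)
    {
            unsigned __int128 p = (unsigned __int128)val * 0x6666666666666667ULL;
            return (uint64_t)(p >> 64) >> 2;
    }

    int main(void)
    {
            const uint64_t samples[] = { 0, 9, 10, 12345, 1000000007ULL, UINT64_MAX };

            for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    assert(div10_recip(samples[i]) == samples[i] / 10);
            return 0;
    }

This checks a few representative values only; it is not a proof over the whole u64 range.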
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
index 74f7832ea53e..0726cb5b736e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -325,6 +325,17 @@ static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp)
return hibmc_dp_link_reduce_rate(dp);
}
+static void hibmc_dp_update_caps(struct hibmc_dp_dev *dp)
+{
+ dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+ if (dp->link.cap.link_rate > DP_LINK_BW_8_1 || !dp->link.cap.link_rate)
+ dp->link.cap.link_rate = DP_LINK_BW_8_1;
+
+ dp->link.cap.lanes = dp->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+ if (dp->link.cap.lanes > HIBMC_DP_LANE_NUM_MAX)
+ dp->link.cap.lanes = HIBMC_DP_LANE_NUM_MAX;
+}
+
int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
{
struct hibmc_dp_link *link = &dp->link;
@@ -334,8 +345,7 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
if (ret)
drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
- dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
- dp->link.cap.lanes = 0x2;
+ hibmc_dp_update_caps(dp);
ret = hibmc_dp_get_serdes_rate_cfg(dp);
if (ret < 0)
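Rather than hard-coding two lanes, the link capabilities now come from the sink's DPCD and are clamped to what the controller can drive: at most DP_LINK_BW_8_1 (HBR3) and HIBMC_DP_LANE_NUM_MAX lanes, with a zero link-rate readback (for example after a failed AUX read) falling back to the maximum rate. The clamping idiom, sketched with the same fields:

    /* Sketch: clamp sink-reported DPCD caps to controller limits. */
    link_rate = dpcd[DP_MAX_LINK_RATE];
    if (link_rate == 0 || link_rate > DP_LINK_BW_8_1)
            link_rate = DP_LINK_BW_8_1;

    lanes = min_t(u8, dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK,
                  HIBMC_DP_LANE_NUM_MAX);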
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 768b97f9e74a..289304500ab0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -32,7 +32,7 @@
DEFINE_DRM_GEM_FOPS(hibmc_fops);
-static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
+static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "hibmc-vblank", "hibmc-hpd" };
static irqreturn_t hibmc_interrupt(int irq, void *arg)
{
@@ -115,6 +115,8 @@ static const struct drm_mode_config_funcs hibmc_mode_funcs = {
static int hibmc_kms_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
+ struct drm_encoder *encoder;
+ u32 clone_mask = 0;
int ret;
ret = drmm_mode_config_init(dev);
@@ -154,6 +156,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
+ drm_for_each_encoder(encoder, dev)
+ clone_mask |= drm_encoder_mask(encoder);
+
+ drm_for_each_encoder(encoder, dev)
+ encoder->possible_clones = clone_mask;
+
return 0;
}
@@ -277,7 +285,6 @@ static void hibmc_unload(struct drm_device *dev)
static int hibmc_msi_init(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- char name[32] = {0};
int valid_irq_num;
int irq;
int ret;
@@ -292,9 +299,6 @@ static int hibmc_msi_init(struct drm_device *dev)
valid_irq_num = ret;
for (int i = 0; i < valid_irq_num; i++) {
- snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
- dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
-
irq = pci_irq_vector(pdev, i);
if (i)
@@ -302,10 +306,10 @@ static int hibmc_msi_init(struct drm_device *dev)
ret = devm_request_threaded_irq(&pdev->dev, irq,
hibmc_dp_interrupt,
hibmc_dp_hpd_isr,
- IRQF_SHARED, name, dev);
+ IRQF_SHARED, g_irqs_names_map[i], dev);
else
ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
- IRQF_SHARED, name, dev);
+ IRQF_SHARED, g_irqs_names_map[i], dev);
if (ret) {
drm_err(dev, "install irq failed: %d\n", ret);
return ret;
@@ -323,13 +327,13 @@ static int hibmc_load(struct drm_device *dev)
ret = hibmc_hw_init(priv);
if (ret)
- goto err;
+ return ret;
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
- goto err;
+ return ret;
}
ret = hibmc_kms_init(priv);
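Two independent fixes here: the IRQ names become static "hibmc-vblank"/"hibmc-hpd" strings because request_irq() stores the name pointer rather than copying it, so the old on-stack buffer left a dangling pointer behind; and every encoder's possible_clones is populated so the DRM core knows the outputs may be active simultaneously (an encoder's own bit is expected to be set in its mask). The clone-mask idiom:

    /* Sketch: mark every encoder as cloneable with every other one, itself included. */
    u32 clone_mask = 0;
    struct drm_encoder *encoder;

    drm_for_each_encoder(encoder, dev)
            clone_mask |= drm_encoder_mask(encoder);

    drm_for_each_encoder(encoder, dev)
            encoder->possible_clones = clone_mask;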
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 274feabe7df0..ca8502e2760c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -69,6 +69,7 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
+void hibmc_ddc_del(struct hibmc_vdac *vdac);
int hibmc_dp_init(struct hibmc_drm_private *priv);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index 99b3b77b5445..44860011855e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
return i2c_bit_add_bus(&vdac->adapter);
}
+
+void hibmc_ddc_del(struct hibmc_vdac *vdac)
+{
+ i2c_del_adapter(&vdac->adapter);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index e8a527ede854..841e81f47b68 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -53,7 +53,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
{
struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
- i2c_del_adapter(&vdac->adapter);
+ hibmc_ddc_del(vdac);
drm_connector_cleanup(connector);
}
@@ -110,7 +110,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
- return ret;
+ goto err;
}
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
@@ -121,7 +121,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
&vdac->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
- return ret;
+ goto err;
}
drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
@@ -131,4 +131,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
+
+err:
+ hibmc_ddc_del(vdac);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index ed8e640b96b0..801235a5bc0a 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -3239,14 +3239,22 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
- bool enable = intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder),
- crtc_state);
+ intel_wakeref_t wakeref;
int i;
+ u8 owned_lane_mask;
- if (DISPLAY_VER(display) < 20)
+ if (DISPLAY_VER(display) < 20 ||
+ !intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), crtc_state))
return;
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
+
+ wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0,
+ C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+
for (i = 0; i < 4; i++) {
int tx = i % 2 + 1;
u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
@@ -3256,9 +3264,10 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0),
CONTROL0_MAC_TRANSMIT_LFPS,
- enable ? CONTROL0_MAC_TRANSMIT_LFPS : 0,
- MB_WRITE_COMMITTED);
+ CONTROL0_MAC_TRANSMIT_LFPS, MB_WRITE_COMMITTED);
}
+
+ intel_cx0_phy_transaction_end(encoder, wakeref);
}
static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index fb25ec8adae3..68157f177b6a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -1506,10 +1506,14 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
if (!(master_ctl & GEN11_GU_MISC_IRQ))
return 0;
+ intel_display_rpm_assert_block(display);
+
iir = intel_de_read(display, GEN11_GU_MISC_IIR);
if (likely(iir))
intel_de_write(display, GEN11_GU_MISC_IIR, iir);
+ intel_display_rpm_assert_unblock(display);
+
return iir;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 6e26cb4c5724..685ac98bd001 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -552,10 +552,6 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
-
- /* wa_18038517565 Enable DPFC clock gating after FBC disable */
- if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
- fbc_compressor_clkgate_disable_wa(fbc, false);
}
}
@@ -1710,6 +1706,10 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
__intel_fbc_cleanup_cfb(fbc);
+ /* wa_18038517565 Enable DPFC clock gating after FBC disable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, false);
+
fbc->state.plane = NULL;
fbc->flip_pending = false;
fbc->busy_bits = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index ae9053919211..41988e193a41 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -3275,7 +3275,9 @@ static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
intel_psr_configure_full_frame_update(intel_dp);
@@ -3361,7 +3363,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0)
@@ -3378,11 +3380,13 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* existing SU configuration
*/
intel_psr_configure_full_frame_update(intel_dp);
- }
- intel_psr_force_update(intel_dp);
+ intel_psr_force_update(intel_dp);
+ } else {
+ intel_psr_exit(intel_dp);
+ }
- if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
+ if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) &&
!intel_dp->psr.busy_frontbuffer_bits)
queue_work(display->wq.unordered, &intel_dp->psr.work);
}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 3bc57579fe53..668ef139391b 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -23,6 +23,7 @@
#include "intel_modeset_lock.h"
#include "intel_tc.h"
+#define DP_PIN_ASSIGNMENT_NONE 0x0
#define DP_PIN_ASSIGNMENT_C 0x3
#define DP_PIN_ASSIGNMENT_D 0x4
#define DP_PIN_ASSIGNMENT_E 0x5
@@ -66,6 +67,7 @@ struct intel_tc_port {
enum tc_port_mode init_mode;
enum phy_fia phy_fia;
u8 phy_fia_idx;
+ u8 max_lane_count;
};
static enum intel_display_power_domain
@@ -307,6 +309,8 @@ static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
switch (pin_assignment) {
+ case DP_PIN_ASSIGNMENT_NONE:
+ return 0;
default:
MISSING_CASE(pin_assignment);
fallthrough;
@@ -365,12 +369,12 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
}
}
-int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
+static int get_max_lane_count(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- struct intel_tc_port *tc = to_tc_port(dig_port);
+ struct intel_display *display = to_intel_display(tc->dig_port);
+ struct intel_digital_port *dig_port = tc->dig_port;
- if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
+ if (tc->mode != TC_PORT_DP_ALT)
return 4;
assert_tc_cold_blocked(tc);
@@ -384,6 +388,25 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
return intel_tc_port_get_max_lane_count(dig_port);
}
+static void read_pin_configuration(struct intel_tc_port *tc)
+{
+ tc->max_lane_count = get_max_lane_count(tc);
+}
+
+int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
+{
+ struct intel_display *display = to_intel_display(dig_port);
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ if (!intel_encoder_is_tc(&dig_port->base))
+ return 4;
+
+ if (DISPLAY_VER(display) < 20)
+ return get_max_lane_count(tc);
+
+ return tc->max_lane_count;
+}
+
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
@@ -596,9 +619,12 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
tc_cold_wref = __tc_cold_block(tc, &domain);
tc->mode = tc_phy_get_current_mode(tc);
- if (tc->mode != TC_PORT_DISCONNECTED)
+ if (tc->mode != TC_PORT_DISCONNECTED) {
tc->lock_wakeref = tc_cold_block(tc);
+ read_pin_configuration(tc);
+ }
+
__tc_cold_unblock(tc, domain, tc_cold_wref);
}
@@ -656,8 +682,11 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
tc->lock_wakeref = tc_cold_block(tc);
- if (tc->mode == TC_PORT_TBT_ALT)
+ if (tc->mode == TC_PORT_TBT_ALT) {
+ read_pin_configuration(tc);
+
return true;
+ }
if ((!tc_phy_is_ready(tc) ||
!icl_tc_phy_take_ownership(tc, true)) &&
@@ -668,6 +697,7 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
goto out_unblock_tc_cold;
}
+ read_pin_configuration(tc);
if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
goto out_release_phy;
@@ -858,9 +888,12 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
port_wakeref = intel_display_power_get(display, port_power_domain);
tc->mode = tc_phy_get_current_mode(tc);
- if (tc->mode != TC_PORT_DISCONNECTED)
+ if (tc->mode != TC_PORT_DISCONNECTED) {
tc->lock_wakeref = tc_cold_block(tc);
+ read_pin_configuration(tc);
+ }
+
intel_display_power_put(display, port_power_domain, port_wakeref);
}
@@ -873,6 +906,9 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
if (tc->mode == TC_PORT_TBT_ALT) {
tc->lock_wakeref = tc_cold_block(tc);
+
+ read_pin_configuration(tc);
+
return true;
}
@@ -894,6 +930,8 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
tc->lock_wakeref = tc_cold_block(tc);
+ read_pin_configuration(tc);
+
if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
goto out_unblock_tc_cold;
@@ -1124,9 +1162,18 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
tc_cold_wref = __tc_cold_block(tc, &domain);
tc->mode = tc_phy_get_current_mode(tc);
- if (tc->mode != TC_PORT_DISCONNECTED)
+ if (tc->mode != TC_PORT_DISCONNECTED) {
tc->lock_wakeref = tc_cold_block(tc);
+ read_pin_configuration(tc);
+ /*
+ * Set a valid lane count value for a DP-alt sink which got
+ * disconnected. The driver can only disable the output on this PHY.
+ */
+ if (tc->max_lane_count == 0)
+ tc->max_lane_count = 4;
+ }
+
drm_WARN_ON(display->drm,
(tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
!xelpdp_tc_phy_tcss_power_is_enabled(tc));
@@ -1138,14 +1185,19 @@ static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
tc->lock_wakeref = tc_cold_block(tc);
- if (tc->mode == TC_PORT_TBT_ALT)
+ if (tc->mode == TC_PORT_TBT_ALT) {
+ read_pin_configuration(tc);
+
return true;
+ }
if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
goto out_unblock_tccold;
xelpdp_tc_phy_take_ownership(tc, true);
+ read_pin_configuration(tc);
+
if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
goto out_release_phy;
@@ -1226,14 +1278,19 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc)
tc->phy_ops->get_hw_state(tc);
}
-static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
- bool phy_is_ready, bool phy_is_owned)
+/* Is the PHY owned by display i.e. is it in legacy or DP-alt mode? */
+static bool tc_phy_owned_by_display(struct intel_tc_port *tc,
+ bool phy_is_ready, bool phy_is_owned)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
+ if (DISPLAY_VER(display) < 20) {
+ drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
- return phy_is_ready && phy_is_owned;
+ return phy_is_ready && phy_is_owned;
+ } else {
+ return phy_is_owned;
+ }
}
static bool tc_phy_is_connected(struct intel_tc_port *tc,
@@ -1244,7 +1301,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
bool phy_is_owned = tc_phy_is_owned(tc);
bool is_connected;
- if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
+ if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned))
is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
else
is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
@@ -1352,7 +1409,7 @@ tc_phy_get_current_mode(struct intel_tc_port *tc)
phy_is_ready = tc_phy_is_ready(tc);
phy_is_owned = tc_phy_is_owned(tc);
- if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
+ if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) {
mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
} else {
drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
@@ -1441,11 +1498,11 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
intel_display_power_flush_work(display);
if (!intel_tc_cold_requires_aux_pw(dig_port)) {
enum intel_display_power_domain aux_domain;
- bool aux_powered;
aux_domain = intel_aux_power_domain(dig_port);
- aux_powered = intel_display_power_is_enabled(display, aux_domain);
- drm_WARN_ON(display->drm, aux_powered);
+ if (intel_display_power_is_enabled(display, aux_domain))
+ drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n",
+ tc->port_name);
}
tc_phy_disconnect(tc);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index b37e400f74e5..5a95f06900b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -634,6 +634,8 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ struct drm_i915_private *i915 = engine->i915;
+
/* Wa_1406697149 (WaDisableBankHangMode:icl) */
wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
@@ -669,6 +671,15 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_1406306137:icl,ehl */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
+
+ if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
+ /*
+ * Disable Repacking for Compression (masked R/W access)
+ * before rendering compressed surfaces for display.
+ */
+ wa_masked_en(wal, CACHE_MODE_0_GEN7,
+ DISABLE_REPACKING_FOR_COMPRESSION);
+ }
}
/*
@@ -2306,15 +2317,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
}
- if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
- /*
- * "Disable Repacking for Compression (masked R/W access)
- * before rendering compressed surfaces for display."
- */
- wa_masked_en(wal, CACHE_MODE_0_GEN7,
- DISABLE_REPACKING_FOR_COMPRESSION);
- }
-
if (GRAPHICS_VER(i915) == 11) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index e8a04e476c57..09a64f224c49 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -220,8 +220,7 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
*/
static int subbuf_start_callback(struct rchan_buf *buf,
void *subbuf,
- void *prev_subbuf,
- size_t prev_padding)
+ void *prev_subbuf)
{
/*
* Use no-overwrite mode by default, where relay will stop accepting
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index d5e6bab36414..f8a817689e16 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -387,19 +387,19 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
of_id = of_match_node(mtk_drm_of_ids, node);
if (!of_id)
- continue;
+ goto next_put_node;
pdev = of_find_device_by_node(node);
if (!pdev)
- continue;
+ goto next_put_node;
drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
if (!drm_dev)
- continue;
+ goto next_put_device_pdev_dev;
temp_drm_priv = dev_get_drvdata(drm_dev);
if (!temp_drm_priv)
- continue;
+ goto next_put_device_drm_dev;
if (temp_drm_priv->data->main_len)
all_drm_priv[CRTC_MAIN] = temp_drm_priv;
@@ -411,10 +411,17 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
if (temp_drm_priv->mtk_drm_bound)
cnt++;
- if (cnt == MAX_CRTC) {
- of_node_put(node);
+next_put_device_drm_dev:
+ put_device(drm_dev);
+
+next_put_device_pdev_dev:
+ put_device(&pdev->dev);
+
+next_put_node:
+ of_node_put(node);
+
+ if (cnt == MAX_CRTC)
break;
- }
}
if (drm_priv->data->mmsys_dev_num == cnt) {
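The goto ladder introduced above exists so that every reference taken inside the loop (device node, platform device, child device) is dropped on every path, in reverse order of acquisition. A rough sketch of the same pairing, with invented names and only the of_find_device_by_node() case shown:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_platform.h>

/* Illustrative only: balance the reference that of_find_device_by_node() takes. */
static int example_use_node(struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret = 0;

	if (!pdev)
		return -ENODEV;

	/* ... use pdev ... */

	put_device(&pdev->dev);	/* device_find_child() results unwind the same way */
	return ret;
}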
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d7726091819c..0e2bcd5f67b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1002,6 +1002,12 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
return PTR_ERR(dsi->next_bridge);
}
+ /*
+	 * Set the flag to request that the DSI host bridge be pre-enabled before the device
+	 * bridge in the chain, so the DSI host is ready when the device bridge is pre-enabled.
+ */
+ dsi->next_bridge->pre_enable_prev_first = true;
+
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 845fd8aa43c3..b766dd5e6c8d 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -182,8 +182,8 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
- regmap_update_bits(hdmi->regs, VIDEO_SOURCE_SEL,
- VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH);
+ regmap_update_bits(hdmi->regs, VIDEO_CFG_4,
+ VIDEO_SOURCE_SEL, black ? GEN_RGB : NORMAL_PATH);
}
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
@@ -310,8 +310,8 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
{
- regmap_update_bits(hdmi->regs, AUDIO_PACKET_OFF,
- GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF);
+ regmap_update_bits(hdmi->regs, GRL_SHIFT_R2,
+ AUDIO_PACKET_OFF, enable ? 0 : AUDIO_PACKET_OFF);
}
static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
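Both hunks above restore the regmap_update_bits(map, reg, mask, val) argument order, which the old code had swapped (register offset and mask were reversed). A quick sketch of the call shape, with a made-up register offset and field:

#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_REG	0x0b4		/* hypothetical register offset */
#define EXAMPLE_FIELD	BIT(5)		/* hypothetical bit to toggle */

static int example_set_field(struct regmap *map, bool on)
{
	/* Only the bits covered by the mask are modified. */
	return regmap_update_bits(map, EXAMPLE_REG, EXAMPLE_FIELD,
				  on ? EXAMPLE_FIELD : 0);
}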
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index cbc4f37da8ba..02349bd44001 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -292,7 +292,8 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
wmb(); /* Make sure the above parameter is set before update */
mtk_plane_state->pending.dirty = true;
- mtk_crtc_plane_disable(old_state->crtc, plane);
+ if (old_state && old_state->crtc)
+ mtk_crtc_plane_disable(old_state->crtc, plane);
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index faca2a0243ab..d5d1271fce61 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -11,7 +11,7 @@
static const unsigned int *gen7_0_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_2_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_9_0_external_core_regs[] __always_unused;
-static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
+static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused;
#include "adreno_gen7_0_0_snapshot.h"
@@ -174,8 +174,15 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
u32 *data)
{
- u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
- A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+ u32 reg;
+
+ if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+ reg = A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+ } else {
+ reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+ }
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -198,11 +205,18 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
readl((ptr) + ((offset) << 2))
/* read a value from the CX debug bus */
-static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+static int cx_debugbus_read(struct msm_gpu *gpu, void __iomem *cxdbg, u32 block, u32 offset,
u32 *data)
{
- u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
- A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+ u32 reg;
+
+ if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+ reg = A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+ } else {
+ reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+ }
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -315,7 +329,8 @@ static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
ptr += debugbus_read(gpu, block->id, i, ptr);
}
-static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+static void a6xx_get_cx_debugbus_block(struct msm_gpu *gpu,
+ void __iomem *cxdbg,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_debugbus_block *block,
struct a6xx_gpu_state_obj *obj)
@@ -330,7 +345,7 @@ static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
obj->handle = block;
for (ptr = obj->data, i = 0; i < block->count; i++)
- ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+ ptr += cx_debugbus_read(gpu, cxdbg, block->id, i, ptr);
}
static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu,
@@ -423,8 +438,9 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
a6xx_state, &a7xx_debugbus_blocks[gbif_debugbus_blocks[i]],
&a6xx_state->debugbus[i + debugbus_blocks_count]);
}
- }
+ a6xx_state->nr_debugbus = total_debugbus_blocks;
+ }
}
static void a6xx_get_debugbus(struct msm_gpu *gpu,
@@ -526,7 +542,8 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
int i;
for (i = 0; i < nr_cx_debugbus_blocks; i++)
- a6xx_get_cx_debugbus_block(cxdbg,
+ a6xx_get_cx_debugbus_block(gpu,
+ cxdbg,
a6xx_state,
&cx_debugbus_blocks[i],
&a6xx_state->cx_debugbus[i]);
@@ -759,15 +776,15 @@ static void a7xx_get_cluster(struct msm_gpu *gpu,
size_t datasize;
int i, regcount = 0;
- /* Some clusters need a selector register to be programmed too */
- if (cluster->sel)
- in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
-
in += CRASHDUMP_WRITE(in, REG_A7XX_CP_APERTURE_CNTL_CD,
A7XX_CP_APERTURE_CNTL_CD_PIPE(cluster->pipe_id) |
A7XX_CP_APERTURE_CNTL_CD_CLUSTER(cluster->cluster_id) |
A7XX_CP_APERTURE_CNTL_CD_CONTEXT(cluster->context_id));
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel)
+ in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
+
for (i = 0; cluster->regs[i] != UINT_MAX; i += 2) {
int count = RANGE(cluster->regs, i);
@@ -1796,6 +1813,7 @@ static void a7xx_show_shader(struct a6xx_gpu_state_obj *obj,
print_name(p, " - type: ", a7xx_statetype_names[block->statetype]);
print_name(p, " - pipe: ", a7xx_pipe_names[block->pipeid]);
+ drm_printf(p, " - location: %d\n", block->location);
for (i = 0; i < block->num_sps; i++) {
drm_printf(p, " - sp: %d\n", i);
@@ -1873,6 +1891,7 @@ static void a7xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
print_name(p, " - pipe: ", a7xx_pipe_names[dbgahb->pipe_id]);
print_name(p, " - cluster-name: ", a7xx_cluster_names[dbgahb->cluster_id]);
drm_printf(p, " - context: %d\n", dbgahb->context_id);
+ drm_printf(p, " - location: %d\n", dbgahb->location_id);
a7xx_show_registers_indented(dbgahb->regs, obj->data, p, 4);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 95d93ac6812a..1c18499b60bb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -419,47 +419,47 @@ static const struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
};
static const struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
- REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
+ REG_A6XX_CP_SQE_STAT_DATA, 0x40, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
- REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL },
- { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+ REG_A7XX_CP_BV_SQE_STAT_DATA, 0x40, NULL },
+ { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+ { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
- REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL },
- { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+ { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+ REG_A7XX_CP_SQE_AC_STAT_DATA, 0x40, NULL },
+ { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+ { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+ { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL },
- { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
};
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
- "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
};
static const struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
- { "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
- REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
- { "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
- REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL },
+ { "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2200, NULL },
+ { "CP_BV_MEM_POOL_DBG", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
+ REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2200, NULL },
};
#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
index cb66ece6606b..04b49d385f9d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
@@ -81,7 +81,7 @@ static const u32 gen7_0_0_debugbus_blocks[] = {
A7XX_DBGBUS_USPTP_7,
};
-static struct gen7_shader_block gen7_0_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_0_0_shader_blocks[] = {
{A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
@@ -668,12 +668,19 @@ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8));
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
-static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c,
0x0b60f, 0x0b621, 0x0b630, 0x0b633,
UINT_MAX, UINT_MAX,
};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8));
+
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+ 0x0b600, 0x0b600,
+ UINT_MAX, UINT_MAX,
+};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8));
/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */
@@ -695,7 +702,7 @@ static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = {
.val = 0x9,
};
-static struct gen7_cluster_registers gen7_0_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_0_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -764,7 +771,7 @@ static struct gen7_cluster_registers gen7_0_0_clusters[] = {
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
-static struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -914,7 +921,7 @@ static const u32 gen7_0_0_dpm_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_dpm_registers), 8));
-static struct gen7_reg_list gen7_0_0_reg_list[] = {
+static const struct gen7_reg_list gen7_0_0_reg_list[] = {
{ gen7_0_0_gpu_registers, NULL },
{ gen7_0_0_cx_misc_registers, NULL },
{ gen7_0_0_dpm_registers, NULL },
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
index 6f8ad50f32ce..772652eb61f3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
@@ -95,7 +95,7 @@ static const u32 gen7_2_0_debugbus_blocks[] = {
A7XX_DBGBUS_CCHE_2,
};
-static struct gen7_shader_block gen7_2_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_2_0_shader_blocks[] = {
{A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
@@ -489,7 +489,7 @@ static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = {
.val = 0x9,
};
-static struct gen7_cluster_registers gen7_2_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_2_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -558,7 +558,7 @@ static struct gen7_cluster_registers gen7_2_0_clusters[] = {
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
-static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -573,6 +573,8 @@ static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 },
{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
{ A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
@@ -737,7 +739,7 @@ static const u32 gen7_2_0_dpm_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_2_0_dpm_registers), 8));
-static struct gen7_reg_list gen7_2_0_reg_list[] = {
+static const struct gen7_reg_list gen7_2_0_reg_list[] = {
{ gen7_2_0_gpu_registers, NULL },
{ gen7_2_0_cx_misc_registers, NULL },
{ gen7_2_0_dpm_registers, NULL },
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index e02cabb39f19..0956dfca1f05 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -117,7 +117,7 @@ static const u32 gen7_9_0_cx_debugbus_blocks[] = {
A7XX_DBGBUS_GBIF_CX,
};
-static struct gen7_shader_block gen7_9_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_9_0_shader_blocks[] = {
{ A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
{ A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
{ A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
@@ -1116,7 +1116,7 @@ static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = {
.val = 0x9,
};
-static struct gen7_cluster_registers gen7_9_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_9_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -1185,7 +1185,7 @@ static struct gen7_cluster_registers gen7_9_0_clusters[] = {
gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
};
-static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00},
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -1294,34 +1294,34 @@ static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
};
-static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
+static const struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x00040},
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x00200},
- { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
- { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
- { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
- { "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
+ { "CP_BV_ROQ_DBG", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},
- { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+ { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
- { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+ { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
- { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
+ { "CP_RESOURCE_TABLE_DBG", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100},
- { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+ { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
- { "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
+ { "CP_LPAC_ROQ_DBG", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
REG_A7XX_CP_LPAC_ROQ_DBG_DATA, 0x00200},
- { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+ { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x08000},
- { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+ { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
REG_A7XX_CP_SQE_AC_STAT_DATA, 0x00040},
- { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+ { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x00040},
{ "CP_AQE_ROQ_0", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_0,
REG_A7XX_CP_AQE_ROQ_DBG_DATA_0, 0x00100},
@@ -1337,7 +1337,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A7XX_CP_AQE_STAT_DATA_1, 0x00040},
};
-static struct gen7_reg_list gen7_9_0_reg_list[] = {
+static const struct gen7_reg_list gen7_9_0_reg_list[] = {
{ gen7_9_0_gpu_registers, NULL},
{ gen7_9_0_cx_misc_registers, NULL},
{ gen7_9_0_cx_dbgc_registers, NULL},
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d4b545448d74..94912b4708fb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -596,7 +596,7 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (dpu_crtc->event) {
- DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ DRM_DEBUG_VBL("%s: send event: %p\n", dpu_crtc->name,
dpu_crtc->event);
trace_dpu_crtc_complete_flip(DRMID(crtc));
drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 05e5f3463e30..258edaa18fc0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -730,6 +730,8 @@ bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_st
return false;
conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return false;
/**
* These checks are duplicated from dpu_encoder_update_topology() since
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 11fb1bc54fa9..54b20faa0b69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -31,14 +31,14 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
u32 base;
if (!ctx) {
- DRM_ERROR("invalid ctx %pK\n", ctx);
+ DRM_ERROR("invalid ctx %p\n", ctx);
return;
}
base = ctx->cap->sblk->pcc.base;
if (!base) {
- DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
+ DRM_ERROR("invalid ctx %p pcc base 0x%x\n", ctx, base);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 12dcb32b4724..a306077647c3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1345,7 +1345,7 @@ static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms)
dpu_kms->mmio = NULL;
return ret;
}
- DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap_mdss(mdss_dev,
dpu_kms->pdev,
@@ -1380,7 +1380,7 @@ static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms)
dpu_kms->mmio = NULL;
return ret;
}
- DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 01171c535a27..6859e8ef6b05 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1129,7 +1129,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(state, plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *crtc_state = NULL;
int ret;
if (IS_ERR(plane_state))
@@ -1162,7 +1162,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
if (!old_plane_state || !old_plane_state->fb ||
old_plane_state->src_w != plane_state->src_w ||
old_plane_state->src_h != plane_state->src_h ||
- old_plane_state->src_w != plane_state->src_w ||
+ old_plane_state->crtc_w != plane_state->crtc_w ||
old_plane_state->crtc_h != plane_state->crtc_h ||
msm_framebuffer_format(old_plane_state->fb) !=
msm_framebuffer_format(plane_state->fb))
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 221f12db5f8b..4ea681130dba 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -5,6 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
#include <dt-bindings/phy/phy.h>
#include "dsi_phy.h"
@@ -511,30 +513,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
-static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
-{
- struct device *dev = &phy->pdev->dev;
- int ret;
-
- ret = pm_runtime_resume_and_get(dev);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(phy->ahb_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
- pm_runtime_put_sync(dev);
- }
-
- return ret;
-}
-
-static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
-{
- clk_disable_unprepare(phy->ahb_clk);
- pm_runtime_put(&phy->pdev->dev);
-}
-
static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
@@ -698,22 +676,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
if (ret)
return ret;
- phy->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(phy->ahb_clk))
- return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
- "Unable to get ahb clk\n");
+ platform_set_drvdata(pdev, phy);
- ret = devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
- /* PLL init will call into clk_register which requires
- * register access, so we need to enable power and ahb clock.
- */
- ret = dsi_phy_enable_resource(phy);
+ ret = devm_pm_clk_create(dev);
if (ret)
return ret;
+ ret = pm_clk_add(dev, "iface");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Unable to get iface clk\n");
+
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
if (ret)
@@ -727,18 +703,19 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"Failed to register clk provider\n");
- dsi_phy_disable_resource(phy);
-
- platform_set_drvdata(pdev, phy);
-
return 0;
}
+static const struct dev_pm_ops dsi_phy_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
static struct platform_driver dsi_phy_platform_driver = {
.probe = dsi_phy_driver_probe,
.driver = {
.name = "msm_dsi_phy",
.of_match_table = dsi_phy_dt_match,
+ .pm = &dsi_phy_pm_ops,
},
};
@@ -764,9 +741,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
dev = &phy->pdev->dev;
- ret = dsi_phy_enable_resource(phy);
+ ret = pm_runtime_resume_and_get(dev);
if (ret) {
- DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: resume failed, %d\n",
__func__, ret);
goto res_en_fail;
}
@@ -810,7 +787,7 @@ pll_restor_fail:
phy_en_fail:
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
- dsi_phy_disable_resource(phy);
+ pm_runtime_put(dev);
res_en_fail:
return ret;
}
@@ -823,7 +800,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
phy->cfg->ops.disable(phy);
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
- dsi_phy_disable_resource(phy);
+ pm_runtime_put(&phy->pdev->dev);
}
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
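The conversion above drops the hand-rolled ahb clock handling in favour of the pm_clk helpers, so the iface clock is gated and ungated by runtime PM rather than explicit clk_prepare_enable() calls. A minimal sketch of that pattern, assuming the same helpers; the probe body and clock name are illustrative:

#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	ret = devm_pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/* From here on the clock follows runtime PM state. */
	return pm_clk_add(&pdev->dev, "iface");
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
};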
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c558f8df1684..3cbf08231492 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -104,7 +104,6 @@ struct msm_dsi_phy {
phys_addr_t lane_size;
int id;
- struct clk *ahb_clk;
struct regulator_bulk_data *supplies;
struct msm_dsi_dphy_timing timing;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index bbda865addae..97dc70876442 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -325,25 +325,28 @@ static struct drm_info_list msm_debugfs_list[] = {
static int late_init_minor(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- struct msm_drm_private *priv = dev->dev_private;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
int ret;
if (!minor)
return 0;
+ dev = minor->dev;
+ priv = dev->dev_private;
+
if (!priv->gpu_pdev)
return 0;
ret = msm_rd_debugfs_init(minor);
if (ret) {
- DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
+ DRM_DEV_ERROR(dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
- DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
+ DRM_DEV_ERROR(dev->dev, "could not install perf debugfs\n");
return ret;
}
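The reshuffle above matters because the old code dereferenced minor in the declarations before the !minor check, which made the check useless. The safe shape is to test the pointer first and only then load anything through it; a trivial sketch with invented types:

struct example_dev;

struct example_minor {
	struct example_dev *dev;
};

static int example_late_init(struct example_minor *minor)
{
	struct example_dev *dev;

	if (!minor)		/* test before any dereference */
		return 0;

	dev = minor->dev;	/* minor is known to be non-NULL here */
	/* ... use dev ... */
	return 0;
}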
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7ff994d4f91a..e7631f4ef530 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -95,7 +95,6 @@ void msm_gem_vma_get(struct drm_gem_object *obj)
void msm_gem_vma_put(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
- struct drm_exec exec;
if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
return;
@@ -103,9 +102,13 @@ void msm_gem_vma_put(struct drm_gem_object *obj)
if (!priv->kms)
return;
+#ifdef CONFIG_DRM_MSM_KMS
+ struct drm_exec exec;
+
msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
drm_exec_fini(&exec); /* drop locks */
+#endif
}
/*
@@ -663,9 +666,13 @@ int msm_gem_set_iova(struct drm_gem_object *obj,
static bool is_kms_vm(struct drm_gpuvm *vm)
{
+#ifdef CONFIG_DRM_MSM_KMS
struct msm_drm_private *priv = vm->drm->dev_private;
return priv->kms && (priv->kms->vm == vm);
+#else
+ return false;
+#endif
}
/*
@@ -1113,10 +1120,12 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
- if (msm_obj->flags & MSM_BO_NO_SHARE) {
+ if (obj->resv != &obj->_resv) {
struct drm_gem_object *r_obj =
container_of(obj->resv, struct drm_gem_object, _resv);
+ WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
+
/* Drop reference we hold to shared resv obj: */
drm_gem_object_put(r_obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 88239da1cd72..751c3b4965bc 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -100,7 +100,7 @@ struct msm_gem_vm {
*
* Only used for kernel managed VMs, unused for user managed VMs.
*
- * Protected by @mm_lock.
+ * Protected by vm lock. See msm_gem_lock_vm_and_obj(), for ex.
*/
struct drm_mm mm;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5f8e939a5906..3ab3b27134f9 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -271,32 +271,37 @@ out:
return ret;
}
-/* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_lock_objects(struct msm_gem_submit *submit)
+static int submit_lock_objects_vmbind(struct msm_gem_submit *submit)
{
- unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES;
struct drm_exec *exec = &submit->exec;
- int ret;
+ int ret = 0;
- if (msm_context_is_vmbind(submit->queue->ctx)) {
- flags |= DRM_EXEC_IGNORE_DUPLICATES;
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
- drm_exec_init(&submit->exec, flags, submit->nr_bos);
+ drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ break;
- drm_exec_until_all_locked (&submit->exec) {
- ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
- drm_exec_retry_on_contention(exec);
- if (ret)
- return ret;
+ ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ break;
+ }
- ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
- drm_exec_retry_on_contention(exec);
- if (ret)
- return ret;
- }
+ return ret;
+}
- return 0;
- }
+/* This is where we make sure all the bo's are reserved and pin'd: */
+static int submit_lock_objects(struct msm_gem_submit *submit)
+{
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ int ret = 0;
+
+ if (msm_context_is_vmbind(submit->queue->ctx))
+ return submit_lock_objects_vmbind(submit);
drm_exec_init(&submit->exec, flags, submit->nr_bos);
@@ -305,17 +310,17 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
drm_gpuvm_resv_obj(submit->vm));
drm_exec_retry_on_contention(&submit->exec);
if (ret)
- return ret;
+ break;
for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
drm_exec_retry_on_contention(&submit->exec);
if (ret)
- return ret;
+ break;
}
}
- return 0;
+ return ret;
}
static int submit_fence_sync(struct msm_gem_submit *submit)
@@ -514,14 +519,15 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
+ if (error)
+ submit_unpin_objects(submit);
+
if (submit->exec.objects)
drm_exec_fini(&submit->exec);
- if (error) {
- submit_unpin_objects(submit);
- /* job wasn't enqueued to scheduler, so early retirement: */
+ /* if job wasn't enqueued to scheduler, early retirement: */
+ if (error)
msm_submit_retire(submit);
- }
}
void msm_submit_retire(struct msm_gem_submit *submit)
@@ -769,12 +775,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
sync_file = sync_file_create(submit->user_fence);
- if (!sync_file) {
+ if (!sync_file)
ret = -ENOMEM;
- } else {
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
- }
}
if (ret)
@@ -812,10 +814,14 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
- if (ret && (out_fence_fd >= 0)) {
- put_unused_fd(out_fence_fd);
+ if (ret) {
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
if (sync_file)
fput(sync_file->file);
+ } else if (sync_file) {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
if (!IS_ERR_OR_NULL(submit)) {
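The restructuring above defers fd_install() until the submit has fully succeeded: once an fd is installed it belongs to userspace and cannot be recalled, whereas an unpublished fd can still be returned with put_unused_fd(). A hedged sketch of that ordering (the function name is invented):

#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

static int example_export_fence(struct dma_fence *fence, int *out_fd)
{
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);	/* never published, safe to recycle */
		return -ENOMEM;
	}

	/* Publish only on success: after this the fd is owned by userspace. */
	fd_install(fd, sync_file->file);
	*out_fd = fd;
	return 0;
}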
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3cd8562a5109..00d0f3b7ba32 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -319,13 +319,10 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
mutex_lock(&vm->mmu_lock);
/*
- * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
+ * NOTE: if not using pgtable preallocation, we cannot hold
* a lock across map/unmap which is also used in the job_run()
* path, as this can cause deadlock in job_run() vs shrinker/
* reclaim.
- *
- * Revisit this if we can come up with a scheme to pre-alloc pages
- * for the pgtable in map/unmap ops.
*/
ret = vm_map_op(vm, &(struct msm_vm_map_op){
.iova = vma->va.addr,
@@ -454,6 +451,8 @@ msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
struct op_arg {
unsigned flags;
struct msm_vm_bind_job *job;
+ const struct msm_vm_bind_op *op;
+ bool kept;
};
static void
@@ -475,14 +474,18 @@ vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
}
static int
-msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
{
- struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct op_arg *arg = _arg;
+ struct msm_vm_bind_job *job = arg->job;
struct drm_gem_object *obj = op->map.gem.obj;
struct drm_gpuva *vma;
struct sg_table *sgt;
unsigned prot;
+ if (arg->kept)
+ return 0;
+
vma = vma_from_op(arg, &op->map);
if (WARN_ON(IS_ERR(vma)))
return PTR_ERR(vma);
@@ -602,15 +605,41 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
}
static int
-msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
{
- struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct op_arg *arg = _arg;
+ struct msm_vm_bind_job *job = arg->job;
struct drm_gpuva *vma = op->unmap.va;
struct msm_gem_vma *msm_vma = to_msm_vma(vma);
vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
vma->va.addr, vma->va.range);
+ /*
+ * Detect in-place remap. Turnip does this to change the vma flags,
+ * in particular MSM_VMA_DUMP. In this case we want to avoid actually
+ * touching the page tables, as that would require synchronization
+ * against SUBMIT jobs running on the GPU.
+ */
+ if (op->unmap.keep &&
+ (arg->op->op == MSM_VM_BIND_OP_MAP) &&
+ (vma->gem.obj == arg->op->obj) &&
+ (vma->gem.offset == arg->op->obj_offset) &&
+ (vma->va.addr == arg->op->iova) &&
+ (vma->va.range == arg->op->range)) {
+ /* We are only expecting a single in-place unmap+map cb pair: */
+ WARN_ON(arg->kept);
+
+ /* Leave the existing VMA in place, but signal that to the map cb: */
+ arg->kept = true;
+
+ /* Only flags are changing, so update that in-place: */
+ unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
+ vma->flags = orig_flags | arg->flags;
+
+ return 0;
+ }
+
if (!msm_vma->mapped)
goto out_close;
@@ -1271,6 +1300,7 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
const struct msm_vm_bind_op *op = &job->ops[i];
struct op_arg arg = {
.job = job,
+ .op = op,
};
switch (op->op) {
@@ -1460,12 +1490,8 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
sync_file = sync_file_create(job->fence);
- if (!sync_file) {
+ if (!sync_file)
ret = -ENOMEM;
- } else {
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
- }
}
if (ret)
@@ -1494,10 +1520,14 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
- if (ret && (out_fence_fd >= 0)) {
- put_unused_fd(out_fence_fd);
+ if (ret) {
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
if (sync_file)
fput(sync_file->file);
+ } else if (sync_file) {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
if (!IS_ERR_OR_NULL(job)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c317b25a8162..26c5ce897cbb 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -465,6 +465,7 @@ static void recover_worker(struct kthread_work *work)
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
+ struct task_struct *task;
int i;
mutex_lock(&gpu->lock);
@@ -482,16 +483,20 @@ static void recover_worker(struct kthread_work *work)
/* Increment the fault counts */
submit->queue->faults++;
- if (submit->vm) {
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (!task)
+ gpu->global_faults++;
+ else {
struct msm_gem_vm *vm = to_msm_vm(submit->vm);
vm->faults++;
/*
* If userspace has opted-in to VM_BIND (and therefore userspace
- * management of the VM), faults mark the VM as unusuable. This
+ * management of the VM), faults mark the VM as unusable. This
* matches vulkan expectations (vulkan is the main target for
- * VM_BIND)
+ * VM_BIND).
*/
if (!vm->managed)
msm_gem_vm_unusable(submit->vm);
@@ -553,8 +558,15 @@ static void recover_worker(struct kthread_work *work)
unsigned long flags;
spin_lock_irqsave(&ring->submit_lock, flags);
- list_for_each_entry(submit, &ring->submits, node)
+ list_for_each_entry(submit, &ring->submits, node) {
+ /*
+			 * If the submit uses an unusable VM, make sure
+			 * we don't actually run it.
+ */
+ if (to_msm_vm(submit->vm)->unusable)
+ submit->nr_cmds = 0;
gpu->funcs->submit(gpu, submit);
+ }
spin_unlock_irqrestore(&ring->submit_lock, flags);
}
}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 55c29f49b788..76cdd5ea06a0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -14,7 +14,9 @@
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
- atomic_t pagetables;
+
+ struct mutex init_lock; /* protects pagetables counter and prr_page */
+ int pagetables;
struct page *prr_page;
struct kmem_cache *pt_cache;
@@ -227,7 +229,8 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
* If this is the last attached pagetable for the parent,
* disable TTBR0 in the arm-smmu driver
*/
- if (atomic_dec_return(&iommu->pagetables) == 0) {
+ mutex_lock(&iommu->init_lock);
+ if (--iommu->pagetables == 0) {
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
if (adreno_smmu->set_prr_bit) {
@@ -236,6 +239,7 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
iommu->prr_page = NULL;
}
}
+ mutex_unlock(&iommu->init_lock);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
@@ -568,9 +572,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
* If this is the first pagetable that we've allocated, send it back to
* the arm-smmu driver as a trigger to set up TTBR0
*/
- if (atomic_inc_return(&iommu->pagetables) == 1) {
+ mutex_lock(&iommu->init_lock);
+ if (iommu->pagetables++ == 0) {
ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
if (ret) {
+ iommu->pagetables--;
+ mutex_unlock(&iommu->init_lock);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
return ERR_PTR(ret);
@@ -595,6 +602,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
}
}
+ mutex_unlock(&iommu->init_lock);
/* Needed later for TLB flush */
pagetable->parent = parent;
@@ -730,7 +738,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
- atomic_set(&iommu->pagetables, 0);
+ mutex_init(&iommu->init_lock);
ret = iommu_attach_device(iommu->domain, dev);
if (ret) {
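The atomic_t counter is replaced with a plain integer under a mutex because the first/last transitions also guard shared state (the TTBR0 configuration and prr_page), which an atomic increment alone cannot serialize. A rough sketch of that first-user/last-user pattern, with invented names:

#include <linux/mutex.h>

struct example_parent {
	struct mutex init_lock;	/* protects users and the shared setup state */
	int users;
};

static int example_get(struct example_parent *p, int (*setup)(void))
{
	int ret = 0;

	mutex_lock(&p->init_lock);
	if (p->users++ == 0) {
		ret = setup();		/* shared state is set up exactly once */
		if (ret)
			p->users--;	/* roll back so a later caller retries */
	}
	mutex_unlock(&p->init_lock);
	return ret;
}

static void example_put(struct example_parent *p, void (*teardown)(void))
{
	mutex_lock(&p->init_lock);
	if (--p->users == 0)
		teardown();		/* torn down together with the last user */
	mutex_unlock(&p->init_lock);
}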
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 6889f1c1e721..56828d218e88 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -275,6 +275,12 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
if (ret)
return ret;
+ ret = msm_disp_snapshot_init(ddev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+ return ret;
+ }
+
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
@@ -327,10 +333,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
goto err_msm_uninit;
}
- ret = msm_disp_snapshot_init(ddev);
- if (ret)
- DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-
drm_mode_config_reset(ddev);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 1f5fe7811e01..39885b333910 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -423,7 +423,7 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
if (IS_ERR(msm_mdss->mmio))
return ERR_CAST(msm_mdss->mmio);
- dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+ dev_dbg(&pdev->dev, "mapped mdss address space @%p\n", msm_mdss->mmio);
ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
if (ret)
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index d860fd94feae..86fab2750ba7 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -594,10 +594,14 @@ by a particular renderpass/blit.
<reg32 offset="0x0600" name="DBGC_CFG_DBGBUS_SEL_A"/>
<reg32 offset="0x0601" name="DBGC_CFG_DBGBUS_SEL_B"/>
<reg32 offset="0x0602" name="DBGC_CFG_DBGBUS_SEL_C"/>
- <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D">
+ <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A6XX">
<bitfield high="7" low="0" name="PING_INDEX"/>
<bitfield high="15" low="8" name="PING_BLK_SEL"/>
</reg32>
+ <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A7XX-">
+ <bitfield high="7" low="0" name="PING_INDEX"/>
+ <bitfield high="24" low="16" name="PING_BLK_SEL"/>
+ </reg32>
<reg32 offset="0x0604" name="DBGC_CFG_DBGBUS_CNTLT">
<bitfield high="5" low="0" name="TRACEEN"/>
<bitfield high="14" low="12" name="GRANU"/>
@@ -3796,6 +3800,14 @@ by a particular renderpass/blit.
<reg32 offset="0x0030" name="CFG_DBGBUS_TRACE_BUF2"/>
</domain>
+<domain name="A7XX_CX_DBGC" width="32">
+ <!-- Bitfields shifted, but otherwise the same: -->
+ <reg32 offset="0x0000" name="CFG_DBGBUS_SEL_A" variants="A7XX-">
+ <bitfield high="7" low="0" name="PING_INDEX"/>
+ <bitfield high="24" low="16" name="PING_BLK_SEL"/>
+ </reg32>
+</domain>
+
<domain name="A6XX_CX_MISC" width="32" prefix="variant" varset="chip">
<reg32 offset="0x0001" name="SYSTEM_CACHE_CNTL_0"/>
<reg32 offset="0x0002" name="SYSTEM_CACHE_CNTL_1"/>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi.xml b/drivers/gpu/drm/msm/registers/display/dsi.xml
index 501ffc585a9f..c7a7b633d747 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi.xml
@@ -159,28 +159,28 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="RGB_SWAP" low="12" high="14" type="dsi_rgb_swap"/>
</reg32>
<reg32 offset="0x00020" name="ACTIVE_H">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00024" name="ACTIVE_V">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00028" name="TOTAL">
- <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
- <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+ <bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x0002c" name="ACTIVE_HSYNC">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00030" name="ACTIVE_VSYNC_HPOS">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00034" name="ACTIVE_VSYNC_VPOS">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00038" name="CMD_DMA_CTRL">
@@ -209,8 +209,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="WORD_COUNT" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00058" name="CMD_MDP_STREAM0_TOTAL">
- <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
- <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+ <bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x0005c" name="CMD_MDP_STREAM1_CTRL">
<bitfield name="DATA_TYPE" low="0" high="5" type="uint"/>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 11d5b923d6e7..e2c55f4b9c5a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -795,6 +795,10 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
struct nouveau_drm *drm = nouveau_drm(plane->dev);
uint8_t i;
+ /* All chipsets can display all formats in linear layout */
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
if (drm->client.device.info.chipset < 0xc0) {
const struct drm_format_info *info = drm_format_info(format);
const uint8_t kind = (modifier >> 12) & 0xff;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e1e542126310..805d0a87aa54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -253,6 +253,7 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
int
nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb)
@@ -260,7 +261,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct drm_framebuffer *fb;
- const struct drm_format_info *info;
unsigned int height, i;
uint32_t tile_mode;
uint8_t kind;
@@ -295,9 +295,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
kind = nvbo->kind;
}
- info = drm_get_format_info(dev, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
-
for (i = 0; i < info->num_planes; i++) {
height = drm_format_info_plane_height(info,
mode_cmd->height,
@@ -321,7 +318,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = gem;
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
@@ -344,7 +341,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
if (!gem)
return ERR_PTR(-ENOENT);
- ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
+ ret = nouveau_framebuffer_new(dev, info, mode_cmd, gem, &fb);
if (ret == 0)
return fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index e45f211501f6..470e0910d484 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -8,8 +8,11 @@
#include <drm/drm_framebuffer.h>
+struct drm_format_info;
+
int
nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb);
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index edbbda78bac9..c4949e815eb3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -60,14 +60,14 @@
* virtual address in the GPU's VA space there is no guarantee that the actual
* mappings are created in the GPU's MMU. If the given memory is swapped out
* at the time the bind operation is executed the kernel will stash the mapping
- * details into it's internal alloctor and create the actual MMU mappings once
+ * details into its internal allocator and create the actual MMU mappings once
* the memory is swapped back in. While this is transparent for userspace, it is
* guaranteed that all the backing memory is swapped back in and all the memory
* mappings, as requested by userspace previously, are actually mapped once the
* DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
*
* A VM_BIND job can be executed either synchronously or asynchronously. If
- * exectued asynchronously, userspace may provide a list of syncobjs this job
+ * executed asynchronously, userspace may provide a list of syncobjs this job
* will wait for and/or a list of syncobj the kernel will signal once the
* VM_BIND job finished execution. If executed synchronously the ioctl will
* block until the bind job is finished. For synchronous jobs the kernel will
@@ -82,7 +82,7 @@
* Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
* an up to date view of the VA space. However, the actual mappings might still
* be pending. Hence, EXEC jobs require to have the particular fences - of
- * the corresponding VM_BIND jobs they depent on - attached to them.
+ * the corresponding VM_BIND jobs they depend on - attached to them.
*/
static int
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 99296f03371a..07c1ebc2a941 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
default:
WARN_ON(1);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
memcpy(args->data, argv, argc);
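Routing the error through the function's existing cleanup label, as the hunk above does, keeps the temporary argument buffer from leaking on the invalid-type path. A generic sketch of that single-exit shape (names and the validation step are invented):

#include <linux/slab.h>
#include <linux/string.h>

static int example_ctor(const void *argv, size_t argc)
{
	void *args;
	int ret;

	args = kmalloc(argc, GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (argc > 4096) {		/* hypothetical validation failure */
		ret = -EINVAL;
		goto done;		/* don't leak args by returning directly */
	}

	memcpy(args, argv, argc);
	ret = 0;			/* ... args would be consumed here ... */
done:
	kfree(args);
	return ret;
}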
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
index b7da3ab44c27..7c43397c19e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
@@ -103,7 +103,7 @@ gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 i
static void
gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
- nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++);
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag);
while (len >= 4) {
nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
img += 4;
@@ -249,9 +249,11 @@ int
gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
struct nvkm_falcon *falcon = fw->falcon;
- int target, ret;
+ int ret;
if (fw->inst) {
+ int target;
+
nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);
switch (nvkm_memory_target(fw->inst)) {
@@ -285,15 +287,6 @@ gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
}
if (fw->boot) {
- switch (nvkm_memory_target(&fw->fw.mem.memory)) {
- case NVKM_MEM_TARGET_VRAM: target = 4; break;
- case NVKM_MEM_TARGET_HOST: target = 5; break;
- case NVKM_MEM_TARGET_NCOH: target = 6; break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
fw->boot_addr >> 8, false);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
index 52412965fac1..5b721bd9d799 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -209,11 +209,12 @@ nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name,
fw->boot_addr = bld->start_tag << 8;
fw->boot_size = bld->code_size;
fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL);
- if (!fw->boot)
- ret = -ENOMEM;
nvkm_firmware_put(bl);
+ if (!fw->boot)
+ return -ENOMEM;
+
/* Patch in interface data. */
return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
index 9d06ff722fea..0dc4782df8c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
@@ -325,7 +325,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
+ kvfree(buf);
return rpc;
}
@@ -334,7 +334,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
+ kvfree(buf);
return rpc;
}
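The kfree()->kvfree() change above matters when the buffer was obtained with kvmalloc(), which may silently fall back to vmalloc memory for larger sizes; such a buffer must be released with kvfree(). A tiny sketch of the pairing (the helper names are made up):

#include <linux/mm.h>
#include <linux/slab.h>

static void *example_alloc_msg(size_t len)
{
	/* May be kmalloc-backed or, for large sizes, vmalloc-backed. */
	return kvmalloc(len, GFP_KERNEL);
}

static void example_free_msg(void *buf)
{
	kvfree(buf);	/* correct for both backing allocators */
}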
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
index 7e59a34b830d..4fe62cf98a23 100644
--- a/drivers/gpu/drm/nova/file.rs
+++ b/drivers/gpu/drm/nova/file.rs
@@ -39,7 +39,8 @@ impl File {
_ => return Err(EINVAL),
};
- getparam.set_value(value);
+ #[allow(clippy::useless_conversion)]
+ getparam.set_value(value.into());
Ok(0)
}
diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs
index 902876aa14d1..64fd670e99e1 100644
--- a/drivers/gpu/drm/nova/nova.rs
+++ b/drivers/gpu/drm/nova/nova.rs
@@ -12,7 +12,7 @@ use crate::driver::NovaDriver;
kernel::module_auxiliary_driver! {
type: NovaDriver,
name: "Nova",
- author: "Danilo Krummrich",
+ authors: ["Danilo Krummrich"],
description: "Nova GPU driver",
license: "GPL v2",
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 30c81e2e5d6b..bb3105556f19 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -351,7 +351,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
}
}
- fb = omap_framebuffer_init(dev, mode_cmd, bos);
+ fb = omap_framebuffer_init(dev, info, mode_cmd, bos);
if (IS_ERR(fb))
goto error;
@@ -365,9 +365,9 @@ error:
}
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- const struct drm_format_info *format = NULL;
struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
unsigned int pitch = mode_cmd->pitches[0];
@@ -377,15 +377,12 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- format = drm_get_format_info(dev, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
-
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i] == mode_cmd->pixel_format)
break;
}
- if (!format || i == ARRAY_SIZE(formats)) {
+ if (i == ARRAY_SIZE(formats)) {
dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
@@ -399,7 +396,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
}
fb = &omap_fb->base;
- omap_fb->format = format;
+ omap_fb->format = info;
mutex_init(&omap_fb->lock);
/*
@@ -407,23 +404,23 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
* that the two planes of multiplane formats need the same number of
* bytes per pixel.
*/
- if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
+ if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n");
ret = -EINVAL;
goto fail;
}
- if (pitch % format->cpp[0]) {
+ if (pitch % info->cpp[0]) {
dev_dbg(dev->dev,
"buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n",
- pitch, format->cpp[0]);
+ pitch, info->cpp[0]);
ret = -EINVAL;
goto fail;
}
- for (i = 0; i < format->num_planes; i++) {
+ for (i = 0; i < info->num_planes; i++) {
struct plane *plane = &omap_fb->planes[i];
- unsigned int vsub = i == 0 ? 1 : format->vsub;
+ unsigned int vsub = i == 0 ? 1 : info->vsub;
unsigned int size;
size = pitch * mode_cmd->height / vsub;
@@ -440,7 +437,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->dma_addr = 0;
}
- drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
if (ret) {
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index 0873f953cf1d..e6010302a22b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -13,6 +13,7 @@ struct drm_connector;
struct drm_device;
struct drm_file;
struct drm_framebuffer;
+struct drm_format_info;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
struct drm_plane_state;
@@ -23,6 +24,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 7b6396890681..948af7ec1130 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -197,7 +197,10 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto fail;
}
- fb = omap_framebuffer_init(dev, &mode_cmd, &bo);
+ fb = omap_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index bb73f2a68a12..85d6289a6eda 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -432,7 +432,7 @@ static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
if (!refcount)
return;
- resident_size = bo->base.pages ? bo->base.base.size : 0;
+ resident_size = panfrost_gem_rss(&bo->base.base);
snprintf(creator_info, sizeof(creator_info),
"%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b4bf5dfeea2d..4dc77c398617 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1297,12 +1297,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
int
radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
fb->obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
if (ret) {
fb->obj[0] = NULL;
@@ -1341,7 +1342,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
+ ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj);
if (ret) {
kfree(fb);
drm_gem_object_put(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index e3a481bbee7b..dc81b0c2dbff 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj)
}
static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
- const struct drm_format_info *info;
struct radeon_device *rdev = fb_helper->dev->dev_private;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
@@ -67,8 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
int height = mode_cmd->height;
u32 cpp;
- info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
@@ -206,6 +204,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_device *rdev = fb_helper->dev->dev_private;
+ const struct drm_format_info *format_info;
struct drm_mode_fb_cmd2 mode_cmd = { };
struct fb_info *info;
struct drm_gem_object *gobj;
@@ -224,7 +223,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj);
+ format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format,
+ mode_cmd.modifier[0]);
+ ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon object %d\n", ret);
return ret;
@@ -236,7 +237,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
ret = -ENOMEM;
goto err_radeon_fbdev_destroy_pinned_object;
}
- ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto err_kfree;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3102f6c2d055..9e34da2cacef 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -40,6 +40,7 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
+struct drm_format_info;
struct edid;
struct drm_edid;
@@ -890,6 +891,7 @@ extern void
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
int radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *rfb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index ab525668939a..faf50d872be3 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -53,6 +53,7 @@ config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_DISPLAY_DP_HELPER
help
This selects support for Rockchip SoC specific extensions
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 186f6452a7d3..b50927a824b4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -2579,12 +2579,13 @@ static int vop2_win_init(struct vop2 *vop2)
}
/*
- * The window registers are only updated when config done is written.
- * Until that they read back the old value. As we read-modify-write
- * these registers mark them as non-volatile. This makes sure we read
- * the new values from the regmap register cache.
+ * The window and video port registers are only updated when config
+ * done is written. Until that they read back the old value. As we
+ * read-modify-write these registers mark them as non-volatile. This
+ * makes sure we read the new values from the regmap register cache.
*/
static const struct regmap_range vop2_nonvolatile_range[] = {
+ regmap_reg_range(RK3568_VP0_CTRL_BASE, RK3588_VP3_CTRL_BASE + 255),
regmap_reg_range(0x1000, 0x23ff),
};
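
Marking these ranges non-volatile only takes effect together with a regmap that caches them. A hedged sketch of how such a range table typically plugs into a volatile_reg callback; the callback and register numbers below are illustrative, not the VOP2 driver's actual ones:

#include <linux/device.h>
#include <linux/regmap.h>

static const struct regmap_range example_nonvolatile_ranges[] = {
	regmap_reg_range(0x0c00, 0x0fff),	/* hypothetical video-port block */
	regmap_reg_range(0x1000, 0x23ff),	/* window registers */
};

/* Registers outside the cached ranges are treated as volatile and always
 * read from hardware; those inside are served from the register cache.
 */
static bool example_volatile_reg(struct device *dev, unsigned int reg)
{
	return !regmap_reg_in_ranges(reg, example_nonvolatile_ranges,
				     ARRAY_SIZE(example_nonvolatile_ranges));
}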
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 41a285ec889f..8ede07fb7a21 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -526,7 +526,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
if (drm_gem_is_imported(gem)) {
dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
- dma_buf_detach(gem->dma_buf, gem->import_attach);
+ dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
}
}
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 7299fa8971ce..981dada8f3a8 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -1033,13 +1033,14 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
NULL : &result->dst_pitch;
drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
- buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index e32e680c7197..71c6ccad4b99 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -130,10 +130,10 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
static void virtio_gpu_shutdown(struct virtio_device *vdev)
{
- /*
- * drm does its own synchronization on shutdown.
- * Do nothing here, opt out of device reset.
- */
+ struct drm_device *dev = vdev->priv;
+
+ /* stop talking to the device */
+ drm_dev_unplug(dev);
}
static void virtio_gpu_config_changed(struct virtio_device *vdev)
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 2bb2bc052120..714d5702dfd7 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -5,6 +5,7 @@ config DRM_XE
depends on KUNIT || !KUNIT
depends on INTEL_VSEC || !INTEL_VSEC
depends on X86_PLATFORM_DEVICES || !(X86 && ACPI)
+ depends on PAGE_SIZE_4KB || COMPILE_TEST || BROKEN
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
diff --git a/drivers/gpu/drm/xe/regs/xe_bars.h b/drivers/gpu/drm/xe/regs/xe_bars.h
index ce05b6ae832f..880140d6ccdc 100644
--- a/drivers/gpu/drm/xe/regs/xe_bars.h
+++ b/drivers/gpu/drm/xe/regs/xe_bars.h
@@ -7,5 +7,6 @@
#define GTTMMADR_BAR 0 /* MMIO + GTT */
#define LMEM_BAR 2 /* VRAM */
+#define VF_LMEM_BAR 9 /* VF VRAM */
#endif
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 18f27da47a36..1be2415966df 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -812,7 +812,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
}
if (ttm_bo->type == ttm_bo_type_sg) {
- ret = xe_bo_move_notify(bo, ctx);
+ if (new_mem->mem_type == XE_PL_SYSTEM)
+ ret = xe_bo_move_notify(bo, ctx);
if (!ret)
ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
return ret;
@@ -2438,7 +2439,6 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
.no_wait_gpu = false,
.gfp_retry_mayfail = true,
};
- struct pin_cookie cookie;
int ret;
if (vm) {
@@ -2449,10 +2449,10 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
ctx.resv = xe_vm_resv(vm);
}
- cookie = xe_vm_set_validating(vm, allow_res_evict);
+ xe_vm_set_validating(vm, allow_res_evict);
trace_xe_bo_validate(bo);
ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
- xe_vm_clear_validating(vm, allow_res_evict, cookie);
+ xe_vm_clear_validating(vm, allow_res_evict);
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index 8ec1ff1e4e80..e9b46a2d0019 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -267,7 +267,8 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro
pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
if (!pdev)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENODEV);
+ pci_dev_put(pdev);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 6dc84e4ed281..6ece4defa9df 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -681,6 +681,7 @@ static void sriov_update_device_info(struct xe_device *xe)
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
xe->info.probe_display = 0;
+ xe->info.has_heci_cscfi = 0;
xe->info.has_heci_gscfi = 0;
xe->info.skip_guc_pc = 1;
xe->info.skip_pcode = 1;
@@ -801,10 +802,6 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
- err = xe_devcoredump_init(xe);
- if (err)
- return err;
-
/*
* From here on, if a step fails, make sure a Driver-FLR is triggered
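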
*/
@@ -869,6 +866,10 @@ int xe_device_probe(struct xe_device *xe)
XE_WA(xe->tiles->media_gt, 15015404425_disable))
XE_DEVICE_WA_DISABLE(xe, 15015404425);
+ err = xe_devcoredump_init(xe);
+ if (err)
+ return err;
+
xe_nvm_init(xe);
err = xe_heci_gsc_init(xe);
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index e5fd0cd537bc..bd9015761aa0 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -160,8 +160,13 @@ static int late_bind_create_files(struct device *dev)
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
&cap, NULL);
- if (ret)
+ if (ret) {
+ if (ret == -ENXIO) {
+ drm_dbg(&xe->drm, "Late binding not supported by firmware\n");
+ ret = 0;
+ }
goto out;
+ }
if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) {
ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 6581cb0f0e59..247e41c1c48d 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -123,11 +123,19 @@ static int parse(FILE *input, FILE *csource, FILE *cheader, char *prefix)
return 0;
}
+/* Avoid GNU vs POSIX basename() discrepancy, just use our own */
+static const char *xbasename(const char *s)
+{
+ const char *p = strrchr(s, '/');
+
+ return p ? p + 1 : s;
+}
+
static int fn_to_prefix(const char *fn, char *prefix, size_t size)
{
size_t len;
- fn = basename(fn);
+ fn = xbasename(fn);
len = strlen(fn);
if (len > size - 1)
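
xe_gen_wa_oob is a host build tool, so the portability concern above is plain libc: POSIX basename() may modify its argument and the GNU variant has a different prototype depending on header order, so the tool carries its own const-safe helper. A standalone illustration:

#include <stdio.h>
#include <string.h>

static const char *xbasename(const char *s)
{
	const char *p = strrchr(s, '/');

	return p ? p + 1 : s;
}

int main(void)
{
	/* Prints "xe_wa_oob.rules" regardless of which libc is in use. */
	printf("%s\n", xbasename("drivers/gpu/drm/xe/xe_wa_oob.rules"));
	return 0;
}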
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 35489fa81825..bdbd15f3afe3 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -16,6 +16,7 @@
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
+#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_pm.h"
@@ -47,9 +48,16 @@ static int pf_alloc_metadata(struct xe_gt *gt)
static void pf_init_workers(struct xe_gt *gt)
{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}
+static void pf_fini_workers(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ disable_work_sync(&gt->sriov.pf.workers.restart);
+}
+
/**
* xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -79,6 +87,21 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static void pf_fini_action(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ pf_fini_workers(gt);
+}
+
+static int pf_init_late(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt);
+}
+
/**
* xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -95,7 +118,15 @@ int xe_gt_sriov_pf_init(struct xe_gt *gt)
if (err)
return err;
- return xe_gt_sriov_pf_migration_init(gt);
+ err = xe_gt_sriov_pf_migration_init(gt);
+ if (err)
+ return err;
+
+ err = pf_init_late(gt);
+ if (err)
+ return err;
+
+ return 0;
}
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
@@ -230,3 +261,27 @@ void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
pf_queue_restart(gt);
}
+
+static void pf_flush_restart(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ flush_work(&gt->sriov.pf.workers.restart);
+}
+
+/**
+ * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt)
+{
+ /* don't wait if there is another ongoing reset */
+ if (xe_guc_read_stopped(&gt->uc.guc))
+ return -EBUSY;
+
+ pf_flush_restart(gt);
+ return 0;
+}
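
pf_init_late() above relies on a device-managed action so the restart worker is reliably disabled at unbind without an explicit fini call chain. A hedged sketch of that pattern with illustrative names:

#include <linux/device.h>
#include <linux/workqueue.h>

struct example_pf {
	struct work_struct restart_work;
};

static void example_pf_fini(void *arg)
{
	struct example_pf *pf = arg;

	/* Runs automatically when the device is unbound. */
	disable_work_sync(&pf->restart_work);
}

static int example_pf_init_late(struct device *dev, struct example_pf *pf)
{
	/* On failure the action runs immediately, keeping teardown balanced. */
	return devm_add_action_or_reset(dev, example_pf_fini, pf);
}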
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index e2b2ff8132dc..e7fde3f9937a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -11,6 +11,7 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
int xe_gt_sriov_pf_init(struct xe_gt *gt);
+int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index bf679b21f485..3ed245e04d0c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -22,6 +22,7 @@
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_pm.h"
+#include "xe_sriov_pf.h"
/*
* /sys/kernel/debug/dri/0/
@@ -205,7 +206,8 @@ static int CONFIG##_set(void *data, u64 val) \
return -EOVERFLOW; \
\
xe_pm_runtime_get(xe); \
- err = xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
+ err = xe_sriov_pf_wait_ready(xe) ?: \
+ xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
xe_pm_runtime_put(xe); \
\
return err; \
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index 859a3ba91be5..243dad3e2418 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -1817,6 +1817,12 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
str_yes_no(snapshot->kernel_reserved));
for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+ /*
+ * FIXME: During devcoredump print we should avoid accessing the
+ * driver pointers for gt or engine. Printing should be done only
+ * using the snapshot captured. Here we are accessing the gt
+ * pointer. It should be fixed.
+ */
list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
capture_class, false);
snapshot_print_by_list_order(snapshot, p, type, list);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index b6acccfcd351..3f4e6a46ff16 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -95,12 +95,8 @@ struct g2h_fence {
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
+ memset(g2h_fence, 0, sizeof(*g2h_fence));
g2h_fence->response_buffer = response_buffer;
- g2h_fence->response_data = 0;
- g2h_fence->response_len = 0;
- g2h_fence->fail = false;
- g2h_fence->retry = false;
- g2h_fence->done = false;
g2h_fence->seqno = ~0x0;
}
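
The memset() form above zeroes every member, including any fields added to struct g2h_fence later, so only the non-zero defaults need explicit assignment. A standalone sketch of the same initializer shape, with an illustrative struct rather than the real one:

#include <stdbool.h>
#include <string.h>

struct example_fence {
	void *response_buffer;
	unsigned int response_data;
	unsigned int response_len;
	unsigned int seqno;
	bool fail, retry, done;
};

static void example_fence_init(struct example_fence *f, void *response_buffer)
{
	memset(f, 0, sizeof(*f));	/* covers current and future members */
	f->response_buffer = response_buffer;
	f->seqno = ~0x0;		/* sentinel: no sequence number yet */
}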
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 87a6dcb1b4b5..c926f840c87b 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -75,25 +75,18 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt)
enum xe_hw_engine_id id;
struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs;
struct xe_device *xe = gt_to_xe(gt);
- int err;
group_rcs_ccs = hw_engine_group_alloc(xe);
- if (IS_ERR(group_rcs_ccs)) {
- err = PTR_ERR(group_rcs_ccs);
- goto err_group_rcs_ccs;
- }
+ if (IS_ERR(group_rcs_ccs))
+ return PTR_ERR(group_rcs_ccs);
group_bcs = hw_engine_group_alloc(xe);
- if (IS_ERR(group_bcs)) {
- err = PTR_ERR(group_bcs);
- goto err_group_bcs;
- }
+ if (IS_ERR(group_bcs))
+ return PTR_ERR(group_bcs);
group_vcs_vecs = hw_engine_group_alloc(xe);
- if (IS_ERR(group_vcs_vecs)) {
- err = PTR_ERR(group_vcs_vecs);
- goto err_group_vcs_vecs;
- }
+ if (IS_ERR(group_vcs_vecs))
+ return PTR_ERR(group_vcs_vecs);
for_each_hw_engine(hwe, gt, id) {
switch (hwe->class) {
@@ -116,15 +109,6 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt)
}
return 0;
-
-err_group_vcs_vecs:
- kfree(group_vcs_vecs);
-err_group_bcs:
- kfree(group_bcs);
-err_group_rcs_ccs:
- kfree(group_rcs_ccs);
-
- return err;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index f08fc4377d25..c17ed1ae8649 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -332,6 +332,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
int ret = 0;
u32 reg_val, max;
struct xe_reg rapl_limit;
+ u64 max_supp_power_limit = 0;
mutex_lock(&hwmon->hwmon_lock);
@@ -356,6 +357,20 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
goto unlock;
}
+ /*
+ * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to
+ * the supported maximum (U12.3 format).
+	 * This avoids truncation during the reg_val calculation below and ensures that a
+	 * valid power limit is sent to pcode, which clamps it to the card-supported value.
+ */
+ max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;
+ if (value > max_supp_power_limit) {
+ value = max_supp_power_limit;
+ drm_info(&hwmon->xe->drm,
+ "Power limit clamped as selected %s exceeds channel %d limit\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ }
+
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
@@ -739,9 +754,23 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
{
int ret;
u32 uval;
+ u64 max_crit_power_curr = 0;
mutex_lock(&hwmon->hwmon_lock);
+ /*
+ * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1
+ * max supported value, clamp it to the command's max (U10.6 format).
+	 * This avoids truncation during the uval calculation below and ensures that a valid
+	 * power limit is sent to pcode, which clamps it to the card-supported value.
+ */
+ max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;
+ if (value > max_crit_power_curr) {
+ value = max_crit_power_curr;
+ drm_info(&hwmon->xe->drm,
+ "Power limit clamped as selected exceeds channel %d limit\n",
+ channel);
+ }
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
ret = xe_hwmon_pcode_write_i1(hwmon, uval);
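
Both clamps above bound the user-supplied value to the widest value the fixed-point register field can represent before it is scaled, so the conversion can never silently truncate. A standalone sketch of the pattern with made-up field parameters (not the hardware's actual U12.3/U10.6 layouts):

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK	0x7fffu		/* illustrative 15-bit register field */
#define FRAC_BITS	3u		/* illustrative fractional bits */
#define SCALE		1000000u	/* sysfs microunits per hardware unit */

static uint32_t encode_limit(uint64_t value)
{
	uint64_t max = (uint64_t)(FIELD_MASK >> FRAC_BITS) * SCALE;

	if (value > max)
		value = max;		/* clamp instead of truncating */

	/* shift into fixed point, round to nearest */
	return (uint32_t)(((value << FRAC_BITS) + SCALE / 2) / SCALE);
}

int main(void)
{
	printf("0x%x\n", encode_limit(5000000000ull));	/* clamps, prints 0x7ff8 */
	return 0;
}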
diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c
index db9c0340be5c..bc7dc2099470 100644
--- a/drivers/gpu/drm/xe/xe_i2c.c
+++ b/drivers/gpu/drm/xe/xe_i2c.c
@@ -96,8 +96,8 @@ static int xe_i2c_register_adapter(struct xe_i2c *i2c)
int ret;
fwnode = fwnode_create_software_node(xe_i2c_adapter_properties, NULL);
- if (!fwnode)
- return -ENOMEM;
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
/*
* Not using platform_device_register_full() here because we don't have
@@ -283,6 +283,9 @@ int xe_i2c_probe(struct xe_device *xe)
if (xe->info.platform != XE_BATTLEMAGE)
return 0;
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
xe_i2c_read_endpoint(xe_root_tile_mmio(xe), &ep);
if (ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
return 0;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index ba1cff2e4cda..84f412fd3c5d 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -408,7 +408,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
/* Special layout, prepared below.. */
vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
- XE_VM_FLAG_SET_TILE_ID(tile));
+ XE_VM_FLAG_SET_TILE_ID(tile), NULL);
if (IS_ERR(vm))
return ERR_CAST(vm);
@@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
!IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
int buf_offset = 0;
+ void *bounce;
+ int err;
+
+ BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
+ bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
+ if (!bounce)
+ return -ENOMEM;
/*
* Less than ideal for large unaligned access but this should be
* fairly rare, can fixup if this becomes common.
*/
do {
- u8 bounce[XE_CACHELINE_BYTES];
- void *ptr = (void *)bounce;
- int err;
int copy_bytes = min_t(int, bytes_left,
XE_CACHELINE_BYTES -
(offset & XE_CACHELINE_MASK));
@@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
err = xe_migrate_access_memory(m, bo,
offset &
~XE_CACHELINE_MASK,
- (void *)ptr,
- sizeof(bounce), 0);
+ bounce,
+ XE_CACHELINE_BYTES, 0);
if (err)
- return err;
+ break;
if (write) {
- memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+ memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
err = xe_migrate_access_memory(m, bo,
offset & ~XE_CACHELINE_MASK,
- (void *)ptr,
- sizeof(bounce), write);
+ bounce,
+ XE_CACHELINE_BYTES, write);
if (err)
- return err;
+ break;
} else {
- memcpy(buf + buf_offset, ptr + ptr_offset,
+ memcpy(buf + buf_offset, bounce + ptr_offset,
copy_bytes);
}
@@ -1861,7 +1865,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
offset += copy_bytes;
} while (bytes_left);
- return 0;
+ kfree(bounce);
+ return err;
}
dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
@@ -1882,8 +1887,11 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
else
current_bytes = min_t(int, bytes_left, cursor.size);
- if (fence)
- dma_fence_put(fence);
+ if (current_bytes & ~PAGE_MASK) {
+ int pitch = 4;
+
+ current_bytes = min_t(int, current_bytes, S16_MAX * pitch);
+ }
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
@@ -1892,11 +1900,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
if (IS_ERR(__fence)) {
- if (fence)
+ if (fence) {
dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
fence = __fence;
goto out_err;
}
+
+ dma_fence_put(fence);
fence = __fence;
buf += current_bytes;
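
The unaligned path above now stages head and tail bytes through a kmalloc()'d cacheline-sized bounce buffer that is freed on every exit, instead of a stack array. A standalone sketch of the staging idea only; in the real code the migration engine performs the aligned transfers, while plain memcpy() stands in for them here:

#include <stdlib.h>
#include <string.h>

#define LINE_BYTES 64u	/* stand-in for XE_CACHELINE_BYTES */

/* Copy 'len' bytes from 'src' to 'dst' through a heap bounce buffer. */
static int copy_via_bounce(unsigned char *dst, const unsigned char *src, size_t len)
{
	unsigned char *bounce = malloc(LINE_BYTES);

	if (!bounce)
		return -1;

	while (len) {
		size_t chunk = len < LINE_BYTES ? len : LINE_BYTES;

		memcpy(bounce, src, chunk);	/* aligned "device" read */
		memcpy(dst, bounce, chunk);	/* hand back only what was asked for */
		dst += chunk;
		src += chunk;
		len -= chunk;
	}

	free(bounce);	/* single free covers every loop exit */
	return 0;
}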
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 107ffe87808c..d9391bd08194 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -27,6 +27,8 @@
#define DEFAULT_PROBE_DISPLAY true
#define DEFAULT_VRAM_BAR_SIZE 0
#define DEFAULT_FORCE_PROBE CONFIG_DRM_XE_FORCE_PROBE
+#define DEFAULT_MAX_VFS ~0
+#define DEFAULT_MAX_VFS_STR "unlimited"
#define DEFAULT_WEDGED_MODE 1
#define DEFAULT_SVM_NOTIFIER_SIZE 512
@@ -34,6 +36,9 @@ struct xe_modparam xe_modparam = {
.probe_display = DEFAULT_PROBE_DISPLAY,
.guc_log_level = DEFAULT_GUC_LOG_LEVEL,
.force_probe = DEFAULT_FORCE_PROBE,
+#ifdef CONFIG_PCI_IOV
+ .max_vfs = DEFAULT_MAX_VFS,
+#endif
.wedged_mode = DEFAULT_WEDGED_MODE,
.svm_notifier_size = DEFAULT_SVM_NOTIFIER_SIZE,
/* the rest are 0 by default */
@@ -79,7 +84,8 @@ MODULE_PARM_DESC(force_probe,
module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400);
MODULE_PARM_DESC(max_vfs,
"Limit number of Virtual Functions (VFs) that could be managed. "
- "(0 = no VFs [default]; N = allow up to N VFs)");
+ "(0=no VFs; N=allow up to N VFs "
+ "[default=" DEFAULT_MAX_VFS_STR "])");
#endif
module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index d991fbd90f20..5729e7d3e335 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1941,7 +1941,7 @@ static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param)
/* If not provided, OA unit defaults to OA unit 0 as per uapi */
if (!param->oa_unit)
- param->oa_unit = &xe_device_get_gt(oa->xe, 0)->oa.oa_unit[0];
+ param->oa_unit = &xe_root_mmio_gt(oa->xe)->oa.oa_unit[0];
/* When we have an exec_q, get hwe from the exec_q */
if (param->exec_q) {
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index 8813efdcafbb..af05db07162e 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -3,6 +3,10 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <linux/bitops.h>
+#include <linux/pci.h>
+
+#include "regs/xe_bars.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
@@ -12,6 +16,7 @@
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_printk.h"
@@ -127,6 +132,18 @@ static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs,
}
}
+static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ u32 sizes;
+
+ sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs);
+ if (!sizes)
+ return 0;
+
+ return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -138,6 +155,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_assert(xe, num_vfs <= total_vfs);
xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));
+ err = xe_sriov_pf_wait_ready(xe);
+ if (err)
+ goto out;
+
/*
* We must hold additional reference to the runtime PM to keep PF in D0
* during VFs lifetime, as our VFs do not implement the PM capability.
@@ -153,6 +174,12 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err < 0)
goto failed;
+ if (IS_DGFX(xe)) {
+ err = resize_vf_vram_bar(xe, num_vfs);
+ if (err)
+ xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err);
+ }
+
err = pci_enable_sriov(pdev, num_vfs);
if (err < 0)
goto failed;
@@ -169,7 +196,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
failed:
pf_unprovision_vfs(xe, num_vfs);
xe_pm_runtime_put(xe);
-
+out:
xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
num_vfs, str_plural(num_vfs), ERR_PTR(err));
return err;
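
resize_vf_vram_bar() above treats the value returned by pci_iov_vf_bar_get_sizes() as a bitmask of supported sizes and picks the most significant set bit via __fls(), i.e. the largest supported VF LMEM BAR size. A standalone illustration of that selection; the mask value is hypothetical:

#include <stdio.h>

/* Userspace stand-in for the kernel's __fls(): index of the highest set bit. */
static int highest_set_bit(unsigned int mask)
{
	return mask ? 31 - __builtin_clz(mask) : -1;
}

int main(void)
{
	unsigned int sizes = 0x1c0;	/* hypothetical: three supported sizes */

	printf("chosen size index: %d\n", highest_set_bit(sizes));	/* prints 8 */
	return 0;
}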
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c
index d92ec0f515b0..ca95f2a4d4ef 100644
--- a/drivers/gpu/drm/xe/xe_pxp_submit.c
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.c
@@ -101,7 +101,7 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
xe_assert(xe, hwe);
/* PXP instructions must be issued from PPGTT */
- vm = xe_vm_create(xe, XE_VM_FLAG_GSC);
+ vm = xe_vm_create(xe, XE_VM_FLAG_GSC, NULL);
if (IS_ERR(vm))
return PTR_ERR(vm);
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
index 1c3c04d52f55..90244fe59b59 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.c
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -54,10 +54,10 @@ xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgea
write_unlock(&shrinker->lock);
}
-static s64 xe_shrinker_walk(struct xe_device *xe,
- struct ttm_operation_ctx *ctx,
- const struct xe_bo_shrink_flags flags,
- unsigned long to_scan, unsigned long *scanned)
+static s64 __xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
{
unsigned int mem_type;
s64 freed = 0, lret;
@@ -93,6 +93,48 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
return freed;
}
+/*
+ * Try shrinking idle objects without writeback first, then if not sufficient,
+ * try also non-idle objects and finally if that's not sufficient either,
+ * add writeback. This avoids stalls and explicit writebacks with light or
+ * moderate memory pressure.
+ */
+static s64 xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
+{
+ bool no_wait_gpu = true;
+ struct xe_bo_shrink_flags save_flags = flags;
+ s64 lret, freed;
+
+ swap(no_wait_gpu, ctx->no_wait_gpu);
+ save_flags.writeback = false;
+ lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
+ swap(no_wait_gpu, ctx->no_wait_gpu);
+ if (lret < 0 || *scanned >= to_scan)
+ return lret;
+
+ freed = lret;
+ if (!ctx->no_wait_gpu) {
+ lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
+ if (lret < 0)
+ return lret;
+ freed += lret;
+ if (*scanned >= to_scan)
+ return freed;
+ }
+
+ if (flags.writeback) {
+ lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned);
+ if (lret < 0)
+ return lret;
+ freed += lret;
+ }
+
+ return freed;
+}
+
static unsigned long
xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
@@ -199,6 +241,7 @@ static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_con
runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);
shrink_flags.purge = false;
+
lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
nr_to_scan, &nr_scanned);
if (lret >= 0)
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c
index afbdd894bd6e..27ddf3cc80e9 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.c
@@ -9,6 +9,7 @@
#include "xe_assert.h"
#include "xe_device.h"
+#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
@@ -103,6 +104,32 @@ int xe_sriov_pf_init_early(struct xe_device *xe)
}
/**
+ * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate.
+ * @xe: the &xe_device to test
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_wait_ready(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err;
+
+ if (xe_device_wedged(xe))
+ return -ECANCELED;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_wait_ready(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information.
* @xe: the &xe_device to print info from
* @p: the &drm_printer
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h
index c392c3fcf085..e3b34f8f5e04 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.h
@@ -15,6 +15,7 @@ struct xe_device;
#ifdef CONFIG_PCI_IOV
bool xe_sriov_pf_readiness(struct xe_device *xe);
int xe_sriov_pf_init_early(struct xe_device *xe);
+int xe_sriov_pf_wait_ready(struct xe_device *xe);
void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
#else
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index f87276df18f2..82872a51f098 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -77,6 +77,7 @@ static void user_fence_worker(struct work_struct *w)
{
struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
+ WRITE_ONCE(ufence->signalled, 1);
if (mmget_not_zero(ufence->mm)) {
kthread_use_mm(ufence->mm);
if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
@@ -91,7 +92,6 @@ static void user_fence_worker(struct work_struct *w)
* Wake up waiters only after updating the ufence state, allowing the UMD
* to safely reuse the same ufence without encountering -EBUSY errors.
*/
- WRITE_ONCE(ufence->signalled, 1);
wake_up_all(&ufence->xe->ufence_wq);
user_fence_put(ufence);
}
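
Moving the WRITE_ONCE() above the copy-out and wake means a waiter woken from ufence_wq always observes the fence as signalled and can reuse it without hitting -EBUSY. A hedged sketch of the publish-then-wake ordering with illustrative names:

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/wait.h>

struct example_ufence {
	int signalled;
	wait_queue_head_t *wq;
};

static void example_signal(struct example_ufence *uf)
{
	WRITE_ONCE(uf->signalled, 1);	/* publish the new state first */
	wake_up_all(uf->wq);		/* then wake anyone polling it */
}

static bool example_is_signalled(struct example_ufence *uf)
{
	return READ_ONCE(uf->signalled);
}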
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 3e0c3af235f2..465bda355443 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -164,7 +164,7 @@ static int vf_uc_load_hw(struct xe_uc *uc)
err = xe_guc_opt_in_features_enable(&uc->guc);
if (err)
- return err;
+ goto err_out;
err = xe_gt_record_default_lrcs(uc_to_gt(uc));
if (err)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 2035604121e6..d60c4b115304 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1610,8 +1610,12 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
- if (IS_ERR(vm->scratch_pt[id][i]))
- return PTR_ERR(vm->scratch_pt[id][i]);
+ if (IS_ERR(vm->scratch_pt[id][i])) {
+ int err = PTR_ERR(vm->scratch_pt[id][i]);
+
+ vm->scratch_pt[id][i] = NULL;
+ return err;
+ }
xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
}
@@ -1640,7 +1644,7 @@ static void xe_vm_free_scratch(struct xe_vm *vm)
}
}
-struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
+struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
{
struct drm_gem_object *vm_resv_obj;
struct xe_vm *vm;
@@ -1661,9 +1665,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->xe = xe;
vm->size = 1ull << xe->info.va_bits;
-
vm->flags = flags;
+ if (xef)
+ vm->xef = xe_file_get(xef);
/**
* GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
* manipulated under the PXP mutex. However, the PXP mutex can be taken
@@ -1794,6 +1799,20 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
+ if (xef && xe->info.has_asid) {
+ u32 asid;
+
+ down_write(&xe->usm.lock);
+ err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
+ XA_LIMIT(1, XE_MAX_ASID - 1),
+ &xe->usm.next_asid, GFP_KERNEL);
+ up_write(&xe->usm.lock);
+ if (err < 0)
+ goto err_unlock_close;
+
+ vm->usm.asid = asid;
+ }
+
trace_xe_vm_create(vm);
return vm;
@@ -1814,6 +1833,8 @@ err_no_resv:
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
+ if (vm->xef)
+ xe_file_put(vm->xef);
kfree(vm);
if (flags & XE_VM_FLAG_LR_MODE)
xe_pm_runtime_put(xe);
@@ -2059,9 +2080,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_vm_create *args = data;
- struct xe_tile *tile;
struct xe_vm *vm;
- u32 id, asid;
+ u32 id;
int err;
u32 flags = 0;
@@ -2097,29 +2117,10 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
flags |= XE_VM_FLAG_FAULT_MODE;
- vm = xe_vm_create(xe, flags);
+ vm = xe_vm_create(xe, flags, xef);
if (IS_ERR(vm))
return PTR_ERR(vm);
- if (xe->info.has_asid) {
- down_write(&xe->usm.lock);
- err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
- XA_LIMIT(1, XE_MAX_ASID - 1),
- &xe->usm.next_asid, GFP_KERNEL);
- up_write(&xe->usm.lock);
- if (err < 0)
- goto err_close_and_put;
-
- vm->usm.asid = asid;
- }
-
- vm->xef = xe_file_get(xef);
-
- /* Record BO memory for VM pagetable created against client */
- for_each_tile(tile, xe, id)
- if (vm->pt_root[id])
- xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
-
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
/* Warning: Security issue - never enable by default */
args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
@@ -3421,6 +3422,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
free_bind_ops:
if (args->num_binds > 1)
kvfree(*bind_ops);
+ *bind_ops = NULL;
return err;
}
@@ -3527,7 +3529,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct xe_exec_queue *q = NULL;
u32 num_syncs, num_ufence = 0;
struct xe_sync_entry *syncs = NULL;
- struct drm_xe_vm_bind_op *bind_ops;
+ struct drm_xe_vm_bind_op *bind_ops = NULL;
struct xe_vma_ops vops;
struct dma_fence *fence;
int err;
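
With the ioctl path above simplified, ASID allocation now happens inside xe_vm_create() using the same cyclic xarray allocation that used to live in the ioctl. A hedged sketch of that allocation pattern with illustrative names and limits:

#include <linux/xarray.h>

#define EXAMPLE_MAX_ASID 1024

static DEFINE_XARRAY_ALLOC1(example_asid_xa);	/* IDs start at 1 */
static u32 example_next_asid;

static int example_alloc_asid(void *vm, u32 *asid)
{
	int err;

	/* Cycles through [1, MAX-1], remembering where to continue next time. */
	err = xa_alloc_cyclic(&example_asid_xa, asid, vm,
			      XA_LIMIT(1, EXAMPLE_MAX_ASID - 1),
			      &example_next_asid, GFP_KERNEL);
	return err < 0 ? err : 0;
}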
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 3475a118f666..2ecb417c19a2 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -26,7 +26,7 @@ struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;
-struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
+struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);
@@ -315,22 +315,14 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
* Register this task as currently making bos resident for the vm. Intended
* to avoid eviction by the same task of shared bos bound to the vm.
* Call with the vm's resv lock held.
- *
- * Return: A pin cookie that should be used for xe_vm_clear_validating().
*/
-static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
- bool allow_res_evict)
+static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
{
- struct pin_cookie cookie = {};
-
if (vm && !allow_res_evict) {
xe_vm_assert_held(vm);
- cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
/* Pairs with READ_ONCE in xe_vm_is_validating() */
WRITE_ONCE(vm->validating, current);
}
-
- return cookie;
}
/**
@@ -338,17 +330,14 @@ static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
* @vm: Pointer to the vm or NULL
* @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
* value as for xe_vm_set_validating().
- * @cookie: Cookie obtained from xe_vm_set_validating().
*
* Register this task as currently making bos resident for the vm. Intended
* to avoid eviction by the same task of shared bos bound to the vm.
* Call with the vm's resv lock held.
*/
-static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
- struct pin_cookie cookie)
+static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict)
{
if (vm && !allow_res_evict) {
- lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
/* Pairs with READ_ONCE in xe_vm_is_validating() */
WRITE_ONCE(vm->validating, NULL);
}
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index bed6088e1bb3..8a07feef503b 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -266,7 +266,7 @@ struct xe_vm {
* up for revalidation. Protected from access with the
* @invalidated_lock. Removing items from the list
* additionally requires @lock in write mode, and adding
- * items to the list requires either the @userptr.notifer_lock in
+ * items to the list requires either the @userptr.notifier_lock in
* write mode, OR @lock in write mode.
*/
struct list_head invalidated;
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index 5749bad9c285..274989ea1fb4 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -19,7 +19,7 @@ kernel::pci_device_table!(
MODULE_PCI_TABLE,
<NovaCore as pci::Driver>::IdInfo,
[(
- pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as _),
+ pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as u32),
()
)]
);
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
index 0fdece652587..2931912ddba0 100644
--- a/drivers/gpu/nova-core/firmware.rs
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -30,11 +30,12 @@ pub(crate) struct Firmware {
impl Firmware {
pub(crate) fn new(dev: &device::Device, chipset: Chipset, ver: &str) -> Result<Firmware> {
- let mut chip_name = CString::try_from_fmt(fmt!("{}", chipset))?;
+ let mut chip_name = CString::try_from_fmt(fmt!("{chipset}"))?;
chip_name.make_ascii_lowercase();
+ let chip_name = &*chip_name;
let request = |name_| {
- CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
+ CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name_}-{ver}.bin"))
.and_then(|path| firmware::Firmware::request(&path, dev))
};
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
index de14f2e92636..cb2bbb30cba1 100644
--- a/drivers/gpu/nova-core/nova_core.rs
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -18,7 +18,7 @@ pub(crate) const MODULE_NAME: &kernel::str::CStr = <LocalModule as kernel::Modul
kernel::module_pci_driver! {
type: driver::NovaCore,
name: "NovaCore",
- author: "Danilo Krummrich",
+ authors: ["Danilo Krummrich"],
description: "Nova Core GPU driver",
license: "GPL v2",
firmware: [],
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index 5ccfb61f850a..d49fddf6a3c6 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -36,7 +36,7 @@ impl NV_PMC_BOOT_0 {
pub(crate) fn chipset(self) -> Result<Chipset> {
self.architecture()
.map(|arch| {
- ((arch as u32) << Self::IMPLEMENTATION.len()) | self.implementation() as u32
+ ((arch as u32) << Self::IMPLEMENTATION.len()) | u32::from(self.implementation())
})
.and_then(Chipset::try_from)
}
diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs
index cdf668073480..a3e6de1779d4 100644
--- a/drivers/gpu/nova-core/regs/macros.rs
+++ b/drivers/gpu/nova-core/regs/macros.rs
@@ -307,7 +307,7 @@ macro_rules! register {
pub(crate) fn [<set_ $field>](mut self, value: $to_type) -> Self {
const MASK: u32 = $name::[<$field:upper _MASK>];
const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
- let value = ((value as u32) << SHIFT) & MASK;
+ let value = (u32::from(value) << SHIFT) & MASK;
self.0 = (self.0 & !MASK) | value;
self
diff --git a/drivers/gpu/nova-core/util.rs b/drivers/gpu/nova-core/util.rs
index 64fb13760764..76cedf3710d7 100644
--- a/drivers/gpu/nova-core/util.rs
+++ b/drivers/gpu/nova-core/util.rs
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
use kernel::prelude::*;
-use kernel::time::{Delta, Instant};
+use kernel::time::{Delta, Instant, Monotonic};
pub(crate) const fn to_lowercase_bytes<const N: usize>(s: &str) -> [u8; N] {
let src = s.as_bytes();
@@ -33,7 +33,7 @@ pub(crate) const fn const_bytes_to_str(bytes: &[u8]) -> &str {
/// TODO[DLAY]: replace with `read_poll_timeout` once it is available.
/// (https://lore.kernel.org/lkml/20250220070611.214262-8-fujita.tomonori@gmail.com/)
pub(crate) fn wait_on<R, F: Fn() -> Option<R>>(timeout: Delta, cond: F) -> Result<R> {
- let start_time = Instant::now();
+ let start_time = Instant::<Monotonic>::now();
loop {
if let Some(ret) = cond() {
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 18f2c92beff8..68e45a26e85f 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -437,7 +437,7 @@ find_active_client(struct list_head *head)
*/
bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
{
- if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+ if (pci_is_display(pdev)) {
/*
* apple-gmux is needed on pre-retina MacBook Pro
* to probe the panel if pdev is the inactive GPU.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a57901203aeb..79997553d8f9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1243,7 +1243,7 @@ config HID_U2FZERO
U2F Zero supports custom commands for blinking the LED
and getting data from the internal hardware RNG.
- The internal hardware can be used to feed the enthropy pool.
+ The internal hardware can be used to feed the entropy pool.
U2F Zero only supports blinking its LED, so this driver doesn't
allow setting the brightness to anything but 1, which will
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 4b45e31f0bab..d27dcfb2b9e4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1213,7 +1213,13 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
- if (!drvdata->input) {
+ /*
+ * Check that input registration succeeded. Checking that
+ * HID_CLAIMED_INPUT is set prevents a UAF when all input devices
+ * were freed during registration due to no usages being mapped,
+ * leaving drvdata->input pointing to freed memory.
+ */
+ if (!drvdata->input || !(hdev->claimed & HID_CLAIMED_INPUT)) {
hid_err(hdev, "Asus input not registered\n");
ret = -ENOMEM;
goto err_stop_hw;
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 234fa82eab07..482f62a78c41 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1288,7 +1288,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.label = "cp2112_gpio";
dev->gc.direction_input = cp2112_gpio_direction_input;
dev->gc.direction_output = cp2112_gpio_direction_output;
- dev->gc.set_rv = cp2112_gpio_set;
+ dev->gc.set = cp2112_gpio_set;
dev->gc.get = cp2112_gpio_get;
dev->gc.base = -1;
dev->gc.ngpio = CP2112_GPIO_MAX_GPIO;
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index e86bda0dab9b..7107071c7c51 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3291,6 +3291,8 @@ static const char *keys[KEY_MAX + 1] = {
[BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect",
[BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode",
[BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR",
+ [BTN_GRIPL] = "BtnGripL", [BTN_GRIPR] = "BtnGripR",
+ [BTN_GRIPL2] = "BtnGripL2", [BTN_GRIPR2] = "BtnGripR2",
[BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber",
[BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil",
[BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger",
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 0ad7d25d9864..69771fd35006 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -101,6 +101,7 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
break;
+ case USB_DEVICE_ID_ELECOM_M_DT2DRBK:
case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
/*
* Report descriptor format:
@@ -123,6 +124,7 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT2DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_010C) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_019B) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a1096283855..149798754570 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -451,6 +451,7 @@
#define USB_DEVICE_ID_ELECOM_M_XT4DRBK 0x00fd
#define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
#define USB_DEVICE_ID_ELECOM_M_DT1DRBK 0x00ff
+#define USB_DEVICE_ID_ELECOM_M_DT2DRBK 0x018d
#define USB_DEVICE_ID_ELECOM_M_HT1URBK_010C 0x010c
#define USB_DEVICE_ID_ELECOM_M_HT1URBK_019B 0x019b
#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D 0x010d
@@ -834,6 +835,8 @@
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093 0x6093
+#define USB_DEVICE_ID_LENOVO_LEGION_GO_DUAL_DINPUT 0x6184
+#define USB_DEVICE_ID_LENOVO_LEGION_GO2_DUAL_DINPUT 0x61ed
#define USB_VENDOR_ID_LETSKETCH 0x6161
#define USB_DEVICE_ID_WP9620N 0x4d15
@@ -907,6 +910,7 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc543
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER 0xc548
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
diff --git a/drivers/hid/hid-input-test.c b/drivers/hid/hid-input-test.c
index 77c2d45ac62a..6f5c71660d82 100644
--- a/drivers/hid/hid-input-test.c
+++ b/drivers/hid/hid-input-test.c
@@ -7,7 +7,7 @@
#include <kunit/test.h>
-static void hid_test_input_set_battery_charge_status(struct kunit *test)
+static void hid_test_input_update_battery_charge_status(struct kunit *test)
{
struct hid_device *dev;
bool handled;
@@ -15,15 +15,15 @@ static void hid_test_input_set_battery_charge_status(struct kunit *test)
dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
- handled = hidinput_set_battery_charge_status(dev, HID_DG_HEIGHT, 0);
+ handled = hidinput_update_battery_charge_status(dev, HID_DG_HEIGHT, 0);
KUNIT_EXPECT_FALSE(test, handled);
KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_UNKNOWN);
- handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 0);
+ handled = hidinput_update_battery_charge_status(dev, HID_BAT_CHARGING, 0);
KUNIT_EXPECT_TRUE(test, handled);
KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_DISCHARGING);
- handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 1);
+ handled = hidinput_update_battery_charge_status(dev, HID_BAT_CHARGING, 1);
KUNIT_EXPECT_TRUE(test, handled);
KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_CHARGING);
}
@@ -63,7 +63,7 @@ static void hid_test_input_get_battery_property(struct kunit *test)
}
static struct kunit_case hid_input_tests[] = {
- KUNIT_CASE(hid_test_input_set_battery_charge_status),
+ KUNIT_CASE(hid_test_input_update_battery_charge_status),
KUNIT_CASE(hid_test_input_get_battery_property),
{ }
};
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index ff1784b5c2a4..f45f856a127f 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -595,13 +595,33 @@ static void hidinput_cleanup_battery(struct hid_device *dev)
dev->battery = NULL;
}
-static void hidinput_update_battery(struct hid_device *dev, int value)
+static bool hidinput_update_battery_charge_status(struct hid_device *dev,
+ unsigned int usage, int value)
+{
+ switch (usage) {
+ case HID_BAT_CHARGING:
+ dev->battery_charge_status = value ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING;
+ return true;
+ }
+
+ return false;
+}
+
+static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
+ int value)
{
int capacity;
if (!dev->battery)
return;
+ if (hidinput_update_battery_charge_status(dev, usage, value)) {
+ power_supply_changed(dev->battery);
+ return;
+ }
+
if (value == 0 || value < dev->battery_min || value > dev->battery_max)
return;
@@ -617,20 +637,6 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
power_supply_changed(dev->battery);
}
}
-
-static bool hidinput_set_battery_charge_status(struct hid_device *dev,
- unsigned int usage, int value)
-{
- switch (usage) {
- case HID_BAT_CHARGING:
- dev->battery_charge_status = value ?
- POWER_SUPPLY_STATUS_CHARGING :
- POWER_SUPPLY_STATUS_DISCHARGING;
- return true;
- }
-
- return false;
-}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
struct hid_field *field, bool is_percentage)
@@ -642,14 +648,9 @@ static void hidinput_cleanup_battery(struct hid_device *dev)
{
}
-static void hidinput_update_battery(struct hid_device *dev, int value)
-{
-}
-
-static bool hidinput_set_battery_charge_status(struct hid_device *dev,
- unsigned int usage, int value)
+static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
+ int value)
{
- return false;
}
#endif /* CONFIG_HID_BATTERY_STRENGTH */
@@ -1515,11 +1516,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
if (usage->type == EV_PWR) {
- bool handled = hidinput_set_battery_charge_status(hid, usage->hid, value);
-
- if (!handled)
- hidinput_update_battery(hid, value);
-
+ hidinput_update_battery(hid, usage->hid, value);
return;
}
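
The net effect of the hid-input.c refactor above is that hidinput_hid_event() makes a single call for EV_PWR usages and the helper decides whether the usage is a charge-status bit or a capacity report. A condensed sketch of the resulting flow (simplified from the hunks above; capacity clamping elided, not a drop-in copy):

#include <linux/hid.h>
#include <linux/power_supply.h>

static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
				    int value)
{
	if (!dev->battery)
		return;

	/* HID_BAT_CHARGING toggles the power-supply status and is terminal. */
	if (hidinput_update_battery_charge_status(dev, usage, value)) {
		power_supply_changed(dev->battery);
		return;
	}

	/* Otherwise the value is a capacity report: clamp against
	 * battery_min/battery_max and notify on change, as before. */
}
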
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 34fa71ceec2b..cce54dd9884a 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1983,6 +1983,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech lightspeed receiver (0xc543) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 10a3bc5f931b..aaef405a717e 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4596,6 +4596,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
{ /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) },
+ { /* Logitech G PRO 2 LIGHTSPEED Wireless Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xc09a) },
{ /* G935 Gaming Headset */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),
diff --git a/drivers/hid/hid-mcp2200.c b/drivers/hid/hid-mcp2200.c
index e6ea0a2140eb..dafdd5b4a079 100644
--- a/drivers/hid/hid-mcp2200.c
+++ b/drivers/hid/hid-mcp2200.c
@@ -279,8 +279,8 @@ static const struct gpio_chip template_chip = {
.get_direction = mcp_get_direction,
.direction_input = mcp_direction_input,
.direction_output = mcp_direction_output,
- .set_rv = mcp_set,
- .set_multiple_rv = mcp_set_multiple,
+ .set = mcp_set,
+ .set_multiple = mcp_set_multiple,
.get = mcp_get,
.get_multiple = mcp_get_multiple,
.base = -1,
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index fcfe9370a887..33603b019f97 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -906,6 +906,10 @@ static int mcp2221_raw_event(struct hid_device *hdev,
}
if (data[2] == MCP2221_I2C_READ_COMPL ||
data[2] == MCP2221_I2C_READ_PARTIAL) {
+ if (!mcp->rxbuf || mcp->rxbuf_idx < 0 || data[3] > 60) {
+ mcp->status = -EINVAL;
+ break;
+ }
buf = mcp->rxbuf;
memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3]);
mcp->rxbuf_idx = mcp->rxbuf_idx + data[3];
@@ -1298,7 +1302,7 @@ static int mcp2221_probe(struct hid_device *hdev,
mcp->gc->direction_input = mcp_gpio_direction_input;
mcp->gc->direction_output = mcp_gpio_direction_output;
mcp->gc->get_direction = mcp_gpio_get_direction;
- mcp->gc->set_rv = mcp_gpio_set;
+ mcp->gc->set = mcp_gpio_set;
mcp->gc->get = mcp_gpio_get;
mcp->gc->ngpio = MCP_NGPIO;
mcp->gc->base = -1;
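
The mcp2221_raw_event() hunk above guards the read-completion path: data[3] is a device-reported length and mcp->rxbuf_idx the current write offset, so both are validated (together with the rxbuf pointer) before the memcpy(). A standalone illustration of that guard, with generic names rather than the driver's:

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Illustrative only: never trust a wire-supplied length. The 60 mirrors the
 * per-report payload cap used in the hunk above. */
static int copy_report_payload(uint8_t *dst, int dst_idx, const uint8_t *report)
{
	uint8_t len = report[3];	/* length claimed by the device */

	if (!dst || dst_idx < 0 || len > 60)
		return -EINVAL;

	memcpy(dst + dst_idx, &report[4], len);
	return len;
}
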
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 294516a8f541..22c6314a8843 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1503,6 +1503,14 @@ static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
(hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
+ if (*size < 608) {
+ dev_info(
+ &hdev->dev,
+ "GT7868Q fixup: report descriptor is only %u bytes, skipping\n",
+ *size);
+ return rdesc;
+ }
+
if (rdesc[607] == 0x15) {
rdesc[607] = 0x25;
dev_info(
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 2738ce947434..0f76e241e0af 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -144,6 +144,9 @@ static void ntrig_report_version(struct hid_device *hdev)
struct usb_device *usb_dev = hid_to_usb_dev(hdev);
unsigned char *data = kmalloc(8, GFP_KERNEL);
+ if (!hid_is_usb(hdev))
+ return;
+
if (!data)
goto err_free;
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index ff11f1ad344d..f619ed10535d 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -124,6 +124,8 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_T609A), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_LEGION_GO_DUAL_DINPUT), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_LEGION_GO2_DUAL_DINPUT), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
@@ -411,6 +413,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT2DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_010C) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_019B) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 949d307c66a8..197126d6e081 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -755,15 +755,12 @@ static int steam_input_register(struct steam_device *steam)
input_set_capability(input, EV_KEY, BTN_THUMBL);
input_set_capability(input, EV_KEY, BTN_THUMB);
input_set_capability(input, EV_KEY, BTN_THUMB2);
+ input_set_capability(input, EV_KEY, BTN_GRIPL);
+ input_set_capability(input, EV_KEY, BTN_GRIPR);
if (steam->quirks & STEAM_QUIRK_DECK) {
input_set_capability(input, EV_KEY, BTN_BASE);
- input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY1);
- input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY2);
- input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY3);
- input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY4);
- } else {
- input_set_capability(input, EV_KEY, BTN_GEAR_DOWN);
- input_set_capability(input, EV_KEY, BTN_GEAR_UP);
+ input_set_capability(input, EV_KEY, BTN_GRIPL2);
+ input_set_capability(input, EV_KEY, BTN_GRIPR2);
}
input_set_abs_params(input, ABS_X, -32767, 32767, 0, 0);
@@ -1419,8 +1416,8 @@ static inline s16 steam_le16(u8 *data)
* 9.4 | BTN_SELECT | menu left
* 9.5 | BTN_MODE | steam logo
* 9.6 | BTN_START | menu right
- * 9.7 | BTN_GEAR_DOWN | left back lever
- * 10.0 | BTN_GEAR_UP | right back lever
+ * 9.7 | BTN_GRIPL | left back lever
+ * 10.0 | BTN_GRIPR | right back lever
* 10.1 | -- | left-pad clicked
* 10.2 | BTN_THUMBR | right-pad clicked
* 10.3 | BTN_THUMB | left-pad touched (but see explanation below)
@@ -1485,8 +1482,8 @@ static void steam_do_input_event(struct steam_device *steam,
input_event(input, EV_KEY, BTN_SELECT, !!(b9 & BIT(4)));
input_event(input, EV_KEY, BTN_MODE, !!(b9 & BIT(5)));
input_event(input, EV_KEY, BTN_START, !!(b9 & BIT(6)));
- input_event(input, EV_KEY, BTN_GEAR_DOWN, !!(b9 & BIT(7)));
- input_event(input, EV_KEY, BTN_GEAR_UP, !!(b10 & BIT(0)));
+ input_event(input, EV_KEY, BTN_GRIPL, !!(b9 & BIT(7)));
+ input_event(input, EV_KEY, BTN_GRIPR, !!(b10 & BIT(0)));
input_event(input, EV_KEY, BTN_THUMBR, !!(b10 & BIT(2)));
input_event(input, EV_KEY, BTN_THUMBL, !!(b10 & BIT(6)));
input_event(input, EV_KEY, BTN_THUMB, lpad_touched || lpad_and_joy);
@@ -1547,8 +1544,8 @@ static void steam_do_input_event(struct steam_device *steam,
* 9.4 | BTN_SELECT | menu left
* 9.5 | BTN_MODE | steam logo
* 9.6 | BTN_START | menu right
- * 9.7 | BTN_TRIGGER_HAPPY3 | left bottom grip button
- * 10.0 | BTN_TRIGGER_HAPPY4 | right bottom grip button
+ * 9.7 | BTN_GRIPL2 | left bottom grip button
+ * 10.0 | BTN_GRIPR2 | right bottom grip button
* 10.1 | BTN_THUMB | left pad pressed
* 10.2 | BTN_THUMB2 | right pad pressed
* 10.3 | -- | left pad touched
@@ -1573,8 +1570,8 @@ static void steam_do_input_event(struct steam_device *steam,
* 12.6 | -- | unknown
* 12.7 | -- | unknown
* 13.0 | -- | unknown
- * 13.1 | BTN_TRIGGER_HAPPY1 | left top grip button
- * 13.2 | BTN_TRIGGER_HAPPY2 | right top grip button
+ * 13.1 | BTN_GRIPL | left top grip button
+ * 13.2 | BTN_GRIPR | right top grip button
* 13.3 | -- | unknown
* 13.4 | -- | unknown
* 13.5 | -- | unknown
@@ -1659,8 +1656,8 @@ static void steam_do_deck_input_event(struct steam_device *steam,
input_event(input, EV_KEY, BTN_SELECT, !!(b9 & BIT(4)));
input_event(input, EV_KEY, BTN_MODE, !!(b9 & BIT(5)));
input_event(input, EV_KEY, BTN_START, !!(b9 & BIT(6)));
- input_event(input, EV_KEY, BTN_TRIGGER_HAPPY3, !!(b9 & BIT(7)));
- input_event(input, EV_KEY, BTN_TRIGGER_HAPPY4, !!(b10 & BIT(0)));
+ input_event(input, EV_KEY, BTN_GRIPL2, !!(b9 & BIT(7)));
+ input_event(input, EV_KEY, BTN_GRIPR2, !!(b10 & BIT(0)));
input_event(input, EV_KEY, BTN_THUMBL, !!(b10 & BIT(6)));
input_event(input, EV_KEY, BTN_THUMBR, !!(b11 & BIT(2)));
input_event(input, EV_KEY, BTN_DPAD_UP, !!(b9 & BIT(0)));
@@ -1669,8 +1666,8 @@ static void steam_do_deck_input_event(struct steam_device *steam,
input_event(input, EV_KEY, BTN_DPAD_DOWN, !!(b9 & BIT(3)));
input_event(input, EV_KEY, BTN_THUMB, !!(b10 & BIT(1)));
input_event(input, EV_KEY, BTN_THUMB2, !!(b10 & BIT(2)));
- input_event(input, EV_KEY, BTN_TRIGGER_HAPPY1, !!(b13 & BIT(1)));
- input_event(input, EV_KEY, BTN_TRIGGER_HAPPY2, !!(b13 & BIT(2)));
+ input_event(input, EV_KEY, BTN_GRIPL, !!(b13 & BIT(1)));
+ input_event(input, EV_KEY, BTN_GRIPR, !!(b13 & BIT(2)));
input_event(input, EV_KEY, BTN_BASE, !!(b14 & BIT(2)));
input_sync(input);
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index c57483224db6..9d150ce234f2 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -264,9 +264,6 @@ static void ish_shutdown(struct pci_dev *pdev)
static struct device __maybe_unused *ish_resume_device;
-/* 50ms to get resume response */
-#define WAIT_FOR_RESUME_ACK_MS 50
-
/**
* ish_resume_handler() - Work function to complete resume
* @work: work struct
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 6550ad5bfbb5..d8c3c54a8c0f 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -759,6 +759,9 @@ static void hid_ishtp_cl_resume_handler(struct work_struct *work)
if (ishtp_wait_resume(ishtp_get_ishtp_device(hid_ishtp_cl))) {
client_data->suspended = false;
wake_up_interruptible(&client_data->ishtp_resume_wait);
+ } else {
+ hid_ishtp_trace(client_data, "hid client: wait for resume timed out");
+ dev_err(cl_data_to_dev(client_data), "wait for resume timed out");
}
}
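
Together with the two hunks below that drop the duplicated 50 ms definitions and the ishtp-dev.h hunk that introduces a single 300 ms WAIT_FOR_RESUME_ACK_MS, a resume-ACK timeout is now reported instead of silently ignored. A hedged sketch of the underlying wait pattern (assuming the usual wait_event_interruptible_timeout() semantics, where 0 means timeout; names are illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

#define WAIT_FOR_RESUME_ACK_MS	300	/* as consolidated in ishtp-dev.h below */

/* Sketch only: true if the resume ACK arrived inside the window, false on
 * timeout so the caller can log it as the hunk above now does. */
static bool foo_wait_resume_ack(wait_queue_head_t *wq, bool *resume_pending)
{
	if (!*resume_pending)
		return true;

	return wait_event_interruptible_timeout(*wq, !*resume_pending,
			msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS)) > 0;
}
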
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 5ac7d70a7c84..93a0432e7058 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -852,9 +852,6 @@ EXPORT_SYMBOL(ishtp_device);
*/
bool ishtp_wait_resume(struct ishtp_device *dev)
{
- /* 50ms to get resume response */
- #define WAIT_FOR_RESUME_ACK_MS 50
-
/* Waiting to get resume response */
if (dev->resume_flag)
wait_event_interruptible_timeout(dev->resume_wait,
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index ec9f6e87aaf2..23db97ecf21c 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -47,6 +47,9 @@
#define MAX_DMA_DELAY 20
+/* 300ms to get resume response */
+#define WAIT_FOR_RESUME_ACK_MS 300
+
/* ISHTP device states */
enum ishtp_dev_state {
ISHTP_DEV_INITIALIZING = 0,
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index e944a6ccb776..854926b3cfd4 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -419,6 +419,7 @@ static struct quicki2c_device *quicki2c_dev_init(struct pci_dev *pdev, void __io
*/
static void quicki2c_dev_deinit(struct quicki2c_device *qcdev)
{
+ thc_interrupt_quiesce(qcdev->thc_hw, true);
thc_interrupt_enable(qcdev->thc_hw, false);
thc_ltr_unconfig(qcdev->thc_hw);
thc_wot_unconfig(qcdev->thc_hw);
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
index 93d6fa982d60..d412eafcf9ea 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
@@ -77,6 +77,7 @@ struct quicki2c_subip_acpi_parameter {
u16 device_address;
u64 connection_speed;
u8 addressing_mode;
+ u8 reserved;
} __packed;
/**
@@ -126,6 +127,7 @@ struct quicki2c_subip_acpi_config {
u64 HMTD;
u64 HMRD;
u64 HMSL;
+ u8 reserved;
};
/**
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
index 2b794bb481a0..636a68306501 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -1541,7 +1541,7 @@ int thc_i2c_subip_regs_save(struct thc_device *dev)
for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
ret = thc_i2c_subip_pio_read(dev, i2c_subip_regs[i],
- &read_size, (u32 *)&dev->i2c_subip_regs + i);
+ &read_size, &dev->i2c_subip_regs[i]);
if (ret < 0)
return ret;
}
@@ -1564,7 +1564,7 @@ int thc_i2c_subip_regs_restore(struct thc_device *dev)
for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
ret = thc_i2c_subip_pio_write(dev, i2c_subip_regs[i],
- write_size, (u32 *)&dev->i2c_subip_regs + i);
+ write_size, &dev->i2c_subip_regs[i]);
if (ret < 0)
return ret;
}
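
The save/restore fix above is a pointer-arithmetic correction: if i2c_subip_regs is a pointer to an allocated buffer (rather than an inline array), then &dev->i2c_subip_regs is the address of the pointer member itself, so casting it to u32 * and adding i walks across the containing struct instead of into the buffer, while &dev->i2c_subip_regs[i] indexes through the pointer. A small standalone C demo of the difference, using a made-up struct:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_state {
	uint32_t *regs;		/* saved-register buffer (made-up layout) */
	uint32_t other_field;	/* innocent neighbour inside the struct */
};

int main(void)
{
	struct dev_state d = { .regs = calloc(4, sizeof(uint32_t)) };

	if (!d.regs)
		return 1;

	/* Correct: index through the pointer into the buffer. */
	uint32_t *good = &d.regs[2];

	/* Buggy: &d.regs is the address of the pointer member, so "+ 2"
	 * lands inside (or past) struct dev_state, not inside the buffer.
	 * This is the expression the hunks above remove. */
	uint32_t *bad = (uint32_t *)&d.regs + 2;

	printf("buffer element: %p\nstruct offset:  %p\n",
	       (void *)good, (void *)bad);
	free(d.regs);
	return 0;
}
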
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 955b39d22524..9b2c710f8da1 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -684,6 +684,7 @@ static bool wacom_is_art_pen(int tool_id)
case 0x885: /* Intuos3 Marker Pen */
case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x204: /* Art Pen 2 */
is_art_pen = true;
break;
}
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index a07e2eb93c71..1fcd320d6161 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -339,8 +339,8 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
st->gc.get = ltc2992_gpio_get;
st->gc.get_multiple = ltc2992_gpio_get_multiple;
- st->gc.set_rv = ltc2992_gpio_set;
- st->gc.set_multiple_rv = ltc2992_gpio_set_multiple;
+ st->gc.set = ltc2992_gpio_set;
+ st->gc.set_multiple = ltc2992_gpio_set_multiple;
ret = devm_gpiochip_add_data(&st->client->dev, &st->gc, st);
if (ret)
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 52d4000902d5..55e7af3a5f98 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -364,7 +364,7 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
data->gpio.direction_input = ucd9000_gpio_direction_input;
data->gpio.direction_output = ucd9000_gpio_direction_output;
data->gpio.get = ucd9000_gpio_get;
- data->gpio.set_rv = ucd9000_gpio_set;
+ data->gpio.set = ucd9000_gpio_set;
data->gpio.can_sleep = true;
data->gpio.base = -1;
data->gpio.parent = &client->dev;
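
This hunk, like the hid-mcp2200, hid-mcp2221 and ltc2992 hunks above and the ltc4306 mux hunk further down, drops the transitional set_rv/set_multiple_rv gpio_chip members now that .set/.set_multiple carry the int-returning callbacks. A hedged sketch of a setter under that convention (hypothetical driver; the assumed signature is int (*set)(struct gpio_chip *, unsigned int, int)):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

/* Hypothetical driver state, not taken from any driver in this series. */
struct foo_gpio {
	struct gpio_chip gc;
	struct regmap *map;
};

/* New-style setter: bus errors propagate to gpiolib instead of vanishing. */
static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	return regmap_update_bits(foo->map, 0x10 /* made-up register */,
				  BIT(offset), value ? BIT(offset) : 0);
}

static void foo_gpio_init(struct foo_gpio *foo)
{
	foo->gc.set = foo_gpio_set;	/* was .set_rv during the transition */
}
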
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c8d115b58e44..070d014fdc5d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -992,7 +992,6 @@ config I2C_APPLE
tristate "Apple SMBus platform driver"
depends on !I2C_PASEMI
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
Say Y here if you want to use the I2C controller present on Apple
Silicon chips such as the M1.
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 13889f52b6f7..ff2289b52c84 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -155,9 +155,9 @@ static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = {
/* source_clock = 32 MHz */
static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = {
- { I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 40 },
- { I2C_MAX_FAST_MODE_FREQ, 4, 3, 11, 20 },
- { I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 6, 15 },
+ { I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 38 },
+ { I2C_MAX_FAST_MODE_FREQ, 4, 3, 9, 19 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 5, 15 },
{}
};
diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c
index e064e8a4a1f0..cfafe089102a 100644
--- a/drivers/i2c/busses/i2c-rtl9300.c
+++ b/drivers/i2c/busses/i2c-rtl9300.c
@@ -143,10 +143,10 @@ static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len)
return -EIO;
for (i = 0; i < len; i++) {
- if (i % 4 == 0)
- vals[i/4] = 0;
- vals[i/4] <<= 8;
- vals[i/4] |= buf[i];
+ unsigned int shift = (i % 4) * 8;
+ unsigned int reg = i / 4;
+
+ vals[reg] |= buf[i] << shift;
}
return regmap_bulk_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0,
@@ -175,7 +175,7 @@ static int rtl9300_i2c_execute_xfer(struct rtl9300_i2c *i2c, char read_write,
return ret;
ret = regmap_read_poll_timeout(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1,
- val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 2000);
+ val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 100000);
if (ret)
return ret;
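
For reference, the rewritten write loop above packs the byte stream little-endian into the 32-bit data-word registers (byte 0 into bits 7:0 of word 0); the old shift-accumulate built each full word with the first byte in the most significant position instead. A standalone sketch of the new packing (assuming, as the loop does, that the word array starts out zeroed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte i goes to bits (i % 4) * 8 .. (i % 4) * 8 + 7 of word i / 4. */
static void pack_le32(uint32_t *words, size_t nwords,
		      const uint8_t *buf, size_t len)
{
	memset(words, 0, nwords * sizeof(*words));
	for (size_t i = 0; i < len; i++)
		words[i / 4] |= (uint32_t)buf[i] << ((i % 4) * 8);
}

int main(void)
{
	const uint8_t msg[] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t words[2];

	pack_le32(words, 2, msg, sizeof(msg));
	printf("0x%08x 0x%08x\n", words[0], words[1]);	/* 0x44332211 0x00000055 */
	return 0;
}
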
@@ -281,15 +281,19 @@ static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned s
ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
if (ret)
goto out_unlock;
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0]);
+ if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0] + 1);
if (ret)
goto out_unlock;
if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_write(i2c, &data->block[1], data->block[0]);
+ ret = rtl9300_i2c_write(i2c, &data->block[0], data->block[0] + 1);
if (ret)
goto out_unlock;
}
- len = data->block[0];
+ len = data->block[0] + 1;
break;
default:
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index c8b4b404f6c1..e6815f6cae78 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -742,11 +742,14 @@ static void stm32f7_i2c_dma_callback(void *arg)
{
struct stm32f7_i2c_dev *i2c_dev = arg;
struct stm32_i2c_dma *dma = i2c_dev->dma;
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
stm32f7_i2c_disable_dma_req(i2c_dev);
dmaengine_terminate_async(dma->chan_using);
dma_unmap_single(i2c_dev->dev, dma->dma_buf, dma->dma_len,
dma->dma_data_dir);
+ if (!f7_msg->smbus)
+ i2c_put_dma_safe_msg_buf(f7_msg->buf, i2c_dev->msg, true);
complete(&dma->dma_complete);
}
@@ -882,6 +885,7 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
{
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
void __iomem *base = i2c_dev->base;
+ u8 *dma_buf;
u32 cr1, cr2;
int ret;
@@ -931,17 +935,23 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
/* Configure DMA or enable RX/TX interrupt */
i2c_dev->use_dma = false;
- if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN
- && !i2c_dev->atomic) {
- ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma,
- msg->flags & I2C_M_RD,
- f7_msg->count, f7_msg->buf,
- stm32f7_i2c_dma_callback,
- i2c_dev);
- if (!ret)
- i2c_dev->use_dma = true;
- else
- dev_warn(i2c_dev->dev, "can't use DMA\n");
+ if (i2c_dev->dma && !i2c_dev->atomic) {
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, STM32F7_I2C_DMA_LEN_MIN);
+ if (dma_buf) {
+ f7_msg->buf = dma_buf;
+ ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma,
+ msg->flags & I2C_M_RD,
+ f7_msg->count, f7_msg->buf,
+ stm32f7_i2c_dma_callback,
+ i2c_dev);
+ if (ret) {
+ dev_warn(i2c_dev->dev, "can't use DMA\n");
+ i2c_put_dma_safe_msg_buf(f7_msg->buf, msg, false);
+ f7_msg->buf = msg->buf;
+ } else {
+ i2c_dev->use_dma = true;
+ }
+ }
}
if (!i2c_dev->use_dma) {
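
The reworked DMA setup above borrows the core I2C helpers: i2c_get_dma_safe_msg_buf() hands back msg->buf when it is already DMA-capable and at least the threshold length, a bounce buffer otherwise, or NULL when DMA should not be attempted; i2c_put_dma_safe_msg_buf() releases it and, for reads, copies the data back only when the transfer succeeded. A minimal sketch of the pattern (the transfer callbacks are hypothetical, only the two helpers are real API):

#include <linux/i2c.h>

/* Sketch only: acquire a DMA-safe view of msg->buf above a size threshold,
 * fall back to PIO when the helper declines, and release it with
 * xferred == true only on success so read data is copied back. */
static int foo_xfer_one(struct i2c_msg *msg,
			int (*dma_xfer)(u8 *buf, u16 len),
			int (*pio_xfer)(struct i2c_msg *msg))
{
	u8 *dma_buf = i2c_get_dma_safe_msg_buf(msg, 16 /* threshold, bytes */);
	int ret;

	if (!dma_buf)
		return pio_xfer(msg);

	ret = dma_xfer(dma_buf, msg->len);
	i2c_put_dma_safe_msg_buf(dma_buf, msg, ret == 0);
	return ret;
}
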
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4f05afab161f..4eb31b913c1a 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -134,6 +134,8 @@
#define I2C_MST_FIFO_STATUS_TX GENMASK(23, 16)
#define I2C_MST_FIFO_STATUS_RX GENMASK(7, 0)
+#define I2C_MASTER_RESET_CNTRL 0x0a8
+
/* configuration load timeout in microseconds */
#define I2C_CONFIG_LOAD_TIMEOUT 1000000
@@ -184,6 +186,9 @@ enum msg_end_type {
* @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
* provides additional features and allows for longer messages to
* be transferred in one go.
+ * @has_mst_reset: The I2C controller contains the MASTER_RESET_CTRL register,
+ * which provides an alternative to a controller reset when configured
+ * as an I2C master.
* @quirks: I2C adapter quirks for limiting write/read transfer size and not
* allowing 0 length transfers.
* @supports_bus_clear: Bus Clear support to recover from bus hang during
@@ -213,6 +218,7 @@ struct tegra_i2c_hw_feature {
bool has_multi_master_mode;
bool has_slcg_override_reg;
bool has_mst_fifo;
+ bool has_mst_reset;
const struct i2c_adapter_quirks *quirks;
bool supports_bus_clear;
bool has_apb_dma;
@@ -605,6 +611,26 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
return 0;
}
+static int tegra_i2c_master_reset(struct tegra_i2c_dev *i2c_dev)
+{
+ if (!i2c_dev->hw->has_mst_reset)
+ return -EOPNOTSUPP;
+
+ /*
+ * Writing 1 to I2C_MASTER_RESET_CNTRL will reset all internal state of
+ * Master logic including FIFOs. Clear this bit to 0 for normal operation.
+ * SW needs to wait for 2us after assertion and de-assertion of this soft
+ * reset.
+ */
+ i2c_writel(i2c_dev, 0x1, I2C_MASTER_RESET_CNTRL);
+ fsleep(2);
+
+ i2c_writel(i2c_dev, 0x0, I2C_MASTER_RESET_CNTRL);
+ fsleep(2);
+
+ return 0;
+}
+
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode;
@@ -612,6 +638,16 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
int err;
/*
+ * Reset the controller before initializing it.
+ * If device_reset() returns -ENOENT, i.e. when the reset is
+ * not available, the internal software reset will be used if it is
+ * supported by the controller.
+ */
+ err = device_reset(i2c_dev->dev);
+ if (err == -ENOENT)
+ err = tegra_i2c_master_reset(i2c_dev);
+
+ /*
* The reset shouldn't ever fail in practice. The failure will be a
* sign of a severe problem that needs to be resolved. Still we don't
* want to fail the initialization completely because this may break
@@ -619,7 +655,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
* emit a noisy warning on error, which won't stay unnoticed and
* won't hose machine entirely.
*/
- err = device_reset(i2c_dev->dev);
WARN_ON_ONCE(err);
if (IS_DVC(i2c_dev))
@@ -1266,17 +1301,9 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (i2c_dev->dma_mode) {
if (i2c_dev->msg_read) {
- dma_sync_single_for_device(i2c_dev->dma_dev,
- i2c_dev->dma_phys,
- xfer_size, DMA_FROM_DEVICE);
-
err = tegra_i2c_dma_submit(i2c_dev, xfer_size);
if (err)
return err;
- } else {
- dma_sync_single_for_cpu(i2c_dev->dma_dev,
- i2c_dev->dma_phys,
- xfer_size, DMA_TO_DEVICE);
}
}
@@ -1286,11 +1313,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (i2c_dev->dma_mode) {
memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE,
msg->buf, i2c_dev->msg_len);
-
- dma_sync_single_for_device(i2c_dev->dma_dev,
- i2c_dev->dma_phys,
- xfer_size, DMA_TO_DEVICE);
-
err = tegra_i2c_dma_submit(i2c_dev, xfer_size);
if (err)
return err;
@@ -1331,13 +1353,8 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
return -ETIMEDOUT;
}
- if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) {
- dma_sync_single_for_cpu(i2c_dev->dma_dev,
- i2c_dev->dma_phys,
- xfer_size, DMA_FROM_DEVICE);
-
+ if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE)
memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, i2c_dev->msg_len);
- }
}
time_left = tegra_i2c_wait_completion(i2c_dev, &i2c_dev->msg_complete,
@@ -1468,6 +1485,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = false,
.has_apb_dma = true,
@@ -1492,6 +1510,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = false,
.has_apb_dma = true,
@@ -1516,6 +1535,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = true,
@@ -1540,6 +1560,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = true,
@@ -1564,6 +1585,7 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = true,
@@ -1588,6 +1610,7 @@ static const struct tegra_i2c_hw_feature tegra186_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
+ .has_mst_reset = false,
.quirks = &tegra_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = false,
@@ -1612,6 +1635,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = true,
+ .has_mst_reset = true,
.quirks = &tegra194_i2c_quirks,
.supports_bus_clear = true,
.has_apb_dma = false,
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 3445cc3b476b..ed90858a27b7 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -370,6 +370,7 @@ static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = {
* the device works without issues on Windows at what is expected to be
* a 400KHz frequency. The root cause of the issue is not known.
*/
+ { "DLL0945", 0 },
{ "ELAN06FA", 0 },
{}
};
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index c688af270a11..50fbc0d06e62 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -164,7 +164,7 @@ static int ltc4306_gpio_init(struct ltc4306 *data)
data->gpiochip.direction_input = ltc4306_gpio_direction_input;
data->gpiochip.direction_output = ltc4306_gpio_direction_output;
data->gpiochip.get = ltc4306_gpio_get;
- data->gpiochip.set_rv = ltc4306_gpio_set;
+ data->gpiochip.set = ltc4306_gpio_set;
data->gpiochip.set_config = ltc4306_gpio_set_config;
data->gpiochip.owner = THIS_MODULE;
diff --git a/drivers/i2c/muxes/i2c-mux-mule.c b/drivers/i2c/muxes/i2c-mux-mule.c
index 284ff4afeeac..d3b32b794172 100644
--- a/drivers/i2c/muxes/i2c-mux-mule.c
+++ b/drivers/i2c/muxes/i2c-mux-mule.c
@@ -47,7 +47,6 @@ static int mule_i2c_mux_probe(struct platform_device *pdev)
struct mule_i2c_reg_mux *priv;
struct i2c_client *client;
struct i2c_mux_core *muxc;
- struct device_node *dev;
unsigned int readback;
int ndev, ret;
bool old_fw;
@@ -95,7 +94,7 @@ static int mule_i2c_mux_probe(struct platform_device *pdev)
"Failed to register mux remove\n");
/* Create device adapters */
- for_each_child_of_node(mux_dev->of_node, dev) {
+ for_each_child_of_node_scoped(mux_dev->of_node, dev) {
u32 reg;
ret = of_property_read_u32(dev, "reg", &reg);
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index e80e48756914..2396545763ff 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -26,11 +26,12 @@
*
* This function can sleep and thus cannot be called in atomic context.
*
- * Return: 0 in case of success, a negative error core otherwise.
- * -EAGAIN: controller lost address arbitration. Target
- * (IBI, HJ or controller role request) win the bus. Client
- * driver needs to resend the 'xfers' some time later.
- * See I3C spec ver 1.1.1 09-Jun-2021. Section: 5.1.2.2.3.
+ * Return:
+ * * 0 in case of success, a negative error code otherwise.
+ * * -EAGAIN: controller lost address arbitration. Target (IBI, HJ or
+ * controller role request) wins the bus. The client driver needs to resend the
+ * 'xfers' some time later. See I3C spec ver 1.1.1 09-Jun-2021. Section:
+ * 5.1.2.2.3.
*/
int i3c_device_do_priv_xfers(struct i3c_device *dev,
struct i3c_priv_xfer *xfers,
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 433f6088b7ce..0d857cc68cc5 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -9,6 +9,7 @@
#define I3C_INTERNALS_H
#include <linux/i3c/master.h>
+#include <linux/io.h>
void i3c_bus_normaluse_lock(struct i3c_bus *bus);
void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
@@ -22,4 +23,41 @@ int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev);
+
+/**
+ * i3c_writel_fifo - Write data buffer to 32bit FIFO
+ * @addr: FIFO Address to write to
+ * @buf: Pointer to the data bytes to write
+ * @nbytes: Number of bytes to write
+ */
+static inline void i3c_writel_fifo(void __iomem *addr, const void *buf,
+ int nbytes)
+{
+ writesl(addr, buf, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, buf + (nbytes & ~3), nbytes & 3);
+ writel(tmp, addr);
+ }
+}
+
+/**
+ * i3c_readl_fifo - Read data buffer from 32bit FIFO
+ * @addr: FIFO Address to read from
+ * @buf: Pointer to the buffer to store read bytes
+ * @nbytes: Number of bytes to read
+ */
+static inline void i3c_readl_fifo(const void __iomem *addr, void *buf,
+ int nbytes)
+{
+ readsl(addr, buf, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp;
+
+ tmp = readl(addr);
+ memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+}
+
#endif /* I3C_INTERNAL_H */
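
The two helpers give the "32-bit words, then a zero-padded tail" FIFO access a single home; dw-i3c-master and i3c-master-cdns are converted to them below, and the cdns IBI path in particular loses what looks like a tail-handling slip (it tested nbytes % 3 where the equivalent open-coded readers use nbytes & 3). A hedged usage sketch for a hypothetical controller with one data-port register:

#include <linux/io.h>
#include <linux/types.h>
/* in-tree users get i3c_writel_fifo()/i3c_readl_fifo() from "../internals.h" */

#define FOO_DATA_PORT	0x40	/* made-up FIFO data register offset */

/* Illustrative only: push a payload into and pull a response out of a
 * 32-bit FIFO window; the helpers pad or truncate the partial last word. */
static void foo_fifo_xfer(void __iomem *regs, const u8 *tx, int tx_len,
			  u8 *rx, int rx_len)
{
	i3c_writel_fifo(regs + FOO_DATA_PORT, tx, tx_len);
	/* ... wait for the controller to signal completion ... */
	i3c_readl_fifo(regs + FOO_DATA_PORT, rx, rx_len);
}
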
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index fd81871609d9..2ef898a8fd80 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -141,7 +141,7 @@ static ssize_t bcr_show(struct device *dev,
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
- ret = sprintf(buf, "%x\n", desc->info.bcr);
+ ret = sprintf(buf, "0x%02x\n", desc->info.bcr);
i3c_bus_normaluse_unlock(bus);
return ret;
@@ -158,7 +158,7 @@ static ssize_t dcr_show(struct device *dev,
i3c_bus_normaluse_lock(bus);
desc = dev_to_i3cdesc(dev);
- ret = sprintf(buf, "%x\n", desc->info.dcr);
+ ret = sprintf(buf, "0x%02x\n", desc->info.dcr);
i3c_bus_normaluse_unlock(bus);
return ret;
@@ -727,12 +727,12 @@ static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
switch (i3cbus->mode) {
case I3C_BUS_MODE_PURE:
if (!i3cbus->scl_rate.i3c)
- i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ i3cbus->scl_rate.i3c = I3C_BUS_I3C_SCL_TYP_RATE;
break;
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
if (!i3cbus->scl_rate.i3c)
- i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+ i3cbus->scl_rate.i3c = I3C_BUS_I3C_SCL_TYP_RATE;
if (!i3cbus->scl_rate.i2c)
i3cbus->scl_rate.i2c = max_i2c_scl_rate;
break;
@@ -754,8 +754,8 @@ static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
* I3C/I2C frequency may have been overridden, check that user-provided
* values are not exceeding max possible frequency.
*/
- if (i3cbus->scl_rate.i3c > I3C_BUS_MAX_I3C_SCL_RATE ||
- i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_RATE)
+ if (i3cbus->scl_rate.i3c > I3C_BUS_I3C_SCL_MAX_RATE ||
+ i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE)
return -EINVAL;
return 0;
@@ -837,14 +837,14 @@ static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
return -EINVAL;
if (!master->ops->send_ccc_cmd)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))
return -EINVAL;
if (master->ops->supports_ccc_cmd &&
!master->ops->supports_ccc_cmd(master, cmd))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = master->ops->send_ccc_cmd(master, cmd);
if (ret) {
@@ -1439,7 +1439,7 @@ static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
if (dev->info.bcr & I3C_BCR_HDR_CAP) {
ret = i3c_master_gethdrcap_locked(master, &dev->info);
- if (ret)
+ if (ret && ret != -EOPNOTSUPP)
return ret;
}
@@ -2210,7 +2210,7 @@ of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
*/
if (boardinfo->base.flags & I2C_CLIENT_TEN) {
dev_err(dev, "I2C device with 10 bit address not supported.");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
/* LVR is encoded in reg[2]. */
@@ -2340,13 +2340,13 @@ static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
return -EINVAL;
if (!master->ops->i2c_xfers)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* Doing transfers to different devices is not supported. */
addr = xfers[0].addr;
for (i = 1; i < nxfers; i++) {
if (addr != xfers[i].addr)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
i3c_bus_normaluse_lock(&master->bus);
@@ -2467,6 +2467,8 @@ static int i3c_i2c_notifier_call(struct notifier_block *nb, unsigned long action
case BUS_NOTIFY_DEL_DEVICE:
ret = i3c_master_i2c_detach(adap, client);
break;
+ default:
+ ret = -EINVAL;
}
i3c_bus_maintenance_unlock(&master->bus);
@@ -2766,7 +2768,7 @@ static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
* controller)
* @ops: the master controller operations
* @secondary: true if you are registering a secondary master. Will return
- * -ENOTSUPP if set to true since secondary masters are not yet
+ * -EOPNOTSUPP if set to true since secondary masters are not yet
* supported
*
* This function takes care of everything for you:
@@ -2785,7 +2787,7 @@ int i3c_master_register(struct i3c_master_controller *master,
const struct i3c_master_controller_ops *ops,
bool secondary)
{
- unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE;
struct i3c_bus *i3cbus = i3c_master_get_bus(master);
enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
struct i2c_dev_boardinfo *i2cbi;
@@ -2793,7 +2795,7 @@ int i3c_master_register(struct i3c_master_controller *master,
/* We do not support secondary masters yet. */
if (secondary)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = i3c_master_check_ops(ops);
if (ret)
@@ -2844,7 +2846,7 @@ int i3c_master_register(struct i3c_master_controller *master,
}
if (i2cbi->lvr & I3C_LVR_I2C_FM_MODE)
- i2c_scl_rate = I3C_BUS_I2C_FM_SCL_RATE;
+ i2c_scl_rate = I3C_BUS_I2C_FM_SCL_MAX_RATE;
}
ret = i3c_bus_set_mode(i3cbus, mode, i2c_scl_rate);
@@ -2954,7 +2956,7 @@ int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
return -EINVAL;
if (!master->ops->priv_xfers)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return master->ops->priv_xfers(dev, xfers, nxfers);
}
@@ -3004,7 +3006,7 @@ int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
int ret;
if (!master->ops->request_ibi)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (dev->ibi)
return -EBUSY;
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 7b30db3253af..13df2944f2ec 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -64,3 +64,13 @@ config MIPI_I3C_HCI_PCI
This driver can also be built as a module. If so, the module will be
called mipi-i3c-hci-pci.
+
+config RENESAS_I3C
+ tristate "Renesas I3C controller driver"
+ depends on HAS_IOMEM
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Support the Renesas I3C controller as found in some RZ variants.
+
+ This driver can also be built as a module. If so, the module will be
+ called renesas-i3c.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index 3e97960160bc..aac74f3e3851 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
obj-$(CONFIG_AST2600_I3C_MASTER) += ast2600-i3c-master.o
obj-$(CONFIG_SVC_I3C_MASTER) += svc-i3c-master.o
obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci/
+obj-$(CONFIG_RENESAS_I3C) += renesas-i3c.o
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 611c22b72c15..974122b2d20e 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -23,6 +23,7 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#include "../internals.h"
#include "dw-i3c-master.h"
#define DEVICE_CTRL 0x0
@@ -336,37 +337,19 @@ static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
const u8 *bytes, int nbytes)
{
- writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp = 0;
-
- memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
- writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
- }
-}
-
-static void dw_i3c_master_read_fifo(struct dw_i3c_master *master,
- int reg, u8 *bytes, int nbytes)
-{
- readsl(master->regs + reg, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp;
-
- readsl(master->regs + reg, &tmp, 1);
- memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
- }
+ i3c_writel_fifo(master->regs + RX_TX_DATA_PORT, bytes, nbytes);
}
static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
u8 *bytes, int nbytes)
{
- return dw_i3c_master_read_fifo(master, RX_TX_DATA_PORT, bytes, nbytes);
+ i3c_readl_fifo(master->regs + RX_TX_DATA_PORT, bytes, nbytes);
}
static void dw_i3c_master_read_ibi_fifo(struct dw_i3c_master *master,
u8 *bytes, int nbytes)
{
- return dw_i3c_master_read_fifo(master, IBI_QUEUE_STATUS, bytes, nbytes);
+ i3c_readl_fifo(master->regs + IBI_QUEUE_STATUS, bytes, nbytes);
}
static struct dw_i3c_xfer *
@@ -622,14 +605,14 @@ static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
core_period = DIV_ROUND_UP(1000000000, core_rate);
lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
- hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE) - lcnt;
scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
SCL_I2C_FMP_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
master->i2c_fmp_timing = scl_timing;
lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
- hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_MAX_RATE) - lcnt;
scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
SCL_I2C_FM_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
@@ -699,7 +682,6 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
dw_i3c_master_enable(master);
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -829,7 +811,6 @@ static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
else
ret = dw_i3c_ccc_set(master, ccc);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -912,7 +893,6 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
dw_i3c_master_free_xfer(xfer);
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -932,7 +912,7 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
return 0;
if (i3c_nxfers > master->caps.cmdfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
for (i = 0; i < i3c_nxfers; i++) {
if (i3c_xfers[i].rnw)
@@ -943,7 +923,7 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (ntxwords > master->caps.datafifodepth ||
nrxwords > master->caps.datafifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
if (!xfer)
@@ -998,7 +978,6 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -1093,7 +1072,7 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
return 0;
if (i2c_nxfers > master->caps.cmdfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
for (i = 0; i < i2c_nxfers; i++) {
if (i2c_xfers[i].flags & I2C_M_RD)
@@ -1104,7 +1083,7 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
if (ntxwords > master->caps.datafifodepth ||
nrxwords > master->caps.datafifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
if (!xfer)
@@ -1142,13 +1121,12 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
}
dw_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
dw_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
dw_i3c_master_free_xfer(xfer);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -1316,7 +1294,6 @@ static int dw_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
master->regs + DEVICE_CTRL);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return 0;
}
@@ -1342,7 +1319,6 @@ static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
if (rc) {
dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
}
@@ -1362,7 +1338,6 @@ static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return 0;
}
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index fd3752cea654..97b151564d3d 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -23,6 +23,8 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include "../internals.h"
+
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
@@ -412,7 +414,6 @@ struct cdns_i3c_master {
} xferqueue;
void __iomem *regs;
struct clk *sysclk;
- struct clk *pclk;
struct cdns_i3c_master_caps caps;
unsigned long i3c_scl_lim;
const struct cdns_i3c_data *devdata;
@@ -427,25 +428,13 @@ to_cdns_i3c_master(struct i3c_master_controller *master)
static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
const u8 *bytes, int nbytes)
{
- writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp = 0;
-
- memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
- writesl(master->regs + TX_FIFO, &tmp, 1);
- }
+ i3c_writel_fifo(master->regs + TX_FIFO, bytes, nbytes);
}
static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
u8 *bytes, int nbytes)
{
- readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
- if (nbytes & 3) {
- u32 tmp;
-
- readsl(master->regs + RX_FIFO, &tmp, 1);
- memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
- }
+ i3c_readl_fifo(master->regs + RX_FIFO, bytes, nbytes);
}
static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
@@ -742,7 +731,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
for (i = 0; i < nxfers; i++) {
if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (!nxfers)
@@ -750,7 +739,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (nxfers > master->caps.cmdfifodepth ||
nxfers > master->caps.cmdrfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/*
* First make sure that all transactions (block of transfers separated
@@ -765,7 +754,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (rxslots > master->caps.rxfifodepth ||
txslots > master->caps.txfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
if (!cdns_xfer)
@@ -822,11 +811,11 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
int i, ret = 0;
if (nxfers > master->caps.cmdfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
for (i = 0; i < nxfers; i++) {
if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (xfers[i].flags & I2C_M_RD)
nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
@@ -836,7 +825,7 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
if (ntxwords > master->caps.txfifodepth ||
nrxwords > master->caps.rxfifodepth)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
if (!xfer)
@@ -863,7 +852,7 @@ static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
}
cdns_i3c_master_queue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
cdns_i3c_master_unqueue_xfer(master, xfer);
ret = xfer->ret;
@@ -1330,12 +1319,7 @@ static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
buf = slot->data;
nbytes = IBIR_XFER_BYTES(ibir);
- readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
- if (nbytes % 3) {
- u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
-
- memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
- }
+ i3c_readl_fifo(master->regs + IBI_DATA_FIFO, buf, nbytes);
slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
dev->ibi->max_payload_len);
@@ -1566,6 +1550,7 @@ MODULE_DEVICE_TABLE(of, cdns_i3c_master_of_ids);
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
struct cdns_i3c_master *master;
+ struct clk *pclk;
int ret, irq;
u32 val;
@@ -1581,11 +1566,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(pclk))
+ return PTR_ERR(pclk);
- master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
+ master->sysclk = devm_clk_get_enabled(&pdev->dev, "sysclk");
if (IS_ERR(master->sysclk))
return PTR_ERR(master->sysclk);
@@ -1593,18 +1578,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = clk_prepare_enable(master->pclk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(master->sysclk);
- if (ret)
- goto err_disable_pclk;
-
- if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
- ret = -EINVAL;
- goto err_disable_sysclk;
- }
+ if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER)
+ return -EINVAL;
spin_lock_init(&master->xferqueue.lock);
INIT_LIST_HEAD(&master->xferqueue.list);
@@ -1615,7 +1590,7 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
dev_name(&pdev->dev), master);
if (ret)
- goto err_disable_sysclk;
+ return ret;
platform_set_drvdata(pdev, master);
@@ -1637,29 +1612,15 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
sizeof(*master->ibi.slots),
GFP_KERNEL);
- if (!master->ibi.slots) {
- ret = -ENOMEM;
- goto err_disable_sysclk;
- }
+ if (!master->ibi.slots)
+ return -ENOMEM;
writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
writel(MST_INT_IBIR_THR, master->regs + MST_IER);
writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
- ret = i3c_master_register(&master->base, &pdev->dev,
- &cdns_i3c_master_ops, false);
- if (ret)
- goto err_disable_sysclk;
-
- return 0;
-
-err_disable_sysclk:
- clk_disable_unprepare(master->sysclk);
-
-err_disable_pclk:
- clk_disable_unprepare(master->pclk);
-
- return ret;
+ return i3c_master_register(&master->base, &pdev->dev,
+ &cdns_i3c_master_ops, false);
}
static void cdns_i3c_master_remove(struct platform_device *pdev)
@@ -1668,9 +1629,6 @@ static void cdns_i3c_master_remove(struct platform_device *pdev)
cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
-
- clk_disable_unprepare(master->sysclk);
- clk_disable_unprepare(master->pclk);
}
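
The probe/remove diet above comes from devm_clk_get_enabled(), which gets and enables the clock and registers the matching disable/put with devres, so the clk_prepare_enable() error unwinding and the remove-time clk_disable_unprepare() calls can all go. A minimal sketch of the shape this leaves behind (hypothetical driver):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Clocks that only need to run for the device's lifetime: no goto-based
 * unwinding, no explicit disable in .remove(). */
static int foo_probe(struct platform_device *pdev)
{
	struct clk *pclk, *sysclk;

	pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	sysclk = devm_clk_get_enabled(&pdev->dev, "sysclk");
	if (IS_ERR(sysclk))
		return PTR_ERR(sysclk);

	/* ... map registers, request the IRQ, register the controller ... */
	return 0;
}
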
static struct platform_driver cdns_i3c_master = {
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index bc4538694540..60f1175f1f37 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -395,7 +395,7 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
ret = hci->io->queue_xfer(hci, xfer, nxfers);
if (ret)
goto out;
- if (!wait_for_completion_timeout(&done, HZ) &&
+ if (!wait_for_completion_timeout(&done, m->i2c.timeout) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
ret = -ETIME;
goto out;
diff --git a/drivers/i3c/master/renesas-i3c.c b/drivers/i3c/master/renesas-i3c.c
new file mode 100644
index 000000000000..174d3dc5d276
--- /dev/null
+++ b/drivers/i3c/master/renesas-i3c.c
@@ -0,0 +1,1404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas I3C Controller driver
+ * Copyright (C) 2023-25 Renesas Electronics Corp.
+ *
+ * TODO: IBI support, HotJoin support, Target support
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include "../internals.h"
+
+#define PRTS 0x00
+#define PRTS_PRTMD BIT(0)
+
+#define BCTL 0x14
+#define BCTL_INCBA BIT(0)
+#define BCTL_HJACKCTL BIT(8)
+#define BCTL_ABT BIT(29)
+#define BCTL_BUSE BIT(31)
+
+#define MSDVAD 0x18
+#define MSDVAD_MDYAD(x) FIELD_PREP(GENMASK(21, 16), x)
+#define MSDVAD_MDYADV BIT(31)
+
+#define RSTCTL 0x20
+#define RSTCTL_RI3CRST BIT(0)
+#define RSTCTL_INTLRST BIT(16)
+
+#define INST 0x30
+
+#define IBINCTL 0x58
+#define IBINCTL_NRHJCTL BIT(0)
+#define IBINCTL_NRMRCTL BIT(1)
+#define IBINCTL_NRSIRCTL BIT(3)
+
+#define SVCTL 0x64
+
+#define REFCKCTL 0x70
+#define REFCKCTL_IREFCKS(x) FIELD_PREP(GENMASK(2, 0), x)
+
+#define STDBR 0x74
+#define STDBR_SBRLO(cond, x) FIELD_PREP(GENMASK(7, 0), (x) >> (cond))
+#define STDBR_SBRHO(cond, x) FIELD_PREP(GENMASK(15, 8), (x) >> (cond))
+#define STDBR_SBRLP(x) FIELD_PREP(GENMASK(21, 16), x)
+#define STDBR_SBRHP(x) FIELD_PREP(GENMASK(29, 24), x)
+#define STDBR_DSBRPO BIT(31)
+
+#define EXTBR 0x78
+#define EXTBR_EBRLO(x) FIELD_PREP(GENMASK(7, 0), x)
+#define EXTBR_EBRHO(x) FIELD_PREP(GENMASK(15, 8), x)
+#define EXTBR_EBRLP(x) FIELD_PREP(GENMASK(21, 16), x)
+#define EXTBR_EBRHP(x) FIELD_PREP(GENMASK(29, 24), x)
+
+#define BFRECDT 0x7c
+#define BFRECDT_FRECYC(x) FIELD_PREP(GENMASK(8, 0), x)
+
+#define BAVLCDT 0x80
+#define BAVLCDT_AVLCYC(x) FIELD_PREP(GENMASK(8, 0), x)
+
+#define BIDLCDT 0x84
+#define BIDLCDT_IDLCYC(x) FIELD_PREP(GENMASK(17, 0), x)
+
+#define ACKCTL 0xa0
+#define ACKCTL_ACKT BIT(1)
+#define ACKCTL_ACKTWP BIT(2)
+
+#define SCSTRCTL 0xa4
+#define SCSTRCTL_ACKTWE BIT(0)
+#define SCSTRCTL_RWE BIT(1)
+
+#define SCSTLCTL 0xb0
+
+#define CNDCTL 0x140
+#define CNDCTL_STCND BIT(0)
+#define CNDCTL_SRCND BIT(1)
+#define CNDCTL_SPCND BIT(2)
+
+#define NCMDQP 0x150 /* Normal Command Queue */
+#define NCMDQP_CMD_ATTR(x) FIELD_PREP(GENMASK(2, 0), x)
+#define NCMDQP_IMMED_XFER 0x01
+#define NCMDQP_ADDR_ASSGN 0x02
+#define NCMDQP_TID(x) FIELD_PREP(GENMASK(6, 3), x)
+#define NCMDQP_CMD(x) FIELD_PREP(GENMASK(14, 7), x)
+#define NCMDQP_CP BIT(15)
+#define NCMDQP_DEV_INDEX(x) FIELD_PREP(GENMASK(20, 16), x)
+#define NCMDQP_BYTE_CNT(x) FIELD_PREP(GENMASK(25, 23), x)
+#define NCMDQP_DEV_COUNT(x) FIELD_PREP(GENMASK(29, 26), x)
+#define NCMDQP_MODE(x) FIELD_PREP(GENMASK(28, 26), x)
+#define NCMDQP_RNW(x) FIELD_PREP(GENMASK(29, 29), x)
+#define NCMDQP_ROC BIT(30)
+#define NCMDQP_TOC BIT(31)
+#define NCMDQP_DATA_LENGTH(x) FIELD_PREP(GENMASK(31, 16), x)
+
+#define NRSPQP 0x154 /* Normal Response Queue */
+#define NRSPQP_NO_ERROR 0
+#define NRSPQP_ERROR_CRC 1
+#define NRSPQP_ERROR_PARITY 2
+#define NRSPQP_ERROR_FRAME 3
+#define NRSPQP_ERROR_IBA_NACK 4
+#define NRSPQP_ERROR_ADDRESS_NACK 5
+#define NRSPQP_ERROR_OVER_UNDER_FLOW 6
+#define NRSPQP_ERROR_TRANSF_ABORT 8
+#define NRSPQP_ERROR_I2C_W_NACK_ERR 9
+#define NRSPQP_ERROR_UNSUPPORTED 10
+#define NRSPQP_DATA_LEN(x) FIELD_GET(GENMASK(15, 0), x)
+#define NRSPQP_ERR_STATUS(x) FIELD_GET(GENMASK(31, 28), x)
+
+#define NTDTBP0 0x158 /* Normal Transfer Data Buffer */
+#define NTDTBP0_DEPTH 16
+
+#define NQTHCTL 0x190
+#define NQTHCTL_CMDQTH(x) FIELD_PREP(GENMASK(1, 0), x)
+#define NQTHCTL_IBIDSSZ(x) FIELD_PREP(GENMASK(23, 16), x)
+
+#define NTBTHCTL0 0x194
+
+#define NRQTHCTL 0x1c0
+
+#define BST 0x1d0
+#define BST_STCNDDF BIT(0)
+#define BST_SPCNDDF BIT(1)
+#define BST_NACKDF BIT(4)
+#define BST_TENDF BIT(8)
+
+#define BSTE 0x1d4
+#define BSTE_STCNDDE BIT(0)
+#define BSTE_SPCNDDE BIT(1)
+#define BSTE_NACKDE BIT(4)
+#define BSTE_TENDE BIT(8)
+#define BSTE_ALE BIT(16)
+#define BSTE_TODE BIT(20)
+#define BSTE_WUCNDDE BIT(24)
+#define BSTE_ALL_FLAG (BSTE_STCNDDE | BSTE_SPCNDDE |\
+ BSTE_NACKDE | BSTE_TENDE |\
+ BSTE_ALE | BSTE_TODE | BSTE_WUCNDDE)
+
+#define BIE 0x1d8
+#define BIE_STCNDDIE BIT(0)
+#define BIE_SPCNDDIE BIT(1)
+#define BIE_NACKDIE BIT(4)
+#define BIE_TENDIE BIT(8)
+
+#define NTST 0x1e0
+#define NTST_TDBEF0 BIT(0)
+#define NTST_RDBFF0 BIT(1)
+#define NTST_CMDQEF BIT(3)
+#define NTST_RSPQFF BIT(4)
+#define NTST_TABTF BIT(5)
+#define NTST_TEF BIT(9)
+
+#define NTSTE 0x1e4
+#define NTSTE_TDBEE0 BIT(0)
+#define NTSTE_RDBFE0 BIT(1)
+#define NTSTE_IBIQEFE BIT(2)
+#define NTSTE_CMDQEE BIT(3)
+#define NTSTE_RSPQFE BIT(4)
+#define NTSTE_TABTE BIT(5)
+#define NTSTE_TEE BIT(9)
+#define NTSTE_RSQFE BIT(20)
+#define NTSTE_ALL_FLAG (NTSTE_TDBEE0 | NTSTE_RDBFE0 |\
+ NTSTE_IBIQEFE | NTSTE_CMDQEE |\
+ NTSTE_RSPQFE | NTSTE_TABTE |\
+ NTSTE_TEE | NTSTE_RSQFE)
+
+#define NTIE 0x1e8
+#define NTIE_TDBEIE0 BIT(0)
+#define NTIE_RDBFIE0 BIT(1)
+#define NTIE_IBIQEFIE BIT(2)
+#define NTIE_RSPQFIE BIT(4)
+#define NTIE_RSQFIE BIT(20)
+
+#define BCST 0x210
+#define BCST_BFREF BIT(0)
+
+#define DATBAS(x) (0x224 + 0x8 * (x))
+#define DATBAS_DVSTAD(x) FIELD_PREP(GENMASK(6, 0), x)
+#define DATBAS_DVDYAD(x) FIELD_PREP(GENMASK(23, 16), x)
+
+#define NDBSTLV0 0x398
+#define NDBSTLV0_RDBLV(x) FIELD_GET(GENMASK(15, 8), x)
+
+#define RENESAS_I3C_MAX_DEVS 8
+#define I2C_INIT_MSG -1
+
+enum i3c_internal_state {
+ I3C_INTERNAL_STATE_DISABLED,
+ I3C_INTERNAL_STATE_CONTROLLER_IDLE,
+ I3C_INTERNAL_STATE_CONTROLLER_ENTDAA,
+ I3C_INTERNAL_STATE_CONTROLLER_SETDASA,
+ I3C_INTERNAL_STATE_CONTROLLER_WRITE,
+ I3C_INTERNAL_STATE_CONTROLLER_READ,
+ I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE,
+ I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ,
+};
+
+enum renesas_i3c_event {
+ I3C_COMMAND_ADDRESS_ASSIGNMENT,
+ I3C_WRITE,
+ I3C_READ,
+ I3C_COMMAND_WRITE,
+ I3C_COMMAND_READ,
+};
+
+struct renesas_i3c_cmd {
+ u32 cmd0;
+ u32 len;
+ const void *tx_buf;
+ u32 tx_count;
+ void *rx_buf;
+ u32 rx_count;
+ u32 err;
+ u8 rnw;
+ /* i2c xfer */
+ int i2c_bytes_left;
+ int i2c_is_last;
+ u8 *i2c_buf;
+ const struct i2c_msg *msg;
+};
+
+struct renesas_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ bool is_i2c_xfer;
+ unsigned int ncmds;
+ struct renesas_i3c_cmd cmds[] __counted_by(ncmds);
+};
+
+struct renesas_i3c_xferqueue {
+ struct list_head list;
+ struct renesas_i3c_xfer *cur;
+ /* Lock for accessing the xfer queue */
+ spinlock_t lock;
+};
+
+struct renesas_i3c {
+ struct i3c_master_controller base;
+ enum i3c_internal_state internal_state;
+ u16 maxdevs;
+ u32 free_pos;
+ u32 i2c_STDBR;
+ u32 i3c_STDBR;
+ u8 addrs[RENESAS_I3C_MAX_DEVS];
+ struct renesas_i3c_xferqueue xferqueue;
+ void __iomem *regs;
+ struct clk *tclk;
+};
+
+struct renesas_i3c_i2c_dev_data {
+ u8 index;
+};
+
+struct renesas_i3c_irq_desc {
+ const char *name;
+ irq_handler_t isr;
+ const char *desc;
+};
+
+struct renesas_i3c_config {
+ unsigned int has_pclkrw:1;
+};
+
+static inline void renesas_i3c_reg_update(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 data = readl(reg);
+
+ data &= ~mask;
+ data |= (val & mask);
+ writel(data, reg);
+}
+
+static inline u32 renesas_readl(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void renesas_writel(void __iomem *base, u32 reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+static void renesas_set_bit(void __iomem *base, u32 reg, u32 val)
+{
+ renesas_i3c_reg_update(base + reg, val, val);
+}
+
+static void renesas_clear_bit(void __iomem *base, u32 reg, u32 val)
+{
+ renesas_i3c_reg_update(base + reg, val, 0);
+}
+
+static inline struct renesas_i3c *to_renesas_i3c(struct i3c_master_controller *m)
+{
+ return container_of(m, struct renesas_i3c, base);
+}
+
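+/* DATBAS takes the dynamic address with an odd-parity bit in bit 7 of the DVDYAD field. */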
+static inline u32 datbas_dvdyad_with_parity(u8 addr)
+{
+ return DATBAS_DVDYAD(addr | (parity8(addr) ? 0 : BIT(7)));
+}
+
+static int renesas_i3c_get_free_pos(struct renesas_i3c *i3c)
+{
+ if (!(i3c->free_pos & GENMASK(i3c->maxdevs - 1, 0)))
+ return -ENOSPC;
+
+ return ffs(i3c->free_pos) - 1;
+}
+
+static int renesas_i3c_get_addr_pos(struct renesas_i3c *i3c, u8 addr)
+{
+ int pos;
+
+ for (pos = 0; pos < i3c->maxdevs; pos++) {
+ if (addr == i3c->addrs[pos])
+ return pos;
+ }
+
+ return -EINVAL;
+}
+
+static struct renesas_i3c_xfer *renesas_i3c_alloc_xfer(struct renesas_i3c *i3c,
+ unsigned int ncmds)
+{
+ struct renesas_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void renesas_i3c_start_xfer_locked(struct renesas_i3c *i3c)
+{
+ struct renesas_i3c_xfer *xfer = i3c->xferqueue.cur;
+ struct renesas_i3c_cmd *cmd;
+ u32 cmd1;
+
+ if (!xfer)
+ return;
+
+ cmd = xfer->cmds;
+
+ switch (i3c->internal_state) {
+ case I3C_INTERNAL_STATE_CONTROLLER_ENTDAA:
+ case I3C_INTERNAL_STATE_CONTROLLER_SETDASA:
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RSPQFIE);
+ renesas_writel(i3c->regs, NCMDQP, cmd->cmd0);
+ renesas_writel(i3c->regs, NCMDQP, 0);
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_WRITE:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE:
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RSPQFIE);
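+ /* Writes of up to 4 bytes are sent as an immediate transfer inside the descriptor. */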
+ if (cmd->len <= 4) {
+ cmd->cmd0 |= NCMDQP_CMD_ATTR(NCMDQP_IMMED_XFER);
+ cmd->cmd0 |= NCMDQP_BYTE_CNT(cmd->len);
+ cmd->tx_count = cmd->len;
+ cmd1 = cmd->len == 0 ? 0 : *(u32 *)cmd->tx_buf;
+ } else {
+ cmd1 = NCMDQP_DATA_LENGTH(cmd->len);
+ }
+ renesas_writel(i3c->regs, NCMDQP, cmd->cmd0);
+ renesas_writel(i3c->regs, NCMDQP, cmd1);
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_READ:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ:
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RDBFIE0);
+ cmd1 = NCMDQP_DATA_LENGTH(cmd->len);
+ renesas_writel(i3c->regs, NCMDQP, cmd->cmd0);
+ renesas_writel(i3c->regs, NCMDQP, cmd1);
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the command queue empty flag */
+ renesas_clear_bit(i3c->regs, NTST, NTST_CMDQEF);
+}
+
+static void renesas_i3c_dequeue_xfer_locked(struct renesas_i3c *i3c,
+ struct renesas_i3c_xfer *xfer)
+{
+ if (i3c->xferqueue.cur == xfer)
+ i3c->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+}
+
+static void renesas_i3c_dequeue_xfer(struct renesas_i3c *i3c, struct renesas_i3c_xfer *xfer)
+{
+ scoped_guard(spinlock_irqsave, &i3c->xferqueue.lock)
+ renesas_i3c_dequeue_xfer_locked(i3c, xfer);
+}
+
+static void renesas_i3c_enqueue_xfer(struct renesas_i3c *i3c, struct renesas_i3c_xfer *xfer)
+{
+ reinit_completion(&xfer->comp);
+ scoped_guard(spinlock_irqsave, &i3c->xferqueue.lock) {
+ if (i3c->xferqueue.cur) {
+ list_add_tail(&xfer->node, &i3c->xferqueue.list);
+ } else {
+ i3c->xferqueue.cur = xfer;
+ if (!xfer->is_i2c_xfer)
+ renesas_i3c_start_xfer_locked(i3c);
+ }
+ }
+}
+
+static void renesas_i3c_wait_xfer(struct renesas_i3c *i3c, struct renesas_i3c_xfer *xfer)
+{
+ unsigned long time_left;
+
+ renesas_i3c_enqueue_xfer(i3c, xfer);
+
+ time_left = wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000));
+ if (!time_left)
+ renesas_i3c_dequeue_xfer(i3c, xfer);
+}
+
+static void renesas_i3c_set_prts(struct renesas_i3c *i3c, u32 val)
+{
+ /* Required sequence according to tnrza0140ae */
+ renesas_set_bit(i3c->regs, RSTCTL, RSTCTL_INTLRST);
+ renesas_writel(i3c->regs, PRTS, val);
+ renesas_clear_bit(i3c->regs, RSTCTL, RSTCTL_INTLRST);
+}
+
+static void renesas_i3c_bus_enable(struct i3c_master_controller *m, bool i3c_mode)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ /* Setup either I3C or I2C protocol */
+ if (i3c_mode) {
+ renesas_i3c_set_prts(i3c, 0);
+ /* Revisit: INCBA handling, especially after I2C transfers */
+ renesas_set_bit(i3c->regs, BCTL, BCTL_HJACKCTL | BCTL_INCBA);
+ renesas_set_bit(i3c->regs, MSDVAD, MSDVAD_MDYADV);
+ renesas_writel(i3c->regs, STDBR, i3c->i3c_STDBR);
+ } else {
+ renesas_i3c_set_prts(i3c, PRTS_PRTMD);
+ renesas_writel(i3c->regs, STDBR, i3c->i2c_STDBR);
+ }
+
+ /* Enable I3C bus */
+ renesas_set_bit(i3c->regs, BCTL, BCTL_BUSE);
+}
+
+static int renesas_i3c_reset(struct renesas_i3c *i3c)
+{
+ u32 val;
+
+ renesas_writel(i3c->regs, BCTL, 0);
+ renesas_set_bit(i3c->regs, RSTCTL, RSTCTL_RI3CRST);
+
+ return read_poll_timeout(renesas_readl, val, !(val & RSTCTL_RI3CRST),
+ 0, 1000, false, i3c->regs, RSTCTL);
+}
+
+static int renesas_i3c_bus_init(struct i3c_master_controller *m)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = {};
+ struct i2c_timings t;
+ unsigned long rate;
+ u32 double_SBR, val;
+ int cks, pp_high_ticks, pp_low_ticks, i3c_total_ticks;
+ int od_high_ticks, od_low_ticks, i2c_total_ticks;
+ int ret;
+
+ rate = clk_get_rate(i3c->tclk);
+ if (!rate)
+ return -EINVAL;
+
+ ret = renesas_i3c_reset(i3c);
+ if (ret)
+ return ret;
+
+ i2c_total_ticks = DIV_ROUND_UP(rate, bus->scl_rate.i2c);
+ i3c_total_ticks = DIV_ROUND_UP(rate, bus->scl_rate.i3c);
+
+ i2c_parse_fw_timings(&m->dev, &t, true);
+
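+ /* Halve the reference clock (cks) until the calculated tick counts fit their register fields. */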
+ for (cks = 0; cks < 7; cks++) {
+ /* SCL low-period calculation in Open-drain mode */
+ od_low_ticks = ((i2c_total_ticks * 6) / 10);
+
+ /* SCL clock calculation in Push-Pull mode */
+ if (bus->mode == I3C_BUS_MODE_PURE)
+ pp_high_ticks = ((i3c_total_ticks * 5) / 10);
+ else
+ pp_high_ticks = DIV_ROUND_UP(I3C_BUS_THIGH_MIXED_MAX_NS,
+ NSEC_PER_SEC / rate);
+ pp_low_ticks = i3c_total_ticks - pp_high_ticks;
+
+ if ((od_low_ticks / 2) <= 0xFF && pp_low_ticks < 0x3F)
+ break;
+
+ i2c_total_ticks /= 2;
+ i3c_total_ticks /= 2;
+ rate /= 2;
+ }
+
+ /* SCL clock period calculation in Open-drain mode */
+ if ((od_low_ticks / 2) > 0xFF || pp_low_ticks > 0x3F) {
+ dev_err(&m->dev, "invalid speed (i2c-scl = %lu Hz, i3c-scl = %lu Hz). Too slow.\n",
+ (unsigned long)bus->scl_rate.i2c, (unsigned long)bus->scl_rate.i3c);
+ return -EINVAL;
+ }
+
+ /* SCL high-period calculation in Open-drain mode */
+ od_high_ticks = i2c_total_ticks - od_low_ticks;
+
+ /* Standard Bit Rate setting */
+ double_SBR = od_low_ticks > 0xFF ? 1 : 0;
+ i3c->i3c_STDBR = (double_SBR ? STDBR_DSBRPO : 0) |
+ STDBR_SBRLO(double_SBR, od_low_ticks) |
+ STDBR_SBRHO(double_SBR, od_high_ticks) |
+ STDBR_SBRLP(pp_low_ticks) |
+ STDBR_SBRHP(pp_high_ticks);
+
+ od_low_ticks -= t.scl_fall_ns / (NSEC_PER_SEC / rate) + 1;
+ od_high_ticks -= t.scl_rise_ns / (NSEC_PER_SEC / rate) + 1;
+ i3c->i2c_STDBR = (double_SBR ? STDBR_DSBRPO : 0) |
+ STDBR_SBRLO(double_SBR, od_low_ticks) |
+ STDBR_SBRHO(double_SBR, od_high_ticks) |
+ STDBR_SBRLP(pp_low_ticks) |
+ STDBR_SBRHP(pp_high_ticks);
+ renesas_writel(i3c->regs, STDBR, i3c->i3c_STDBR);
+
+ /* Extended Bit Rate setting */
+ renesas_writel(i3c->regs, EXTBR, EXTBR_EBRLO(od_low_ticks) |
+ EXTBR_EBRHO(od_high_ticks) |
+ EXTBR_EBRLP(pp_low_ticks) |
+ EXTBR_EBRHP(pp_high_ticks));
+
+ renesas_writel(i3c->regs, REFCKCTL, REFCKCTL_IREFCKS(cks));
+
+ /* Disable Slave Mode */
+ renesas_writel(i3c->regs, SVCTL, 0);
+
+ /* Initialize Queue/Buffer threshold */
+ renesas_writel(i3c->regs, NQTHCTL, NQTHCTL_IBIDSSZ(6) |
+ NQTHCTL_CMDQTH(1));
+
+ /* The only supported configuration is two entries. */
+ renesas_writel(i3c->regs, NTBTHCTL0, 0);
+ /* Interrupt when there is one entry in the queue */
+ renesas_writel(i3c->regs, NRQTHCTL, 0);
+
+ /* Enable all Bus/Transfer Status Flags */
+ renesas_writel(i3c->regs, BSTE, BSTE_ALL_FLAG);
+ renesas_writel(i3c->regs, NTSTE, NTSTE_ALL_FLAG);
+
+ /* Interrupt enable settings */
+ renesas_writel(i3c->regs, BIE, BIE_NACKDIE | BIE_TENDIE);
+ renesas_writel(i3c->regs, NTIE, 0);
+
+ /* Clear Status register */
+ renesas_writel(i3c->regs, NTST, 0);
+ renesas_writel(i3c->regs, INST, 0);
+ renesas_writel(i3c->regs, BST, 0);
+
+ /* Hot-Join Acknowledge setting. */
+ renesas_set_bit(i3c->regs, BCTL, BCTL_HJACKCTL);
+
+ renesas_writel(i3c->regs, IBINCTL, IBINCTL_NRHJCTL | IBINCTL_NRMRCTL |
+ IBINCTL_NRSIRCTL);
+
+ renesas_writel(i3c->regs, SCSTLCTL, 0);
+ renesas_set_bit(i3c->regs, SCSTRCTL, SCSTRCTL_ACKTWE);
+
+ /* Bus condition timing */
+ val = DIV_ROUND_UP(I3C_BUS_TBUF_MIXED_FM_MIN_NS, NSEC_PER_SEC / rate);
+ renesas_writel(i3c->regs, BFRECDT, BFRECDT_FRECYC(val));
+
+ val = DIV_ROUND_UP(I3C_BUS_TAVAL_MIN_NS, NSEC_PER_SEC / rate);
+ renesas_writel(i3c->regs, BAVLCDT, BAVLCDT_AVLCYC(val));
+
+ val = DIV_ROUND_UP(I3C_BUS_TIDLE_MIN_NS, NSEC_PER_SEC / rate);
+ renesas_writel(i3c->regs, BIDLCDT, BIDLCDT_IDLCYC(val));
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ renesas_writel(i3c->regs, MSDVAD, MSDVAD_MDYAD(ret) | MSDVAD_MDYADV);
+
+ memset(&info, 0, sizeof(info));
+ info.dyn_addr = ret;
+ return i3c_master_set_info(&i3c->base, &info);
+}
+
+static void renesas_i3c_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ renesas_i3c_reset(i3c);
+}
+
+static int renesas_i3c_daa(struct i3c_master_controller *m)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_cmd *cmd;
+ u32 olddevs, newdevs;
+ u8 last_addr = 0, pos;
+ int ret;
+
+ struct renesas_i3c_xfer *xfer __free(kfree) = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ /* Enable I3C bus. */
+ renesas_i3c_bus_enable(m, true);
+
+ olddevs = ~(i3c->free_pos);
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_ENTDAA;
+
+ /* Setting DATBASn registers for target devices. */
+ for (pos = 0; pos < i3c->maxdevs; pos++) {
+ if (olddevs & BIT(pos))
+ continue;
+
+ ret = i3c_master_get_free_addr(m, last_addr + 1);
+ if (ret < 0)
+ return -ENOSPC;
+
+ i3c->addrs[pos] = ret;
+ last_addr = ret;
+
+ renesas_writel(i3c->regs, DATBAS(pos), datbas_dvdyad_with_parity(ret));
+ }
+
+ init_completion(&xfer->comp);
+ cmd = xfer->cmds;
+ cmd->rx_count = 0;
+
+ ret = renesas_i3c_get_free_pos(i3c);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set up the command descriptor to start the ENTDAA command,
+ * starting at the selected device index.
+ */
+ cmd->cmd0 = NCMDQP_CMD_ATTR(NCMDQP_ADDR_ASSGN) | NCMDQP_ROC |
+ NCMDQP_TID(I3C_COMMAND_ADDRESS_ASSIGNMENT) |
+ NCMDQP_CMD(I3C_CCC_ENTDAA) | NCMDQP_DEV_INDEX(ret) |
+ NCMDQP_DEV_COUNT(i3c->maxdevs - ret) | NCMDQP_TOC;
+
+ renesas_i3c_wait_xfer(i3c, xfer);
+
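+ /* rx_count was filled from the ENTDAA response and holds the number of devices left unassigned. */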
+ newdevs = GENMASK(i3c->maxdevs - cmd->rx_count - 1, 0);
+ newdevs &= ~olddevs;
+
+ for (pos = 0; pos < i3c->maxdevs; pos++) {
+ if (newdevs & BIT(pos))
+ i3c_master_add_i3c_dev_locked(m, i3c->addrs[pos]);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
+static bool renesas_i3c_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_ENTAS(0, true):
+ case I3C_CCC_ENTAS(1, true):
+ case I3C_CCC_ENTAS(2, true):
+ case I3C_CCC_ENTAS(3, true):
+ case I3C_CCC_ENTAS(0, false):
+ case I3C_CCC_ENTAS(1, false):
+ case I3C_CCC_ENTAS(2, false):
+ case I3C_CCC_ENTAS(3, false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_DEFSLVS:
+ case I3C_CCC_SETMWL(true):
+ case I3C_CCC_SETMWL(false):
+ case I3C_CCC_SETMRL(true):
+ case I3C_CCC_SETMRL(false):
+ case I3C_CCC_ENTTM:
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETACCMST:
+ case I3C_CCC_GETMXDS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int renesas_i3c_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ int ret, pos = 0;
+
+ if (ccc->id & I3C_CCC_DIRECT) {
+ pos = renesas_i3c_get_addr_pos(i3c, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+ }
+
+ xfer = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ renesas_i3c_bus_enable(m, true);
+
+ init_completion(&xfer->comp);
+ cmd = xfer->cmds;
+ cmd->rnw = ccc->rnw;
+ cmd->cmd0 = 0;
+
+ /* Calculate the command descriptor. */
+ switch (ccc->id) {
+ case I3C_CCC_SETDASA:
+ renesas_writel(i3c->regs, DATBAS(pos),
+ DATBAS_DVSTAD(ccc->dests[0].addr) |
+ DATBAS_DVDYAD(*(u8 *)ccc->dests[0].payload.data >> 1));
+ cmd->cmd0 = NCMDQP_CMD_ATTR(NCMDQP_ADDR_ASSGN) | NCMDQP_ROC |
+ NCMDQP_TID(I3C_COMMAND_ADDRESS_ASSIGNMENT) |
+ NCMDQP_CMD(I3C_CCC_SETDASA) | NCMDQP_DEV_INDEX(pos) |
+ NCMDQP_DEV_COUNT(0) | NCMDQP_TOC;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_SETDASA;
+ break;
+ default:
+ /* Calculate the command descriptor. */
+ cmd->cmd0 = NCMDQP_TID(I3C_COMMAND_WRITE) | NCMDQP_MODE(0) |
+ NCMDQP_RNW(ccc->rnw) | NCMDQP_CMD(ccc->id) |
+ NCMDQP_ROC | NCMDQP_TOC | NCMDQP_CP |
+ NCMDQP_DEV_INDEX(pos);
+
+ if (ccc->rnw) {
+ cmd->rx_buf = ccc->dests[0].payload.data;
+ cmd->len = ccc->dests[0].payload.len;
+ cmd->rx_count = 0;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ;
+ } else {
+ cmd->tx_buf = ccc->dests[0].payload.data;
+ cmd->len = ccc->dests[0].payload.len;
+ cmd->tx_count = 0;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE;
+ }
+ }
+
+ renesas_i3c_wait_xfer(i3c, xfer);
+
+ ret = xfer->ret;
+ if (ret)
+ ccc->err = I3C_ERROR_M2;
+
+ kfree(xfer);
+
+ return ret;
+}
+
+static int renesas_i3c_priv_xfers(struct i3c_dev_desc *dev, struct i3c_priv_xfer *i3c_xfers,
+ int i3c_nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct renesas_i3c_xfer *xfer;
+ int i;
+
+ /* Enable I3C bus. */
+ renesas_i3c_bus_enable(m, true);
+
+ xfer = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ init_completion(&xfer->comp);
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct renesas_i3c_cmd *cmd = xfer->cmds;
+
+ /* Calculate the Transfer Command Descriptor */
+ cmd->rnw = i3c_xfers[i].rnw;
+ cmd->cmd0 = NCMDQP_DEV_INDEX(data->index) | NCMDQP_MODE(0) |
+ NCMDQP_RNW(cmd->rnw) | NCMDQP_ROC | NCMDQP_TOC;
+
+ if (i3c_xfers[i].rnw) {
+ cmd->rx_count = 0;
+ cmd->cmd0 |= NCMDQP_TID(I3C_READ);
+ cmd->rx_buf = i3c_xfers[i].data.in;
+ cmd->len = i3c_xfers[i].len;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_READ;
+ } else {
+ cmd->tx_count = 0;
+ cmd->cmd0 |= NCMDQP_TID(I3C_WRITE);
+ cmd->tx_buf = i3c_xfers[i].data.out;
+ cmd->len = i3c_xfers[i].len;
+ i3c->internal_state = I3C_INTERNAL_STATE_CONTROLLER_WRITE;
+ }
+
+ if (!i3c_xfers[i].rnw && i3c_xfers[i].len > 4) {
+ i3c_writel_fifo(i3c->regs + NTDTBP0, cmd->tx_buf, cmd->len);
+ if (cmd->len > NTDTBP0_DEPTH * sizeof(u32))
+ renesas_set_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ }
+
+ renesas_i3c_wait_xfer(i3c, xfer);
+ }
+
+ return 0;
+}
+
+static int renesas_i3c_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = renesas_i3c_get_free_pos(i3c);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ i3c->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
+ i3c->free_pos &= ~BIT(pos);
+
+ renesas_writel(i3c->regs, DATBAS(pos), DATBAS_DVSTAD(dev->info.static_addr) |
+ datbas_dvdyad_with_parity(i3c->addrs[pos]));
+ i3c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static int renesas_i3c_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
+ dev->info.static_addr;
+
+ return 0;
+}
+
+static void renesas_i3c_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct renesas_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ i3c_dev_set_master_data(dev, NULL);
+ i3c->addrs[data->index] = 0;
+ i3c->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static int renesas_i3c_i2c_xfers(struct i2c_dev_desc *dev,
+ struct i2c_msg *i2c_xfers,
+ int i2c_nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_cmd *cmd;
+ u8 start_bit = CNDCTL_STCND;
+ int i;
+
+ struct renesas_i3c_xfer *xfer __free(kfree) = renesas_i3c_alloc_xfer(i3c, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ if (!i2c_nxfers)
+ return 0;
+
+ renesas_i3c_bus_enable(m, false);
+
+ init_completion(&xfer->comp);
+ xfer->is_i2c_xfer = true;
+ cmd = xfer->cmds;
+
+ if (!(renesas_readl(i3c->regs, BCST) & BCST_BFREF)) {
+ cmd->err = -EBUSY;
+ return cmd->err;
+ }
+
+ renesas_writel(i3c->regs, BST, 0);
+
+ renesas_i3c_enqueue_xfer(i3c, xfer);
+
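+ /* The data phase runs from the IRQ handlers; this loop only issues (repeated) start conditions and waits. */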
+ for (i = 0; i < i2c_nxfers; i++) {
+ cmd->i2c_bytes_left = I2C_INIT_MSG;
+ cmd->i2c_buf = i2c_xfers[i].buf;
+ cmd->msg = &i2c_xfers[i];
+ cmd->i2c_is_last = (i == i2c_nxfers - 1);
+
+ renesas_set_bit(i3c->regs, BIE, BIE_NACKDIE);
+ renesas_set_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ renesas_set_bit(i3c->regs, BIE, BIE_STCNDDIE);
+
+ /* Issue Start condition */
+ renesas_set_bit(i3c->regs, CNDCTL, start_bit);
+
+ renesas_set_bit(i3c->regs, NTSTE, NTSTE_TDBEE0);
+
+ wait_for_completion_timeout(&xfer->comp, m->i2c.timeout);
+
+ if (cmd->err)
+ break;
+
+ start_bit = CNDCTL_SRCND;
+ }
+
+ renesas_i3c_dequeue_xfer(i3c, xfer);
+ return cmd->err;
+}
+
+static int renesas_i3c_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+ struct renesas_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = renesas_i3c_get_free_pos(i3c);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ i3c->addrs[pos] = dev->addr;
+ i3c->free_pos &= ~BIT(pos);
+ i2c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static void renesas_i3c_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct renesas_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct renesas_i3c *i3c = to_renesas_i3c(m);
+
+ i2c_dev_set_master_data(dev, NULL);
+ i3c->addrs[data->index] = 0;
+ i3c->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static irqreturn_t renesas_i3c_tx_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ u8 val;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (!cmd->i2c_bytes_left)
+ return IRQ_NONE;
+
+ if (cmd->i2c_bytes_left != I2C_INIT_MSG) {
+ val = *cmd->i2c_buf;
+ cmd->i2c_buf++;
+ cmd->i2c_bytes_left--;
+ renesas_writel(i3c->regs, NTDTBP0, val);
+ }
+
+ if (cmd->i2c_bytes_left == 0) {
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ renesas_set_bit(i3c->regs, BIE, BIE_TENDIE);
+ }
+
+ /* Clear the Transmit Buffer Empty status flag. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_TDBEF0);
+ } else {
+ i3c_writel_fifo(i3c->regs + NTDTBP0, cmd->tx_buf, cmd->len);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_resp_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ u32 resp_descriptor = renesas_readl(i3c->regs, NRSPQP);
+ u32 bytes_remaining = 0;
+ u32 ntst, data_len;
+ int ret = 0;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ /* Clear the Response Queue Full status flag. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_RSPQFF);
+
+ data_len = NRSPQP_DATA_LEN(resp_descriptor);
+
+ switch (i3c->internal_state) {
+ case I3C_INTERNAL_STATE_CONTROLLER_ENTDAA:
+ cmd->rx_count = data_len;
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_WRITE:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_WRITE:
+ /* Disable the transmit IRQ if it hasn't been disabled already. */
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ break;
+ case I3C_INTERNAL_STATE_CONTROLLER_READ:
+ case I3C_INTERNAL_STATE_CONTROLLER_COMMAND_READ:
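+ /* Drain whatever is still left in the read FIFO for this response. */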
+ if (NDBSTLV0_RDBLV(renesas_readl(i3c->regs, NDBSTLV0)) && !cmd->err)
+ bytes_remaining = data_len - cmd->rx_count;
+
+ i3c_readl_fifo(i3c->regs + NTDTBP0, cmd->rx_buf, bytes_remaining);
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_RDBFIE0);
+ break;
+ default:
+ break;
+ }
+
+ switch (NRSPQP_ERR_STATUS(resp_descriptor)) {
+ case NRSPQP_NO_ERROR:
+ break;
+ case NRSPQP_ERROR_PARITY:
+ case NRSPQP_ERROR_IBA_NACK:
+ case NRSPQP_ERROR_TRANSF_ABORT:
+ case NRSPQP_ERROR_CRC:
+ case NRSPQP_ERROR_FRAME:
+ ret = -EIO;
+ break;
+ case NRSPQP_ERROR_OVER_UNDER_FLOW:
+ ret = -ENOSPC;
+ break;
+ case NRSPQP_ERROR_UNSUPPORTED:
+ ret = -EOPNOTSUPP;
+ break;
+ case NRSPQP_ERROR_I2C_W_NACK_ERR:
+ case NRSPQP_ERROR_ADDRESS_NACK:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ /*
+ * If the transfer was aborted, then the abort flag must be cleared
+ * before notifying the application that a transfer has completed.
+ */
+ ntst = renesas_readl(i3c->regs, NTST);
+ if (ntst & NTST_TABTF)
+ renesas_clear_bit(i3c->regs, BCTL, BCTL_ABT);
+
+ /* Clear error status flags. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_TEF | NTST_TABTF);
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&i3c->xferqueue.list,
+ struct renesas_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ i3c->xferqueue.cur = xfer;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_tend_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (renesas_readl(i3c->regs, BST) & BST_NACKDF) {
+ /* We got a NACK */
+ renesas_readl(i3c->regs, NTDTBP0); /* dummy read */
+ renesas_clear_bit(i3c->regs, BST, BST_NACKDF);
+ cmd->err = -ENXIO;
+ } else if (cmd->i2c_bytes_left) {
+ renesas_set_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ return IRQ_NONE;
+ }
+
+ if (cmd->i2c_is_last || cmd->err) {
+ renesas_clear_bit(i3c->regs, BIE, BIE_TENDIE);
+ renesas_set_bit(i3c->regs, BIE, BIE_SPCNDDIE);
+ renesas_set_bit(i3c->regs, CNDCTL, CNDCTL_SPCND);
+ } else {
+ /* Transfer is complete, but do not send STOP */
+ renesas_clear_bit(i3c->regs, NTSTE, NTSTE_TDBEE0);
+ renesas_clear_bit(i3c->regs, BIE, BIE_TENDIE);
+ xfer->ret = 0;
+ complete(&xfer->comp);
+ }
+ }
+
+ /* Clear the Transmit End detection flag. */
+ renesas_clear_bit(i3c->regs, BST, BST_TENDF);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_rx_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ int read_bytes;
+
+ /* If resp_isr already read the data and updated 'xfer', we can just leave */
+ if (!(renesas_readl(i3c->regs, NTIE) & NTIE_RDBFIE0))
+ return IRQ_NONE;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (!cmd->i2c_bytes_left)
+ return IRQ_NONE;
+
+ if (cmd->i2c_bytes_left == I2C_INIT_MSG) {
+ cmd->i2c_bytes_left = cmd->msg->len;
+ renesas_set_bit(i3c->regs, SCSTRCTL, SCSTRCTL_RWE);
+ renesas_readl(i3c->regs, NTDTBP0); /* dummy read */
+ if (cmd->i2c_bytes_left == 1)
+ renesas_writel(i3c->regs, ACKCTL, ACKCTL_ACKT | ACKCTL_ACKTWP);
+ return IRQ_HANDLED;
+ }
+
+ if (cmd->i2c_bytes_left == 1) {
+ /* STOP must come before we set ACKCTL! */
+ if (cmd->i2c_is_last) {
+ renesas_set_bit(i3c->regs, BIE, BIE_SPCNDDIE);
+ renesas_clear_bit(i3c->regs, BST, BST_SPCNDDF);
+ renesas_set_bit(i3c->regs, CNDCTL, CNDCTL_SPCND);
+ }
+ renesas_writel(i3c->regs, ACKCTL, ACKCTL_ACKT | ACKCTL_ACKTWP);
+ } else {
+ renesas_writel(i3c->regs, ACKCTL, ACKCTL_ACKTWP);
+ }
+
+ /* Reading the data register acks the receive interrupt */
+ *cmd->i2c_buf = renesas_readl(i3c->regs, NTDTBP0);
+ cmd->i2c_buf++;
+ cmd->i2c_bytes_left--;
+ } else {
+ read_bytes = NDBSTLV0_RDBLV(renesas_readl(i3c->regs, NDBSTLV0)) * sizeof(u32);
+ i3c_readl_fifo(i3c->regs + NTDTBP0, cmd->rx_buf, read_bytes);
+ cmd->rx_count = read_bytes;
+ }
+
+ /* Clear the Read Buffer Full status flag. */
+ renesas_clear_bit(i3c->regs, NTST, NTST_RDBFF0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_stop_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+
+ /* read back registers to confirm writes have fully propagated */
+ renesas_writel(i3c->regs, BST, 0);
+ renesas_readl(i3c->regs, BST);
+ renesas_writel(i3c->regs, BIE, 0);
+ renesas_clear_bit(i3c->regs, NTST, NTST_TDBEF0 | NTST_RDBFF0);
+ renesas_clear_bit(i3c->regs, SCSTRCTL, SCSTRCTL_RWE);
+
+ xfer->ret = 0;
+ complete(&xfer->comp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t renesas_i3c_start_isr(int irq, void *data)
+{
+ struct renesas_i3c *i3c = data;
+ struct renesas_i3c_xfer *xfer;
+ struct renesas_i3c_cmd *cmd;
+ u8 val;
+
+ scoped_guard(spinlock, &i3c->xferqueue.lock) {
+ xfer = i3c->xferqueue.cur;
+ cmd = xfer->cmds;
+
+ if (xfer->is_i2c_xfer) {
+ if (!cmd->i2c_bytes_left)
+ return IRQ_NONE;
+
+ if (cmd->i2c_bytes_left == I2C_INIT_MSG) {
+ if (cmd->msg->flags & I2C_M_RD) {
+ /* On read, switch over to receive interrupt */
+ renesas_clear_bit(i3c->regs, NTIE, NTIE_TDBEIE0);
+ renesas_set_bit(i3c->regs, NTIE, NTIE_RDBFIE0);
+ } else {
+ /* On write, initialize length */
+ cmd->i2c_bytes_left = cmd->msg->len;
+ }
+
+ val = i2c_8bit_addr_from_msg(cmd->msg);
+ renesas_writel(i3c->regs, NTDTBP0, val);
+ }
+ }
+
+ renesas_clear_bit(i3c->regs, BIE, BIE_STCNDDIE);
+ renesas_clear_bit(i3c->regs, BST, BST_STCNDDF);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct i3c_master_controller_ops renesas_i3c_ops = {
+ .bus_init = renesas_i3c_bus_init,
+ .bus_cleanup = renesas_i3c_bus_cleanup,
+ .attach_i3c_dev = renesas_i3c_attach_i3c_dev,
+ .reattach_i3c_dev = renesas_i3c_reattach_i3c_dev,
+ .detach_i3c_dev = renesas_i3c_detach_i3c_dev,
+ .do_daa = renesas_i3c_daa,
+ .supports_ccc_cmd = renesas_i3c_supports_ccc_cmd,
+ .send_ccc_cmd = renesas_i3c_send_ccc_cmd,
+ .priv_xfers = renesas_i3c_priv_xfers,
+ .attach_i2c_dev = renesas_i3c_attach_i2c_dev,
+ .detach_i2c_dev = renesas_i3c_detach_i2c_dev,
+ .i2c_xfers = renesas_i3c_i2c_xfers,
+};
+
+static const struct renesas_i3c_irq_desc renesas_i3c_irqs[] = {
+ { .name = "resp", .isr = renesas_i3c_resp_isr, .desc = "i3c-resp" },
+ { .name = "rx", .isr = renesas_i3c_rx_isr, .desc = "i3c-rx" },
+ { .name = "tx", .isr = renesas_i3c_tx_isr, .desc = "i3c-tx" },
+ { .name = "st", .isr = renesas_i3c_start_isr, .desc = "i3c-start" },
+ { .name = "sp", .isr = renesas_i3c_stop_isr, .desc = "i3c-stop" },
+ { .name = "tend", .isr = renesas_i3c_tend_isr, .desc = "i3c-tend" },
+ { .name = "nack", .isr = renesas_i3c_tend_isr, .desc = "i3c-nack" },
+};
+
+static int renesas_i3c_probe(struct platform_device *pdev)
+{
+ struct renesas_i3c *i3c;
+ struct reset_control *reset;
+ struct clk *clk;
+ const struct renesas_i3c_config *config = of_device_get_match_data(&pdev->dev);
+ int ret, i;
+
+ if (!config)
+ return -ENODATA;
+
+ i3c = devm_kzalloc(&pdev->dev, sizeof(*i3c), GFP_KERNEL);
+ if (!i3c)
+ return -ENOMEM;
+
+ i3c->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(i3c->regs))
+ return PTR_ERR(i3c->regs);
+
+ clk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (config->has_pclkrw) {
+ clk = devm_clk_get_enabled(&pdev->dev, "pclkrw");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ i3c->tclk = devm_clk_get_enabled(&pdev->dev, "tclk");
+ if (IS_ERR(i3c->tclk))
+ return PTR_ERR(i3c->tclk);
+
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, "tresetn");
+ if (IS_ERR(reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+ "Error: missing tresetn ctrl\n");
+
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, "presetn");
+ if (IS_ERR(reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+ "Error: missing presetn ctrl\n");
+
+ spin_lock_init(&i3c->xferqueue.lock);
+ INIT_LIST_HEAD(&i3c->xferqueue.list);
+
+ ret = renesas_i3c_reset(i3c);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(renesas_i3c_irqs); i++) {
+ ret = platform_get_irq_byname(pdev, renesas_i3c_irqs[i].name);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, ret, renesas_i3c_irqs[i].isr,
+ 0, renesas_i3c_irqs[i].desc, i3c);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, i3c);
+
+ i3c->maxdevs = RENESAS_I3C_MAX_DEVS;
+ i3c->free_pos = GENMASK(i3c->maxdevs - 1, 0);
+
+ return i3c_master_register(&i3c->base, &pdev->dev, &renesas_i3c_ops, false);
+}
+
+static void renesas_i3c_remove(struct platform_device *pdev)
+{
+ struct renesas_i3c *i3c = platform_get_drvdata(pdev);
+
+ i3c_master_unregister(&i3c->base);
+}
+
+static const struct renesas_i3c_config empty_i3c_config = {
+};
+
+static const struct renesas_i3c_config r9a09g047_i3c_config = {
+ .has_pclkrw = 1,
+};
+
+static const struct of_device_id renesas_i3c_of_ids[] = {
+ { .compatible = "renesas,r9a08g045-i3c", .data = &empty_i3c_config },
+ { .compatible = "renesas,r9a09g047-i3c", .data = &r9a09g047_i3c_config },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, renesas_i3c_of_ids);
+
+static struct platform_driver renesas_i3c = {
+ .probe = renesas_i3c_probe,
+ .remove = renesas_i3c_remove,
+ .driver = {
+ .name = "renesas-i3c",
+ .of_match_table = renesas_i3c_of_ids,
+ },
+};
+module_platform_driver(renesas_i3c);
+
+MODULE_AUTHOR("Wolfram Sang <wsa+renesas@sang-engineering.com>");
+MODULE_AUTHOR("Renesas BSP teams");
+MODULE_DESCRIPTION("Renesas I3C controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 7e1a7cb94b43..701ae165b25b 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -104,6 +104,7 @@
#define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
#define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
#define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
+#define SVC_I3C_MDATACTRL_TXCOUNT(x) FIELD_GET(GENMASK(20, 16), (x))
#define SVC_I3C_MDATACTRL_TXFULL BIT(30)
#define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
@@ -664,7 +665,6 @@ static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
}
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
@@ -779,7 +779,6 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
goto rpm_out;
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
@@ -801,7 +800,6 @@ static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
/* Disable master */
writel(0, master->regs + SVC_I3C_MCONFIG);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
}
@@ -1207,7 +1205,6 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
dev_err(master->dev, "Cannot handle such a list of devices");
rpm_out:
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
@@ -1304,14 +1301,19 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
* FIFO start filling as soon as possible after EmitStartAddr.
*/
if (svc_has_quirk(master, SVC_I3C_QUIRK_FIFO_EMPTY) && !rnw && xfer_len) {
- u32 end = xfer_len > SVC_I3C_FIFO_SIZE ? 0 : SVC_I3C_MWDATAB_END;
- u32 len = min_t(u32, xfer_len, SVC_I3C_FIFO_SIZE);
-
- writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
- /* Mark END bit if this is the last byte */
- writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
- xfer_len -= len;
- out += len;
+ u32 space, end, len;
+
+ reg = readl(master->regs + SVC_I3C_MDATACTRL);
+ space = SVC_I3C_FIFO_SIZE - SVC_I3C_MDATACTRL_TXCOUNT(reg);
+ if (space) {
+ end = xfer_len > space ? 0 : SVC_I3C_MWDATAB_END;
+ len = min_t(u32, xfer_len, space);
+ writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
+ /* Mark END bit if this is the last byte */
+ writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
+ xfer_len -= len;
+ out += len;
+ }
}
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
@@ -1511,7 +1513,6 @@ static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
}
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
}
@@ -1708,7 +1709,7 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
mutex_lock(&master->lock);
svc_i3c_master_enqueue_xfer(master, xfer);
- if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
svc_i3c_master_dequeue_xfer(master, xfer);
mutex_unlock(&master->lock);
@@ -1801,7 +1802,6 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return ret;
@@ -1834,7 +1834,6 @@ static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
if (!master->enabled_events)
svc_i3c_master_disable_interrupts(master);
- pm_runtime_mark_last_busy(master->dev);
pm_runtime_put_autosuspend(master->dev);
return 0;
@@ -1954,7 +1953,6 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (ret)
goto rpm_disable;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 73747d20df85..91a7b7e7c0c8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1679,7 +1679,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
};
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
- X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
+ X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL),
{}
};
diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c
index bda370c0f660..8380b237831c 100644
--- a/drivers/iio/accel/sca3300.c
+++ b/drivers/iio/accel/sca3300.c
@@ -477,7 +477,7 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct sca3300_data *data = iio_priv(indio_dev);
int bit, ret, val, i = 0;
- IIO_DECLARE_BUFFER_WITH_TS(s16, channels, SCA3300_SCAN_MAX);
+ IIO_DECLARE_BUFFER_WITH_TS(s16, channels, SCA3300_SCAN_MAX) = { };
iio_for_each_active_channel(indio_dev, bit) {
ret = sca3300_read_reg(data, indio_dev->channels[bit].address, &val);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 6de2abad0197..24f2572c487e 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1300,7 +1300,7 @@ config RN5T618_ADC
config ROHM_BD79124
tristate "Rohm BD79124 ADC driver"
- depends on I2C
+ depends on I2C && GPIOLIB
select REGMAP_I2C
select IIO_ADC_HELPER
help
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index 6cf790ff3eb5..dcdb5778f7d6 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -2064,7 +2064,7 @@ static int ad4130_probe(struct spi_device *spi)
st->gc.can_sleep = true;
st->gc.init_valid_mask = ad4130_gpio_init_valid_mask;
st->gc.get_direction = ad4130_gpio_get_direction;
- st->gc.set_rv = ad4130_gpio_set;
+ st->gc.set = ad4130_gpio_set;
ret = devm_gpiochip_add_data(dev, &st->gc, st);
if (ret)
diff --git a/drivers/iio/adc/ad4170-4.c b/drivers/iio/adc/ad4170-4.c
index 6cd84d6fb08b..efaed92191f1 100644
--- a/drivers/iio/adc/ad4170-4.c
+++ b/drivers/iio/adc/ad4170-4.c
@@ -1807,7 +1807,7 @@ static int ad4170_gpio_init(struct iio_dev *indio_dev)
st->gpiochip.direction_input = ad4170_gpio_direction_input;
st->gpiochip.direction_output = ad4170_gpio_direction_output;
st->gpiochip.get = ad4170_gpio_get;
- st->gpiochip.set_rv = ad4170_gpio_set;
+ st->gpiochip.set = ad4170_gpio_set;
st->gpiochip.owner = THIS_MODULE;
return devm_gpiochip_add_data(&st->spi->dev, &st->gpiochip, indio_dev);
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 9808df2e9242..4d8c6bafd1c3 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -849,7 +849,7 @@ enum {
static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan_spec *chan)
{
struct device *dev = &st->sd.spi->dev;
- struct ad7124_channel *ch = &st->channels[chan->channel];
+ struct ad7124_channel *ch = &st->channels[chan->address];
int ret;
if (ch->syscalib_mode == AD7124_SYSCALIB_ZERO_SCALE) {
@@ -865,8 +865,8 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan
if (ret < 0)
return ret;
- dev_dbg(dev, "offset for channel %d after zero-scale calibration: 0x%x\n",
- chan->channel, ch->cfg.calibration_offset);
+ dev_dbg(dev, "offset for channel %lu after zero-scale calibration: 0x%x\n",
+ chan->address, ch->cfg.calibration_offset);
} else {
ch->cfg.calibration_gain = st->gain_default;
@@ -880,8 +880,8 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan
if (ret < 0)
return ret;
- dev_dbg(dev, "gain for channel %d after full-scale calibration: 0x%x\n",
- chan->channel, ch->cfg.calibration_gain);
+ dev_dbg(dev, "gain for channel %lu after full-scale calibration: 0x%x\n",
+ chan->address, ch->cfg.calibration_gain);
}
return 0;
@@ -924,7 +924,7 @@ static int ad7124_set_syscalib_mode(struct iio_dev *indio_dev,
{
struct ad7124_state *st = iio_priv(indio_dev);
- st->channels[chan->channel].syscalib_mode = mode;
+ st->channels[chan->address].syscalib_mode = mode;
return 0;
}
@@ -934,7 +934,7 @@ static int ad7124_get_syscalib_mode(struct iio_dev *indio_dev,
{
struct ad7124_state *st = iio_priv(indio_dev);
- return st->channels[chan->channel].syscalib_mode;
+ return st->channels[chan->address].syscalib_mode;
}
static const struct iio_enum ad7124_syscalib_mode_enum = {
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 4413207be28f..683146e83ab2 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -200,7 +200,7 @@ struct ad7173_channel_config {
/*
* Following fields are used to compare equality. If you
* make adaptations in it, you most likely also have to adapt
- * ad7173_find_live_config(), too.
+ * ad7173_is_setup_equal(), too.
*/
struct_group(config_props,
bool bipolar;
@@ -561,12 +561,19 @@ static void ad7173_reset_usage_cnts(struct ad7173_state *st)
st->config_usage_counter = 0;
}
-static struct ad7173_channel_config *
-ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg)
+/**
+ * ad7173_is_setup_equal - Compare two channel setups
+ * @cfg1: First channel configuration
+ * @cfg2: Second channel configuration
+ *
+ * Compares all configuration options that affect the registers connected to
+ * SETUP_SEL, namely CONFIGx, FILTERx, GAINx and OFFSETx.
+ *
+ * Returns: true if the setups are identical, false otherwise
+ */
+static bool ad7173_is_setup_equal(const struct ad7173_channel_config *cfg1,
+ const struct ad7173_channel_config *cfg2)
{
- struct ad7173_channel_config *cfg_aux;
- int i;
-
/*
* This is just to make sure that the comparison is adapted after
* struct ad7173_channel_config was changed.
@@ -579,14 +586,22 @@ ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *c
u8 ref_sel;
}));
+ return cfg1->bipolar == cfg2->bipolar &&
+ cfg1->input_buf == cfg2->input_buf &&
+ cfg1->odr == cfg2->odr &&
+ cfg1->ref_sel == cfg2->ref_sel;
+}
+
+static struct ad7173_channel_config *
+ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg)
+{
+ struct ad7173_channel_config *cfg_aux;
+ int i;
+
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
- if (cfg_aux->live &&
- cfg->bipolar == cfg_aux->bipolar &&
- cfg->input_buf == cfg_aux->input_buf &&
- cfg->odr == cfg_aux->odr &&
- cfg->ref_sel == cfg_aux->ref_sel)
+ if (cfg_aux->live && ad7173_is_setup_equal(cfg, cfg_aux))
return cfg_aux;
}
return NULL;
@@ -1228,7 +1243,7 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct ad7173_state *st = iio_priv(indio_dev);
- int i, ret;
+ int i, j, k, ret;
for (i = 0; i < indio_dev->num_channels; i++) {
if (test_bit(i, scan_mask))
@@ -1239,6 +1254,54 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev,
return ret;
}
+ /*
+ * On some chips, there are more channels than setups, so if there were
+ * more unique setups requested than the number of available slots,
+ * ad7173_set_channel() will have written over some of the slots. We
+ * can detect this by making sure each assigned cfg_slot matches the
+ * requested configuration. If it doesn't, we know that the slot was
+ * overwritten by a different channel.
+ */
+ for_each_set_bit(i, scan_mask, indio_dev->num_channels) {
+ const struct ad7173_channel_config *cfg1, *cfg2;
+
+ cfg1 = &st->channels[i].cfg;
+
+ for_each_set_bit(j, scan_mask, indio_dev->num_channels) {
+ cfg2 = &st->channels[j].cfg;
+
+ /*
+ * Only compare configs that are assigned to the same
+ * SETUP_SEL slot and don't compare a channel to itself.
+ */
+ if (i == j || cfg1->cfg_slot != cfg2->cfg_slot)
+ continue;
+
+ /*
+ * If we find two different configs trying to use the
+ * same SETUP_SEL slot, then we know that we
+ * have too many unique configurations requested for
+ * the available slots and at least one was overwritten.
+ */
+ if (!ad7173_is_setup_equal(cfg1, cfg2)) {
+ /*
+ * At this point, there isn't a way to tell
+ * which setups are actually programmed in the
+ * ADC anymore, so we could read them back to
+ * see, but it is simpler to just turn off all
+ * of the live flags so that everything gets
+ * reprogrammed on the next attempt to read a sample.
+ */
+ for (k = 0; k < st->num_channels; k++)
+ st->channels[k].cfg.live = false;
+
+ dev_err(&st->sd.spi->dev,
+ "Too many unique channel configurations requested for scan\n");
+ return -EINVAL;
+ }
+ }
+ }
+
return 0;
}
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index 6f7034b6c266..fa251dc1aae6 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -873,6 +873,7 @@ static const struct ad7380_chip_info adaq4381_4_chip_info = {
.has_hardware_gain = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct spi_offload_config ad7380_offload_config = {
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index a2e061f0cb08..ca8fa91796ca 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -673,7 +673,7 @@ static int ad7768_gpio_init(struct iio_dev *indio_dev)
.direction_input = ad7768_gpio_direction_input,
.direction_output = ad7768_gpio_direction_output,
.get = ad7768_gpio_get,
- .set_rv = ad7768_gpio_set,
+ .set = ad7768_gpio_set,
.owner = THIS_MODULE,
};
diff --git a/drivers/iio/adc/rohm-bd79124.c b/drivers/iio/adc/rohm-bd79124.c
index bb7c93ae4055..06c55c8da93f 100644
--- a/drivers/iio/adc/rohm-bd79124.c
+++ b/drivers/iio/adc/rohm-bd79124.c
@@ -246,8 +246,8 @@ static int bd79124_init_valid_mask(struct gpio_chip *gc,
static const struct gpio_chip bd79124gpo_chip = {
.label = "bd79124-gpo",
.get_direction = bd79124gpo_direction_get,
- .set_rv = bd79124gpo_set,
- .set_multiple_rv = bd79124gpo_set_multiple,
+ .set = bd79124gpo_set,
+ .set_multiple = bd79124gpo_set_multiple,
.init_valid_mask = bd79124_init_valid_mask,
.can_sleep = true,
.ngpio = 8,
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index 9674d48074c9..cadb0446bc29 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -89,7 +89,6 @@ struct rzg2l_adc {
struct completion completion;
struct mutex lock;
u16 last_val[RZG2L_ADC_MAX_CHANNELS];
- bool was_rpm_active;
};
/**
@@ -428,6 +427,8 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
+ platform_set_drvdata(pdev, indio_dev);
+
adc = iio_priv(indio_dev);
adc->hw_params = device_get_match_data(dev);
@@ -460,8 +461,6 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- platform_set_drvdata(pdev, indio_dev);
-
ret = rzg2l_adc_hw_init(dev, adc);
if (ret)
return dev_err_probe(&pdev->dev, ret,
@@ -541,14 +540,9 @@ static int rzg2l_adc_suspend(struct device *dev)
};
int ret;
- if (pm_runtime_suspended(dev)) {
- adc->was_rpm_active = false;
- } else {
- ret = pm_runtime_force_suspend(dev);
- if (ret)
- return ret;
- adc->was_rpm_active = true;
- }
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
ret = reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
if (ret)
@@ -557,9 +551,7 @@ static int rzg2l_adc_suspend(struct device *dev)
return 0;
rpm_restore:
- if (adc->was_rpm_active)
- pm_runtime_force_resume(dev);
-
+ pm_runtime_force_resume(dev);
return ret;
}
@@ -577,11 +569,9 @@ static int rzg2l_adc_resume(struct device *dev)
if (ret)
return ret;
- if (adc->was_rpm_active) {
- ret = pm_runtime_force_resume(dev);
- if (ret)
- goto resets_restore;
- }
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ goto resets_restore;
ret = rzg2l_adc_hw_init(dev, adc);
if (ret)
@@ -590,10 +580,7 @@ static int rzg2l_adc_resume(struct device *dev)
return 0;
rpm_restore:
- if (adc->was_rpm_active) {
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- }
+ pm_runtime_force_suspend(dev);
resets_restore:
reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
return ret;
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 0356ccf23fea..bbe1ce577789 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -648,7 +648,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->chip.direction_input = ti_ads7950_direction_input;
st->chip.direction_output = ti_ads7950_direction_output;
st->chip.get = ti_ads7950_get;
- st->chip.set_rv = ti_ads7950_set;
+ st->chip.set = ti_ads7950_set;
ret = gpiochip_add_data(&st->chip, st);
if (ret) {
diff --git a/drivers/iio/addac/ad74115.c b/drivers/iio/addac/ad74115.c
index 4d8b64048e4f..f8b04d86b01f 100644
--- a/drivers/iio/addac/ad74115.c
+++ b/drivers/iio/addac/ad74115.c
@@ -1577,7 +1577,7 @@ static int ad74115_setup_gpio_chip(struct ad74115_state *st)
.direction_input = ad74115_gpio_direction_input,
.direction_output = ad74115_gpio_direction_output,
.get = ad74115_gpio_get,
- .set_rv = ad74115_gpio_set,
+ .set = ad74115_gpio_set,
};
return devm_gpiochip_add_data(dev, &st->gc, st);
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index a0bb1dbcb7ad..a20b4d48c5f7 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -1425,8 +1425,8 @@ static int ad74413r_probe(struct spi_device *spi)
st->gpo_gpiochip.ngpio = st->num_gpo_gpios;
st->gpo_gpiochip.parent = st->dev;
st->gpo_gpiochip.can_sleep = true;
- st->gpo_gpiochip.set_rv = ad74413r_gpio_set;
- st->gpo_gpiochip.set_multiple_rv = ad74413r_gpio_set_multiple;
+ st->gpo_gpiochip.set = ad74413r_gpio_set;
+ st->gpo_gpiochip.set_multiple = ad74413r_gpio_set_multiple;
st->gpo_gpiochip.set_config = ad74413r_gpio_set_gpo_config;
st->gpo_gpiochip.get_direction =
ad74413r_gpio_get_gpo_direction;
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 5f2cd51723f6..4720733d66b2 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -129,7 +129,7 @@ static int ad5592r_gpio_init(struct ad5592r_state *st)
st->gpiochip.direction_input = ad5592r_gpio_direction_input;
st->gpiochip.direction_output = ad5592r_gpio_direction_output;
st->gpiochip.get = ad5592r_gpio_get;
- st->gpiochip.set_rv = ad5592r_gpio_set;
+ st->gpiochip.set = ad5592r_gpio_set;
st->gpiochip.request = ad5592r_gpio_request;
st->gpiochip.owner = THIS_MODULE;
st->gpiochip.names = ad5592r_gpio_names;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
index 8b15afca498c..271a4788604a 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
@@ -32,8 +32,12 @@ static int inv_icm42600_temp_read(struct inv_icm42600_state *st, s16 *temp)
goto exit;
*temp = (s16)be16_to_cpup(raw);
+ /*
+ * Temperature data is invalid if both accel and gyro are off.
+ * Return -EBUSY in this case.
+ */
if (*temp == INV_ICM42600_DATA_INVALID)
- ret = -EINVAL;
+ ret = -EBUSY;
exit:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index 68f60dc3c79d..32719f584c47 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -639,7 +639,7 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
struct {
__le16 chan[4];
aligned_s64 ts;
- } scan;
+ } scan = { };
int data_result, ret;
mutex_lock(&data->mutex);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 74505c9ec1a0..6cdc8ed53520 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -3213,11 +3213,12 @@ int bmp280_common_probe(struct device *dev,
/* Bring chip out of reset if there is an assigned GPIO line */
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod), "failed to get reset GPIO\n");
+
/* Deassert the signal */
- if (gpiod) {
- dev_info(dev, "release reset\n");
- gpiod_set_value(gpiod, 0);
- }
+ dev_info(dev, "release reset\n");
+ gpiod_set_value(gpiod, 0);
data->regmap = regmap;
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index d1510fe24050..f69db6f2f380 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -938,12 +938,18 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct isl29501_private *isl29501 = iio_priv(indio_dev);
const unsigned long *active_mask = indio_dev->active_scan_mask;
- u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */
-
- if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
- isl29501_register_read(isl29501, REG_DISTANCE, buffer);
+ u32 value;
+ struct {
+ u16 data;
+ aligned_s64 ts;
+ } scan = { };
+
+ if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) {
+ isl29501_register_read(isl29501, REG_DISTANCE, &value);
+ scan.data = value;
+ }
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index cae8e84821d7..205939680fd4 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spi/spi.h>
+#include <linux/types.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
@@ -121,8 +122,15 @@ struct maxim_thermocouple_data {
struct spi_device *spi;
const struct maxim_thermocouple_chip *chip;
char tc_type;
-
- u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
+ /* Buffer for reading up to 2 hardware channels. */
+ struct {
+ union {
+ __be16 raw16;
+ __be32 raw32;
+ __be16 raw[2];
+ };
+ aligned_s64 timestamp;
+ } buffer __aligned(IIO_DMA_MINALIGN);
};
static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
@@ -130,18 +138,16 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
{
unsigned int storage_bytes = data->chip->read_size;
unsigned int shift = chan->scan_type.shift + (chan->address * 8);
- __be16 buf16;
- __be32 buf32;
int ret;
switch (storage_bytes) {
case 2:
- ret = spi_read(data->spi, (void *)&buf16, storage_bytes);
- *val = be16_to_cpu(buf16);
+ ret = spi_read(data->spi, &data->buffer.raw16, storage_bytes);
+ *val = be16_to_cpu(data->buffer.raw16);
break;
case 4:
- ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
- *val = be32_to_cpu(buf32);
+ ret = spi_read(data->spi, &data->buffer.raw32, storage_bytes);
+ *val = be32_to_cpu(data->buffer.raw32);
break;
default:
ret = -EINVAL;
@@ -166,9 +172,9 @@ static irqreturn_t maxim_thermocouple_trigger_handler(int irq, void *private)
struct maxim_thermocouple_data *data = iio_priv(indio_dev);
int ret;
- ret = spi_read(data->spi, data->buffer, data->chip->read_size);
+ ret = spi_read(data->spi, data->buffer.raw, data->chip->read_size);
if (!ret) {
- iio_push_to_buffers_with_ts(indio_dev, data->buffer,
+ iio_push_to_buffers_with_ts(indio_dev, &data->buffer,
sizeof(data->buffer),
iio_get_time_ns(indio_dev));
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index b1c44ec1a3f3..572a91a62a7b 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -115,7 +115,7 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
out_free_map:
if (ib_uses_virt_dma(dev))
- kfree(map->pfn_list);
+ kvfree(map->pfn_list);
else
hmm_dma_map_free(dev->dma_device, map);
return ret;
@@ -287,7 +287,7 @@ static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
mutex_unlock(&umem_odp->umem_mutex);
mmu_interval_notifier_remove(&umem_odp->notifier);
if (ib_uses_virt_dma(dev))
- kfree(umem_odp->map.pfn_list);
+ kvfree(umem_odp->map.pfn_list);
else
hmm_dma_map_free(dev->dma_device, &umem_odp->map);
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 37c2bc3bdba5..260dc67b8b87 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1921,7 +1921,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
ib_srq);
struct bnxt_re_dev *rdev = srq->rdev;
- int rc;
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
@@ -1933,11 +1932,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
return -EINVAL;
srq->qplib_srq.threshold = srq_attr->srq_limit;
- rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
- if (rc) {
- ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
- return rc;
- }
+ bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
+
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to Build and send response back to udata */
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 293b0a96c8e3..df7cf8d68e27 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -2017,6 +2017,28 @@ static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
rdev->nqr = NULL;
}
+/* When DEL_GID fails, the driver does not free the GID context memory.
+ * To avoid that memory leak, free it during unload.
+ */
+static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
+ int i;
+
+ if (!sgid_tbl->active)
+ return;
+
+ ctx_tbl = sgid_tbl->ctx;
+ for (i = 0; i < sgid_tbl->max; i++) {
+ if (sgid_tbl->hw_id[i] == 0xFFFF)
+ continue;
+
+ ctx = ctx_tbl[i];
+ kfree(ctx);
+ }
+}
+
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
{
u8 type;
@@ -2030,6 +2052,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
+ bnxt_re_free_gid_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
&rdev->flags))
bnxt_re_cleanup_res(rdev);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index dfe3177123e5..ee36b3d82cc0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -705,9 +705,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
srq->dbinfo.db = srq->dpi->dbr;
srq->dbinfo.max_slot = 1;
srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
- if (srq->threshold)
- bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
- srq->arm_req = false;
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
return 0;
fail:
@@ -717,24 +715,6 @@ fail:
return rc;
}
-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
- struct bnxt_qplib_srq *srq)
-{
- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- u32 count;
-
- count = __bnxt_qplib_get_avail(srq_hwq);
- if (count > srq->threshold) {
- srq->arm_req = false;
- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
- } else {
- /* Deferred arming */
- srq->arm_req = true;
- }
-
- return 0;
-}
-
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
@@ -776,7 +756,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
struct rq_wqe *srqe;
struct sq_sge *hw_sge;
- u32 count = 0;
int i, next;
spin_lock(&srq_hwq->lock);
@@ -808,15 +787,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
- spin_lock(&srq_hwq->lock);
- count = __bnxt_qplib_get_avail(srq_hwq);
- spin_unlock(&srq_hwq->lock);
/* Ring DB */
bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
- if (srq->arm_req == true && count > srq->threshold) {
- srq->arm_req = false;
- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
- }
return 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ab125f1d949e..4921a214c34c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -546,8 +546,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
- struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 6cd05207ffed..cc5c82d96839 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
if (!pbl->pg_arr)
return -ENOMEM;
+ memset(pbl->pg_arr, 0, pages * sizeof(void *));
pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
if (!pbl->pg_map_arr) {
@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_arr = NULL;
return -ENOMEM;
}
+ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
pbl->pg_count = 0;
pbl->pg_size = sginfo->pgsize;
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 94c211df09d8..fdeec33c71da 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -994,6 +994,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
if (xa_is_err(old_entry))
ret = xa_err(old_entry);
+ else
+ qp->ibqp.qp_num = 1;
} else {
ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
XA_LIMIT(1, dev->attrs.max_qp - 1),
@@ -1031,7 +1033,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (ret)
goto err_out_cmd;
} else {
- init_kernel_qp(dev, qp, attrs);
+ ret = init_kernel_qp(dev, qp, attrs);
+ if (ret)
+ goto err_out_xa;
}
qp->attrs.max_send_sge = attrs->cap.max_send_sge;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 64bca08f3f1a..f82bdd46a917 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -3043,7 +3043,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
if (!hr_dev->is_vf)
hns_roce_free_link_table(hr_dev);
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
free_dip_entry(hr_dev);
}
@@ -5476,7 +5476,7 @@ out:
return ret;
}
-static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
+static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn,
void *buffer)
{
struct hns_roce_v2_scc_context *context;
@@ -5488,7 +5488,7 @@ static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
- qpn);
+ sccn);
if (ret)
goto out;
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index f637b73b946e..230187dda6a0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -100,6 +100,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
struct hns_roce_v2_qp_context qpc;
struct hns_roce_v2_scc_context sccc;
} context = {};
+ u32 sccn = hr_qp->qpn;
int ret;
if (!hr_dev->hw->query_qpc)
@@ -116,7 +117,13 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
!hr_dev->hw->query_sccc)
goto out;
- ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
+ if (hr_qp->cong_type == CONG_TYPE_DIP) {
+ if (!hr_qp->dip)
+ goto out;
+ sccn = hr_qp->dip->dip_idx;
+ }
+
+ ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
if (ret)
ibdev_warn_ratelimited(&hr_dev->ib_dev,
"failed to query SCCC, ret = %d.\n",
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 132a87e52d5c..ac0183a2ff7a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -345,33 +345,15 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
- struct net_device *ndev = skb->dev;
- struct rxe_dev *rxe;
- unsigned int qp_index;
- struct rxe_qp *qp;
+ struct rxe_qp *qp = skb->sk->sk_user_data;
int skb_out;
- rxe = rxe_get_dev_from_net(ndev);
- if (!rxe && is_vlan_dev(ndev))
- rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
- if (WARN_ON(!rxe))
- return;
-
- qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
- if (!qp_index)
- return;
-
- qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
- if (!qp)
- goto put_dev;
-
skb_out = atomic_dec_return(&qp->skb_out);
- if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
+ if (unlikely(qp->need_req_skb &&
+ skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
rxe_sched_task(&qp->send_task);
rxe_put(qp);
-put_dev:
- ib_device_put(&rxe->ib_dev);
sock_put(skb->sk);
}
@@ -383,6 +365,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
sock_hold(sk);
skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
+ rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP))
@@ -405,6 +388,7 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
sock_hold(sk);
skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
+ rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP))
@@ -497,6 +481,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
goto out;
}
+ /* Add a timestamp to the skb. */
+ skb->tstamp = ktime_get();
+
skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
/* FIXME: hold reference to this netdev until life of this skb. */
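
The destructor rework pairs the rxe_get(pkt->qp) taken in rxe_send()/rxe_loopback() with the rxe_put(qp) dropped in rxe_skb_tx_dtor(), so the QP stays valid while packets are in flight. A small userspace sketch of that take-before-queue / drop-in-destructor pattern (types and names are illustrative, not kernel APIs):

#include <assert.h>
#include <stdatomic.h>

struct qp { atomic_int ref; };

static void qp_get(struct qp *qp) { atomic_fetch_add(&qp->ref, 1); }

static int qp_put(struct qp *qp)	/* returns 1 when the last ref drops */
{
	return atomic_fetch_sub(&qp->ref, 1) == 1;
}

/* Stands in for rxe_skb_tx_dtor(): drop the reference taken at send time. */
static void tx_destructor(struct qp *qp) { qp_put(qp); }

int main(void)
{
	struct qp qp = { .ref = 1 };	/* owner's reference */
	int last;

	qp_get(&qp);		/* like rxe_get(pkt->qp) before queueing */
	tx_destructor(&qp);	/* completion path, like the skb destructor */

	last = qp_put(&qp);	/* owner drops the final reference */
	assert(last);
	return 0;
}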
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index f2af3e0aef35..95f1c1c2949d 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
return err;
- qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
+ qp->sk->sk->sk_user_data = qp;
/* pick a source UDP port number for this QP based on
* the source QPN. this spreads traffic for different QPs
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 3a08f57d2211..f7dd32c6e5ba 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -340,18 +340,17 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
if (!sendpage_ok(page[i]))
msg.msg_flags &= ~MSG_SPLICE_PAGES;
bvec_set_page(&bvec, page[i], bytes, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, bytes);
try_page_again:
lock_sock(sk);
- rv = tcp_sendmsg_locked(sk, &msg, size);
+ rv = tcp_sendmsg_locked(sk, &msg, bytes);
release_sock(sk);
if (rv > 0) {
size -= rv;
sent += rv;
if (rv != bytes) {
- offset += rv;
bytes -= rv;
goto try_page_again;
}
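
The siw change bounds each tcp_sendmsg_locked() call by the current page fragment (bytes) instead of the whole remaining transfer (size), and retries the same fragment after a short write. A generic userspace sketch of that per-fragment retry loop, assuming write() as a stand-in for the locked TCP send (nothing here is the siw API):

#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

/* Send exactly `bytes` from one fragment, retrying on short writes. */
static ssize_t send_fragment(int fd, const char *frag, size_t bytes)
{
	size_t done = 0;

	while (done < bytes) {
		ssize_t rv = write(fd, frag + done, bytes - done);

		if (rv <= 0)
			return rv;	/* error or closed peer */
		done += rv;
	}
	return done;
}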
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 930b64d2115e..2cd6e1c9a778 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -7,7 +7,7 @@
obj-$(CONFIG_INPUT) += input-core.o
input-core-y := input.o input-compat.o input-mt.o input-poller.o ff-core.o
-input-core-y += touchscreen.o
+input-core-y += touchscreen.o touch-overlay.o
obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o
obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index b5cbb57ee5f6..90ff6be85cf4 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -1408,8 +1408,12 @@ static void evdev_disconnect(struct input_handle *handle)
}
static const struct input_device_id evdev_ids[] = {
- { .driver_info = 1 }, /* Matches all devices */
- { }, /* Terminating zero entry */
+ {
+ /* Matches all devices */
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_SYN) },
+ },
+ { } /* Terminating zero entry */
};
MODULE_DEVICE_TABLE(input, evdev_ids);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 44887e51e049..1da41324362b 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -971,7 +971,7 @@ static const struct input_device_id *input_match_device(struct input_handler *ha
{
const struct input_device_id *id;
- for (id = handler->id_table; id->flags || id->driver_info; id++) {
+ for (id = handler->id_table; id->flags; id++) {
if (input_match_device_id(dev, id) &&
(!handler->match || handler->match(handler, dev))) {
return id;
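
With evdev now matching via INPUT_DEVICE_ID_MATCH_EVBIT on EV_SYN, every live table entry carries a non-zero .flags, so the match loop can stop on .flags == 0 alone. A tiny standalone sketch of that terminator convention (struct and constant are stand-ins, not the input core's types):

#include <stdio.h>

#define MATCH_EVBIT 0x01	/* stand-in for INPUT_DEVICE_ID_MATCH_EVBIT */

struct dev_id { unsigned int flags; const char *what; };

int main(void)
{
	static const struct dev_id ids[] = {
		{ .flags = MATCH_EVBIT, .what = "any device reporting EV_SYN" },
		{ }			/* terminating zero entry */
	};

	/* mirrors the new input_match_device() loop condition */
	for (const struct dev_id *id = ids; id->flags; id++)
		printf("candidate match: %s\n", id->what);

	return 0;
}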
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 1d8c579b5433..4c94297e17e6 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -442,8 +442,8 @@ static const signed short xpad_btn[] = {
/* used when dpad is mapped to buttons */
static const signed short xpad_btn_pad[] = {
- BTN_TRIGGER_HAPPY1, BTN_TRIGGER_HAPPY2, /* d-pad left, right */
- BTN_TRIGGER_HAPPY3, BTN_TRIGGER_HAPPY4, /* d-pad up, down */
+ BTN_DPAD_LEFT, BTN_DPAD_RIGHT, /* d-pad left, right */
+ BTN_DPAD_UP, BTN_DPAD_DOWN, /* d-pad up, down */
-1 /* terminating entry */
};
@@ -479,8 +479,8 @@ static const signed short xpad_abs_triggers[] = {
/* used when the controller has extra paddle buttons */
static const signed short xpad_btn_paddles[] = {
- BTN_TRIGGER_HAPPY5, BTN_TRIGGER_HAPPY6, /* paddle upper right, lower right */
- BTN_TRIGGER_HAPPY7, BTN_TRIGGER_HAPPY8, /* paddle upper left, lower left */
+ BTN_GRIPR, BTN_GRIPR2, /* paddle upper right, lower right */
+ BTN_GRIPL, BTN_GRIPL2, /* paddle upper left, lower left */
-1 /* terminating entry */
};
@@ -840,10 +840,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & BIT(3));
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & BIT(1));
+ input_report_key(dev, BTN_DPAD_LEFT, data[2] & BIT(2));
+ input_report_key(dev, BTN_DPAD_RIGHT, data[2] & BIT(3));
+ input_report_key(dev, BTN_DPAD_UP, data[2] & BIT(0));
+ input_report_key(dev, BTN_DPAD_DOWN, data[2] & BIT(1));
} else {
input_report_abs(dev, ABS_HAT0X,
!!(data[2] & 0x08) - !!(data[2] & 0x04));
@@ -891,10 +891,10 @@ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[2] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[2] & BIT(3));
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[2] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[2] & BIT(1));
+ input_report_key(dev, BTN_DPAD_LEFT, data[2] & BIT(2));
+ input_report_key(dev, BTN_DPAD_RIGHT, data[2] & BIT(3));
+ input_report_key(dev, BTN_DPAD_UP, data[2] & BIT(0));
+ input_report_key(dev, BTN_DPAD_DOWN, data[2] & BIT(1));
}
/*
@@ -1075,10 +1075,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
data[18] = 0;
/* Elite Series 2 split packet paddle bits */
- input_report_key(dev, BTN_TRIGGER_HAPPY5, data[18] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY6, data[18] & BIT(1));
- input_report_key(dev, BTN_TRIGGER_HAPPY7, data[18] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY8, data[18] & BIT(3));
+ input_report_key(dev, BTN_GRIPR, data[18] & BIT(0));
+ input_report_key(dev, BTN_GRIPR2, data[18] & BIT(1));
+ input_report_key(dev, BTN_GRIPL, data[18] & BIT(2));
+ input_report_key(dev, BTN_GRIPL2, data[18] & BIT(3));
do_sync = true;
}
@@ -1113,10 +1113,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
- input_report_key(dev, BTN_TRIGGER_HAPPY1, data[5] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY2, data[5] & BIT(3));
- input_report_key(dev, BTN_TRIGGER_HAPPY3, data[5] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY4, data[5] & BIT(1));
+ input_report_key(dev, BTN_DPAD_LEFT, data[5] & BIT(2));
+ input_report_key(dev, BTN_DPAD_RIGHT, data[5] & BIT(3));
+ input_report_key(dev, BTN_DPAD_UP, data[5] & BIT(0));
+ input_report_key(dev, BTN_DPAD_DOWN, data[5] & BIT(1));
} else {
input_report_abs(dev, ABS_HAT0X,
!!(data[5] & 0x08) - !!(data[5] & 0x04));
@@ -1175,10 +1175,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
data[32] = 0;
/* OG Elite Series Controller paddle bits */
- input_report_key(dev, BTN_TRIGGER_HAPPY5, data[32] & BIT(1));
- input_report_key(dev, BTN_TRIGGER_HAPPY6, data[32] & BIT(3));
- input_report_key(dev, BTN_TRIGGER_HAPPY7, data[32] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY8, data[32] & BIT(2));
+ input_report_key(dev, BTN_GRIPR, data[32] & BIT(1));
+ input_report_key(dev, BTN_GRIPR2, data[32] & BIT(3));
+ input_report_key(dev, BTN_GRIPL, data[32] & BIT(0));
+ input_report_key(dev, BTN_GRIPL2, data[32] & BIT(2));
} else if (xpad->packet_type == PKT_XBE2_FW_OLD) {
/* Mute paddles if controller has a custom mapping applied.
* Checked by comparing the current mapping
@@ -1188,10 +1188,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
data[18] = 0;
/* Elite Series 2 4.x firmware paddle bits */
- input_report_key(dev, BTN_TRIGGER_HAPPY5, data[18] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY6, data[18] & BIT(1));
- input_report_key(dev, BTN_TRIGGER_HAPPY7, data[18] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY8, data[18] & BIT(3));
+ input_report_key(dev, BTN_GRIPR, data[18] & BIT(0));
+ input_report_key(dev, BTN_GRIPR2, data[18] & BIT(1));
+ input_report_key(dev, BTN_GRIPL, data[18] & BIT(2));
+ input_report_key(dev, BTN_GRIPL2, data[18] & BIT(3));
} else if (xpad->packet_type == PKT_XBE2_FW_5_EARLY) {
/* Mute paddles if controller has a custom mapping applied.
* Checked by comparing the current mapping
@@ -1203,10 +1203,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
/* Elite Series 2 5.x firmware paddle bits
* (before the packet was split)
*/
- input_report_key(dev, BTN_TRIGGER_HAPPY5, data[22] & BIT(0));
- input_report_key(dev, BTN_TRIGGER_HAPPY6, data[22] & BIT(1));
- input_report_key(dev, BTN_TRIGGER_HAPPY7, data[22] & BIT(2));
- input_report_key(dev, BTN_TRIGGER_HAPPY8, data[22] & BIT(3));
+ input_report_key(dev, BTN_GRIPR, data[22] & BIT(0));
+ input_report_key(dev, BTN_GRIPR2, data[22] & BIT(1));
+ input_report_key(dev, BTN_GRIPL, data[22] & BIT(2));
+ input_report_key(dev, BTN_GRIPL2, data[22] & BIT(3));
}
}
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index dc734974ce06..414fbef4abf9 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -232,8 +232,8 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned int off)
return !!(val & bit);
}
-static void adp5588_gpio_set_value(struct gpio_chip *chip,
- unsigned int off, int val)
+static int adp5588_gpio_set_value(struct gpio_chip *chip, unsigned int off,
+ int val)
{
struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
@@ -246,7 +246,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
else
kpad->dat_out[bank] &= ~bit;
- adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, kpad->dat_out[bank]);
+ return adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank,
+ kpad->dat_out[bank]);
}
static int adp5588_gpio_set_config(struct gpio_chip *chip, unsigned int off,
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index c9e1127578b9..6c999d89ee4b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -84,12 +84,12 @@ static const unsigned short atkbd_set2_keycode[ATKBD_KEYMAP_SIZE] = {
#include "hpps2atkbd.h" /* include the keyboard scancodes */
#else
- 0, 67, 65, 63, 61, 59, 60, 88, 0, 68, 66, 64, 62, 15, 41,117,
- 0, 56, 42, 93, 29, 16, 2, 0, 0, 0, 44, 31, 30, 17, 3, 0,
- 0, 46, 45, 32, 18, 5, 4, 95, 0, 57, 47, 33, 20, 19, 6,183,
- 0, 49, 48, 35, 34, 21, 7,184, 0, 0, 50, 36, 22, 8, 9,185,
- 0, 51, 37, 23, 24, 11, 10, 0, 0, 52, 53, 38, 39, 25, 12, 0,
- 0, 89, 40, 0, 26, 13, 0,193, 58, 54, 28, 27, 0, 43, 0, 85,
+ 0, 67, 65, 63, 61, 59, 60, 88,183, 68, 66, 64, 62, 15, 41,117,
+ 184, 56, 42, 93, 29, 16, 2, 0,185, 0, 44, 31, 30, 17, 3, 0,
+ 186, 46, 45, 32, 18, 5, 4, 95,187, 57, 47, 33, 20, 19, 6,183,
+ 188, 49, 48, 35, 34, 21, 7,184,189, 0, 50, 36, 22, 8, 9,185,
+ 190, 51, 37, 23, 24, 11, 10, 0,191, 52, 53, 38, 39, 25, 12, 0,
+ 192, 89, 40, 0, 26, 13, 0,193, 58, 54, 28, 27, 0, 43, 0,194,
0, 86, 91, 90, 92, 0, 14, 94, 0, 79,124, 75, 71,121, 0, 0,
82, 83, 80, 76, 77, 72, 1, 69, 87, 78, 81, 74, 55, 73, 70, 99,
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 061d48350df6..50e2e792c91d 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -12,6 +12,7 @@
#include <linux/mfd/mt6331/registers.h>
#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>
#include <linux/module.h>
@@ -117,6 +118,19 @@ static const struct mtk_pmic_regs mt6358_regs = {
.rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
};
+static const struct mtk_pmic_regs mt6359_regs = {
+ .keys_regs[MTK_PMIC_PWRKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6359_TOPSTATUS,
+ 0x2, MT6359_PSC_TOP_INT_CON0, 0x5,
+ MTK_PMIC_PWRKEY_RST),
+ .keys_regs[MTK_PMIC_HOMEKEY_INDEX] =
+ MTK_PMIC_KEYS_REGS(MT6359_TOPSTATUS,
+ 0x8, MT6359_PSC_TOP_INT_CON0, 0xa,
+ MTK_PMIC_HOMEKEY_RST),
+ .pmic_rst_reg = MT6359_TOP_RST_MISC,
+ .rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
+};
+
struct mtk_pmic_keys_info {
struct mtk_pmic_keys *keys;
const struct mtk_pmic_keys_regs *regs;
@@ -297,6 +311,9 @@ static const struct of_device_id of_mtk_pmic_keys_match_tbl[] = {
.compatible = "mediatek,mt6358-keys",
.data = &mt6358_regs,
}, {
+ .compatible = "mediatek,mt6359-keys",
+ .data = &mt6359_regs,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 9f1049aa3048..17127269e3f0 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -7,6 +7,7 @@
* Author: Donghwa Lee <dh09.lee@samsung.com>
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -29,11 +30,11 @@
#define SAMSUNG_KEYIFFC 0x10
/* SAMSUNG_KEYIFCON */
-#define SAMSUNG_KEYIFCON_INT_F_EN (1 << 0)
-#define SAMSUNG_KEYIFCON_INT_R_EN (1 << 1)
-#define SAMSUNG_KEYIFCON_DF_EN (1 << 2)
-#define SAMSUNG_KEYIFCON_FC_EN (1 << 3)
-#define SAMSUNG_KEYIFCON_WAKEUPEN (1 << 4)
+#define SAMSUNG_KEYIFCON_INT_F_EN BIT(0)
+#define SAMSUNG_KEYIFCON_INT_R_EN BIT(1)
+#define SAMSUNG_KEYIFCON_DF_EN BIT(2)
+#define SAMSUNG_KEYIFCON_FC_EN BIT(3)
+#define SAMSUNG_KEYIFCON_WAKEUPEN BIT(4)
/* SAMSUNG_KEYIFSTSCLR */
#define SAMSUNG_KEYIFSTSCLR_P_INT_MASK (0xff << 0)
@@ -44,8 +45,7 @@
#define S5PV210_KEYIFSTSCLR_R_INT_OFFSET 16
/* SAMSUNG_KEYIFCOL */
-#define SAMSUNG_KEYIFCOL_MASK (0xff << 0)
-#define S5PV210_KEYIFCOLEN_MASK (0xff << 8)
+#define SAMSUNG_KEYIFCOL_MASK 0xff
/* SAMSUNG_KEYIFROW */
#define SAMSUNG_KEYIFROW_MASK (0xff << 0)
@@ -54,12 +54,12 @@
/* SAMSUNG_KEYIFFC */
#define SAMSUNG_KEYIFFC_MASK (0x3ff << 0)
-enum samsung_keypad_type {
- KEYPAD_TYPE_SAMSUNG,
- KEYPAD_TYPE_S5PV210,
+struct samsung_chip_info {
+ unsigned int column_shift;
};
struct samsung_keypad {
+ const struct samsung_chip_info *chip;
struct input_dev *input_dev;
struct platform_device *pdev;
struct clk *clk;
@@ -68,7 +68,6 @@ struct samsung_keypad {
bool stopped;
bool wake_enabled;
int irq;
- enum samsung_keypad_type type;
unsigned int row_shift;
unsigned int rows;
unsigned int cols;
@@ -83,19 +82,14 @@ static void samsung_keypad_scan(struct samsung_keypad *keypad,
unsigned int val;
for (col = 0; col < keypad->cols; col++) {
- if (keypad->type == KEYPAD_TYPE_S5PV210) {
- val = S5PV210_KEYIFCOLEN_MASK;
- val &= ~(1 << col) << 8;
- } else {
- val = SAMSUNG_KEYIFCOL_MASK;
- val &= ~(1 << col);
- }
+ val = SAMSUNG_KEYIFCOL_MASK & ~BIT(col);
+ val <<= keypad->chip->column_shift;
writel(val, keypad->base + SAMSUNG_KEYIFCOL);
mdelay(1);
val = readl(keypad->base + SAMSUNG_KEYIFROW);
- row_state[col] = ~val & ((1 << keypad->rows) - 1);
+ row_state[col] = ~val & GENMASK(keypad->rows - 1, 0);
}
/* KEYIFCOL reg clear */
@@ -119,10 +113,10 @@ static bool samsung_keypad_report(struct samsung_keypad *keypad,
continue;
for (row = 0; row < keypad->rows; row++) {
- if (!(changed & (1 << row)))
+ if (!(changed & BIT(row)))
continue;
- pressed = row_state[col] & (1 << row);
+ pressed = row_state[col] & BIT(row);
dev_dbg(&keypad->input_dev->dev,
"key %s, row: %d, col: %d\n",
@@ -314,11 +308,11 @@ static int samsung_keypad_probe(struct platform_device *pdev)
{
const struct samsung_keypad_platdata *pdata;
const struct matrix_keymap_data *keymap_data;
+ const struct platform_device_id *id;
struct samsung_keypad *keypad;
struct resource *res;
struct input_dev *input_dev;
unsigned int row_shift;
- unsigned int keymap_size;
int error;
pdata = dev_get_platdata(&pdev->dev);
@@ -345,12 +339,16 @@ static int samsung_keypad_probe(struct platform_device *pdev)
pdata->cfg_gpio(pdata->rows, pdata->cols);
row_shift = get_count_order(pdata->cols);
- keymap_size = (pdata->rows << row_shift) * sizeof(keypad->keycodes[0]);
- keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad) + keymap_size,
+ keypad = devm_kzalloc(&pdev->dev,
+ struct_size(keypad, keycodes,
+ pdata->rows << row_shift),
GFP_KERNEL);
+ if (!keypad)
+ return -ENOMEM;
+
input_dev = devm_input_allocate_device(&pdev->dev);
- if (!keypad || !input_dev)
+ if (!input_dev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -361,18 +359,12 @@ static int samsung_keypad_probe(struct platform_device *pdev)
if (!keypad->base)
return -EBUSY;
- keypad->clk = devm_clk_get(&pdev->dev, "keypad");
+ keypad->clk = devm_clk_get_prepared(&pdev->dev, "keypad");
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get keypad clk\n");
return PTR_ERR(keypad->clk);
}
- error = clk_prepare(keypad->clk);
- if (error) {
- dev_err(&pdev->dev, "keypad clock prepare failed\n");
- return error;
- }
-
keypad->input_dev = input_dev;
keypad->pdev = pdev;
keypad->row_shift = row_shift;
@@ -381,15 +373,20 @@ static int samsung_keypad_probe(struct platform_device *pdev)
keypad->stopped = true;
init_waitqueue_head(&keypad->wait);
- if (pdev->dev.of_node)
- keypad->type = of_device_is_compatible(pdev->dev.of_node,
- "samsung,s5pv210-keypad");
- else
- keypad->type = platform_get_device_id(pdev)->driver_data;
+ keypad->chip = device_get_match_data(&pdev->dev);
+ if (!keypad->chip) {
+ id = platform_get_device_id(pdev);
+ if (id)
+ keypad->chip = (const void *)id->driver_data;
+ }
+
+ if (!keypad->chip) {
+ dev_err(&pdev->dev, "Unable to determine chip type");
+ return -EINVAL;
+ }
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
- input_dev->dev.parent = &pdev->dev;
input_dev->open = samsung_keypad_open;
input_dev->close = samsung_keypad_close;
@@ -399,7 +396,7 @@ static int samsung_keypad_probe(struct platform_device *pdev)
keypad->keycodes, input_dev);
if (error) {
dev_err(&pdev->dev, "failed to build keymap\n");
- goto err_unprepare_clk;
+ return error;
}
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
@@ -411,7 +408,7 @@ static int samsung_keypad_probe(struct platform_device *pdev)
keypad->irq = platform_get_irq(pdev, 0);
if (keypad->irq < 0) {
error = keypad->irq;
- goto err_unprepare_clk;
+ return error;
}
error = devm_request_threaded_irq(&pdev->dev, keypad->irq, NULL,
@@ -419,16 +416,19 @@ static int samsung_keypad_probe(struct platform_device *pdev)
dev_name(&pdev->dev), keypad);
if (error) {
dev_err(&pdev->dev, "failed to register keypad interrupt\n");
- goto err_unprepare_clk;
+ return error;
}
device_init_wakeup(&pdev->dev, pdata->wakeup);
platform_set_drvdata(pdev, keypad);
- pm_runtime_enable(&pdev->dev);
+
+ error = devm_pm_runtime_enable(&pdev->dev);
+ if (error)
+ return error;
error = input_register_device(keypad->input_dev);
if (error)
- goto err_disable_runtime_pm;
+ return error;
if (pdev->dev.of_node) {
devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
@@ -436,23 +436,6 @@ static int samsung_keypad_probe(struct platform_device *pdev)
devm_kfree(&pdev->dev, (void *)pdata);
}
return 0;
-
-err_disable_runtime_pm:
- pm_runtime_disable(&pdev->dev);
-err_unprepare_clk:
- clk_unprepare(keypad->clk);
- return error;
-}
-
-static void samsung_keypad_remove(struct platform_device *pdev)
-{
- struct samsung_keypad *keypad = platform_get_drvdata(pdev);
-
- pm_runtime_disable(&pdev->dev);
-
- input_unregister_device(keypad->input_dev);
-
- clk_unprepare(keypad->clk);
}
static int samsung_keypad_runtime_suspend(struct device *dev)
@@ -528,15 +511,13 @@ static int samsung_keypad_suspend(struct device *dev)
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
- mutex_lock(&input_dev->mutex);
+ guard(mutex)(&input_dev->mutex);
if (input_device_enabled(input_dev))
samsung_keypad_stop(keypad);
samsung_keypad_toggle_wakeup(keypad, true);
- mutex_unlock(&input_dev->mutex);
-
return 0;
}
@@ -546,15 +527,13 @@ static int samsung_keypad_resume(struct device *dev)
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
- mutex_lock(&input_dev->mutex);
+ guard(mutex)(&input_dev->mutex);
samsung_keypad_toggle_wakeup(keypad, false);
if (input_device_enabled(input_dev))
samsung_keypad_start(keypad);
- mutex_unlock(&input_dev->mutex);
-
return 0;
}
@@ -564,11 +543,24 @@ static const struct dev_pm_ops samsung_keypad_pm_ops = {
samsung_keypad_runtime_resume, NULL)
};
+static const struct samsung_chip_info samsung_s3c6410_chip_info = {
+ .column_shift = 0,
+};
+
+static const struct samsung_chip_info samsung_s5pv210_chip_info = {
+ .column_shift = 8,
+};
+
#ifdef CONFIG_OF
static const struct of_device_id samsung_keypad_dt_match[] = {
- { .compatible = "samsung,s3c6410-keypad" },
- { .compatible = "samsung,s5pv210-keypad" },
- {},
+ {
+ .compatible = "samsung,s3c6410-keypad",
+ .data = &samsung_s3c6410_chip_info,
+ }, {
+ .compatible = "samsung,s5pv210-keypad",
+ .data = &samsung_s5pv210_chip_info,
+ },
+ { }
};
MODULE_DEVICE_TABLE(of, samsung_keypad_dt_match);
#endif
@@ -576,18 +568,17 @@ MODULE_DEVICE_TABLE(of, samsung_keypad_dt_match);
static const struct platform_device_id samsung_keypad_driver_ids[] = {
{
.name = "samsung-keypad",
- .driver_data = KEYPAD_TYPE_SAMSUNG,
+ .driver_data = (kernel_ulong_t)&samsung_s3c6410_chip_info,
}, {
.name = "s5pv210-keypad",
- .driver_data = KEYPAD_TYPE_S5PV210,
+ .driver_data = (kernel_ulong_t)&samsung_s5pv210_chip_info,
},
- { },
+ { }
};
MODULE_DEVICE_TABLE(platform, samsung_keypad_driver_ids);
static struct platform_driver samsung_keypad_driver = {
.probe = samsung_keypad_probe,
- .remove = samsung_keypad_remove,
.driver = {
.name = "samsung-keypad",
.of_match_table = of_match_ptr(samsung_keypad_dt_match),
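
After the chip_info conversion, the only per-variant difference is column_shift (0 for the s3c6410-style block, 8 for s5pv210), so the column-drive value is one expression shifted. A quick standalone check of that arithmetic, using the mask value from the patch (the helper name is illustrative):

#include <assert.h>
#include <stdint.h>

#define KEYIFCOL_MASK 0xffu	/* SAMSUNG_KEYIFCOL_MASK in the patch */

static uint32_t col_drive(unsigned int col, unsigned int column_shift)
{
	return (KEYIFCOL_MASK & ~(1u << col)) << column_shift;
}

int main(void)
{
	assert(col_drive(2, 0) == 0x000000fbu);	/* s3c6410-style keypad */
	assert(col_drive(2, 8) == 0x0000fb00u);	/* s5pv210-style keypad */
	return 0;
}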
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index f5496ca0c0d2..0fb21c99a5e3 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -584,13 +584,6 @@ config INPUT_PALMAS_PWRBUTTON
To compile this driver as a module, choose M here. The module will
be called palmas_pwrbutton.
-config INPUT_PCF50633_PMU
- tristate "PCF50633 PMU events"
- depends on MFD_PCF50633
- help
- Say Y to include support for delivering PMU events via input
- layer on NXP PCF50633.
-
config INPUT_PCF8574
tristate "PCF8574 Keypad input device"
depends on I2C
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 6d91804d0a6f..d468c8140b93 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -59,7 +59,6 @@ obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
obj-$(CONFIG_INPUT_PALMAS_PWRBUTTON) += palmas-pwrbutton.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
-obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
obj-$(CONFIG_INPUT_PM8941_PWRKEY) += pm8941-pwrkey.o
diff --git a/drivers/input/misc/cs40l50-vibra.c b/drivers/input/misc/cs40l50-vibra.c
index 330f09123631..7aa7d577e01b 100644
--- a/drivers/input/misc/cs40l50-vibra.c
+++ b/drivers/input/misc/cs40l50-vibra.c
@@ -482,7 +482,6 @@ static int cs40l50_erase(struct input_dev *dev, int effect_id)
static void cs40l50_remove_wq(void *data)
{
- flush_workqueue(data);
destroy_workqueue(data);
}
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 1dfd7b95a4ce..5d45680d74a4 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -68,15 +68,16 @@ struct max77693_haptic {
static int max77693_haptic_set_duty_cycle(struct max77693_haptic *haptic)
{
- struct pwm_args pargs;
- int delta;
+ struct pwm_state state;
int error;
- pwm_get_args(haptic->pwm_dev, &pargs);
- delta = (pargs.period + haptic->pwm_duty) / 2;
- error = pwm_config(haptic->pwm_dev, delta, pargs.period);
+ pwm_init_state(haptic->pwm_dev, &state);
+ state.duty_cycle = (state.period + haptic->pwm_duty) / 2;
+
+ error = pwm_apply_might_sleep(haptic->pwm_dev, &state);
if (error) {
- dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
+ dev_err(haptic->dev,
+ "failed to set pwm duty cycle: %d\n", error);
return error;
}
@@ -166,12 +167,17 @@ static int max77693_haptic_lowsys(struct max77693_haptic *haptic, bool enable)
static void max77693_haptic_enable(struct max77693_haptic *haptic)
{
+ struct pwm_state state;
int error;
if (haptic->enabled)
return;
- error = pwm_enable(haptic->pwm_dev);
+ pwm_init_state(haptic->pwm_dev, &state);
+ state.duty_cycle = (state.period + haptic->pwm_duty) / 2;
+ state.enabled = true;
+
+ error = pwm_apply_might_sleep(haptic->pwm_dev, &state);
if (error) {
dev_err(haptic->dev,
"failed to enable haptic pwm device: %d\n", error);
@@ -224,18 +230,13 @@ static void max77693_haptic_play_work(struct work_struct *work)
{
struct max77693_haptic *haptic =
container_of(work, struct max77693_haptic, work);
- int error;
-
- error = max77693_haptic_set_duty_cycle(haptic);
- if (error) {
- dev_err(haptic->dev, "failed to set duty cycle: %d\n", error);
- return;
- }
- if (haptic->magnitude)
- max77693_haptic_enable(haptic);
- else
+ if (!haptic->magnitude)
max77693_haptic_disable(haptic);
+ else if (haptic->enabled)
+ max77693_haptic_set_duty_cycle(haptic);
+ else
+ max77693_haptic_enable(haptic);
}
static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
@@ -340,12 +341,6 @@ static int max77693_haptic_probe(struct platform_device *pdev)
return PTR_ERR(haptic->pwm_dev);
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(haptic->pwm_dev);
-
haptic->motor_reg = devm_regulator_get(&pdev->dev, "haptic");
if (IS_ERR(haptic->motor_reg)) {
dev_err(&pdev->dev, "failed to get regulator\n");
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index f97f341ee0bb..d5e051a25a74 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -53,40 +53,30 @@ struct max8997_haptic {
unsigned int pattern_signal_period;
};
-static int max8997_haptic_set_duty_cycle(struct max8997_haptic *chip)
+static void max8997_haptic_set_internal_duty_cycle(struct max8997_haptic *chip)
{
- int ret = 0;
+ u8 duty_index = DIV_ROUND_UP(chip->level * 64, 100);
- if (chip->mode == MAX8997_EXTERNAL_MODE) {
- unsigned int duty = chip->pwm_period * chip->level / 100;
- ret = pwm_config(chip->pwm, duty, chip->pwm_period);
- } else {
- u8 duty_index = 0;
-
- duty_index = DIV_ROUND_UP(chip->level * 64, 100);
-
- switch (chip->internal_mode_pattern) {
- case 0:
- max8997_write_reg(chip->client,
- MAX8997_HAPTIC_REG_SIGPWMDC1, duty_index);
- break;
- case 1:
- max8997_write_reg(chip->client,
- MAX8997_HAPTIC_REG_SIGPWMDC2, duty_index);
- break;
- case 2:
- max8997_write_reg(chip->client,
- MAX8997_HAPTIC_REG_SIGPWMDC3, duty_index);
- break;
- case 3:
- max8997_write_reg(chip->client,
- MAX8997_HAPTIC_REG_SIGPWMDC4, duty_index);
- break;
- default:
- break;
- }
+ switch (chip->internal_mode_pattern) {
+ case 0:
+ max8997_write_reg(chip->client,
+ MAX8997_HAPTIC_REG_SIGPWMDC1, duty_index);
+ break;
+ case 1:
+ max8997_write_reg(chip->client,
+ MAX8997_HAPTIC_REG_SIGPWMDC2, duty_index);
+ break;
+ case 2:
+ max8997_write_reg(chip->client,
+ MAX8997_HAPTIC_REG_SIGPWMDC3, duty_index);
+ break;
+ case 3:
+ max8997_write_reg(chip->client,
+ MAX8997_HAPTIC_REG_SIGPWMDC4, duty_index);
+ break;
+ default:
+ break;
}
- return ret;
}
static void max8997_haptic_configure(struct max8997_haptic *chip)
@@ -155,11 +145,8 @@ static void max8997_haptic_enable(struct max8997_haptic *chip)
guard(mutex)(&chip->mutex);
- error = max8997_haptic_set_duty_cycle(chip);
- if (error) {
- dev_err(chip->dev, "set_pwm_cycle failed, error: %d\n", error);
- return;
- }
+ if (chip->mode != MAX8997_EXTERNAL_MODE)
+ max8997_haptic_set_internal_duty_cycle(chip);
if (!chip->enabled) {
error = regulator_enable(chip->regulator);
@@ -168,16 +155,32 @@ static void max8997_haptic_enable(struct max8997_haptic *chip)
return;
}
max8997_haptic_configure(chip);
- if (chip->mode == MAX8997_EXTERNAL_MODE) {
- error = pwm_enable(chip->pwm);
- if (error) {
- dev_err(chip->dev, "Failed to enable PWM\n");
- regulator_disable(chip->regulator);
- return;
- }
+ }
+
+ /*
+ * It would be more straightforward to configure the external PWM
+ * earlier, i.e. when the internal duty_cycle is set up in internal mode.
+ * But historically this is done only after the regulator was enabled
+ * and max8997_haptic_configure() set the enable bit in
+ * MAX8997_HAPTIC_REG_CONF2. So better keep it this way.
+ */
+ if (chip->mode == MAX8997_EXTERNAL_MODE) {
+ struct pwm_state state;
+
+ pwm_init_state(chip->pwm, &state);
+ state.period = chip->pwm_period;
+ state.duty_cycle = chip->pwm_period * chip->level / 100;
+ state.enabled = true;
+
+ error = pwm_apply_might_sleep(chip->pwm, &state);
+ if (error) {
+ dev_err(chip->dev, "Failed to enable PWM\n");
+ regulator_disable(chip->regulator);
+ return;
}
- chip->enabled = true;
}
+
+ chip->enabled = true;
}
static void max8997_haptic_disable(struct max8997_haptic *chip)
@@ -282,11 +285,6 @@ static int max8997_haptic_probe(struct platform_device *pdev)
goto err_free_mem;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(chip->pwm);
break;
default:
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
deleted file mode 100644
index 6d046e236ba6..000000000000
--- a/drivers/input/misc/pcf50633-input.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 Input Driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- *
- * Broken down from monstrous PCF50633 driver mainly by
- * Harald Welte, Andy Green and Werner Almesberger
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-
-#include <linux/mfd/pcf50633/core.h>
-
-#define PCF50633_OOCSTAT_ONKEY 0x01
-#define PCF50633_REG_OOCSTAT 0x12
-#define PCF50633_REG_OOCMODE 0x10
-
-struct pcf50633_input {
- struct pcf50633 *pcf;
- struct input_dev *input_dev;
-};
-
-static void
-pcf50633_input_irq(int irq, void *data)
-{
- struct pcf50633_input *input;
- int onkey_released;
-
- input = data;
-
- /* We report only one event depending on the key press status */
- onkey_released = pcf50633_reg_read(input->pcf, PCF50633_REG_OOCSTAT)
- & PCF50633_OOCSTAT_ONKEY;
-
- if (irq == PCF50633_IRQ_ONKEYF && !onkey_released)
- input_report_key(input->input_dev, KEY_POWER, 1);
- else if (irq == PCF50633_IRQ_ONKEYR && onkey_released)
- input_report_key(input->input_dev, KEY_POWER, 0);
-
- input_sync(input->input_dev);
-}
-
-static int pcf50633_input_probe(struct platform_device *pdev)
-{
- struct pcf50633_input *input;
- struct input_dev *input_dev;
- int ret;
-
-
- input = kzalloc(sizeof(*input), GFP_KERNEL);
- if (!input)
- return -ENOMEM;
-
- input_dev = input_allocate_device();
- if (!input_dev) {
- kfree(input);
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, input);
- input->pcf = dev_to_pcf50633(pdev->dev.parent);
- input->input_dev = input_dev;
-
- input_dev->name = "PCF50633 PMU events";
- input_dev->id.bustype = BUS_I2C;
- input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_PWR);
- set_bit(KEY_POWER, input_dev->keybit);
-
- ret = input_register_device(input_dev);
- if (ret) {
- input_free_device(input_dev);
- kfree(input);
- return ret;
- }
- pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYR,
- pcf50633_input_irq, input);
- pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYF,
- pcf50633_input_irq, input);
-
- return 0;
-}
-
-static void pcf50633_input_remove(struct platform_device *pdev)
-{
- struct pcf50633_input *input = platform_get_drvdata(pdev);
-
- pcf50633_free_irq(input->pcf, PCF50633_IRQ_ONKEYR);
- pcf50633_free_irq(input->pcf, PCF50633_IRQ_ONKEYF);
-
- input_unregister_device(input->input_dev);
- kfree(input);
-}
-
-static struct platform_driver pcf50633_input_driver = {
- .driver = {
- .name = "pcf50633-input",
- },
- .probe = pcf50633_input_probe,
- .remove = pcf50633_input_remove,
-};
-module_platform_driver(pcf50633_input_driver);
-
-MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
-MODULE_DESCRIPTION("PCF50633 input driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pcf50633-input");
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index c0163b983ce6..5db58fc9e11b 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -82,6 +82,21 @@ config RMI4_F12
touchpads. For sensors that support relative pointing, F12 also
provides mouse input.
+config RMI4_F1A
+ bool "RMI4 Function 1A (0D pointing)"
+ help
+ Say Y here if you want to add support for RMI4 function 1A.
+
+ Function 1A provides capacitive keys support for RMI4 devices.
+
+config RMI4_F21
+ bool "RMI4 Function 21 (PRESSURE)"
+ help
+ Say Y here if you want to add support for RMI4 function 21.
+
+ Function 21 provides buttons/pressure handling for RMI4 devices.
+ This includes support for buttons/pressure on PressurePad.
+
config RMI4_F30
bool "RMI4 Function 30 (GPIO LED)"
help
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index 02f14c846861..35ae29f72497 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -8,6 +8,8 @@ rmi_core-$(CONFIG_RMI4_2D_SENSOR) += rmi_2d_sensor.o
rmi_core-$(CONFIG_RMI4_F03) += rmi_f03.o
rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
+rmi_core-$(CONFIG_RMI4_F1A) += rmi_f1a.o
+rmi_core-$(CONFIG_RMI4_F21) += rmi_f21.o
rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
rmi_core-$(CONFIG_RMI4_F34) += rmi_f34.o rmi_f34v7.o
rmi_core-$(CONFIG_RMI4_F3A) += rmi_f3a.o
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 3aee04837205..5f98c3bcfd46 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -360,6 +360,12 @@ static struct rmi_function_handler *fn_handlers[] = {
#ifdef CONFIG_RMI4_F12
&rmi_f12_handler,
#endif
+#ifdef CONFIG_RMI4_F1A
+ &rmi_f1a_handler,
+#endif
+#ifdef CONFIG_RMI4_F21
+ &rmi_f21_handler,
+#endif
#ifdef CONFIG_RMI4_F30
&rmi_f30_handler,
#endif
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 3bfe9013043e..e84495caab15 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -133,6 +133,8 @@ extern struct rmi_function_handler rmi_f01_handler;
extern struct rmi_function_handler rmi_f03_handler;
extern struct rmi_function_handler rmi_f11_handler;
extern struct rmi_function_handler rmi_f12_handler;
+extern struct rmi_function_handler rmi_f1a_handler;
+extern struct rmi_function_handler rmi_f21_handler;
extern struct rmi_function_handler rmi_f30_handler;
extern struct rmi_function_handler rmi_f34_handler;
extern struct rmi_function_handler rmi_f3a_handler;
diff --git a/drivers/input/rmi4/rmi_f1a.c b/drivers/input/rmi4/rmi_f1a.c
new file mode 100644
index 000000000000..765e9eae4cdc
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f1a.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 André Apitzsch <git@apitzsch.eu>
+ */
+
+#include <linux/input.h>
+#include <linux/property.h>
+#include "rmi_driver.h"
+
+struct f1a_data {
+ struct input_dev *input;
+
+ u32 *keymap;
+ unsigned int num_keys;
+};
+
+static int rmi_f1a_parse_device_properties(struct rmi_function *fn, struct f1a_data *f1a)
+{
+ static const char buttons_property[] = "linux,keycodes";
+ struct device *dev = &fn->dev;
+ u32 *buttonmap;
+ int n_keys;
+ int error;
+
+ if (!device_property_present(dev, buttons_property))
+ return 0;
+
+ n_keys = device_property_count_u32(dev, buttons_property);
+ if (n_keys <= 0) {
+ error = n_keys < 0 ? n_keys : -EINVAL;
+ dev_err(dev, "Invalid/malformed '%s' property: %d\n",
+ buttons_property, error);
+ return error;
+ }
+
+ buttonmap = devm_kmalloc_array(dev, n_keys, sizeof(*buttonmap),
+ GFP_KERNEL);
+ if (!buttonmap)
+ return -ENOMEM;
+
+ error = device_property_read_u32_array(dev, buttons_property,
+ buttonmap, n_keys);
+ if (error) {
+ dev_err(dev, "Failed to parse '%s' property: %d\n",
+ buttons_property, error);
+ return error;
+ }
+
+ f1a->keymap = buttonmap;
+ f1a->num_keys = n_keys;
+
+ return 0;
+}
+
+static irqreturn_t rmi_f1a_attention(int irq, void *ctx)
+{
+ struct rmi_function *fn = ctx;
+ struct f1a_data *f1a = dev_get_drvdata(&fn->dev);
+ char button_bitmask;
+ int key;
+ int error;
+
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr,
+ &button_bitmask, sizeof(button_bitmask));
+ if (error) {
+ dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
+ error);
+ return IRQ_RETVAL(error);
+ }
+
+ for (key = 0; key < f1a->num_keys; key++)
+ input_report_key(f1a->input, f1a->keymap[key],
+ button_bitmask & BIT(key));
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_f1a_config(struct rmi_function *fn)
+{
+ struct f1a_data *f1a = dev_get_drvdata(&fn->dev);
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+
+ if (f1a->num_keys)
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f1a_initialize(struct rmi_function *fn, struct f1a_data *f1a)
+{
+ int error;
+ int i;
+
+ error = rmi_f1a_parse_device_properties(fn, f1a);
+ if (error)
+ return error;
+
+ for (i = 0; i < f1a->num_keys; i++)
+ input_set_capability(f1a->input, EV_KEY, f1a->keymap[i]);
+
+ f1a->input->keycode = f1a->keymap;
+ f1a->input->keycodemax = f1a->num_keys;
+ f1a->input->keycodesize = sizeof(f1a->keymap[0]);
+
+ return 0;
+}
+
+static int rmi_f1a_probe(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ struct f1a_data *f1a;
+ int error;
+
+ if (!drv_data->input) {
+ dev_info(&fn->dev, "F1A: no input device found, ignoring\n");
+ return -ENXIO;
+ }
+
+ f1a = devm_kzalloc(&fn->dev, sizeof(*f1a), GFP_KERNEL);
+ if (!f1a)
+ return -ENOMEM;
+
+ f1a->input = drv_data->input;
+
+ error = rmi_f1a_initialize(fn, f1a);
+ if (error)
+ return error;
+
+ dev_set_drvdata(&fn->dev, f1a);
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f1a_handler = {
+ .driver = {
+ .name = "rmi4_f1a",
+ },
+ .func = 0x1a,
+ .probe = rmi_f1a_probe,
+ .config = rmi_f1a_config,
+ .attention = rmi_f1a_attention,
+};
diff --git a/drivers/input/rmi4/rmi_f21.c b/drivers/input/rmi4/rmi_f21.c
new file mode 100644
index 000000000000..e3a9dfc3f0ec
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f21.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2025 Synaptics Incorporated
+ */
+
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define RMI_F21_SENSOR_COUNT_MASK GENMASK(3, 0)
+#define RMI_F21_FINGER_COUNT_PRESENT BIT(5)
+#define RMI_F21_NEW_REPORT_FORMAT BIT(6)
+
+#define RMI_F21_FINGER_COUNT_MASK GENMASK(3, 0)
+
+#define RMI_F21_MAX_SENSORS 16
+#define RMI_F21_MAX_FINGERS 16
+#define RMI_F21_DATA_REGS_MAX_SIZE (RMI_F21_MAX_SENSORS * 2 + \
+ RMI_F21_MAX_FINGERS * 2 + 1)
+
+#define RMI_F21_FORCE_CLICK_BIT BIT(0)
+
+#define RMI_F21_FORCEPAD_BUTTON_COUNT 1
+
+struct f21_data {
+ struct input_dev *input;
+ u16 key_code;
+
+ unsigned int attn_data_size;
+ unsigned int attn_data_button_offset;
+
+ unsigned int data_reg_size;
+ unsigned int data_reg_button_offset;
+ u8 data_regs[RMI_F21_DATA_REGS_MAX_SIZE];
+};
+
+static irqreturn_t rmi_f21_attention(int irq, void *ctx)
+{
+ struct rmi_function *fn = ctx;
+ struct f21_data *f21 = dev_get_drvdata(&fn->dev);
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+ u8 *pdata;
+ int error;
+ bool pressed;
+
+ if (drvdata->attn_data.data) {
+ if (drvdata->attn_data.size < f21->attn_data_size) {
+ dev_warn(&fn->dev, "f21 interrupt, but data is missing\n");
+ return IRQ_HANDLED;
+ }
+
+ pdata = drvdata->attn_data.data + f21->attn_data_button_offset;
+
+ drvdata->attn_data.data += f21->attn_data_size;
+ drvdata->attn_data.size -= f21->attn_data_size;
+ } else {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr,
+ f21->data_regs, f21->data_reg_size);
+ if (error) {
+ dev_err(&fn->dev, "failed to read f21 data registers: %d\n",
+ error);
+ return IRQ_RETVAL(error);
+ }
+
+ pdata = f21->data_regs + f21->data_reg_button_offset;
+ }
+
+ pressed = *pdata & RMI_F21_FORCE_CLICK_BIT;
+ input_report_key(f21->input, f21->key_code, pressed);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_f21_config(struct rmi_function *fn)
+{
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f21_initialize(struct rmi_function *fn, struct f21_data *f21)
+{
+ struct input_dev *input = f21->input;
+
+ f21->key_code = BTN_LEFT;
+
+ input->keycode = &f21->key_code;
+ input->keycodesize = sizeof(f21->key_code);
+ input->keycodemax = RMI_F21_FORCEPAD_BUTTON_COUNT;
+
+ input_set_capability(input, EV_KEY, f21->key_code);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ return 0;
+}
+
+static int rmi_f21_probe(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ struct f21_data *f21;
+ unsigned int sensor_count;
+ unsigned int max_fingers;
+ unsigned int query15_offset;
+ u8 query15_data;
+ int error;
+
+ if (!drv_data->input) {
+ dev_info(&fn->dev, "f21: no input device found, ignoring\n");
+ return -ENXIO;
+ }
+
+ f21 = devm_kzalloc(&fn->dev, sizeof(*f21), GFP_KERNEL);
+ if (!f21)
+ return -ENOMEM;
+
+ f21->input = drv_data->input;
+
+ error = rmi_f21_initialize(fn, f21);
+ if (error)
+ return error;
+
+ dev_set_drvdata(&fn->dev, f21);
+
+ sensor_count = fn->fd.query_base_addr & RMI_F21_SENSOR_COUNT_MASK;
+ if (fn->fd.query_base_addr & RMI_F21_FINGER_COUNT_PRESENT) {
+ query15_offset = fn->fd.query_base_addr & RMI_F21_NEW_REPORT_FORMAT ? 2 : 1;
+ error = rmi_read_block(fn->rmi_dev,
+ fn->fd.query_base_addr + query15_offset,
+ &query15_data, sizeof(query15_data));
+ if (error)
+ return dev_err_probe(&fn->dev, error,
+ "failed to read 'query15' data");
+
+ max_fingers = query15_data & RMI_F21_FINGER_COUNT_MASK;
+ } else {
+ max_fingers = 5;
+ }
+
+ if (fn->fd.query_base_addr & RMI_F21_NEW_REPORT_FORMAT) {
+ /* Each finger uses one byte, and the button state uses one byte. */
+ f21->attn_data_size = max_fingers + 1;
+ f21->attn_data_button_offset = f21->attn_data_size - 1;
+ /*
+ * Each sensor uses two bytes, the button state uses one byte,
+ * and each finger uses two bytes.
+ */
+ f21->data_reg_size = sensor_count * 2 + 1 + max_fingers * 2;
+ f21->data_reg_button_offset = sensor_count * 2;
+ } else {
+ /*
+ * Regardless of the transport each finger uses two bytes,
+ * and the button state uses one byte.
+ */
+ f21->attn_data_size = sensor_count * 2 + 1;
+ f21->attn_data_button_offset = sensor_count * 2;
+
+ f21->data_reg_size = f21->attn_data_size;
+ f21->data_reg_button_offset = f21->attn_data_button_offset;
+ }
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f21_handler = {
+ .driver = {
+ .name = "rmi4_f21",
+ },
+ .func = 0x21,
+ .probe = rmi_f21_probe,
+ .config = rmi_f21_config,
+ .attention = rmi_f21_attention,
+};
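
rmi_f21_probe() derives two sets of buffer sizes from the query bits; a worked example of that arithmetic with assumed values (3 sensors, 5 fingers, purely illustrative) makes the two layouts easy to check:

#include <assert.h>

int main(void)
{
	unsigned int sensor_count = 3, max_fingers = 5;	/* assumed values */

	/* new report format: one byte per finger plus the button byte */
	unsigned int attn_new = max_fingers + 1;			/* 6  */
	/* register map: two bytes per sensor, button byte, two per finger */
	unsigned int regs_new = sensor_count * 2 + 1 + max_fingers * 2;	/* 17 */

	/* old format: two bytes per sensor plus the button byte, both paths */
	unsigned int attn_old = sensor_count * 2 + 1;			/* 7  */

	assert(attn_new == 6 && regs_new == 17 && attn_old == 7);
	return 0;
}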
diff --git a/drivers/input/touch-overlay.c b/drivers/input/touch-overlay.c
new file mode 100644
index 000000000000..8806373f7a4a
--- /dev/null
+++ b/drivers/input/touch-overlay.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Helper functions for overlay objects on touchscreens
+ *
+ * Copyright (c) 2023 Javier Carrasco <javier.carrasco@wolfvision.net>
+ */
+
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touch-overlay.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/property.h>
+
+struct touch_overlay_segment {
+ struct list_head list;
+ u32 x_origin;
+ u32 y_origin;
+ u32 x_size;
+ u32 y_size;
+ u32 key;
+ bool pressed;
+ int slot;
+};
+
+static int touch_overlay_get_segment(struct fwnode_handle *segment_node,
+ struct touch_overlay_segment *segment,
+ struct input_dev *input)
+{
+ int error;
+
+ error = fwnode_property_read_u32(segment_node, "x-origin",
+ &segment->x_origin);
+ if (error)
+ return error;
+
+ error = fwnode_property_read_u32(segment_node, "y-origin",
+ &segment->y_origin);
+ if (error)
+ return error;
+
+ error = fwnode_property_read_u32(segment_node, "x-size",
+ &segment->x_size);
+ if (error)
+ return error;
+
+ error = fwnode_property_read_u32(segment_node, "y-size",
+ &segment->y_size);
+ if (error)
+ return error;
+
+ error = fwnode_property_read_u32(segment_node, "linux,code",
+ &segment->key);
+ if (!error)
+ input_set_capability(input, EV_KEY, segment->key);
+ else if (error != -EINVAL)
+ return error;
+
+ return 0;
+}
+
+/**
+ * touch_overlay_map - map overlay objects from the device tree and set
+ * key capabilities if buttons are defined.
+ * @list: pointer to the list that will hold the segments
+ * @input: pointer to the already allocated input_dev
+ *
+ * Returns 0 on success and an error number otherwise.
+ *
+ * If buttons are defined, key capabilities are set accordingly.
+ */
+int touch_overlay_map(struct list_head *list, struct input_dev *input)
+{
+ struct fwnode_handle *fw_segment;
+ struct device *dev = input->dev.parent;
+ struct touch_overlay_segment *segment;
+ int error;
+
+ struct fwnode_handle *overlay __free(fwnode_handle) =
+ device_get_named_child_node(dev, "touch-overlay");
+ if (!overlay)
+ return 0;
+
+ fwnode_for_each_available_child_node(overlay, fw_segment) {
+ segment = devm_kzalloc(dev, sizeof(*segment), GFP_KERNEL);
+ if (!segment) {
+ fwnode_handle_put(fw_segment);
+ return -ENOMEM;
+ }
+ error = touch_overlay_get_segment(fw_segment, segment, input);
+ if (error) {
+ fwnode_handle_put(fw_segment);
+ return error;
+ }
+ list_add_tail(&segment->list, list);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(touch_overlay_map);
+
+/**
+ * touch_overlay_get_touchscreen_abs - get abs size from the touchscreen area.
+ * @list: pointer to the list that holds the segments
+ * @x: horizontal abs
+ * @y: vertical abs
+ */
+void touch_overlay_get_touchscreen_abs(struct list_head *list, u16 *x, u16 *y)
+{
+ struct touch_overlay_segment *segment;
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (!segment->key) {
+ *x = segment->x_size - 1;
+ *y = segment->y_size - 1;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(touch_overlay_get_touchscreen_abs);
+
+static bool touch_overlay_segment_event(struct touch_overlay_segment *seg,
+ struct input_mt_pos *pos)
+{
+ if (pos->x >= seg->x_origin && pos->x < (seg->x_origin + seg->x_size) &&
+ pos->y >= seg->y_origin && pos->y < (seg->y_origin + seg->y_size))
+ return true;
+
+ return false;
+}
+
+/**
+ * touch_overlay_mapped_touchscreen - check if a touchscreen area is mapped
+ * @list: pointer to the list that holds the segments
+ *
+ * Returns true if a touchscreen area is mapped or false otherwise.
+ */
+bool touch_overlay_mapped_touchscreen(struct list_head *list)
+{
+ struct touch_overlay_segment *segment;
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (!segment->key)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(touch_overlay_mapped_touchscreen);
+
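+/*
+ * Minimal probe-time sketch of how the helpers above are meant to be used
+ * together. This is only an illustration: ts, input_dev, max_x, max_y and
+ * read_resolution_from_chip are placeholders from a hypothetical driver;
+ * see the st1232 conversion later in this series for a complete example.
+ *
+ *	INIT_LIST_HEAD(&ts->touch_overlay_list);
+ *	error = touch_overlay_map(&ts->touch_overlay_list, input_dev);
+ *	if (error)
+ *		return error;
+ *
+ *	if (touch_overlay_mapped_touchscreen(&ts->touch_overlay_list))
+ *		touch_overlay_get_touchscreen_abs(&ts->touch_overlay_list,
+ *						  &max_x, &max_y);
+ *	else
+ *		error = read_resolution_from_chip(ts, &max_x, &max_y);
+ */
+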
+static bool touch_overlay_event_on_ts(struct list_head *list,
+ struct input_mt_pos *pos)
+{
+ struct touch_overlay_segment *segment;
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (segment->key)
+ continue;
+
+ if (touch_overlay_segment_event(segment, pos)) {
+ pos->x -= segment->x_origin;
+ pos->y -= segment->y_origin;
+ return true;
+ }
+ /* ignore touch events outside the defined area */
+ return false;
+ }
+
+ return true;
+}
+
+static bool touch_overlay_button_event(struct input_dev *input,
+ struct touch_overlay_segment *segment,
+ struct input_mt_pos *pos, int slot)
+{
+ struct input_mt *mt = input->mt;
+ struct input_mt_slot *s = &mt->slots[slot];
+ bool button_contact = touch_overlay_segment_event(segment, pos);
+
+ if (segment->slot == slot && segment->pressed) {
+ /* sliding out of the button releases it */
+ if (!button_contact) {
+ input_report_key(input, segment->key, false);
+ segment->pressed = false;
+ /* keep available for a possible touch event */
+ return false;
+ }
+ /* ignore sliding on the button while pressed */
+ s->frame = mt->frame;
+ return true;
+ } else if (button_contact) {
+ input_report_key(input, segment->key, true);
+ s->frame = mt->frame;
+ segment->slot = slot;
+ segment->pressed = true;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * touch_overlay_sync_frame - update the status of the segments and release
+ * buttons whose tracked slot is no longer in use.
+ * @list: pointer to the list that holds the segments
+ * @input: pointer to the associated input device
+ */
+void touch_overlay_sync_frame(struct list_head *list, struct input_dev *input)
+{
+ struct touch_overlay_segment *segment;
+ struct input_mt *mt = input->mt;
+ struct input_mt_slot *s;
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (!segment->key)
+ continue;
+
+ s = &mt->slots[segment->slot];
+ if (!input_mt_is_used(mt, s) && segment->pressed) {
+ input_report_key(input, segment->key, false);
+ segment->pressed = false;
+ }
+ }
+}
+EXPORT_SYMBOL(touch_overlay_sync_frame);
+
+/**
+ * touch_overlay_process_contact - process contacts according to the overlay
+ * mapping. This function acts as a filter so that the calling driver does
+ * not have to handle contacts that are either related to overlay buttons or
+ * outside the overlay touchscreen area, if one is defined.
+ * @list: pointer to the list that holds the segments
+ * @input: pointer to the input device associated with the contact
+ * @pos: pointer to the contact position
+ * @slot: slot associated with the contact (0 if multitouch is not supported)
+ *
+ * Returns true if the contact was processed (reported for valid key events
+ * and dropped for contacts outside the overlay touchscreen area) or false
+ * if the contact must be processed by the caller. In the latter case this
+ * function shifts the (x,y) coordinates to the overlay touchscreen origin
+ * if required.
+ */
+bool touch_overlay_process_contact(struct list_head *list,
+ struct input_dev *input,
+ struct input_mt_pos *pos, int slot)
+{
+ struct touch_overlay_segment *segment;
+ struct list_head *ptr;
+
+ /*
+ * Buttons must be prioritized over overlay touchscreens to account for
+ * overlaps, e.g. a button inside the touchscreen area.
+ */
+ list_for_each(ptr, list) {
+ segment = list_entry(ptr, struct touch_overlay_segment, list);
+ if (segment->key &&
+ touch_overlay_button_event(input, segment, pos, slot))
+ return true;
+ }
+
+ /*
+ * Valid contacts on the overlay touchscreen are left to the caller, which
+ * processes and reports them according to its own (possibly unique) features.
+ */
+ return !touch_overlay_event_on_ts(list, pos);
+}
+EXPORT_SYMBOL(touch_overlay_process_contact);
+
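+/*
+ * Minimal report-loop sketch for a hypothetical multitouch driver (ts,
+ * input, pos, slots and n_contacts are placeholders), mirroring the st1232
+ * conversion later in this series:
+ *
+ *	for (i = 0; i < n_contacts; i++) {
+ *		if (touch_overlay_process_contact(&ts->touch_overlay_list,
+ *						  input, &pos[i], slots[i]))
+ *			continue;
+ *
+ *		input_mt_slot(input, slots[i]);
+ *		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ *		input_report_abs(input, ABS_MT_POSITION_X, pos[i].x);
+ *		input_report_abs(input, ABS_MT_POSITION_Y, pos[i].y);
+ *	}
+ *
+ *	touch_overlay_sync_frame(&ts->touch_overlay_list, input);
+ *	input_mt_sync_frame(input);
+ *	input_sync(input);
+ */
+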
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Helper functions for overlay objects on touch devices");
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index f9db5cefb25b..8b4f3e3660b8 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -444,10 +444,11 @@ static int ad7879_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!(val & AD7879_GPIO_DATA);
}
-static void ad7879_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int ad7879_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct ad7879 *ts = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&ts->mutex);
if (value)
@@ -455,8 +456,10 @@ static void ad7879_gpio_set_value(struct gpio_chip *chip,
else
ts->cmd_crtl2 &= ~AD7879_GPIO_DATA;
- ad7879_write(ts, AD7879_REG_CTRL2, ts->cmd_crtl2);
+ ret = ad7879_write(ts, AD7879_REG_CTRL2, ts->cmd_crtl2);
mutex_unlock(&ts->mutex);
+
+ return ret;
}
static int ad7879_gpio_add(struct ad7879 *ts)
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 0d7bf18e2508..bf498bd4dea9 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -120,7 +120,6 @@ struct edt_ft5x06_ts_data {
struct regmap *regmap;
#if defined(CONFIG_DEBUG_FS)
- struct dentry *debug_dir;
u8 *raw_buffer;
size_t raw_bufsize;
#endif
@@ -815,23 +814,21 @@ static const struct file_operations debugfs_raw_data_fops = {
.read = edt_ft5x06_debugfs_raw_data_read,
};
-static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
- const char *debugfs_name)
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
- tsdata->debug_dir = debugfs_create_dir(debugfs_name, NULL);
+ struct dentry *debug_dir = tsdata->client->debugfs;
- debugfs_create_u16("num_x", S_IRUSR, tsdata->debug_dir, &tsdata->num_x);
- debugfs_create_u16("num_y", S_IRUSR, tsdata->debug_dir, &tsdata->num_y);
+ debugfs_create_u16("num_x", S_IRUSR, debug_dir, &tsdata->num_x);
+ debugfs_create_u16("num_y", S_IRUSR, debug_dir, &tsdata->num_y);
debugfs_create_file("mode", S_IRUSR | S_IWUSR,
- tsdata->debug_dir, tsdata, &debugfs_mode_fops);
+ debug_dir, tsdata, &debugfs_mode_fops);
debugfs_create_file("raw_data", S_IRUSR,
- tsdata->debug_dir, tsdata, &debugfs_raw_data_fops);
+ debug_dir, tsdata, &debugfs_raw_data_fops);
}
static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
- debugfs_remove_recursive(tsdata->debug_dir);
kfree(tsdata->raw_buffer);
}
@@ -842,8 +839,7 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
return -ENOSYS;
}
-static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
- const char *debugfs_name)
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
}
@@ -1349,7 +1345,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client)
if (error)
return error;
- edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
+ edt_ft5x06_ts_prepare_debugfs(tsdata);
dev_dbg(&client->dev,
"EDT FT5x06 initialized: IRQ %d, WAKE pin %d, Reset pin %d.\n",
@@ -1495,6 +1491,10 @@ static const struct edt_i2c_chip_data edt_ft8201_data = {
.max_support_points = 10,
};
+static const struct edt_i2c_chip_data edt_ft8716_data = {
+ .max_support_points = 10,
+};
+
static const struct edt_i2c_chip_data edt_ft8719_data = {
.max_support_points = 10,
};
@@ -1507,6 +1507,7 @@ static const struct i2c_device_id edt_ft5x06_ts_id[] = {
/* Note no edt- prefix for compatibility with the ft6236.c driver */
{ .name = "ft6236", .driver_data = (long)&edt_ft6236_data },
{ .name = "ft8201", .driver_data = (long)&edt_ft8201_data },
+ { .name = "ft8716", .driver_data = (long)&edt_ft8716_data },
{ .name = "ft8719", .driver_data = (long)&edt_ft8719_data },
{ /* sentinel */ }
};
@@ -1523,6 +1524,7 @@ static const struct of_device_id edt_ft5x06_of_match[] = {
/* Note focaltech vendor prefix for compatibility with ft6236.c */
{ .compatible = "focaltech,ft6236", .data = &edt_ft6236_data },
{ .compatible = "focaltech,ft8201", .data = &edt_ft8201_data },
+ { .compatible = "focaltech,ft8716", .data = &edt_ft8716_data },
{ .compatible = "focaltech,ft8719", .data = &edt_ft8719_data },
{ /* sentinel */ }
};
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index a3e8a51c9144..252dcae039f8 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -44,9 +44,11 @@
#define GOODIX_HAVE_KEY BIT(4)
#define GOODIX_BUFFER_STATUS_TIMEOUT 20
-#define RESOLUTION_LOC 1
-#define MAX_CONTACTS_LOC 5
-#define TRIGGER_LOC 6
+#define RESOLUTION_LOC 1
+#define MAX_CONTACTS_LOC 5
+#define TRIGGER_LOC 6
+
+#define GOODIX_POLL_INTERVAL_MS 17 /* 17 ms ~ 60 fps */
/* Our special handling for GPIO accesses through ACPI is x86 specific */
#if defined CONFIG_X86 && defined CONFIG_ACPI
@@ -497,6 +499,14 @@ sync:
input_sync(ts->input_dev);
}
+static void goodix_ts_work_i2c_poll(struct input_dev *input)
+{
+ struct goodix_ts_data *ts = input_get_drvdata(input);
+
+ goodix_process_events(ts);
+ goodix_i2c_write_u8(ts->client, GOODIX_READ_COOR_ADDR, 0);
+}
+
/**
* goodix_ts_irq_handler - The IRQ handler
*
@@ -513,13 +523,29 @@ static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void goodix_enable_irq(struct goodix_ts_data *ts)
+{
+ if (ts->client->irq)
+ enable_irq(ts->client->irq);
+}
+
+static void goodix_disable_irq(struct goodix_ts_data *ts)
+{
+ if (ts->client->irq)
+ disable_irq(ts->client->irq);
+}
+
static void goodix_free_irq(struct goodix_ts_data *ts)
{
- devm_free_irq(&ts->client->dev, ts->client->irq, ts);
+ if (ts->client->irq)
+ devm_free_irq(&ts->client->dev, ts->client->irq, ts);
}
static int goodix_request_irq(struct goodix_ts_data *ts)
{
+ if (!ts->client->irq)
+ return 0;
+
return devm_request_threaded_irq(&ts->client->dev, ts->client->irq,
NULL, goodix_ts_irq_handler,
ts->irq_flags, ts->client->name, ts);
@@ -1219,6 +1245,18 @@ retry_read_config:
return error;
}
+ input_set_drvdata(ts->input_dev, ts);
+
+ if (!ts->client->irq) {
+ error = input_setup_polling(ts->input_dev, goodix_ts_work_i2c_poll);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "could not set up polling mode, %d\n", error);
+ return error;
+ }
+ input_set_poll_interval(ts->input_dev, GOODIX_POLL_INTERVAL_MS);
+ }
+
error = input_register_device(ts->input_dev);
if (error) {
dev_err(&ts->client->dev,
@@ -1422,7 +1460,7 @@ static int goodix_suspend(struct device *dev)
/* We need gpio pins to suspend/resume */
if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) {
- disable_irq(client->irq);
+ goodix_disable_irq(ts);
return 0;
}
@@ -1466,7 +1504,7 @@ static int goodix_resume(struct device *dev)
int error;
if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) {
- enable_irq(client->irq);
+ goodix_enable_irq(ts);
return 0;
}
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 6475084aee1b..9b3901eec0a5 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -22,6 +22,7 @@
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/input/touch-overlay.h>
#define ST1232_TS_NAME "st1232-ts"
#define ST1633_TS_NAME "st1633-ts"
@@ -57,6 +58,7 @@ struct st1232_ts_data {
struct dev_pm_qos_request low_latency_req;
struct gpio_desc *reset_gpio;
const struct st_chip_info *chip_info;
+ struct list_head touch_overlay_list;
int read_buf_len;
u8 *read_buf;
};
@@ -156,6 +158,10 @@ static int st1232_ts_parse_and_report(struct st1232_ts_data *ts)
input_mt_assign_slots(input, slots, pos, n_contacts, 0);
for (i = 0; i < n_contacts; i++) {
+ if (touch_overlay_process_contact(&ts->touch_overlay_list,
+ input, &pos[i], slots[i]))
+ continue;
+
input_mt_slot(input, slots[i]);
input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
input_report_abs(input, ABS_MT_POSITION_X, pos[i].x);
@@ -164,6 +170,7 @@ static int st1232_ts_parse_and_report(struct st1232_ts_data *ts)
input_report_abs(input, ABS_MT_TOUCH_MAJOR, z[i]);
}
+ touch_overlay_sync_frame(&ts->touch_overlay_list, input);
input_mt_sync_frame(input);
input_sync(input);
@@ -292,18 +299,30 @@ static int st1232_ts_probe(struct i2c_client *client)
if (error)
return error;
- /* Read resolution from the chip */
- error = st1232_ts_read_resolution(ts, &max_x, &max_y);
- if (error) {
- dev_err(&client->dev,
- "Failed to read resolution: %d\n", error);
- return error;
- }
-
if (ts->chip_info->have_z)
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
ts->chip_info->max_area, 0, 0);
+ /* map overlay objects if defined in the device tree */
+ INIT_LIST_HEAD(&ts->touch_overlay_list);
+ error = touch_overlay_map(&ts->touch_overlay_list, input_dev);
+ if (error)
+ return error;
+
+ if (touch_overlay_mapped_touchscreen(&ts->touch_overlay_list)) {
+ /* Read resolution from the overlay touchscreen if defined */
+ touch_overlay_get_touchscreen_abs(&ts->touch_overlay_list,
+ &max_x, &max_y);
+ } else {
+ /* Read resolution from the chip */
+ error = st1232_ts_read_resolution(ts, &max_x, &max_y);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read resolution: %d\n", error);
+ return error;
+ }
+ }
+
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
0, max_x, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 7b5af6176de9..8de689b2c5ed 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -3638,7 +3638,7 @@ static int __init parse_ivrs_acpihid(char *str)
{
u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p, *addr;
- char acpiid[ACPIID_LEN] = {0};
+ char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
int i;
addr = strchr(str, '@');
@@ -3664,7 +3664,7 @@ static int __init parse_ivrs_acpihid(char *str)
/* We have the '@', make it the terminator to get just the acpiid */
*addr++ = 0;
- if (strlen(str) > ACPIID_LEN + 1)
+ if (strlen(str) > ACPIID_LEN)
goto not_found;
if (sscanf(str, "=%s", acpiid) != 1)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5968043ac802..2a8b46b948f0 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2997,9 +2997,9 @@ void arm_smmu_attach_commit(struct arm_smmu_attach_state *state)
/* ATS is being switched off, invalidate the entire ATC */
arm_smmu_atc_inv_master(master, IOMMU_NO_PASID);
}
- master->ats_enabled = state->ats_enabled;
arm_smmu_remove_master_domain(master, state->old_domain, state->ssid);
+ master->ats_enabled = state->ats_enabled;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index be1aaaf8cd17..378104cd395e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -301,9 +301,11 @@ static void tegra241_vintf_user_handle_error(struct tegra241_vintf *vintf)
struct iommu_vevent_tegra241_cmdqv vevent_data;
int i;
- for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++)
- vevent_data.lvcmdq_err_map[i] =
- readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
+ for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
+ u64 err = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
+
+ vevent_data.lvcmdq_err_map[i] = cpu_to_le64(err);
+ }
iommufd_viommu_report_event(viommu, IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV,
&vevent_data, sizeof(vevent_data));
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 19955e222c2b..9c3ab9d9f69a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -34,7 +34,7 @@
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
-#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+#define IS_GFX_DEVICE(pdev) pci_is_display(pdev)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c
index 2ca5809b238b..462b457ffd0c 100644
--- a/drivers/iommu/iommufd/viommu.c
+++ b/drivers/iommu/iommufd/viommu.c
@@ -339,7 +339,7 @@ iommufd_hw_queue_alloc_phys(struct iommu_hw_queue_alloc *cmd,
}
*base_pa = (page_to_pfn(pages[0]) << PAGE_SHIFT) + offset;
- kfree(pages);
+ kvfree(pages);
return access;
out_unpin:
@@ -349,7 +349,7 @@ out_detach:
out_destroy:
iommufd_access_destroy_internal(viommu->ictx, access);
out_free:
- kfree(pages);
+ kvfree(pages);
return ERR_PTR(rc);
}
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 2d0d31ba2886..0eae2f4bdc5e 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -1283,7 +1283,7 @@ static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
unsigned long *ptr;
ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size);
- if (_io_pte_none(*ptr) || !_io_pte_present(*ptr))
+ if (!ptr)
return 0;
return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1));
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 532db1de201b..b39d6f134ab2 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -998,8 +998,7 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
iommu_dma_get_resv_regions(dev, head);
}
-static const struct iommu_ops viommu_ops;
-static struct virtio_driver virtio_iommu_drv;
+static const struct bus_type *virtio_bus_type;
static int viommu_match_node(struct device *dev, const void *data)
{
@@ -1008,8 +1007,9 @@ static int viommu_match_node(struct device *dev, const void *data)
static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
- fwnode, viommu_match_node);
+ struct device *dev = bus_find_device(virtio_bus_type, NULL, fwnode,
+ viommu_match_node);
+
put_device(dev);
return dev ? dev_to_virtio(dev)->priv : NULL;
@@ -1160,6 +1160,9 @@ static int viommu_probe(struct virtio_device *vdev)
if (!viommu)
return -ENOMEM;
+ /* Borrow this for easy lookups later */
+ virtio_bus_type = dev->bus;
+
spin_lock_init(&viommu->request_lock);
ida_init(&viommu->domain_ids);
viommu->dev = dev;
@@ -1229,10 +1232,10 @@ static int viommu_probe(struct virtio_device *vdev)
if (ret)
goto err_free_vqs;
- iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
-
vdev->priv = viommu;
+ iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
+
dev_info(dev, "input address: %u bits\n",
order_base_2(viommu->geometry.aperture_end));
dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 39a6ae1d574b..6d12c6ab9ea4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -554,6 +554,7 @@ config IMX_MU_MSI
tristate "i.MX MU used as MSI controller"
depends on OF && HAS_IOMEM
depends on ARCH_MXC || COMPILE_TEST
+ depends on ARM || ARM64
default m if ARCH_MXC
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c
index 340640fdbdf6..9290ac741949 100644
--- a/drivers/irqchip/irq-gic-v5-its.c
+++ b/drivers/irqchip/irq-gic-v5-its.c
@@ -973,7 +973,6 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi
irqd = irq_get_irq_data(virq + i);
irqd_set_single_target(irqd);
irqd_set_affinity_on_activate(irqd);
- irqd_set_resend_when_in_progress(irqd);
}
return 0;
diff --git a/drivers/irqchip/irq-gic-v5-iwb.c b/drivers/irqchip/irq-gic-v5-iwb.c
index ed72fbdd4900..ad9fdc14d1c6 100644
--- a/drivers/irqchip/irq-gic-v5-iwb.c
+++ b/drivers/irqchip/irq-gic-v5-iwb.c
@@ -241,7 +241,6 @@ static int gicv5_iwb_device_probe(struct platform_device *pdev)
struct gicv5_iwb_chip_data *iwb_node;
void __iomem *iwb_base;
struct resource *res;
- int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -254,16 +253,10 @@ static int gicv5_iwb_device_probe(struct platform_device *pdev)
}
iwb_node = gicv5_iwb_init_bases(iwb_base, pdev);
- if (IS_ERR(iwb_node)) {
- ret = PTR_ERR(iwb_node);
- goto out_unmap;
- }
+ if (IS_ERR(iwb_node))
+ return PTR_ERR(iwb_node);
return 0;
-
-out_unmap:
- iounmap(iwb_base);
- return ret;
}
static const struct of_device_id gicv5_iwb_of_match[] = {
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index 454c7f16dd4d..908944009c21 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -133,13 +133,13 @@ int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
{
const struct msi_parent_ops *ops = d->msi_parent_ops;
u32 busmask = BIT(bus_token);
- struct fwnode_handle *fwh;
if (!ops)
return 0;
- fwh = d->flags & IRQ_DOMAIN_FLAG_FWNODE_PARENT ? fwnode_get_parent(fwspec->fwnode)
- : fwspec->fwnode;
+ struct fwnode_handle *fwh __free(fwnode_handle) =
+ d->flags & IRQ_DOMAIN_FLAG_FWNODE_PARENT ? fwnode_get_parent(fwspec->fwnode)
+ : fwnode_handle_get(fwspec->fwnode);
if (fwh != d->fwnode || fwspec->param_count != 0)
return 0;
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index d3232d6d8dce..54833717f8a7 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -177,6 +177,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
.ops = &gicp_domain_ops,
};
struct mvebu_gicp *gicp;
+ void __iomem *base;
int ret, i;
gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL);
@@ -236,6 +237,15 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
return -ENODEV;
}
+ base = ioremap(gicp->res->start, resource_size(gicp->res));
+ if (IS_ERR(base)) {
+ dev_err(&pdev->dev, "ioremap() failed. Unable to clear pending interrupts.\n");
+ } else {
+ for (i = 0; i < 64; i++)
+ writel(i, base + GICP_CLRSPI_NSR_OFFSET);
+ iounmap(base);
+ }
+
return msi_create_parent_irq_domain(&info, &gicp_msi_parent_ops) ? 0 : -ENOMEM;
}
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index 74a2a28f9403..643c8e459611 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -308,7 +308,6 @@ static const struct msi_parent_ops imsic_msi_parent_ops = {
int imsic_irqdomain_init(void)
{
struct irq_domain_info info = {
- .fwnode = imsic->fwnode,
.ops = &imsic_base_domain_ops,
.host_data = imsic,
};
@@ -325,6 +324,7 @@ int imsic_irqdomain_init(void)
}
/* Create Base IRQ domain */
+ info.fwnode = imsic->fwnode,
imsic->base_domain = msi_create_parent_irq_domain(&info, &imsic_msi_parent_ops);
if (!imsic->base_domain) {
pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 2b05722d4dbe..ea8a0ab47afd 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -39,12 +39,13 @@
#include "hfc_pci.h"
+static void hfcpci_softirq(struct timer_list *unused);
static const char *hfcpci_revision = "2.0";
static int HFC_cnt;
static uint debug;
static uint poll, tics;
-static struct timer_list hfc_tl;
+static DEFINE_TIMER(hfc_tl, hfcpci_softirq);
static unsigned long hfc_jiffies;
MODULE_AUTHOR("Karsten Keil");
@@ -2305,8 +2306,7 @@ hfcpci_softirq(struct timer_list *unused)
hfc_jiffies = jiffies + 1;
else
hfc_jiffies += tics;
- hfc_tl.expires = hfc_jiffies;
- add_timer(&hfc_tl);
+ mod_timer(&hfc_tl, hfc_jiffies);
}
static int __init
@@ -2332,10 +2332,8 @@ HFC_init(void)
if (poll != HFCPCI_BTRANS_THRESHOLD) {
printk(KERN_INFO "%s: Using alternative poll value of %d\n",
__func__, poll);
- timer_setup(&hfc_tl, hfcpci_softirq, 0);
- hfc_tl.expires = jiffies + tics;
- hfc_jiffies = hfc_tl.expires;
- add_timer(&hfc_tl);
+ hfc_jiffies = jiffies + tics;
+ mod_timer(&hfc_tl, hfc_jiffies);
} else
tics = 0; /* indicate the use of controller's timer */
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index c9027f9c4bb7..8923d2df4704 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -471,7 +471,7 @@ static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
gc->get_direction = sso_gpio_get_dir;
gc->direction_output = sso_gpio_dir_out;
gc->get = sso_gpio_get;
- gc->set_rv = sso_gpio_set;
+ gc->set = sso_gpio_set;
gc->label = "lgm-sso";
gc->base = -1;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7d4c071a6cd0..0344189bb991 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -473,7 +473,7 @@ static int pca9532_configure(struct i2c_client *client,
data->gpio.label = "gpio-pca9532";
data->gpio.direction_input = pca9532_gpio_direction_input;
data->gpio.direction_output = pca9532_gpio_direction_output;
- data->gpio.set_rv = pca9532_gpio_set_value;
+ data->gpio.set = pca9532_gpio_set_value;
data->gpio.get = pca9532_gpio_get_value;
data->gpio.request = pca9532_gpio_request_pin;
data->gpio.can_sleep = 1;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 70d109246088..2007fe6217ec 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -737,7 +737,7 @@ static int pca955x_probe(struct i2c_client *client)
pca955x->gpio.label = "gpio-pca955x";
pca955x->gpio.direction_input = pca955x_gpio_direction_input;
pca955x->gpio.direction_output = pca955x_gpio_direction_output;
- pca955x->gpio.set_rv = pca955x_gpio_set_value;
+ pca955x->gpio.set = pca955x_gpio_set_value;
pca955x->gpio.get = pca955x_gpio_get_value;
pca955x->gpio.request = pca955x_gpio_request_pin;
pca955x->gpio.free = pca955x_gpio_free_pin;
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 89c165c8ee9c..fd0e8bab9a4b 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -637,7 +637,7 @@ static int tca6507_probe_gpios(struct device *dev,
tca->gpio.base = -1;
tca->gpio.owner = THIS_MODULE;
tca->gpio.direction_output = tca6507_gpio_direction_output;
- tca->gpio.set_rv = tca6507_gpio_set_value;
+ tca->gpio.set = tca6507_gpio_set_value;
tca->gpio.parent = dev;
err = devm_gpiochip_add_data(dev, &tca->gpio, tca);
if (err) {
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 4fef4797b110..02432d4a5ccd 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,15 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config AST2700_MBOX
+ tristate "ASPEED AST2700 IPC driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ help
+ Mailbox driver implementation for ASPEED AST27XX SoCs. This driver
+ can be used to send messages between the different processors in the
+ SoC. The driver provides mailbox support for sending interrupts to
+ the clients. Say Y here if you want to build this driver.
+
config CV1800_MBOX
tristate "cv1800 mailbox"
depends on ARCH_SOPHGO || COMPILE_TEST
@@ -350,4 +359,14 @@ config CIX_MBOX
is unidirectional. Say Y here if you want to use the CIX Mailbox
support.
+config BCM74110_MAILBOX
+ tristate "Brcmstb BCM74110 Mailbox"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ help
+ Broadcom STB mailbox driver present starting with the brcmstb
+ BCM74110 SoCs. The mailbox is a communication channel between the
+ host processor and the coprocessor that handles various power
+ management tasks and more.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 786a46587ba1..98a68f838486 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o
+
obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o
obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
@@ -74,3 +76,5 @@ obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o
obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o
+
+obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o
diff --git a/drivers/mailbox/ast2700-mailbox.c b/drivers/mailbox/ast2700-mailbox.c
new file mode 100644
index 000000000000..83c6afe5411f
--- /dev/null
+++ b/drivers/mailbox/ast2700-mailbox.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Aspeed Technology Inc. (C) 2025. All rights reserved
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Each bit in the register represents an IPC ID */
+#define IPCR_TX_TRIG 0x00
+#define IPCR_ENABLE 0x04
+#define IPCR_STATUS 0x08
+#define RX_IRQ(n) BIT(n)
+#define RX_IRQ_MASK 0xf
+#define IPCR_DATA 0x10
+
+struct ast2700_mbox_data {
+ u8 num_chans;
+ u8 msg_size;
+};
+
+struct ast2700_mbox {
+ struct mbox_controller mbox;
+ u8 msg_size;
+ void __iomem *tx_regs;
+ void __iomem *rx_regs;
+ spinlock_t lock;
+};
+
+static inline int ch_num(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx)
+{
+ return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx));
+}
+
+static irqreturn_t ast2700_mbox_irq(int irq, void *p)
+{
+ struct ast2700_mbox *mb = p;
+ void __iomem *data_reg;
+ int num_words = mb->msg_size / sizeof(u32);
+ u32 *word_data;
+ u32 status;
+ int n, i;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mb->rx_regs + IPCR_ENABLE) &
+ readl(mb->rx_regs + IPCR_STATUS);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < mb->mbox.num_chans; ++n) {
+ struct mbox_chan *chan = &mb->mbox.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n;
+ word_data = chan->con_priv;
+ /* Read the message data */
+ for (i = 0; i < num_words; i++)
+ word_data[i] = readl(data_reg + i * sizeof(u32));
+
+ mbox_chan_received_data(chan, chan->con_priv);
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx;
+ u32 *word_data = data;
+ int num_words = mb->msg_size / sizeof(u32);
+ int i;
+
+ if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx);
+ return -ENODEV;
+ }
+
+ if (!(ast2700_mbox_tx_done(mb, idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d previous message has not been consumed yet\n", __func__, idx);
+ return -EBUSY;
+ }
+
+ /* Write the message data */
+ for (i = 0 ; i < num_words; i++)
+ writel(word_data[i], data_reg + i * sizeof(u32));
+
+ writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG);
+ dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx);
+
+ return 0;
+}
+
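+/*
+ * Client-side sketch (a hypothetical consumer, not part of this driver):
+ * each message is msg_size bytes long (0x20 on the AST2700) and is passed
+ * through the generic mailbox API as an array of u32 words. The names dev,
+ * my_rx_callback and words are placeholders.
+ *
+ *	struct mbox_client cl = {
+ *		.dev = dev,
+ *		.rx_callback = my_rx_callback,
+ *		.tx_block = true,
+ *	};
+ *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
+ *	u32 words[8] = { 0x1 };
+ *
+ *	if (!IS_ERR(chan))
+ *		mbox_send_message(chan, words);
+ */
+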
+static int ast2700_mbox_startup(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) | BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+
+ return 0;
+}
+
+static void ast2700_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) & ~BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+}
+
+static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+
+ return ast2700_mbox_tx_done(mb, idx);
+}
+
+static const struct mbox_chan_ops ast2700_mbox_chan_ops = {
+ .send_data = ast2700_mbox_send_data,
+ .startup = ast2700_mbox_startup,
+ .shutdown = ast2700_mbox_shutdown,
+ .last_tx_done = ast2700_mbox_last_tx_done,
+};
+
+static int ast2700_mbox_probe(struct platform_device *pdev)
+{
+ struct ast2700_mbox *mb;
+ const struct ast2700_mbox_data *dev_data;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ dev_data = device_get_match_data(&pdev->dev);
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ /* con_priv of each channel is used to store the message received */
+ for (int i = 0; i < dev_data->num_chans; i++) {
+ mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size,
+ sizeof(u8), GFP_KERNEL);
+ if (!mb->mbox.chans[i].con_priv)
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx");
+ if (IS_ERR(mb->tx_regs))
+ return PTR_ERR(mb->tx_regs);
+
+ mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx");
+ if (IS_ERR(mb->rx_regs))
+ return PTR_ERR(mb->rx_regs);
+
+ mb->msg_size = dev_data->msg_size;
+ mb->mbox.dev = dev;
+ mb->mbox.num_chans = dev_data->num_chans;
+ mb->mbox.ops = &ast2700_mbox_chan_ops;
+ mb->mbox.txdone_irq = false;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.txpoll_period = 5;
+ spin_lock_init(&mb->lock);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, dev_name(dev), mb);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mb->mbox);
+}
+
+static const struct ast2700_mbox_data ast2700_dev_data = {
+ .num_chans = 4,
+ .msg_size = 0x20,
+};
+
+static const struct of_device_id ast2700_mbox_of_match[] = {
+ { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match);
+
+static struct platform_driver ast2700_mbox_driver = {
+ .driver = {
+ .name = "ast2700-mailbox",
+ .of_match_table = ast2700_mbox_of_match,
+ },
+ .probe = ast2700_mbox_probe,
+};
+module_platform_driver(ast2700_mbox_driver);
+
+MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED AST2700 IPC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/bcm74110-mailbox.c b/drivers/mailbox/bcm74110-mailbox.c
new file mode 100644
index 000000000000..2e7e86f3e6a4
--- /dev/null
+++ b/drivers/mailbox/bcm74110-mailbox.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom BCM74110 Mailbox Driver
+ *
+ * Copyright (c) 2025 Broadcom
+ */
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/mailbox_controller.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+
+#define BCM_MBOX_BASE(sel) ((sel) * 0x40)
+#define BCM_MBOX_IRQ_BASE(sel) (((sel) * 0x20) + 0x800)
+
+#define BCM_MBOX_CFGA 0x0
+#define BCM_MBOX_CFGB 0x4
+#define BCM_MBOX_CFGC 0x8
+#define BCM_MBOX_CFGD 0xc
+#define BCM_MBOX_CTRL 0x10
+#define BCM_MBOX_CTRL_EN BIT(0)
+#define BCM_MBOX_CTRL_CLR BIT(1)
+#define BCM_MBOX_STATUS0 0x14
+#define BCM_MBOX_STATUS0_NOT_EMPTY BIT(28)
+#define BCM_MBOX_STATUS0_FULL BIT(29)
+#define BCM_MBOX_STATUS1 0x18
+#define BCM_MBOX_STATUS2 0x1c
+#define BCM_MBOX_WDATA 0x20
+#define BCM_MBOX_RDATA 0x28
+
+#define BCM_MBOX_IRQ_STATUS 0x0
+#define BCM_MBOX_IRQ_SET 0x4
+#define BCM_MBOX_IRQ_CLEAR 0x8
+#define BCM_MBOX_IRQ_MASK_STATUS 0xc
+#define BCM_MBOX_IRQ_MASK_SET 0x10
+#define BCM_MBOX_IRQ_MASK_CLEAR 0x14
+#define BCM_MBOX_IRQ_TIMEOUT BIT(0)
+#define BCM_MBOX_IRQ_NOT_EMPTY BIT(1)
+#define BCM_MBOX_IRQ_FULL BIT(2)
+#define BCM_MBOX_IRQ_LOW_WM BIT(3)
+#define BCM_MBOX_IRQ_HIGH_WM BIT(4)
+
+#define BCM_LINK_CODE0 0xbe0
+#define BCM_LINK_CODE1 0xbe1
+#define BCM_LINK_CODE2 0xbe2
+
+enum {
+ BCM_MSG_FUNC_LINK_START = 0,
+ BCM_MSG_FUNC_LINK_STOP,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ BCM_MSG_FUNC_MAX,
+};
+
+enum {
+ BCM_MSG_SVC_INIT = 0,
+ BCM_MSG_SVC_PMC,
+ BCM_MSG_SVC_SCMI,
+ BCM_MSG_SVC_DPFE,
+ BCM_MSG_SVC_MAX,
+};
+
+struct bcm74110_mbox_msg {
+ struct list_head list_entry;
+#define BCM_MSG_VERSION_MASK GENMASK(31, 29)
+#define BCM_MSG_VERSION 0x1
+#define BCM_MSG_REQ_MASK BIT(28)
+#define BCM_MSG_RPLY_MASK BIT(27)
+#define BCM_MSG_SVC_MASK GENMASK(26, 24)
+#define BCM_MSG_FUNC_MASK GENMASK(23, 16)
+#define BCM_MSG_LENGTH_MASK GENMASK(15, 4)
+#define BCM_MSG_SLOT_MASK GENMASK(3, 0)
+
+#define BCM_MSG_SET_FIELD(hdr, field, val) \
+ do { \
+ hdr &= ~BCM_MSG_##field##_MASK; \
+ hdr |= FIELD_PREP(BCM_MSG_##field##_MASK, val); \
+ } while (0)
+
+#define BCM_MSG_GET_FIELD(hdr, field) \
+ FIELD_GET(BCM_MSG_##field##_MASK, hdr)
+ u32 msg;
+};
+
+struct bcm74110_mbox_chan {
+ struct bcm74110_mbox *mbox;
+ bool en;
+ int slot;
+ int type;
+};
+
+struct bcm74110_mbox {
+ struct platform_device *pdev;
+ void __iomem *base;
+
+ int tx_chan;
+ int rx_chan;
+ int rx_irq;
+ struct list_head rx_svc_init_list;
+ spinlock_t rx_svc_list_lock;
+
+ struct mbox_controller controller;
+ struct bcm74110_mbox_chan *mbox_chan;
+};
+
+#define BCM74110_OFFSET_IO_WRITEL_MACRO(name, offset_base) \
+static void bcm74110_##name##_writel(struct bcm74110_mbox *mbox,\
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_WRITEL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_WRITEL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+#define BCM74110_OFFSET_IO_READL_MACRO(name, offset_base) \
+static u32 bcm74110_##name##_readl(struct bcm74110_mbox *mbox, \
+ u32 off) \
+{ \
+ return readl_relaxed(mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_READL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(rx, BCM_MBOX_BASE(mbox->rx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+static inline struct bcm74110_mbox *bcm74110_mbox_from_cntrl(
+ struct mbox_controller *cntrl)
+{
+ return container_of(cntrl, struct bcm74110_mbox, controller);
+}
+
+static void bcm74110_rx_push_init_msg(struct bcm74110_mbox *mbox, u32 val)
+{
+ struct bcm74110_mbox_msg *msg;
+
+ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
+
+ INIT_LIST_HEAD(&msg->list_entry);
+ msg->msg = val;
+
+ spin_lock(&mbox->rx_svc_list_lock);
+ list_add_tail(&msg->list_entry, &mbox->rx_svc_init_list);
+ spin_unlock(&mbox->rx_svc_list_lock);
+}
+
+static void bcm74110_rx_process_msg(struct bcm74110_mbox *mbox)
+{
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ struct mbox_chan *chan;
+ u32 msg, status;
+ int type;
+
+ do {
+ msg = bcm74110_rx_readl(mbox, BCM_MBOX_RDATA);
+ status = bcm74110_rx_readl(mbox, BCM_MBOX_STATUS0);
+
+ dev_dbg(dev, "rx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ type = BCM_MSG_GET_FIELD(msg, SVC);
+ switch (type) {
+ case BCM_MSG_SVC_INIT:
+ bcm74110_rx_push_init_msg(mbox, msg);
+ break;
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ chan = &mbox->controller.chans[type];
+ chan_priv = chan->con_priv;
+ if (chan_priv->en)
+ mbox_chan_received_data(chan, NULL);
+ else
+ dev_warn(dev, "Channel not enabled\n");
+ break;
+ default:
+ dev_warn(dev, "Unsupported msg received\n");
+ }
+ } while (status & BCM_MBOX_STATUS0_NOT_EMPTY);
+}
+
+static irqreturn_t bcm74110_mbox_isr(int irq, void *data)
+{
+ struct bcm74110_mbox *mbox = data;
+ u32 status;
+
+ status = bcm74110_irq_readl(mbox, BCM_MBOX_IRQ_STATUS);
+
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+
+ if (status & BCM_MBOX_IRQ_NOT_EMPTY)
+ bcm74110_rx_process_msg(mbox);
+ else
+ dev_warn(&mbox->pdev->dev, "Spurious interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static void bcm74110_mbox_mask_and_clear(struct bcm74110_mbox *mbox)
+{
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_MASK_SET);
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+}
+
+static int bcm74110_rx_pop_init_msg(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_for_each_entry_safe(msg, msg_tmp, &mbox->rx_svc_init_list,
+ list_entry) {
+ if (BCM_MSG_GET_FIELD(msg->msg, FUNC) == func_type) {
+ list_del(&msg->list_entry);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ if (!found)
+ return -EINVAL;
+
+ *val = msg->msg;
+ kfree(msg);
+
+ return 0;
+}
+
+static void bcm74110_rx_flush_msg(struct bcm74110_mbox *mbox)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ LIST_HEAD(list_temp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_splice_init(&mbox->rx_svc_init_list, &list_temp);
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ list_for_each_entry_safe(msg, msg_tmp, &list_temp, list_entry) {
+ list_del(&msg->list_entry);
+ kfree(msg);
+ }
+}
+
+#define BCM_DEQUEUE_TIMEOUT_MS 30
+static int bcm74110_rx_pop_init_msg_block(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ int ret, timeout = 0;
+
+ do {
+ ret = bcm74110_rx_pop_init_msg(mbox, func_type, val);
+
+ if (!ret)
+ return 0;
+
+ /* TODO: Figure out what is a good sleep here. */
+ usleep_range(1000, 2000);
+ timeout++;
+ } while (timeout < BCM_DEQUEUE_TIMEOUT_MS);
+
+ dev_warn(&mbox->pdev->dev, "Timeout waiting for service init response\n");
+ return -ETIMEDOUT;
+}
+
+static int bcm74110_mbox_create_msg(int req, int rply, int svc, int func,
+ int length, int slot)
+{
+ u32 msg = 0;
+
+ BCM_MSG_SET_FIELD(msg, REQ, req);
+ BCM_MSG_SET_FIELD(msg, RPLY, rply);
+ BCM_MSG_SET_FIELD(msg, SVC, svc);
+ BCM_MSG_SET_FIELD(msg, FUNC, func);
+ BCM_MSG_SET_FIELD(msg, LENGTH, length);
+ BCM_MSG_SET_FIELD(msg, SLOT, slot);
+
+ return msg;
+}
+
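+/*
+ * Example of how a header word is composed (taken from the link-training
+ * sequence below): the first LINK_START request is built as
+ *
+ *	msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ *				       BCM_MSG_FUNC_LINK_START,
+ *				       BCM_LINK_CODE0, BCM_MSG_SVC_INIT);
+ *
+ * i.e. REQ=1, RPLY=0, SVC=0, FUNC=0, LENGTH=0xbe0 and SLOT=0 packed into a
+ * single 32-bit word (the VERSION field is left at zero by this helper).
+ */
+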
+static int bcm74110_mbox_tx_msg(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int val;
+
+ /* We can potentially poll with timeout here instead */
+ val = bcm74110_tx_readl(mbox, BCM_MBOX_STATUS0);
+ if (val & BCM_MBOX_STATUS0_FULL) {
+ dev_err(&mbox->pdev->dev, "Mailbox full\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&mbox->pdev->dev, "tx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ bcm74110_tx_writel(mbox, msg, BCM_MBOX_WDATA);
+
+ return 0;
+}
+
+#define BCM_MBOX_LINK_TRAINING_RETRIES 5
+static int bcm74110_mbox_link_training(struct bcm74110_mbox *mbox)
+{
+ int ret, retries = 0;
+ u32 msg = 0, orig_len = 0, len = BCM_LINK_CODE0;
+
+ do {
+ switch (len) {
+ case 0:
+ retries++;
+ dev_warn(&mbox->pdev->dev,
+ "Link train failed, trying again... %d\n",
+ retries);
+ if (retries > BCM_MBOX_LINK_TRAINING_RETRIES)
+ return -EINVAL;
+ len = BCM_LINK_CODE0;
+ fallthrough;
+ case BCM_LINK_CODE0:
+ case BCM_LINK_CODE1:
+ case BCM_LINK_CODE2:
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_START,
+ len, BCM_MSG_SVC_INIT);
+ break;
+ default:
+ break;
+ }
+
+ bcm74110_mbox_tx_msg(mbox, msg);
+
+ /* No response expected for LINK_CODE2 */
+ if (len == BCM_LINK_CODE2)
+ return 0;
+
+ orig_len = len;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox,
+ BCM_MSG_GET_FIELD(msg, FUNC),
+ &msg);
+ if (ret) {
+ len = 0;
+ continue;
+ }
+
+ if ((BCM_MSG_GET_FIELD(msg, SVC) != BCM_MSG_SVC_INIT) ||
+ (BCM_MSG_GET_FIELD(msg, FUNC) != BCM_MSG_FUNC_LINK_START) ||
+ (BCM_MSG_GET_FIELD(msg, SLOT) != 0) ||
+ (BCM_MSG_GET_FIELD(msg, RPLY) != 1) ||
+ (BCM_MSG_GET_FIELD(msg, REQ) != 0)) {
+ len = 0;
+ continue;
+ }
+
+ len = BCM_MSG_GET_FIELD(msg, LENGTH);
+
+ /* Make sure sequence is good */
+ if (len != (orig_len + 1)) {
+ len = 0;
+ continue;
+ }
+ } while (1);
+
+ return -EINVAL;
+}
+
+static int bcm74110_mbox_tx_msg_and_wait_ack(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int ret;
+ u32 recv_msg;
+
+ ret = bcm74110_mbox_tx_msg(mbox, msg);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox, BCM_MSG_GET_FIELD(msg, FUNC),
+ &recv_msg);
+ if (ret)
+ return ret;
+
+ /*
+ * Modify tx message to verify rx ack.
+ * Flip RPLY/REQ for synchronous messages
+ */
+ if (BCM_MSG_GET_FIELD(msg, REQ) == 1) {
+ BCM_MSG_SET_FIELD(msg, RPLY, 1);
+ BCM_MSG_SET_FIELD(msg, REQ, 0);
+ }
+
+ if (msg != recv_msg) {
+ dev_err(&mbox->pdev->dev, "Found ack, but ack is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Each index points to 0x100 bytes of HAB memory. The IDX size counts from 0. */
+#define BCM_MBOX_HAB_MEM_IDX_START 0x30
+#define BCM_MBOX_HAB_MEM_IDX_SIZE 0x0
+static int bcm74110_mbox_shmem_init(struct bcm74110_mbox *mbox)
+{
+ u32 msg = 0;
+ int ret;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ 0, BCM_MSG_SVC_INIT);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm74110_mbox_init(struct bcm74110_mbox *mbox)
+{
+ int ret = 0;
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Clear status & restart tx/rx */
+ bcm74110_tx_writel(mbox, BCM_MBOX_CTRL_EN | BCM_MBOX_CTRL_CLR,
+ BCM_MBOX_CTRL);
+
+ /* Unmask irq */
+ bcm74110_irq_writel(mbox, BCM_MBOX_IRQ_NOT_EMPTY, BCM_MBOX_IRQ_MASK_CLEAR);
+
+ ret = bcm74110_mbox_link_training(mbox);
+ if (ret) {
+ dev_err(&mbox->pdev->dev, "Training failed\n");
+ return ret;
+ }
+
+ return bcm74110_mbox_shmem_init(mbox);
+}
+
+static int bcm74110_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+ u32 msg;
+
+ switch (chan_priv->type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ msg = bcm74110_mbox_create_msg(1, 0, chan_priv->type, 0,
+ 128 + 28, chan_priv->slot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bcm74110_mbox_tx_msg(chan_priv->mbox, msg);
+}
+
+static int bcm74110_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = true;
+
+ return 0;
+}
+
+static void bcm74110_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = false;
+}
+
+static const struct mbox_chan_ops bcm74110_mbox_chan_ops = {
+ .send_data = bcm74110_mbox_send_data,
+ .startup = bcm74110_mbox_chan_startup,
+ .shutdown = bcm74110_mbox_chan_shutdown,
+};
+
+static void bcm74110_mbox_shutdown(struct platform_device *pdev)
+{
+ struct bcm74110_mbox *mbox = dev_get_drvdata(&pdev->dev);
+ u32 msg;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_STOP,
+ 0, 0);
+
+ bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+
+ /* Even if we don't receive an ACK, let's shut it down */
+
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Flush queues */
+ bcm74110_rx_flush_msg(mbox);
+}
+
+static struct mbox_chan *bcm74110_mbox_of_xlate(struct mbox_controller *cntrl,
+ const struct of_phandle_args *p)
+{
+ struct bcm74110_mbox *mbox = bcm74110_mbox_from_cntrl(cntrl);
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ int slot, type;
+
+ if (p->args_count != 2) {
+ dev_err(dev, "Invalid arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = p->args[0];
+ slot = p->args[1];
+
+ switch (type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ if (slot > BCM_MBOX_HAB_MEM_IDX_SIZE) {
+ dev_err(dev, "Not enough shared memory\n");
+ return ERR_PTR(-EINVAL);
+ }
+ chan_priv = cntrl->chans[type].con_priv;
+ chan_priv->slot = slot;
+ chan_priv->type = type;
+ break;
+ default:
+ dev_err(dev, "Invalid channel type: %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &cntrl->chans[type];
+}
+
+static int bcm74110_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_mbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->pdev = pdev;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->base))
+ return dev_err_probe(dev, PTR_ERR(mbox->base), "Failed to iomap\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,tx", &mbox->tx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find tx channel\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,rx", &mbox->rx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find rx channel\n");
+
+ mbox->rx_irq = platform_get_irq(pdev, 0);
+ if (mbox->rx_irq < 0)
+ return mbox->rx_irq;
+
+ INIT_LIST_HEAD(&mbox->rx_svc_init_list);
+ spin_lock_init(&mbox->rx_svc_list_lock);
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ ret = devm_request_irq(dev, mbox->rx_irq, bcm74110_mbox_isr,
+ IRQF_NO_SUSPEND, pdev->name, mbox);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ mbox->controller.ops = &bcm74110_mbox_chan_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = BCM_MSG_SVC_MAX;
+ mbox->controller.of_xlate = &bcm74110_mbox_of_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+
+ mbox->mbox_chan = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->mbox_chan),
+ GFP_KERNEL);
+ if (!mbox->mbox_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < BCM_MSG_SVC_MAX; i++) {
+ mbox->mbox_chan[i].mbox = mbox;
+ mbox->controller.chans[i].con_priv = &mbox->mbox_chan[i];
+ }
+
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_mbox_init(mbox);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bcm74110_mbox_of_match[] = {
+ { .compatible = "brcm,bcm74110-mbox", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm74110_mbox_of_match);
+
+static struct platform_driver bcm74110_mbox_driver = {
+ .driver = {
+ .name = "bcm74110-mbox",
+ .of_match_table = bcm74110_mbox_of_match,
+ },
+ .probe = bcm74110_mbox_probe,
+ .shutdown = bcm74110_mbox_shutdown,
+};
+module_platform_driver(bcm74110_mbox_driver);
+
+MODULE_AUTHOR("Justin Chen <justin.chen@broadcom.com>");
+MODULE_DESCRIPTION("BCM74110 mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index ab4e8d1954a1..532929916e99 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -390,7 +390,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task = kzalloc(sizeof(*task), GFP_ATOMIC);
if (!task) {
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return -ENOMEM;
}
@@ -440,7 +440,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
list_move_tail(&task->list_entry, &thread->task_busy_list);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
@@ -488,7 +488,7 @@ done:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
}
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
@@ -528,7 +528,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
@@ -543,7 +543,7 @@ wait:
return -EFAULT;
}
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index f6714c233f5a..0a00719b2482 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -306,6 +306,22 @@ static void pcc_chan_acknowledge(struct pcc_chan_info *pchan)
pcc_chan_reg_read_modify_write(&pchan->db);
}
+static void *write_response(struct pcc_chan_info *pchan)
+{
+ struct pcc_header pcc_header;
+ void *buffer;
+ int data_len;
+
+ memcpy_fromio(&pcc_header, pchan->chan.shmem,
+ sizeof(pcc_header));
+ data_len = pcc_header.length - sizeof(u32) + sizeof(struct pcc_header);
+
+ buffer = pchan->chan.rx_alloc(pchan->chan.mchan->cl, data_len);
+ if (buffer != NULL)
+ memcpy_fromio(buffer, pchan->chan.shmem, data_len);
+ return buffer;
+}
+
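+/*
+ * Hypothetical client-side sketch of the buffered-RX flow introduced above
+ * (cl, subspace_id and my_rx_alloc are placeholders; rx_alloc is called
+ * from interrupt context, hence GFP_ATOMIC):
+ *
+ *	static void *my_rx_alloc(struct mbox_client *cl, int size)
+ *	{
+ *		return kmalloc(size, GFP_ATOMIC);
+ *	}
+ *	...
+ *	pcc_chan = pcc_mbox_request_channel(&cl, subspace_id);
+ *	pcc_chan->rx_alloc = my_rx_alloc;
+ *
+ * The allocated buffer is then handed to the client's rx_callback via
+ * mbox_chan_received_data() in pcc_mbox_irq() below.
+ */
+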
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
* @irq: interrupt number
@@ -317,6 +333,8 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
+ struct pcc_header *pcc_header = chan->active_req;
+ void *handle = NULL;
pchan = chan->con_priv;
@@ -340,7 +358,17 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
* required to avoid any possible race in updatation of this flag.
*/
pchan->chan_in_use = false;
- mbox_chan_received_data(chan, NULL);
+
+ if (pchan->chan.rx_alloc)
+ handle = write_response(pchan);
+
+ if (chan->active_req) {
+ pcc_header = chan->active_req;
+ if (pcc_header->flags & PCC_CMD_COMPLETION_NOTIFY)
+ mbox_chan_txdone(chan, 0);
+ }
+
+ mbox_chan_received_data(chan, handle);
pcc_chan_acknowledge(pchan);
@@ -384,9 +412,24 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
pcc_mchan = &pchan->chan;
pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr,
pcc_mchan->shmem_size);
- if (pcc_mchan->shmem)
- return pcc_mchan;
+ if (!pcc_mchan->shmem)
+ goto err;
+
+ pcc_mchan->manage_writes = false;
+
+ /*
+ * This write indicates that the channel is ready to accept messages.
+ * It needs to happen after the channel has registered its callback,
+ * but the mailbox API provides no hook at that point. This implies
+ * that the mailbox client must have set the allocate (rx_alloc)
+ * callback before sending any messages.
+ */
+ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+
+ return pcc_mchan;
+err:
mbox_free_channel(chan);
return ERR_PTR(-ENXIO);
}
@@ -417,8 +460,38 @@ void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+static int pcc_write_to_buffer(struct mbox_chan *chan, void *data)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ struct pcc_mbox_chan *pcc_mbox_chan = &pchan->chan;
+ struct pcc_header *pcc_header = data;
+
+ if (!pchan->chan.manage_writes)
+ return 0;
+
+ /*
+ * The PCC header length includes the command field but not the
+ * other values from the header.
+ */
+ int len = pcc_header->length - sizeof(u32) + sizeof(struct pcc_header);
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (!val) {
+ pr_info("%s: pchan->cmd_complete not set\n", __func__);
+ return -1;
+ }
+ memcpy_toio(pcc_mbox_chan->shmem, data, len);
+ return 0;
+}
+
+
/**
- * pcc_send_data - Called from Mailbox Controller code. Used
+ * pcc_send_data - Called from Mailbox Controller code. If
+ * pchan->chan.rx_alloc is set, then the command complete
+ * flag is checked and the data is written to the shared
+ * buffer I/O memory.
+ *
+ * If pchan->chan.rx_alloc is not set, then this function is
+ * used only to ring the channel doorbell. The PCC client
* specific read/write is done in the client driver in
* order to maintain atomicity over PCC channel once
@@ -434,17 +507,37 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
int ret;
struct pcc_chan_info *pchan = chan->con_priv;
+ ret = pcc_write_to_buffer(chan, data);
+ if (ret)
+ return ret;
+
ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
if (ret)
return ret;
ret = pcc_chan_reg_read_modify_write(&pchan->db);
+
if (!ret && pchan->plat_irq > 0)
pchan->chan_in_use = true;
return ret;
}
+
+static bool pcc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+
+ return !!val;
+}
+
/**
* pcc_startup - Called from Mailbox Controller code. Used here
* to request the interrupt.
@@ -490,6 +583,7 @@ static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
.startup = pcc_startup,
.shutdown = pcc_shutdown,
+ .last_tx_done = pcc_last_tx_done,
};
/**
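Taken together, the pcc.c hunks let a mailbox client receive its own copy of the shared-memory response and let the PCC driver manage the transmit-side write. A minimal client sketch of that flow, assuming the new rx_alloc/manage_writes fields and struct pcc_header are visible through <acpi/pcc.h>; the my_* names, the subspace id and the GFP flag are illustrative, and whether the client or a later patch is meant to flip manage_writes is outside these hunks:

#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/slab.h>
#include <acpi/pcc.h>

static void *my_rx_alloc(struct mbox_client *cl, int size)
{
	/* Called from pcc_mbox_irq(), so the allocation must be atomic. */
	return kmalloc(size, GFP_ATOMIC);
}

static void my_rx_callback(struct mbox_client *cl, void *msg)
{
	struct pcc_header *hdr = msg;	/* full copy made by write_response() */

	pr_debug("PCC response received, length %u\n", hdr->length);
	kfree(msg);	/* the client owns the buffer it allocated */
}

static struct mbox_client my_cl = {
	.rx_callback = my_rx_callback,
};

static int my_setup(struct device *dev, int subspace_id)
{
	struct pcc_mbox_chan *pcc_chan;

	my_cl.dev = dev;
	pcc_chan = pcc_mbox_request_channel(&my_cl, subspace_id);
	if (IS_ERR(pcc_chan))
		return PTR_ERR(pcc_chan);

	/* Opt in to driver-managed reads (IRQ path) and writes (send path). */
	pcc_chan->rx_alloc = my_rx_alloc;
	pcc_chan->manage_writes = true;
	return 0;
}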
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index ea44ffb5ce1a..d957d989c0ce 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -312,8 +312,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- ipcc->irq_domain = irq_domain_create_tree(of_fwnode_handle(pdev->dev.of_node),
- &qcom_ipcc_irq_ops, ipcc);
+ ipcc->irq_domain = irq_domain_create_tree(dev_fwnode(&pdev->dev), &qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c711db6f8f5c..cf17fd46e255 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -215,16 +215,19 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
}
if (test_bit(DROP_WRITES, &fc->flags) &&
- (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
+ fc->random_write_corrupt)) {
ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
} else if (test_bit(ERROR_WRITES, &fc->flags) &&
- (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == WRITE) ||
+ fc->random_write_corrupt)) {
ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
} else if (test_bit(ERROR_READS, &fc->flags) &&
- (fc->corrupt_bio_rw == READ || fc->random_read_corrupt)) {
+ ((fc->corrupt_bio_byte && fc->corrupt_bio_rw == READ) ||
+ fc->random_read_corrupt)) {
ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set";
return -EINVAL;
}
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index b90f34259fbb..8b50c908c6f4 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -241,10 +241,11 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
/*
* First retrieve the target metadata.
*/
- scnprintf(target_metadata_buf, DM_IMA_TARGET_METADATA_BUF_LEN,
- "target_index=%d,target_begin=%llu,target_len=%llu,",
- i, ti->begin, ti->len);
- target_metadata_buf_len = strlen(target_metadata_buf);
+ target_metadata_buf_len =
+ scnprintf(target_metadata_buf,
+ DM_IMA_TARGET_METADATA_BUF_LEN,
+ "target_index=%d,target_begin=%llu,target_len=%llu,",
+ i, ti->begin, ti->len);
/*
* Then retrieve the actual target data.
@@ -448,11 +449,9 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
if (r)
goto error;
- scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
- "%sname=%s,uuid=%s;device_resume=no_data;",
- DM_IMA_VERSION_STR, dev_name, dev_uuid);
- l = strlen(device_table_data);
-
+ l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+ "%sname=%s,uuid=%s;device_resume=no_data;",
+ DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
capacity_len = strlen(capacity_str);
@@ -561,10 +560,9 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
goto error;
- scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
- "%sname=%s,uuid=%s;device_remove=no_data;",
- DM_IMA_VERSION_STR, dev_name, dev_uuid);
- l = strlen(device_table_data);
+ l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+ "%sname=%s,uuid=%s;device_remove=no_data;",
+ DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
memcpy(device_table_data + l, remove_all_str, remove_all_len);
@@ -647,10 +645,9 @@ void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
goto error2;
- scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
- "%sname=%s,uuid=%s;table_clear=no_data;",
- DM_IMA_VERSION_STR, dev_name, dev_uuid);
- l = strlen(device_table_data);
+ l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
+ "%sname=%s,uuid=%s;table_clear=no_data;",
+ DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
capacity_len = strlen(capacity_str);
@@ -706,7 +703,7 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
char *old_device_data = NULL, *new_device_data = NULL, *combined_device_data = NULL;
char *new_dev_name = NULL, *new_dev_uuid = NULL, *capacity_str = NULL;
bool noio = true;
- int r;
+ int r, len;
if (dm_ima_alloc_and_copy_device_data(md, &new_device_data,
md->ima.active_table.num_targets, noio))
@@ -728,12 +725,11 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
md->ima.active_table.device_metadata = new_device_data;
md->ima.active_table.device_metadata_len = strlen(new_device_data);
- scnprintf(combined_device_data, DM_IMA_DEVICE_BUF_LEN * 2,
- "%s%snew_name=%s,new_uuid=%s;%s", DM_IMA_VERSION_STR, old_device_data,
- new_dev_name, new_dev_uuid, capacity_str);
+ len = scnprintf(combined_device_data, DM_IMA_DEVICE_BUF_LEN * 2,
+ "%s%snew_name=%s,new_uuid=%s;%s", DM_IMA_VERSION_STR, old_device_data,
+ new_dev_name, new_dev_uuid, capacity_str);
- dm_ima_measure_data("dm_device_rename", combined_device_data, strlen(combined_device_data),
- noio);
+ dm_ima_measure_data("dm_device_rename", combined_device_data, len, noio);
goto exit;
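All of the dm-ima conversions above rely on the same property: scnprintf() returns the number of characters it actually stored in the buffer (excluding the terminating NUL), which is exactly what the removed strlen() calls recomputed. A trivial sketch of the pattern, with hypothetical buffer and field names:

static int fill_metadata(char *buf, size_t size,
			 const char *name, const char *uuid)
{
	/*
	 * scnprintf() never writes more than @size bytes including the
	 * NUL and returns the length of the resulting string, so the
	 * return value equals a subsequent strlen(buf).
	 */
	return scnprintf(buf, size, "name=%s,uuid=%s;", name, uuid);
}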
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index 3e4cb81ce512..d0b883fabfeb 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -117,16 +117,16 @@ int dm_register_path_selector(struct path_selector_type *pst)
}
EXPORT_SYMBOL_GPL(dm_register_path_selector);
-int dm_unregister_path_selector(struct path_selector_type *pst)
+void dm_unregister_path_selector(struct path_selector_type *pst)
{
struct ps_internal *psi;
down_write(&_ps_lock);
psi = __find_path_selector_type(pst->name);
- if (!psi) {
+ if (WARN_ON(!psi)) {
up_write(&_ps_lock);
- return -EINVAL;
+ return;
}
list_del(&psi->list);
@@ -134,7 +134,5 @@ int dm_unregister_path_selector(struct path_selector_type *pst)
up_write(&_ps_lock);
kfree(psi);
-
- return 0;
}
EXPORT_SYMBOL_GPL(dm_unregister_path_selector);
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index 3861b2d8b963..7b2270532e64 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -96,7 +96,7 @@ struct path_selector_type {
int dm_register_path_selector(struct path_selector_type *type);
/* Unregister a path selector */
-int dm_unregister_path_selector(struct path_selector_type *type);
+void dm_unregister_path_selector(struct path_selector_type *type);
/* Returns a registered path selector type */
struct path_selector_type *dm_get_path_selector(const char *name);
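The hunks below apply one pattern to every path selector module: bail out of module init when registration fails, and call the now-void unregister helper on exit. A consolidated sketch of that shape; example_ps and its callbacks are placeholders, not an existing selector:

#define DM_MSG_PREFIX "example-ps"

#include "dm-path-selector.h"
#include <linux/module.h>

static struct path_selector_type example_ps = {
	.name = "example",
	.module = THIS_MODULE,
	/* .create, .destroy, .select_path, ... omitted in this sketch */
};

static int __init dm_example_init(void)
{
	int r = dm_register_path_selector(&example_ps);

	if (r < 0) {
		DMERR("register failed %d", r);
		return r;	/* do not leave a half-initialized module */
	}
	return 0;
}

static void __exit dm_example_exit(void)
{
	/* Unregistering cannot fail anymore; a missing type is a WARN_ON(). */
	dm_unregister_path_selector(&example_ps);
}

module_init(dm_example_init);
module_exit(dm_example_exit);
MODULE_LICENSE("GPL");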
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index b49e10d76d03..f07e773d9cc0 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -541,8 +541,10 @@ static int __init dm_hst_init(void)
{
int r = dm_register_path_selector(&hst_ps);
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ return r;
+ }
DMINFO("version " HST_VERSION " loaded");
@@ -551,10 +553,7 @@ static int __init dm_hst_init(void)
static void __exit dm_hst_exit(void)
{
- int r = dm_unregister_path_selector(&hst_ps);
-
- if (r < 0)
- DMERR("unregister failed %d", r);
+ dm_unregister_path_selector(&hst_ps);
}
module_init(dm_hst_init);
diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
index 716807e511ee..80415a045c68 100644
--- a/drivers/md/dm-ps-io-affinity.c
+++ b/drivers/md/dm-ps-io-affinity.c
@@ -260,10 +260,7 @@ static int __init dm_ioa_init(void)
static void __exit dm_ioa_exit(void)
{
- int ret = dm_unregister_path_selector(&ioa_ps);
-
- if (ret < 0)
- DMERR("unregister failed %d", ret);
+ dm_unregister_path_selector(&ioa_ps);
}
module_init(dm_ioa_init);
diff --git a/drivers/md/dm-ps-queue-length.c b/drivers/md/dm-ps-queue-length.c
index e305f05ad1e5..9c68701ed7a4 100644
--- a/drivers/md/dm-ps-queue-length.c
+++ b/drivers/md/dm-ps-queue-length.c
@@ -260,8 +260,10 @@ static int __init dm_ql_init(void)
{
int r = dm_register_path_selector(&ql_ps);
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ return r;
+ }
DMINFO("version " QL_VERSION " loaded");
@@ -270,10 +272,7 @@ static int __init dm_ql_init(void)
static void __exit dm_ql_exit(void)
{
- int r = dm_unregister_path_selector(&ql_ps);
-
- if (r < 0)
- DMERR("unregister failed %d", r);
+ dm_unregister_path_selector(&ql_ps);
}
module_init(dm_ql_init);
diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c
index d1745b123dc1..0c12f4073461 100644
--- a/drivers/md/dm-ps-round-robin.c
+++ b/drivers/md/dm-ps-round-robin.c
@@ -220,8 +220,10 @@ static int __init dm_rr_init(void)
{
int r = dm_register_path_selector(&rr_ps);
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ return r;
+ }
DMINFO("version " RR_VERSION " loaded");
@@ -230,10 +232,7 @@ static int __init dm_rr_init(void)
static void __exit dm_rr_exit(void)
{
- int r = dm_unregister_path_selector(&rr_ps);
-
- if (r < 0)
- DMERR("unregister failed %d", r);
+ dm_unregister_path_selector(&rr_ps);
}
module_init(dm_rr_init);
diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c
index 969d31c40272..0543fe7969c4 100644
--- a/drivers/md/dm-ps-service-time.c
+++ b/drivers/md/dm-ps-service-time.c
@@ -341,8 +341,10 @@ static int __init dm_st_init(void)
{
int r = dm_register_path_selector(&st_ps);
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ return r;
+ }
DMINFO("version " ST_VERSION " loaded");
@@ -351,10 +353,7 @@ static int __init dm_st_init(void)
static void __exit dm_st_exit(void)
{
- int r = dm_unregister_path_selector(&st_ps);
-
- if (r < 0)
- DMERR("unregister failed %d", r);
+ dm_unregister_path_selector(&st_ps);
}
module_init(dm_st_init);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index e8c0a8c6fb51..79ea85d18e24 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -14,7 +14,6 @@
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"
-#include "dm-core.h"
#include <linux/device-mapper.h>
@@ -439,7 +438,7 @@ static bool rs_is_reshapable(struct raid_set *rs)
/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
- return rs->md.recovery_cp < rs->md.dev_sectors;
+ return rs->md.resync_offset < rs->md.dev_sectors;
}
/* Return true, if raid set in @rs is reshaping */
@@ -769,7 +768,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
rs->md.layout = raid_type->algorithm;
rs->md.new_layout = rs->md.layout;
rs->md.delta_disks = 0;
- rs->md.recovery_cp = MaxSector;
+ rs->md.resync_offset = MaxSector;
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
@@ -913,7 +912,7 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
rs->md.external = 0;
rs->md.persistent = 1;
rs->md.major_version = 2;
- } else if (rebuild && !rs->md.recovery_cp) {
+ } else if (rebuild && !rs->md.resync_offset) {
/*
* Without metadata, we will not be able to tell if the array
* is in-sync or not - we must assume it is not. Therefore,
@@ -1696,20 +1695,20 @@ static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
/* raid0 does not recover */
if (rs_is_raid0(rs))
- rs->md.recovery_cp = MaxSector;
+ rs->md.resync_offset = MaxSector;
/*
* A raid6 set has to be recovered either
* completely or for the grown part to
* ensure proper parity and Q-Syndrome
*/
else if (rs_is_raid6(rs))
- rs->md.recovery_cp = dev_sectors;
+ rs->md.resync_offset = dev_sectors;
/*
* Other raid set types may skip recovery
* depending on the 'nosync' flag.
*/
else
- rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
+ rs->md.resync_offset = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
? MaxSector : dev_sectors;
}
@@ -2144,7 +2143,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->events = cpu_to_le64(mddev->events);
sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
- sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
+ sb->array_resync_offset = cpu_to_le64(mddev->resync_offset);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
@@ -2335,18 +2334,18 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
}
if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
- mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
+ mddev->resync_offset = le64_to_cpu(sb->array_resync_offset);
/*
* During load, we set FirstUse if a new superblock was written.
* There are two reasons we might not have a superblock:
* 1) The raid set is brand new - in which case, all of the
* devices must have their In_sync bit set. Also,
- * recovery_cp must be 0, unless forced.
+ * resync_offset must be 0, unless forced.
* 2) This is a new device being added to an old raid set
* and the new device needs to be rebuilt - in which
* case the In_sync bit will /not/ be set and
- * recovery_cp must be MaxSector.
+ * resync_offset must be MaxSector.
* 3) This is/are a new device(s) being added to an old
* raid set during takeover to a higher raid level
* to provide capacity for redundancy or during reshape
@@ -2391,8 +2390,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
new_devs > 1 ? "s" : "");
return -EINVAL;
} else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
- DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
- (unsigned long long) mddev->recovery_cp);
+ DMERR("'rebuild' specified while raid set is not in-sync (resync_offset=%llu)",
+ (unsigned long long) mddev->resync_offset);
return -EINVAL;
} else if (rs_is_reshaping(rs)) {
DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
@@ -2532,6 +2531,10 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
struct md_rdev *rdev, *freshest;
struct mddev *mddev = &rs->md;
+ /* Respect resynchronization requested with "sync" argument. */
+ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
+ set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+
freshest = NULL;
rdev_for_each(rdev, mddev) {
if (test_bit(Journal, &rdev->flags))
@@ -2697,11 +2700,11 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
}
out:
/*
- * Raise recovery_cp in case data_offset != 0 to
+ * Raise resync_offset in case data_offset != 0 to
* avoid false recovery positives in the constructor.
*/
- if (rs->md.recovery_cp < rs->md.dev_sectors)
- rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
+ if (rs->md.resync_offset < rs->md.dev_sectors)
+ rs->md.resync_offset += rs->dev[0].rdev.data_offset;
/* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
rdev_for_each(rdev, &rs->md) {
@@ -2756,7 +2759,7 @@ static int rs_setup_takeover(struct raid_set *rs)
}
clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
while (d--) {
rdev = &rs->dev[d].rdev;
@@ -2764,7 +2767,7 @@ static int rs_setup_takeover(struct raid_set *rs)
if (test_bit(d, (void *) rs->rebuild_disks)) {
clear_bit(In_sync, &rdev->flags);
clear_bit(Faulty, &rdev->flags);
- mddev->recovery_cp = rdev->recovery_offset = 0;
+ mddev->resync_offset = rdev->recovery_offset = 0;
/* Bitmap has to be created when we do an "up" takeover */
set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
}
@@ -3222,7 +3225,7 @@ size_check:
if (r)
goto bad;
- rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
+ rs_setup_recovery(rs, rs->md.resync_offset < rs->md.dev_sectors ? rs->md.resync_offset : rs->md.dev_sectors);
} else {
/* This is no size change or it is shrinking, update size and record in superblocks */
r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
@@ -3305,7 +3308,7 @@ size_check:
/* Disable/enable discard support on raid set. */
configure_discard_support(rs);
- rs->md.dm_gendisk = ti->table->md->disk;
+ rs->md.dm_gendisk = dm_disk(dm_table_get_md(ti->table));
mddev_unlock(&rs->md);
return 0;
@@ -3446,7 +3449,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
} else {
if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
- r = mddev->recovery_cp;
+ r = mddev->resync_offset;
else
r = mddev->curr_resync_completed;
@@ -4074,9 +4077,9 @@ static int raid_preresume(struct dm_target *ti)
}
/* Check for any resize/reshape on @rs and adjust/initiate */
- if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
+ if (mddev->resync_offset && mddev->resync_offset < MaxSector) {
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
- mddev->resync_min = mddev->recovery_cp;
+ mddev->resync_min = mddev->resync_offset;
if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
mddev->resync_max_sectors = mddev->dev_sectors;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d9d5e6aa5707..ad0a60a07b93 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -899,17 +899,17 @@ static bool dm_table_supports_dax(struct dm_table *t,
return true;
}
-static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_is_not_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct block_device *bdev = dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
/* request-based cannot stack on partitions! */
if (bdev_is_partition(bdev))
- return false;
+ return true;
- return queue_is_mq(q);
+ return !queue_is_mq(q);
}
static int dm_table_determine_type(struct dm_table *t)
@@ -1005,7 +1005,7 @@ verify_rq_based:
/* Non-request-stackable devices can't be used for request-based dm */
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_is_rq_stackable, NULL)) {
+ ti->type->iterate_devices(ti, device_is_not_rq_stackable, NULL)) {
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
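The rename to device_is_not_rq_stackable() is not cosmetic: ti->type->iterate_devices() stops and returns as soon as the callback returns non-zero, so the old positive predicate could only prove that some device was request-stackable, not that all of them were. Phrasing the callback as a violation test gives the intended "reject if any device fails" semantics. A generic sketch of the idiom; device_has_violation() and the wrapper are stand-ins, not code from the patch:

/* Return non-zero when @dev violates the property being verified. */
static int device_has_violation(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	return bdev_is_partition(dev->bdev);	/* example rule */
}

static bool dm_table_devices_are_ok(struct dm_table *t)
{
	unsigned int i;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/* Reject if the target can't iterate or any device fails. */
		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_has_violation, NULL))
			return false;
	}
	return true;
}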
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 05cf4e3f2bbe..007bb93e5fca 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4111,8 +4111,8 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
- DM_TARGET_IMMUTABLE,
- .version = {1, 23, 0},
+ DM_TARGET_IMMUTABLE | DM_TARGET_PASSES_CRYPTO,
+ .version = {1, 24, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4497,7 +4497,8 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 23, 0},
+ .features = DM_TARGET_PASSES_CRYPTO,
+ .version = {1, 24, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c
index ae11941c90a9..0613c82bbe8e 100644
--- a/drivers/md/dm-vdo/funnel-workqueue.c
+++ b/drivers/md/dm-vdo/funnel-workqueue.c
@@ -252,8 +252,7 @@ static void service_work_queue(struct simple_work_queue *queue)
* This speeds up some performance tests; that "other work" might include other VDO
* threads.
*/
- if (need_resched())
- cond_resched();
+ cond_resched();
}
run_finish_hook(queue);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 631a887b487c..d382a390d39a 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -191,7 +191,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
u8 *want_digest, u8 *data)
{
if (unlikely(verity_hash(v, io, data, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io), true)))
+ verity_io_real_digest(v, io))))
return 0;
return memcmp(verity_io_real_digest(v, io), want_digest,
@@ -392,7 +392,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
/* Always re-validate the corrected block against the expected hash */
r = verity_hash(v, io, fio->output, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io), true);
+ verity_io_real_digest(v, io));
if (unlikely(r < 0))
return r;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 81186bded1ce..66a00a8ccb39 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -19,7 +19,6 @@
#include "dm-audit.h"
#include <linux/module.h>
#include <linux/reboot.h>
-#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/jump_label.h>
#include <linux/security.h>
@@ -61,9 +60,6 @@ module_param_array_named(use_bh_bytes, dm_verity_use_bh_bytes, uint, NULL, 0644)
static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
-/* Is at least one dm-verity instance using ahash_tfm instead of shash_tfm? */
-static DEFINE_STATIC_KEY_FALSE(ahash_enabled);
-
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
@@ -118,100 +114,21 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
return block >> (level * v->hash_per_block_bits);
}
-static int verity_ahash_update(struct dm_verity *v, struct ahash_request *req,
- const u8 *data, size_t len,
- struct crypto_wait *wait)
-{
- struct scatterlist sg;
-
- if (likely(!is_vmalloc_addr(data))) {
- sg_init_one(&sg, data, len);
- ahash_request_set_crypt(req, &sg, NULL, len);
- return crypto_wait_req(crypto_ahash_update(req), wait);
- }
-
- do {
- int r;
- size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
-
- flush_kernel_vmap_range((void *)data, this_step);
- sg_init_table(&sg, 1);
- sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
- ahash_request_set_crypt(req, &sg, NULL, this_step);
- r = crypto_wait_req(crypto_ahash_update(req), wait);
- if (unlikely(r))
- return r;
- data += this_step;
- len -= this_step;
- } while (len);
-
- return 0;
-}
-
-/*
- * Wrapper for crypto_ahash_init, which handles verity salting.
- */
-static int verity_ahash_init(struct dm_verity *v, struct ahash_request *req,
- struct crypto_wait *wait, bool may_sleep)
-{
- int r;
-
- ahash_request_set_tfm(req, v->ahash_tfm);
- ahash_request_set_callback(req,
- may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
- crypto_req_done, (void *)wait);
- crypto_init_wait(wait);
-
- r = crypto_wait_req(crypto_ahash_init(req), wait);
-
- if (unlikely(r < 0)) {
- if (r != -ENOMEM)
- DMERR("crypto_ahash_init failed: %d", r);
- return r;
- }
-
- if (likely(v->salt_size && (v->version >= 1)))
- r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
-
- return r;
-}
-
-static int verity_ahash_final(struct dm_verity *v, struct ahash_request *req,
- u8 *digest, struct crypto_wait *wait)
-{
- int r;
-
- if (unlikely(v->salt_size && (!v->version))) {
- r = verity_ahash_update(v, req, v->salt, v->salt_size, wait);
-
- if (r < 0) {
- DMERR("%s failed updating salt: %d", __func__, r);
- goto out;
- }
- }
-
- ahash_request_set_crypt(req, NULL, digest, 0);
- r = crypto_wait_req(crypto_ahash_final(req), wait);
-out:
- return r;
-}
-
int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
- const u8 *data, size_t len, u8 *digest, bool may_sleep)
+ const u8 *data, size_t len, u8 *digest)
{
+ struct shash_desc *desc = &io->hash_desc;
int r;
- if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm) {
- struct ahash_request *req = verity_io_hash_req(v, io);
- struct crypto_wait wait;
-
- r = verity_ahash_init(v, req, &wait, may_sleep) ?:
- verity_ahash_update(v, req, data, len, &wait) ?:
- verity_ahash_final(v, req, digest, &wait);
+ desc->tfm = v->shash_tfm;
+ if (unlikely(!v->initial_hashstate)) {
+ /* Version 0: salt at end */
+ r = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, data, len) ?:
+ crypto_shash_update(desc, v->salt, v->salt_size) ?:
+ crypto_shash_final(desc, digest);
} else {
- struct shash_desc *desc = verity_io_hash_req(v, io);
-
- desc->tfm = v->shash_tfm;
+ /* Version 1: salt at beginning */
r = crypto_shash_import(desc, v->initial_hashstate) ?:
crypto_shash_finup(desc, data, len, digest);
}
@@ -362,7 +279,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
}
r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
- verity_io_real_digest(v, io), !io->in_bh);
+ verity_io_real_digest(v, io));
if (unlikely(r < 0))
goto release_ret_r;
@@ -465,7 +382,7 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
goto free_ret;
r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io), true);
+ verity_io_real_digest(v, io));
if (unlikely(r))
goto free_ret;
@@ -581,7 +498,7 @@ static int verity_verify_io(struct dm_verity_io *io)
}
r = verity_hash(v, io, data, block_size,
- verity_io_real_digest(v, io), !io->in_bh);
+ verity_io_real_digest(v, io));
if (unlikely(r < 0)) {
kunmap_local(data);
return r;
@@ -1092,12 +1009,7 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->zero_digest);
verity_free_sig(v);
- if (v->ahash_tfm) {
- static_branch_dec(&ahash_enabled);
- crypto_free_ahash(v->ahash_tfm);
- } else {
- crypto_free_shash(v->shash_tfm);
- }
+ crypto_free_shash(v->shash_tfm);
kfree(v->alg_name);
@@ -1157,7 +1069,8 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
if (!v->zero_digest)
return r;
- io = kmalloc(sizeof(*io) + v->hash_reqsize, GFP_KERNEL);
+ io = kmalloc(sizeof(*io) + crypto_shash_descsize(v->shash_tfm),
+ GFP_KERNEL);
if (!io)
return r; /* verity_dtr will free zero_digest */
@@ -1168,7 +1081,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
goto out;
r = verity_hash(v, io, zero_data, 1 << v->data_dev_block_bits,
- v->zero_digest, true);
+ v->zero_digest);
out:
kfree(io);
@@ -1324,9 +1237,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
{
struct dm_target *ti = v->ti;
- struct crypto_ahash *ahash;
- struct crypto_shash *shash = NULL;
- const char *driver_name;
+ struct crypto_shash *shash;
v->alg_name = kstrdup(alg_name, GFP_KERNEL);
if (!v->alg_name) {
@@ -1334,50 +1245,14 @@ static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
return -ENOMEM;
}
- /*
- * Allocate the hash transformation object that this dm-verity instance
- * will use. The vast majority of dm-verity users use CPU-based
- * hashing, so when possible use the shash API to minimize the crypto
- * API overhead. If the ahash API resolves to a different driver
- * (likely an off-CPU hardware offload), use ahash instead. Also use
- * ahash if the obsolete dm-verity format with the appended salt is
- * being used, so that quirk only needs to be handled in one place.
- */
- ahash = crypto_alloc_ahash(alg_name, 0,
- v->use_bh_wq ? CRYPTO_ALG_ASYNC : 0);
- if (IS_ERR(ahash)) {
+ shash = crypto_alloc_shash(alg_name, 0, 0);
+ if (IS_ERR(shash)) {
ti->error = "Cannot initialize hash function";
- return PTR_ERR(ahash);
- }
- driver_name = crypto_ahash_driver_name(ahash);
- if (v->version >= 1 /* salt prepended, not appended? */) {
- shash = crypto_alloc_shash(alg_name, 0, 0);
- if (!IS_ERR(shash) &&
- strcmp(crypto_shash_driver_name(shash), driver_name) != 0) {
- /*
- * ahash gave a different driver than shash, so probably
- * this is a case of real hardware offload. Use ahash.
- */
- crypto_free_shash(shash);
- shash = NULL;
- }
- }
- if (!IS_ERR_OR_NULL(shash)) {
- crypto_free_ahash(ahash);
- ahash = NULL;
- v->shash_tfm = shash;
- v->digest_size = crypto_shash_digestsize(shash);
- v->hash_reqsize = sizeof(struct shash_desc) +
- crypto_shash_descsize(shash);
- DMINFO("%s using shash \"%s\"", alg_name, driver_name);
- } else {
- v->ahash_tfm = ahash;
- static_branch_inc(&ahash_enabled);
- v->digest_size = crypto_ahash_digestsize(ahash);
- v->hash_reqsize = sizeof(struct ahash_request) +
- crypto_ahash_reqsize(ahash);
- DMINFO("%s using ahash \"%s\"", alg_name, driver_name);
+ return PTR_ERR(shash);
}
+ v->shash_tfm = shash;
+ v->digest_size = crypto_shash_digestsize(shash);
+ DMINFO("%s using \"%s\"", alg_name, crypto_shash_driver_name(shash));
if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
ti->error = "Digest size too big";
return -EINVAL;
@@ -1402,7 +1277,7 @@ static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg)
return -EINVAL;
}
}
- if (v->shash_tfm) {
+ if (v->version) { /* Version 1: salt at beginning */
SHASH_DESC_ON_STACK(desc, v->shash_tfm);
int r;
@@ -1681,7 +1556,8 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->per_io_data_size = sizeof(struct dm_verity_io) + v->hash_reqsize;
+ ti->per_io_data_size = sizeof(struct dm_verity_io) +
+ crypto_shash_descsize(v->shash_tfm);
r = verity_fec_ctr(v);
if (r)
@@ -1788,10 +1664,7 @@ static int verity_preresume(struct dm_target *ti)
bdev = dm_disk(dm_table_get_md(ti->table))->part0;
root_digest.digest = v->root_digest;
root_digest.digest_len = v->digest_size;
- if (static_branch_unlikely(&ahash_enabled) && !v->shash_tfm)
- root_digest.alg = crypto_ahash_alg_name(v->ahash_tfm);
- else
- root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
+ root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest,
sizeof(root_digest));
@@ -1817,7 +1690,7 @@ static struct target_type verity_target = {
.name = "verity",
/* Note: the LSMs depend on the singleton and immutable features */
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
- .version = {1, 11, 0},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 8cbb57862ae1..6d141abd965c 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -39,11 +39,10 @@ struct dm_verity {
struct dm_target *ti;
struct dm_bufio_client *bufio;
char *alg_name;
- struct crypto_ahash *ahash_tfm; /* either this or shash_tfm is set */
- struct crypto_shash *shash_tfm; /* either this or ahash_tfm is set */
+ struct crypto_shash *shash_tfm;
u8 *root_digest; /* digest of the root block */
u8 *salt; /* salt: its size is salt_size */
- u8 *initial_hashstate; /* salted initial state, if shash_tfm is set */
+ u8 *initial_hashstate; /* salted initial state, if version >= 1 */
u8 *zero_digest; /* digest for a zero block */
#ifdef CONFIG_SECURITY
u8 *root_digest_sig; /* signature of the root digest */
@@ -61,7 +60,6 @@ struct dm_verity {
bool hash_failed:1; /* set if hash of any block failed */
bool use_bh_wq:1; /* try to verify in BH wq before normal work-queue */
unsigned int digest_size; /* digest size for the current hash algorithm */
- unsigned int hash_reqsize; /* the size of temporary space for crypto */
enum verity_mode mode; /* mode for handling verification errors */
enum verity_mode error_mode;/* mode for handling I/O errors */
unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
@@ -100,19 +98,13 @@ struct dm_verity_io {
u8 want_digest[HASH_MAX_DIGESTSIZE];
/*
- * This struct is followed by a variable-sized hash request of size
- * v->hash_reqsize, either a struct ahash_request or a struct shash_desc
- * (depending on whether ahash_tfm or shash_tfm is being used). To
- * access it, use verity_io_hash_req().
+ * Temporary space for hashing. This is variable-length and must be at
+ * the end of the struct. struct shash_desc is just the fixed part;
+ * it's followed by a context of size crypto_shash_descsize(shash_tfm).
*/
+ struct shash_desc hash_desc;
};
-static inline void *verity_io_hash_req(struct dm_verity *v,
- struct dm_verity_io *io)
-{
- return io + 1;
-}
-
static inline u8 *verity_io_real_digest(struct dm_verity *v,
struct dm_verity_io *io)
{
@@ -126,7 +118,7 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
}
extern int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
- const u8 *data, size_t len, u8 *digest, bool may_sleep);
+ const u8 *data, size_t len, u8 *digest);
extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero);
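With the ahash path removed, verity_hash() has exactly two shapes: version 0 feeds the salt after the data, and version >= 1 imports a pre-salted state and finishes with crypto_shash_finup(). A sketch of how such an initial state can be captured once and replayed per block, assuming an shash algorithm that supports export/import; it sizes the export buffer with crypto_shash_statesize(), which may differ from the allocation the driver actually uses, and the function names are illustrative:

#include <crypto/hash.h>
#include <linux/slab.h>

/* Hash the salt once and export the partial state for later reuse. */
static int precompute_salted_state(struct crypto_shash *tfm,
				   const u8 *salt, size_t salt_size,
				   u8 **state_out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 *state;
	int r;

	state = kmalloc(crypto_shash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	desc->tfm = tfm;
	r = crypto_shash_init(desc) ?:
	    crypto_shash_update(desc, salt, salt_size) ?:
	    crypto_shash_export(desc, state);
	if (r) {
		kfree(state);
		return r;
	}
	*state_out = state;
	return 0;
}

/* Per block: resume from the salted state, then hash just the block. */
static int hash_one_block(struct crypto_shash *tfm, const u8 *state,
			  const u8 *data, size_t len, u8 *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	return crypto_shash_import(desc, state) ?:
	       crypto_shash_finup(desc, data, len, digest);
}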
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 3d31b82e0730..78e17dd4d01b 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -467,8 +467,6 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
bdev_offset_from_zone_start(disk->part0,
clone->bi_iter.bi_sector);
}
-
- return;
}
static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 5da3db06da10..9da329078ea4 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1062,7 +1062,7 @@ static int dmz_iterate_devices(struct dm_target *ti,
struct dmz_target *dmz = ti->private;
unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
sector_t capacity;
- int i, r;
+ int i, r = 0;
for (i = 0; i < dmz->nr_ddevs; i++) {
capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2d8402778e5c..a44e8c2dccee 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1024,10 +1024,8 @@ static void dm_wq_requeue_work(struct work_struct *work)
*
* 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
*/
-static void dm_io_complete(struct dm_io *io)
+static inline void dm_io_complete(struct dm_io *io)
{
- bool first_requeue;
-
/*
* Only dm_io that has been split needs two stage requeue, otherwise
* we may run into long bio clone chain during suspend and OOM could
@@ -1036,12 +1034,7 @@ static void dm_io_complete(struct dm_io *io)
* Also flush data dm_io won't be marked as DM_IO_WAS_SPLIT, so they
* also aren't handled via the first stage requeue.
*/
- if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
- first_requeue = true;
- else
- first_requeue = false;
-
- __dm_io_complete(io, first_requeue);
+ __dm_io_complete(io, dm_io_flagged(io, DM_IO_WAS_SPLIT));
}
/*
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 7f524a26cebc..334b71404930 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1987,12 +1987,12 @@ static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s,
md_bitmap_set_memory_bits(bitmap, sec, 1);
md_bitmap_file_set_bit(bitmap, sec);
- if (sec < bitmap->mddev->recovery_cp)
+ if (sec < bitmap->mddev->resync_offset)
/* We are asserting that the array is dirty,
- * so move the recovery_cp address back so
+ * so move the resync_offset address back so
* that it is obvious that it is dirty
*/
- bitmap->mddev->recovery_cp = sec;
+ bitmap->mddev->resync_offset = sec;
}
}
@@ -2258,7 +2258,7 @@ static int bitmap_load(struct mddev *mddev)
|| bitmap->events_cleared == mddev->events)
/* no need to keep dirty bits to optimise a
* re-add of a missing device */
- start = mddev->recovery_cp;
+ start = mddev->resync_offset;
mutex_lock(&mddev->bitmap_info.mutex);
err = md_bitmap_init_from_disk(bitmap, start);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 94221d964d4f..5497eaee96e7 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -337,11 +337,11 @@ static void recover_bitmaps(struct md_thread *thread)
md_wakeup_thread(mddev->sync_thread);
if (hi > 0) {
- if (lo < mddev->recovery_cp)
- mddev->recovery_cp = lo;
+ if (lo < mddev->resync_offset)
+ mddev->resync_offset = lo;
/* wake up thread to continue resync in case resync
* is not finished */
- if (mddev->recovery_cp != MaxSector) {
+ if (mddev->resync_offset != MaxSector) {
/*
* clear the REMOTE flag since we will launch
* resync thread in current node.
@@ -863,9 +863,9 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
lockres_free(bm_lockres);
continue;
}
- if ((hi > 0) && (lo < mddev->recovery_cp)) {
+ if ((hi > 0) && (lo < mddev->resync_offset)) {
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- mddev->recovery_cp = lo;
+ mddev->resync_offset = lo;
md_check_recovery(mddev);
}
@@ -1027,7 +1027,7 @@ static int leave(struct mddev *mddev)
* Also, we should send BITMAP_NEEDS_SYNC message in
* case reshaping is interrupted.
*/
- if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
+ if ((cinfo->slot_number > 0 && mddev->resync_offset != MaxSector) ||
(mddev->reshape_position != MaxSector &&
test_bit(MD_CLOSING, &mddev->flags)))
resync_bitmap(mddev);
@@ -1605,8 +1605,8 @@ static int gather_bitmaps(struct md_rdev *rdev)
pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
goto out;
}
- if ((hi > 0) && (lo < mddev->recovery_cp))
- mddev->recovery_cp = lo;
+ if ((hi > 0) && (lo < mddev->resync_offset))
+ mddev->resync_offset = lo;
}
out:
return err;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 046fe85c76fe..1baaf52c603c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -339,6 +339,7 @@ static int start_readonly;
* so all the races disappear.
*/
static bool create_on_open = true;
+static bool legacy_async_del_gendisk = true;
/*
* We have a system wide 'event count' that is incremented
@@ -637,6 +638,12 @@ static void __mddev_put(struct mddev *mddev)
return;
/*
+ * If the array is freed by stopping it, MD_DELETED is set by
+ * do_md_stop(). MD_DELETED is still set here in case the mddev is
+ * freed directly by closing an mddev that was created by create_on_open.
+ */
+ set_bit(MD_DELETED, &mddev->flags);
+ /*
* Call queue_work inside the spinlock so that flush_workqueue() after
* mddev_find will succeed in waiting for the work to be done.
*/
@@ -871,15 +878,18 @@ void mddev_unlock(struct mddev *mddev)
export_rdev(rdev, mddev);
}
- /* Call del_gendisk after release reconfig_mutex to avoid
- * deadlock (e.g. call del_gendisk under the lock and an
- * access to sysfs files waits the lock)
- * And MD_DELETED is only used for md raid which is set in
- * do_md_stop. dm raid only uses md_stop to stop. So dm raid
- * doesn't need to check MD_DELETED when getting reconfig lock
- */
- if (test_bit(MD_DELETED, &mddev->flags))
- del_gendisk(mddev->gendisk);
+ if (!legacy_async_del_gendisk) {
+ /*
+ * Call del_gendisk() after releasing reconfig_mutex to avoid a
+ * deadlock (e.g. del_gendisk() called under the lock while a sysfs
+ * access waits for it).
+ * MD_DELETED is only used for md raid and is set in do_md_stop();
+ * dm raid only uses md_stop() to stop, so it doesn't need to check
+ * MD_DELETED when taking the reconfig lock.
+ */
+ if (test_bit(MD_DELETED, &mddev->flags))
+ del_gendisk(mddev->gendisk);
+ }
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -1409,13 +1419,13 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru
mddev->layout = -1;
if (sb->state & (1<<MD_SB_CLEAN))
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
- mddev->recovery_cp = sb->recovery_cp;
+ mddev->resync_offset = sb->recovery_cp;
} else
- mddev->recovery_cp = 0;
+ mddev->resync_offset = 0;
}
memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
@@ -1541,10 +1551,10 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
{
- sb->recovery_cp = mddev->recovery_cp;
+ sb->recovery_cp = mddev->resync_offset;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
- if (mddev->recovery_cp == MaxSector)
+ if (mddev->resync_offset == MaxSector)
sb->state = (1<< MD_SB_CLEAN);
} else
sb->recovery_cp = 0;
@@ -1895,7 +1905,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
mddev->bitmap_info.default_space = (4096-1024) >> 9;
mddev->reshape_backwards = 0;
- mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
+ mddev->resync_offset = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->max_disks = (4096-256)/2;
@@ -2081,7 +2091,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->utime = cpu_to_le64((__u64)mddev->utime);
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
- sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
+ sb->resync_offset = cpu_to_le64(mddev->resync_offset);
else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
sb->resync_offset = cpu_to_le64(MaxSector);
else
@@ -2761,7 +2771,7 @@ repeat:
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
- && (mddev->in_sync && mddev->recovery_cp == MaxSector)
+ && (mddev->in_sync && mddev->resync_offset == MaxSector)
&& mddev->can_decrease_events
&& mddev->events != 1) {
mddev->events--;
@@ -4297,9 +4307,9 @@ __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
- if (mddev->recovery_cp == MaxSector)
+ if (mddev->resync_offset == MaxSector)
return sprintf(page, "none\n");
- return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
+ return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_offset);
}
static ssize_t
@@ -4325,7 +4335,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
err = -EBUSY;
if (!err) {
- mddev->recovery_cp = n;
+ mddev->resync_offset = n;
if (mddev->pers)
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
@@ -4829,9 +4839,42 @@ out_unlock:
static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors)
+{
+ return rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < sectors;
+}
+
+static enum sync_action md_get_active_sync_action(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ bool is_recover = false;
+
+ if (mddev->resync_offset < MaxSector)
+ return ACTION_RESYNC;
+
+ if (mddev->reshape_position != MaxSector)
+ return ACTION_RESHAPE;
+
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
+ if (rdev_needs_recovery(rdev, MaxSector)) {
+ is_recover = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_recover ? ACTION_RECOVER : ACTION_IDLE;
+}
+
enum sync_action md_sync_action(struct mddev *mddev)
{
unsigned long recovery = mddev->recovery;
+ enum sync_action active_action;
/*
* frozen has the highest priority, means running sync_thread will be
@@ -4855,8 +4898,17 @@ enum sync_action md_sync_action(struct mddev *mddev)
!test_bit(MD_RECOVERY_NEEDED, &recovery))
return ACTION_IDLE;
- if (test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
- mddev->reshape_position != MaxSector)
+ /*
+ * Check if any sync operation (resync/recover/reshape) is
+ * currently active. This ensures that only one sync operation
+ * can run at a time. Returns the type of active operation, or
+ * ACTION_IDLE if none are active.
+ */
+ active_action = md_get_active_sync_action(mddev);
+ if (active_action != ACTION_IDLE)
+ return active_action;
+
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return ACTION_RESHAPE;
if (test_bit(MD_RECOVERY_RECOVER, &recovery))
@@ -5812,6 +5864,13 @@ static void md_kobj_release(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
+ if (legacy_async_del_gendisk) {
+ if (mddev->sysfs_state)
+ sysfs_put(mddev->sysfs_state);
+ if (mddev->sysfs_level)
+ sysfs_put(mddev->sysfs_level);
+ del_gendisk(mddev->gendisk);
+ }
put_disk(mddev->gendisk);
}
@@ -6015,6 +6074,9 @@ static int md_alloc_and_put(dev_t dev, char *name)
{
struct mddev *mddev = md_alloc(dev, name);
+ if (legacy_async_del_gendisk)
+ pr_warn("md: async del_gendisk mode will be removed in future, please upgrade to mdadm-4.5+\n");
+
if (IS_ERR(mddev))
return PTR_ERR(mddev);
mddev_put(mddev);
@@ -6417,7 +6479,7 @@ static void md_clean(struct mddev *mddev)
mddev->external_size = 0;
mddev->dev_sectors = 0;
mddev->raid_disks = 0;
- mddev->recovery_cp = 0;
+ mddev->resync_offset = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
@@ -6425,10 +6487,22 @@ static void md_clean(struct mddev *mddev)
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
- /* if UNTIL_STOP is set, it's cleared here */
- mddev->hold_active = 0;
- /* Don't clear MD_CLOSING, or mddev can be opened again. */
- mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
+
+ /*
+ * In legacy_async_del_gendisk mode, userspace can stop the array in
+ * the middle of assembling it and still access it afterwards, so
+ * MD_CLOSING must be cleared here. Otherwise the array cannot be
+ * opened again after it has been stopped, so MD_CLOSING is left set.
+ */
+ if (legacy_async_del_gendisk && mddev->hold_active) {
+ clear_bit(MD_CLOSING, &mddev->flags);
+ } else {
+ /* if UNTIL_STOP is set, it's cleared here */
+ mddev->hold_active = 0;
+ /* Don't clear MD_CLOSING, or mddev can be opened again. */
+ mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
+ }
mddev->sb_flags = 0;
mddev->ro = MD_RDWR;
mddev->metadata_type[0] = 0;
@@ -6652,7 +6726,8 @@ static int do_md_stop(struct mddev *mddev, int mode)
export_array(mddev);
md_clean(mddev);
- set_bit(MD_DELETED, &mddev->flags);
+ if (!legacy_async_del_gendisk)
+ set_bit(MD_DELETED, &mddev->flags);
}
md_new_event();
sysfs_notify_dirent_safe(mddev->sysfs_state);
@@ -7362,9 +7437,9 @@ int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
* opened
*/
if (info->state & (1<<MD_SB_CLEAN))
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
else
- mddev->recovery_cp = 0;
+ mddev->resync_offset = 0;
mddev->persistent = ! info->not_persistent;
mddev->external = 0;
@@ -8303,7 +8378,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "\tresync=REMOTE");
return 1;
}
- if (mddev->recovery_cp < MaxSector) {
+ if (mddev->resync_offset < MaxSector) {
seq_printf(seq, "\tresync=PENDING");
return 1;
}
@@ -8946,7 +9021,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
return mddev->resync_min;
case ACTION_RESYNC:
if (!mddev->bitmap)
- return mddev->recovery_cp;
+ return mddev->resync_offset;
return 0;
case ACTION_RESHAPE:
/*
@@ -8962,11 +9037,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
start = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
- if (rdev->raid_disk >= 0 &&
- !test_bit(Journal, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < start)
+ if (rdev_needs_recovery(rdev, start))
start = rdev->recovery_offset;
rcu_read_unlock();
@@ -9184,8 +9255,8 @@ void md_do_sync(struct md_thread *thread)
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
- j > mddev->recovery_cp)
- mddev->recovery_cp = j;
+ j > mddev->resync_offset)
+ mddev->resync_offset = j;
update_time = jiffies;
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
sysfs_notify_dirent_safe(mddev->sysfs_completed);
@@ -9305,19 +9376,19 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync > MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
- if (mddev->curr_resync >= mddev->recovery_cp) {
+ if (mddev->curr_resync >= mddev->resync_offset) {
pr_debug("md: checkpointing %s of %s.\n",
desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
- mddev->recovery_cp =
+ mddev->resync_offset =
mddev->curr_resync_completed;
else
- mddev->recovery_cp =
+ mddev->resync_offset =
mddev->curr_resync;
}
} else
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
@@ -9325,12 +9396,8 @@ void md_do_sync(struct md_thread *thread)
test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
- if (rdev->raid_disk >= 0 &&
- mddev->delta_disks >= 0 &&
- !test_bit(Journal, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < mddev->curr_resync)
+ if (mddev->delta_disks >= 0 &&
+ rdev_needs_recovery(rdev, mddev->curr_resync))
rdev->recovery_offset = mddev->curr_resync;
rcu_read_unlock();
}
@@ -9421,6 +9488,12 @@ static bool rdev_is_spare(struct md_rdev *rdev)
static bool rdev_addable(struct md_rdev *rdev)
{
+ struct mddev *mddev;
+
+ mddev = READ_ONCE(rdev->mddev);
+ if (!mddev)
+ return false;
+
/* rdev is already used, don't add it again. */
if (test_bit(Candidate, &rdev->flags) || rdev->raid_disk >= 0 ||
test_bit(Faulty, &rdev->flags))
@@ -9431,7 +9504,7 @@ static bool rdev_addable(struct md_rdev *rdev)
return true;
/* Allow to add if array is read-write. */
- if (md_is_rdwr(rdev->mddev))
+ if (md_is_rdwr(mddev))
return true;
/*
@@ -9533,7 +9606,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
}
/* Check if resync is in progress. */
- if (mddev->recovery_cp < MaxSector) {
+ if (mddev->resync_offset < MaxSector) {
remove_spares(mddev, NULL);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
@@ -9714,7 +9787,7 @@ void md_check_recovery(struct mddev *mddev)
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
(mddev->safemode == 2
- && !mddev->in_sync && mddev->recovery_cp == MaxSector)
+ && !mddev->in_sync && mddev->resync_offset == MaxSector)
))
return;
@@ -9771,8 +9844,8 @@ void md_check_recovery(struct mddev *mddev)
* remove disk.
*/
rdev_for_each_safe(rdev, tmp, mddev) {
- if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
- rdev->raid_disk < 0)
+ if (rdev->raid_disk < 0 &&
+ test_and_clear_bit(ClusterRemove, &rdev->flags))
md_kick_rdev_from_array(rdev);
}
}
@@ -10078,8 +10151,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
/* Check for change of roles in the active devices */
rdev_for_each_safe(rdev2, tmp, mddev) {
- if (test_bit(Faulty, &rdev2->flags))
+ if (test_bit(Faulty, &rdev2->flags)) {
+ if (test_bit(ClusterRemove, &rdev2->flags))
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
continue;
+ }
/* Check if the roles changed */
role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
@@ -10377,6 +10453,7 @@ module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
+module_param(legacy_async_del_gendisk, bool, 0600);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 67b365621507..51af29a03079 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -523,7 +523,7 @@ struct mddev {
unsigned long normal_io_events; /* IO event timestamp */
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
- sector_t recovery_cp;
+ sector_t resync_offset;
sector_t resync_min; /* user requested sync
* starts here */
sector_t resync_max; /* resync should pause
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index cbe2a9054cb9..f1d8811a542a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -674,7 +674,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->raid_disks--;
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
@@ -717,7 +717,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->raid_disks += mddev->delta_disks;
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
@@ -760,7 +760,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->delta_disks = 1 - mddev->raid_disks;
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index b8b3a9069701..52881e6032da 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -283,7 +283,7 @@ static inline int raid1_check_read_range(struct md_rdev *rdev,
static inline bool raid1_should_read_first(struct mddev *mddev,
sector_t this_sector, int len)
{
- if ((mddev->recovery_cp < this_sector + len))
+ if ((mddev->resync_offset < this_sector + len))
return true;
if (mddev_is_clustered(mddev) &&
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 64b8176907a9..408c26398321 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -127,10 +127,9 @@ static inline struct r1bio *get_resync_r1bio(struct bio *bio)
return get_resync_pages(bio)->raid_bio;
}
-static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
+static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf)
{
- struct pool_info *pi = data;
- int size = offsetof(struct r1bio, bios[pi->raid_disks]);
+ int size = offsetof(struct r1bio, bios[conf->raid_disks * 2]);
/* allocate a r1bio with room for raid_disks entries in the bios array */
return kzalloc(size, gfp_flags);
@@ -145,18 +144,18 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
- struct pool_info *pi = data;
+ struct r1conf *conf = data;
struct r1bio *r1_bio;
struct bio *bio;
int need_pages;
int j;
struct resync_pages *rps;
- r1_bio = r1bio_pool_alloc(gfp_flags, pi);
+ r1_bio = r1bio_pool_alloc(gfp_flags, conf);
if (!r1_bio)
return NULL;
- rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
+ rps = kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_pages),
gfp_flags);
if (!rps)
goto out_free_r1bio;
@@ -164,7 +163,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
/*
* Allocate bios : 1 for reading, n-1 for writing
*/
- for (j = pi->raid_disks ; j-- ; ) {
+ for (j = conf->raid_disks * 2; j-- ; ) {
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
@@ -177,11 +176,11 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* If this is a user-requested check/repair, allocate
* RESYNC_PAGES for each bio.
*/
- if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
- need_pages = pi->raid_disks;
+ if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery))
+ need_pages = conf->raid_disks * 2;
else
need_pages = 1;
- for (j = 0; j < pi->raid_disks; j++) {
+ for (j = 0; j < conf->raid_disks * 2; j++) {
struct resync_pages *rp = &rps[j];
bio = r1_bio->bios[j];
@@ -207,7 +206,7 @@ out_free_pages:
resync_free_pages(&rps[j]);
out_free_bio:
- while (++j < pi->raid_disks) {
+ while (++j < conf->raid_disks * 2) {
bio_uninit(r1_bio->bios[j]);
kfree(r1_bio->bios[j]);
}
@@ -220,12 +219,12 @@ out_free_r1bio:
static void r1buf_pool_free(void *__r1_bio, void *data)
{
- struct pool_info *pi = data;
+ struct r1conf *conf = data;
int i;
struct r1bio *r1bio = __r1_bio;
struct resync_pages *rp = NULL;
- for (i = pi->raid_disks; i--; ) {
+ for (i = conf->raid_disks * 2; i--; ) {
rp = get_resync_pages(r1bio->bios[i]);
resync_free_pages(rp);
bio_uninit(r1bio->bios[i]);
@@ -255,7 +254,7 @@ static void free_r1bio(struct r1bio *r1_bio)
struct r1conf *conf = r1_bio->mddev->private;
put_all_bios(conf, r1_bio);
- mempool_free(r1_bio, &conf->r1bio_pool);
+ mempool_free(r1_bio, conf->r1bio_pool);
}
static void put_buf(struct r1bio *r1_bio)
@@ -1305,9 +1304,8 @@ alloc_r1bio(struct mddev *mddev, struct bio *bio)
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
- /* Ensure no bio records IO_BLOCKED */
- memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ memset(r1_bio, 0, offsetof(struct r1bio, bios[conf->raid_disks * 2]));
init_r1bio(r1_bio, mddev, bio);
return r1_bio;
}
@@ -2747,7 +2745,7 @@ static int init_resync(struct r1conf *conf)
BUG_ON(mempool_initialized(&conf->r1buf_pool));
return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
- r1buf_pool_free, conf->poolinfo);
+ r1buf_pool_free, conf);
}
static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
@@ -2757,7 +2755,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
struct bio *bio;
int i;
- for (i = conf->poolinfo->raid_disks; i--; ) {
+ for (i = conf->raid_disks * 2; i--; ) {
bio = r1bio->bios[i];
rps = bio->bi_private;
bio_reset(bio, NULL, 0);
@@ -2822,7 +2820,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
}
if (mddev->bitmap == NULL &&
- mddev->recovery_cp == MaxSector &&
+ mddev->resync_offset == MaxSector &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
conf->fullsync == 0) {
*skipped = 1;
@@ -3085,6 +3083,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
int i;
struct raid1_info *disk;
struct md_rdev *rdev;
+ size_t r1bio_size;
int err = -ENOMEM;
conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
@@ -3121,21 +3120,15 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf->tmppage)
goto abort;
- conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
- if (!conf->poolinfo)
- goto abort;
- conf->poolinfo->raid_disks = mddev->raid_disks * 2;
- err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
- rbio_pool_free, conf->poolinfo);
- if (err)
+ r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
+ conf->r1bio_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS, r1bio_size);
+ if (!conf->r1bio_pool)
goto abort;
err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
if (err)
goto abort;
- conf->poolinfo->mddev = mddev;
-
err = -EINVAL;
spin_lock_init(&conf->device_lock);
conf->raid_disks = mddev->raid_disks;
@@ -3198,10 +3191,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
abort:
if (conf) {
- mempool_exit(&conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
kfree(conf->nr_pending);
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
@@ -3282,9 +3274,9 @@ static int raid1_run(struct mddev *mddev)
}
if (conf->raid_disks - mddev->degraded == 1)
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
- if (mddev->recovery_cp != MaxSector)
+ if (mddev->resync_offset != MaxSector)
pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
@@ -3311,10 +3303,9 @@ static void raid1_free(struct mddev *mddev, void *priv)
{
struct r1conf *conf = priv;
- mempool_exit(&conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
kfree(conf->nr_pending);
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
@@ -3345,8 +3336,8 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > mddev->dev_sectors) {
- mddev->recovery_cp = mddev->dev_sectors;
+ mddev->resync_offset > mddev->dev_sectors) {
+ mddev->resync_offset = mddev->dev_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->dev_sectors = sectors;
@@ -3367,17 +3358,13 @@ static int raid1_reshape(struct mddev *mddev)
* At the same time, we "pack" the devices so that all the missing
* devices have the higher raid_disk numbers.
*/
- mempool_t newpool, oldpool;
- struct pool_info *newpoolinfo;
+ mempool_t *newpool, *oldpool;
+ size_t new_r1bio_size;
struct raid1_info *newmirrors;
struct r1conf *conf = mddev->private;
int cnt, raid_disks;
unsigned long flags;
int d, d2;
- int ret;
-
- memset(&newpool, 0, sizeof(newpool));
- memset(&oldpool, 0, sizeof(oldpool));
/* Cannot change chunk_size, layout, or level */
if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
@@ -3403,24 +3390,16 @@ static int raid1_reshape(struct mddev *mddev)
return -EBUSY;
}
- newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
- if (!newpoolinfo)
+ new_r1bio_size = offsetof(struct r1bio, bios[raid_disks * 2]);
+ newpool = mempool_create_kmalloc_pool(NR_RAID_BIOS, new_r1bio_size);
+ if (!newpool) {
return -ENOMEM;
- newpoolinfo->mddev = mddev;
- newpoolinfo->raid_disks = raid_disks * 2;
-
- ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
- rbio_pool_free, newpoolinfo);
- if (ret) {
- kfree(newpoolinfo);
- return ret;
}
newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
raid_disks, 2),
GFP_KERNEL);
if (!newmirrors) {
- kfree(newpoolinfo);
- mempool_exit(&newpool);
+ mempool_destroy(newpool);
return -ENOMEM;
}
@@ -3429,7 +3408,6 @@ static int raid1_reshape(struct mddev *mddev)
/* ok, everything is stopped */
oldpool = conf->r1bio_pool;
conf->r1bio_pool = newpool;
- init_waitqueue_head(&conf->r1bio_pool.wait);
for (d = d2 = 0; d < conf->raid_disks; d++) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
@@ -3446,8 +3424,6 @@ static int raid1_reshape(struct mddev *mddev)
}
kfree(conf->mirrors);
conf->mirrors = newmirrors;
- kfree(conf->poolinfo);
- conf->poolinfo = newpoolinfo;
spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded += (raid_disks - conf->raid_disks);
@@ -3461,7 +3437,7 @@ static int raid1_reshape(struct mddev *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- mempool_exit(&oldpool);
+ mempool_destroy(oldpool);
return 0;
}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 33f318fcc268..d236ef179cfb 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -49,22 +49,6 @@ struct raid1_info {
sector_t seq_start;
};
-/*
- * memory pools need a pointer to the mddev, so they can force an unplug
- * when memory is tight, and a count of the number of drives that the
- * pool was allocated for, so they know how much to allocate and free.
- * mddev->raid_disks cannot be used, as it can change while a pool is active
- * These two datums are stored in a kmalloced struct.
- * The 'raid_disks' here is twice the raid_disks in r1conf.
- * This allows space for each 'real' device can have a replacement in the
- * second half of the array.
- */
-
-struct pool_info {
- struct mddev *mddev;
- int raid_disks;
-};
-
struct r1conf {
struct mddev *mddev;
struct raid1_info *mirrors; /* twice 'raid_disks' to
@@ -114,11 +98,7 @@ struct r1conf {
*/
int recovery_disabled;
- /* poolinfo contains information about the content of the
- * mempools - it changes when the array grows or shrinks
- */
- struct pool_info *poolinfo;
- mempool_t r1bio_pool;
+ mempool_t *r1bio_pool;
mempool_t r1buf_pool;
struct bio_set bio_split;
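The raid1 hunks above drop the pool_info indirection and the custom r1bio_pool_alloc()/rbio_pool_free() callbacks in favour of a pointer to a plain kmalloc-backed mempool whose element size is fixed when the pool is created. A minimal sketch of that pattern, using only the generic mempool API that appears in the diff; struct elem and the helper names below are hypothetical:

  #include <linux/mempool.h>
  #include <linux/slab.h>
  #include <linux/stddef.h>
  #include <linux/string.h>

  struct elem {
          int nr;
          void *slots[];                  /* flexible array, like r1bio->bios[] */
  };

  static mempool_t *make_pool(int min_nr, int nslots)
  {
          size_t sz = offsetof(struct elem, slots[nslots]);

          /* every pool element is a kmalloc(sz) allocation */
          return mempool_create_kmalloc_pool(min_nr, sz);
  }

  static struct elem *get_elem(mempool_t *pool, int nslots)
  {
          /* GFP_NOIO allocations from a mempool wait rather than fail */
          struct elem *e = mempool_alloc(pool, GFP_NOIO);

          /* a kmalloc pool has no constructor, so clear what callers rely on */
          memset(e, 0, offsetof(struct elem, slots[nslots]));
          return e;
  }

Because the pool no longer zeroes elements for the caller, alloc_r1bio() in the hunk above clears the r1bio head and its bios[] array with memset() right after mempool_alloc(), and teardown switches from mempool_exit() to mempool_destroy() since the pool itself is now heap-allocated.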
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 95dc354a86a0..b60c30bfb6c7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2117,7 +2117,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int last = conf->geo.raid_disks - 1;
struct raid10_info *p;
- if (mddev->recovery_cp < MaxSector)
+ if (mddev->resync_offset < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
* very different from resync
*/
@@ -3185,7 +3185,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* of a clean array, like RAID1 does.
*/
if (mddev->bitmap == NULL &&
- mddev->recovery_cp == MaxSector &&
+ mddev->resync_offset == MaxSector &&
mddev->reshape_position == MaxSector &&
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
@@ -4145,7 +4145,7 @@ static int raid10_run(struct mddev *mddev)
disk->recovery_disabled = mddev->recovery_disabled - 1;
}
- if (mddev->recovery_cp != MaxSector)
+ if (mddev->resync_offset != MaxSector)
pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
pr_info("md/raid10:%s: active with %d out of %d devices\n",
@@ -4245,8 +4245,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
md_set_array_sectors(mddev, size);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > oldsize) {
- mddev->recovery_cp = oldsize;
+ mddev->resync_offset > oldsize) {
+ mddev->resync_offset = oldsize;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
calc_sectors(conf, sectors);
@@ -4275,7 +4275,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
mddev->delta_disks = mddev->raid_disks;
mddev->raid_disks *= 2;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev->dev_sectors = size;
conf = setup_conf(mddev);
@@ -5087,8 +5087,8 @@ static void raid10_finish_reshape(struct mddev *mddev)
return;
if (mddev->delta_disks > 0) {
- if (mddev->recovery_cp > mddev->resync_max_sectors) {
- mddev->recovery_cp = mddev->resync_max_sectors;
+ if (mddev->resync_offset > mddev->resync_max_sectors) {
+ mddev->resync_offset = mddev->resync_max_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->resync_max_sectors = mddev->array_sectors;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index c0fb335311aa..56b234683ee6 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1163,7 +1163,7 @@ static int ppl_load_distributed(struct ppl_log *log)
le64_to_cpu(pplhdr->generation));
/* attempt to recover from log if we are starting a dirty array */
- if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
+ if (pplhdr && !mddev->pers && mddev->resync_offset != MaxSector)
ret = ppl_recover(log, pplhdr, pplhdr_offset);
/* write empty header if we are starting the array */
@@ -1422,14 +1422,14 @@ int ppl_init_log(struct r5conf *conf)
if (ret) {
goto err;
- } else if (!mddev->pers && mddev->recovery_cp == 0 &&
+ } else if (!mddev->pers && mddev->resync_offset == 0 &&
ppl_conf->recovered_entries > 0 &&
ppl_conf->mismatch_count == 0) {
/*
* If we are starting a dirty array and the recovery succeeds
* without any issues, set the array as clean.
*/
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
/* no mismatch allowed when enabling PPL for a running array */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7ec61ee7b218..023649fe2476 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3740,7 +3740,7 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
&& (rdev->recovery_offset <= sh->sector
- || rdev->mddev->recovery_cp <= sh->sector))
+ || rdev->mddev->resync_offset <= sh->sector))
rv = 1;
return rv;
}
@@ -3832,7 +3832,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
* is missing/faulty, then we need to read everything we can.
*/
if (!force_rcw &&
- sh->sector < sh->raid_conf->mddev->recovery_cp)
+ sh->sector < sh->raid_conf->mddev->resync_offset)
/* reconstruct-write isn't being forced */
return 0;
for (i = 0; i < s->failed && i < 2; i++) {
@@ -4097,7 +4097,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
int disks)
{
int rmw = 0, rcw = 0, i;
- sector_t recovery_cp = conf->mddev->recovery_cp;
+ sector_t resync_offset = conf->mddev->resync_offset;
/* Check whether resync is now happening or should start.
* If yes, then the array is dirty (after unclean shutdown or
@@ -4107,14 +4107,14 @@ static int handle_stripe_dirtying(struct r5conf *conf,
* generate correct data from the parity.
*/
if (conf->rmw_level == PARITY_DISABLE_RMW ||
- (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
+ (resync_offset < MaxSector && sh->sector >= resync_offset &&
s->failed == 0)) {
/* Calculate the real rcw later - for now make it
* look like rcw is cheaper
*/
rcw = 1; rmw = 2;
- pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
- conf->rmw_level, (unsigned long long)recovery_cp,
+ pr_debug("force RCW rmw_level=%u, resync_offset=%llu sh->sector=%llu\n",
+ conf->rmw_level, (unsigned long long)resync_offset,
(unsigned long long)sh->sector);
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
@@ -4770,14 +4770,14 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(STRIPE_SYNCING, &sh->state)) {
/* If there is a failed device being replaced,
* we must be recovering.
- * else if we are after recovery_cp, we must be syncing
+ * else if we are after resync_offset, we must be syncing
* else if MD_RECOVERY_REQUESTED is set, we also are syncing.
* else we can only be replacing
* sync and recovery both need to read all devices, and so
* use the same flag.
*/
if (do_recovery ||
- sh->sector >= conf->mddev->recovery_cp ||
+ sh->sector >= conf->mddev->resync_offset ||
test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
s->syncing = 1;
else
@@ -7780,7 +7780,7 @@ static int raid5_run(struct mddev *mddev)
int first = 1;
int ret = -EIO;
- if (mddev->recovery_cp != MaxSector)
+ if (mddev->resync_offset != MaxSector)
pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
@@ -7921,7 +7921,7 @@ static int raid5_run(struct mddev *mddev)
mdname(mddev));
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
- } else if (mddev->recovery_cp == MaxSector)
+ } else if (mddev->resync_offset == MaxSector)
set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
}
@@ -7988,7 +7988,7 @@ static int raid5_run(struct mddev *mddev)
mddev->resync_max_sectors = mddev->dev_sectors;
if (mddev->degraded > dirty_parity_disks &&
- mddev->recovery_cp != MaxSector) {
+ mddev->resync_offset != MaxSector) {
if (test_bit(MD_HAS_PPL, &mddev->flags))
pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n",
mdname(mddev));
@@ -8328,8 +8328,8 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > mddev->dev_sectors) {
- mddev->recovery_cp = mddev->dev_sectors;
+ mddev->resync_offset > mddev->dev_sectors) {
+ mddev->resync_offset = mddev->dev_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->dev_sectors = sectors;
@@ -8423,7 +8423,7 @@ static int raid5_start_reshape(struct mddev *mddev)
return -EINVAL;
/* raid5 can't handle concurrent reshape and recovery */
- if (mddev->recovery_cp < MaxSector)
+ if (mddev->resync_offset < MaxSector)
return -EBUSY;
for (i = 0; i < conf->raid_disks; i++)
if (conf->disks[i].replacement)
@@ -8648,7 +8648,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
mddev->raid_disks += 1;
mddev->delta_disks = 1;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
return setup_conf(mddev);
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index a31a8a6a4946..5aa3d45a691a 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -651,7 +651,7 @@ static int cxd2820r_probe(struct i2c_client *client)
priv->gpio_chip.parent = &client->dev;
priv->gpio_chip.owner = THIS_MODULE;
priv->gpio_chip.direction_output = cxd2820r_gpio_direction_output;
- priv->gpio_chip.set_rv = cxd2820r_gpio_set;
+ priv->gpio_chip.set = cxd2820r_gpio_set;
priv->gpio_chip.get = cxd2820r_gpio_get;
priv->gpio_chip.base = -1; /* Dynamic allocation */
priv->gpio_chip.ngpio = GPIO_COUNT;
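This hunk and several later ones rename the gpio_chip .set_rv hook to .set, which suggests the int-returning setter is now the canonical callback. A minimal sketch of a setter with that assumed signature; the chip structure, register layout and names below are hypothetical:

  #include <linux/bits.h>
  #include <linux/errno.h>
  #include <linux/gpio/driver.h>
  #include <linux/io.h>

  struct mychip {
          void __iomem *out_reg;          /* hypothetical output register */
  };

  /* assumed signature: the setter reports failures through its return value */
  static int mychip_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
  {
          struct mychip *chip = gpiochip_get_data(gc);
          u32 reg;

          if (offset >= gc->ngpio)
                  return -EINVAL;

          reg = readl(chip->out_reg);
          if (value)
                  reg |= BIT(offset);
          else
                  reg &= ~BIT(offset);
          writel(reg, chip->out_reg);

          return 0;
  }

Registration then assigns gc->set = mychip_gpio_set, mirroring the assignments made throughout these hunks.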
diff --git a/drivers/media/i2c/alvium-csi2.c b/drivers/media/i2c/alvium-csi2.c
index 05b708bd0a64..1f088acecf36 100644
--- a/drivers/media/i2c/alvium-csi2.c
+++ b/drivers/media/i2c/alvium-csi2.c
@@ -1841,7 +1841,6 @@ static int alvium_s_stream(struct v4l2_subdev *sd, int enable)
} else {
alvium_set_stream_mipi(alvium, enable);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
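The media hunks that follow repeatedly delete a pm_runtime_mark_last_busy() call that sat directly before pm_runtime_put_autosuspend(). That only stays correct if the PM core now refreshes the last-busy timestamp inside pm_runtime_put_autosuspend() itself, so treat that rationale as an assumption; the resulting idiom is simply:

  #include <linux/pm_runtime.h>

  /* Hypothetical completion path of a runtime-PM-managed driver. */
  static void foo_xfer_done(struct device *dev)
  {
          /*
           * Previously:
           *         pm_runtime_mark_last_busy(dev);
           *         pm_runtime_put_autosuspend(dev);
           * The single call below is assumed to update the last-busy
           * timestamp before dropping the usage count.
           */
          pm_runtime_put_autosuspend(dev);
  }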
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 487bcabb4a19..1c889c878abd 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -787,10 +787,8 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl)
rval = -EINVAL;
}
- if (pm_status > 0) {
- pm_runtime_mark_last_busy(&client->dev);
+ if (pm_status > 0)
pm_runtime_put_autosuspend(&client->dev);
- }
return rval;
}
@@ -1914,7 +1912,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
if (!enable) {
ccs_stop_streaming(sensor);
sensor->streaming = false;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
@@ -1929,7 +1926,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
rval = ccs_start_streaming(sensor);
if (rval < 0) {
sensor->streaming = false;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
@@ -2677,7 +2673,6 @@ nvm_show(struct device *dev, struct device_attribute *attr, char *buf)
return -ENODEV;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
/*
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index bc74499b0a96..a80da2b4a8fa 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -235,7 +235,7 @@ static int ub913_gpiochip_probe(struct ub913_data *priv)
gc->ngpio = UB913_NUM_GPIOS;
gc->get_direction = ub913_gpio_get_direction;
gc->direction_output = ub913_gpio_direction_out;
- gc->set_rv = ub913_gpio_set;
+ gc->set = ub913_gpio_set;
gc->of_xlate = ub913_gpio_of_xlate;
gc->of_gpio_n_cells = 2;
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index a865bfc89500..e3fc9d66970a 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -361,7 +361,7 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
gc->direction_input = ub953_gpio_direction_in;
gc->direction_output = ub953_gpio_direction_out;
gc->get = ub953_gpio_get;
- gc->set_rv = ub953_gpio_set;
+ gc->set = ub953_gpio_set;
gc->of_xlate = ub953_gpio_of_xlate;
gc->of_gpio_n_cells = 2;
diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
index 3a4d100b9199..d434721ba8ed 100644
--- a/drivers/media/i2c/dw9768.c
+++ b/drivers/media/i2c/dw9768.c
@@ -374,7 +374,6 @@ static int dw9768_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static int dw9768_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_autosuspend(sd->dev);
return 0;
diff --git a/drivers/media/i2c/gc0308.c b/drivers/media/i2c/gc0308.c
index 069f42785b3c..cbcda0e18ff1 100644
--- a/drivers/media/i2c/gc0308.c
+++ b/drivers/media/i2c/gc0308.c
@@ -974,7 +974,6 @@ static int gc0308_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret)
dev_err(gc0308->dev, "failed to set control: %d\n", ret);
- pm_runtime_mark_last_busy(gc0308->dev);
pm_runtime_put_autosuspend(gc0308->dev);
return ret;
@@ -1157,14 +1156,12 @@ static int gc0308_start_stream(struct gc0308 *gc0308)
return 0;
disable_pm:
- pm_runtime_mark_last_busy(gc0308->dev);
pm_runtime_put_autosuspend(gc0308->dev);
return ret;
}
static int gc0308_stop_stream(struct gc0308 *gc0308)
{
- pm_runtime_mark_last_busy(gc0308->dev);
pm_runtime_put_autosuspend(gc0308->dev);
return 0;
}
diff --git a/drivers/media/i2c/gc2145.c b/drivers/media/i2c/gc2145.c
index ba02161d46e7..559a851669aa 100644
--- a/drivers/media/i2c/gc2145.c
+++ b/drivers/media/i2c/gc2145.c
@@ -963,7 +963,6 @@ static int gc2145_enable_streams(struct v4l2_subdev *sd,
return 0;
err_rpm_put:
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
}
@@ -985,7 +984,6 @@ static int gc2145_disable_streams(struct v4l2_subdev *sd,
if (ret)
dev_err(&client->dev, "%s failed to write regs\n", __func__);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
@@ -1193,7 +1191,6 @@ static int gc2145_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 3b4f68543342..3faf48f34af4 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -771,7 +771,6 @@ static int imx219_enable_streams(struct v4l2_subdev *sd,
return 0;
err_rpm_put:
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
}
@@ -793,7 +792,6 @@ static int imx219_disable_streams(struct v4l2_subdev *sd,
__v4l2_ctrl_grab(imx219->vflip, false);
__v4l2_ctrl_grab(imx219->hflip, false);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index da618c8cbadc..67e8bb432d10 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -1143,7 +1143,6 @@ static int imx283_enable_streams(struct v4l2_subdev *sd,
return 0;
err_rpm_put:
- pm_runtime_mark_last_busy(imx283->dev);
pm_runtime_put_autosuspend(imx283->dev);
return ret;
@@ -1163,7 +1162,6 @@ static int imx283_disable_streams(struct v4l2_subdev *sd,
if (ret)
dev_err(imx283->dev, "Failed to stop stream\n");
- pm_runtime_mark_last_busy(imx283->dev);
pm_runtime_put_autosuspend(imx283->dev);
return ret;
@@ -1558,7 +1556,6 @@ static int imx283_probe(struct i2c_client *client)
* Decrease the PM usage count. The device will get suspended after the
* autosuspend delay, turning the power off.
*/
- pm_runtime_mark_last_busy(imx283->dev);
pm_runtime_put_autosuspend(imx283->dev);
return 0;
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 4f3f386c5353..ec172556612e 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -869,7 +869,6 @@ static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
return ret;
@@ -1099,7 +1098,6 @@ static int imx290_set_stream(struct v4l2_subdev *sd, int enable)
}
} else {
imx290_stop_streaming(imx290);
- pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
}
@@ -1294,7 +1292,6 @@ static int imx290_subdev_init(struct imx290 *imx290)
* will already be prevented even before the delay.
*/
v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops);
- pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
imx290->sd.internal_ops = &imx290_internal_ops;
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index f3bec16b527c..61116f4e3f76 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -604,7 +604,6 @@ static int imx296_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable) {
ret = imx296_stream_off(sensor);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
goto unlock;
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 278e743646ea..276bf4d6f39d 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -952,7 +952,6 @@ static int imx415_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable) {
ret = imx415_stream_off(sensor);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
goto unlock;
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 1d0b5f56f989..7c0961688d61 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -1220,7 +1220,7 @@ static int max9286_register_gpio(struct max9286_priv *priv)
gpio->owner = THIS_MODULE;
gpio->ngpio = 2;
gpio->base = -1;
- gpio->set_rv = max9286_gpiochip_set;
+ gpio->set = max9286_gpiochip_set;
gpio->get = max9286_gpiochip_get;
gpio->can_sleep = true;
diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c
index 015e42fbe246..c8ae7890d9fa 100644
--- a/drivers/media/i2c/max96717.c
+++ b/drivers/media/i2c/max96717.c
@@ -355,7 +355,7 @@ static int max96717_gpiochip_probe(struct max96717_priv *priv)
gc->get_direction = max96717_gpio_get_direction;
gc->direction_input = max96717_gpio_direction_in;
gc->direction_output = max96717_gpio_direction_out;
- gc->set_rv = max96717_gpiochip_set;
+ gc->set = max96717_gpiochip_set;
gc->get = max96717_gpiochip_get;
/* Disable GPIO forwarding */
diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
index 3f540ca40f3c..aa3fd6c6c76c 100644
--- a/drivers/media/i2c/mt9m114.c
+++ b/drivers/media/i2c/mt9m114.c
@@ -974,7 +974,6 @@ static int mt9m114_start_streaming(struct mt9m114 *sensor,
return 0;
error:
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -988,7 +987,6 @@ static int mt9m114_stop_streaming(struct mt9m114 *sensor)
ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_SUSPEND);
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -1046,7 +1044,6 @@ static int mt9m114_pa_g_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -1113,7 +1110,6 @@ static int mt9m114_pa_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -1565,7 +1561,6 @@ static int mt9m114_ifp_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -2472,7 +2467,6 @@ static int mt9m114_probe(struct i2c_client *client)
* Decrease the PM usage count. The device will get suspended after the
* autosuspend delay, turning the power off.
*/
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c
index 1c3a449f9354..7d740ad3926f 100644
--- a/drivers/media/i2c/ov4689.c
+++ b/drivers/media/i2c/ov4689.c
@@ -497,7 +497,6 @@ static int ov4689_s_stream(struct v4l2_subdev *sd, int on)
} else {
cci_write(ov4689->regmap, OV4689_REG_CTRL_MODE,
OV4689_MODE_SW_STANDBY, NULL);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -702,7 +701,6 @@ static int ov4689_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -999,7 +997,6 @@ static int ov4689_probe(struct i2c_client *client)
goto err_clean_subdev_pm;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 0dae0438aa80..84198613381d 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -3341,7 +3341,6 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->i2c_client->dev);
pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
return 0;
@@ -3417,7 +3416,6 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->i2c_client->dev);
pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
return ret;
@@ -3754,7 +3752,6 @@ out:
mutex_unlock(&sensor->lock);
if (!enable || ret) {
- pm_runtime_mark_last_busy(&sensor->i2c_client->dev);
pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
}
@@ -3965,7 +3962,6 @@ static int ov5640_probe(struct i2c_client *client)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 004d0ee5c3f5..58c846a44376 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -808,7 +808,6 @@ static int ov5645_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(ov5645->dev);
pm_runtime_put_autosuspend(ov5645->dev);
return ret;
@@ -979,7 +978,6 @@ static int ov5645_disable_streams(struct v4l2_subdev *sd,
OV5645_SYSTEM_CTRL0_STOP);
rpm_put:
- pm_runtime_mark_last_busy(ov5645->dev);
pm_runtime_put_autosuspend(ov5645->dev);
return ret;
@@ -1196,7 +1194,6 @@ static int ov5645_probe(struct i2c_client *client)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov64a40.c b/drivers/media/i2c/ov64a40.c
index a5da4fe47e0b..2031cbd05c26 100644
--- a/drivers/media/i2c/ov64a40.c
+++ b/drivers/media/i2c/ov64a40.c
@@ -2990,7 +2990,6 @@ static int ov64a40_start_streaming(struct ov64a40 *ov64a40,
return 0;
error_power_off:
- pm_runtime_mark_last_busy(ov64a40->dev);
pm_runtime_put_autosuspend(ov64a40->dev);
return ret;
@@ -3000,7 +2999,6 @@ static int ov64a40_stop_streaming(struct ov64a40 *ov64a40,
struct v4l2_subdev_state *state)
{
cci_update_bits(ov64a40->cci, OV64A40_REG_SMIA, BIT(0), 0, NULL);
- pm_runtime_mark_last_busy(ov64a40->dev);
pm_runtime_put_autosuspend(ov64a40->dev);
__v4l2_ctrl_grab(ov64a40->link_freq, false);
@@ -3329,10 +3327,8 @@ static int ov64a40_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- if (pm_status > 0) {
- pm_runtime_mark_last_busy(ov64a40->dev);
+ if (pm_status > 0)
pm_runtime_put_autosuspend(ov64a40->dev);
- }
return ret;
}
@@ -3622,7 +3618,6 @@ static int ov64a40_probe(struct i2c_client *client)
goto error_subdev_cleanup;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c
index 95f9ae794846..6b7193eaea1f 100644
--- a/drivers/media/i2c/ov8858.c
+++ b/drivers/media/i2c/ov8858.c
@@ -1391,7 +1391,6 @@ static int ov8858_s_stream(struct v4l2_subdev *sd, int on)
}
} else {
ov8858_stop_stream(ov8858);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
@@ -1945,7 +1944,6 @@ static int ov8858_probe(struct i2c_client *client)
goto err_power_off;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index f4568e87f018..41ae25b0911f 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -465,7 +465,6 @@ static int mipid02_disable_streams(struct v4l2_subdev *sd,
if (ret)
goto error;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
error:
@@ -542,7 +541,6 @@ error:
cci_write(bridge->regmap, MIPID02_DATA_LANE0_REG1, 0, &ret);
cci_write(bridge->regmap, MIPID02_DATA_LANE1_REG1, 0, &ret);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
}
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index 143aa1359aba..bcfc274cf891 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -816,7 +816,6 @@ static int tc358746_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
err_out:
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return err;
@@ -838,7 +837,6 @@ err_out:
if (err)
return err;
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return v4l2_subdev_call(src, video, s_stream, 0);
@@ -1016,7 +1014,6 @@ tc358746_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
err = tc358746_read(tc358746, reg->reg, &val);
reg->val = val;
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return err;
@@ -1032,7 +1029,6 @@ tc358746_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
tc358746_write(tc358746, (u32)reg->reg, (u32)reg->val);
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return 0;
@@ -1395,7 +1391,6 @@ static int tc358746_init_hw(struct tc358746 *tc358746)
}
err = tc358746_read(tc358746, CHIPID_REG, &val);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
if (err)
return -ENODEV;
diff --git a/drivers/media/i2c/thp7312.c b/drivers/media/i2c/thp7312.c
index 8852c56431fe..775cfba188d8 100644
--- a/drivers/media/i2c/thp7312.c
+++ b/drivers/media/i2c/thp7312.c
@@ -808,7 +808,6 @@ static int thp7312_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable) {
thp7312_stream_enable(thp7312, false);
- pm_runtime_mark_last_busy(thp7312->dev);
pm_runtime_put_autosuspend(thp7312->dev);
v4l2_subdev_unlock_state(sd_state);
@@ -839,7 +838,6 @@ static int thp7312_s_stream(struct v4l2_subdev *sd, int enable)
goto finish_unlock;
finish_pm:
- pm_runtime_mark_last_busy(thp7312->dev);
pm_runtime_put_autosuspend(thp7312->dev);
finish_unlock:
v4l2_subdev_unlock_state(sd_state);
@@ -1147,7 +1145,6 @@ static int thp7312_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(thp7312->dev);
pm_runtime_put_autosuspend(thp7312->dev);
return ret;
@@ -2183,7 +2180,6 @@ static int thp7312_probe(struct i2c_client *client)
* Decrease the PM usage count. The device will get suspended after the
* autosuspend delay, turning the power off.
*/
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
dev_info(dev, "THP7312 firmware version %02u.%02u\n",
diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c
index c0754fd03b1d..7c39183dd44b 100644
--- a/drivers/media/i2c/vd55g1.c
+++ b/drivers/media/i2c/vd55g1.c
@@ -1104,7 +1104,6 @@ static int vd55g1_disable_streams(struct v4l2_subdev *sd,
vd55g1_grab_ctrls(sensor, false);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1338,7 +1337,6 @@ static int vd55g1_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1433,7 +1431,6 @@ static int vd55g1_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1895,7 +1892,6 @@ static int vd55g1_probe(struct i2c_client *client)
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 4000);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
ret = vd55g1_subdev_init(sensor);
diff --git a/drivers/media/i2c/vd56g3.c b/drivers/media/i2c/vd56g3.c
index 5d951ad0b478..d66e21ba4498 100644
--- a/drivers/media/i2c/vd56g3.c
+++ b/drivers/media/i2c/vd56g3.c
@@ -493,7 +493,6 @@ static int vd56g3_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -577,7 +576,6 @@ static int vd56g3_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1021,7 +1019,6 @@ static int vd56g3_disable_streams(struct v4l2_subdev *sd,
__v4l2_ctrl_grab(sensor->vflip_ctrl, false);
__v4l2_ctrl_grab(sensor->patgen_ctrl, false);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1527,7 +1524,6 @@ static int vd56g3_probe(struct i2c_client *client)
}
/* Sensor could now be powered off (after the autosuspend delay) */
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
dev_dbg(dev, "Successfully probe %s sensor\n",
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 0dd991d70d53..1eee2d4f5b40 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -288,7 +288,6 @@ static int amg88xx_read(struct device *dev, enum hwmon_sensor_types type,
return tmp;
tmp = regmap_bulk_read(data->regmap, AMG88XX_REG_TTHL, &buf, 2);
- pm_runtime_mark_last_busy(regmap_get_device(data->regmap));
pm_runtime_put_autosuspend(regmap_get_device(data->regmap));
if (tmp)
return tmp;
@@ -527,7 +526,6 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
error_rpm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
error_del_list:
video_i2c_del_list(vq, VB2_BUF_STATE_QUEUED);
@@ -544,7 +542,6 @@ static void stop_streaming(struct vb2_queue *vq)
kthread_stop(data->kthread_vid_cap);
data->kthread_vid_cap = NULL;
- pm_runtime_mark_last_busy(regmap_get_device(data->regmap));
pm_runtime_put_autosuspend(regmap_get_device(data->regmap));
video_i2c_del_list(vq, VB2_BUF_STATE_ERROR);
@@ -853,7 +850,6 @@ static int video_i2c_probe(struct i2c_client *client)
if (ret < 0)
goto error_pm_disable;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
diff --git a/drivers/media/pci/solo6x10/solo6x10-gpio.c b/drivers/media/pci/solo6x10/solo6x10-gpio.c
index b16a8453a62a..71848741c55c 100644
--- a/drivers/media/pci/solo6x10/solo6x10-gpio.c
+++ b/drivers/media/pci/solo6x10/solo6x10-gpio.c
@@ -158,7 +158,7 @@ int solo_gpio_init(struct solo_dev *solo_dev)
solo_dev->gpio_dev.get_direction = solo_gpiochip_get_direction;
solo_dev->gpio_dev.get = solo_gpiochip_get;
- solo_dev->gpio_dev.set_rv = solo_gpiochip_set;
+ solo_dev->gpio_dev.set = solo_gpiochip_set;
ret = gpiochip_add_data(&solo_dev->gpio_dev, solo_dev);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
index fd71f0c43ac3..a9ce032cc5a2 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -451,7 +451,6 @@ static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst)
if (q_status.report_queue_count == 0 &&
(q_status.instance_queue_count == 0 || dec_info.sequence_changed)) {
dev_dbg(inst->dev->dev, "%s: finishing job.\n", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
}
@@ -1364,7 +1363,6 @@ static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count
}
}
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return ret;
@@ -1498,7 +1496,6 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
else
streamoff_capture(q);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
}
@@ -1662,7 +1659,6 @@ static void wave5_vpu_dec_device_run(void *priv)
finish_job_and_return:
dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
index 1e5fc5f8b856..35913a7de834 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
@@ -1391,12 +1391,10 @@ static int wave5_vpu_enc_start_streaming(struct vb2_queue *q, unsigned int count
if (ret)
goto return_buffers;
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return 0;
return_buffers:
wave5_return_bufs(q, VB2_BUF_STATE_QUEUED);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return ret;
}
@@ -1465,7 +1463,6 @@ static void wave5_vpu_enc_stop_streaming(struct vb2_queue *q)
else
streamoff_capture(inst, q);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
}
@@ -1520,7 +1517,6 @@ static void wave5_vpu_enc_device_run(void *priv)
break;
}
dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return;
default:
@@ -1529,7 +1525,6 @@ static void wave5_vpu_enc_device_run(void *priv)
break;
}
dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
}
diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c
index 0e56a4331b0d..45f8f6904867 100644
--- a/drivers/media/platform/nvidia/tegra-vde/h264.c
+++ b/drivers/media/platform/nvidia/tegra-vde/h264.c
@@ -585,7 +585,6 @@ static int tegra_vde_decode_begin(struct tegra_vde *vde,
return 0;
put_runtime_pm:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
unlock:
@@ -612,7 +611,6 @@ static void tegra_vde_decode_abort(struct tegra_vde *vde)
if (err)
dev_err(dev, "DEC end: Failed to assert HW reset: %d\n", err);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
mutex_unlock(&vde->lock);
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.c b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
index 221dcd09e1e1..b3ed06297953 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_queue.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
@@ -142,7 +142,6 @@ int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size)
}
mutex_unlock(&core->lock);
- pm_runtime_mark_last_busy(core->dev);
pm_runtime_put_autosuspend(core->dev);
return 0;
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 8dd5a9b0d060..e32f8862a9f9 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -48,7 +48,8 @@ static int core_clks_enable(struct venus_core *core)
int ret;
opp = dev_pm_opp_find_freq_ceil(dev, &freq);
- dev_pm_opp_put(opp);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
for (i = 0; i < res->clks_num; i++) {
if (IS_V6(core)) {
@@ -660,7 +661,8 @@ static int decide_core(struct venus_inst *inst)
/*TODO : divide this inst->load by work_route */
opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
- dev_pm_opp_put(opp);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
min_loaded_core(inst, &min_coreid, &min_load, false);
min_loaded_core(inst, &min_lp_coreid, &min_lp_load, true);
@@ -1121,7 +1123,8 @@ static int load_scale_v4(struct venus_inst *inst)
freq = max(freq_core1, freq_core2);
opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
- dev_pm_opp_put(opp);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
if (freq > max_freq) {
dev_dbg(dev, VDBGL "requested clock rate: %lu scaling clock rate : %lu\n",
@@ -1131,7 +1134,8 @@ static int load_scale_v4(struct venus_inst *inst)
}
opp = dev_pm_opp_find_freq_ceil(dev, &freq);
- dev_pm_opp_put(opp);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
set_freq:
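The venus changes above guard dev_pm_opp_put() because dev_pm_opp_find_freq_ceil() and dev_pm_opp_find_freq_floor() return an ERR_PTR() on failure rather than NULL, and putting an error pointer is invalid. A minimal sketch of the guarded lookup; the target frequency below is a made-up value:

  #include <linux/err.h>
  #include <linux/pm_opp.h>

  static void foo_pick_opp(struct device *dev)
  {
          unsigned long freq = 300000000;                 /* hypothetical rate in Hz */
          struct dev_pm_opp *opp;

          opp = dev_pm_opp_find_freq_ceil(dev, &freq);    /* takes a reference on success */
          if (!IS_ERR(opp))
                  dev_pm_opp_put(opp);                    /* drop it only if the lookup succeeded */
  }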
diff --git a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
index b30891718d8d..d60d92d2ffa1 100644
--- a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
+++ b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
@@ -950,7 +950,6 @@ static void pispbe_node_stop_streaming(struct vb2_queue *q)
kfree(job);
}
- pm_runtime_mark_last_busy(pispbe->dev);
pm_runtime_put_autosuspend(pispbe->dev);
dev_dbg(pispbe->dev, "Nodes streaming now 0x%x\n",
@@ -1742,7 +1741,6 @@ static int pispbe_probe(struct platform_device *pdev)
if (ret)
goto disable_devs_err;
- pm_runtime_mark_last_busy(pispbe->dev);
pm_runtime_put_autosuspend(pispbe->dev);
return 0;
diff --git a/drivers/media/platform/rockchip/rkvdec/rkvdec.c b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
index d707088ec0dc..d3b31f461194 100644
--- a/drivers/media/platform/rockchip/rkvdec/rkvdec.c
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
@@ -765,7 +765,6 @@ static void rkvdec_job_finish(struct rkvdec_ctx *ctx,
{
struct rkvdec_dev *rkvdec = ctx->dev;
- pm_runtime_mark_last_busy(rkvdec->dev);
pm_runtime_put_autosuspend(rkvdec->dev);
rkvdec_job_finish_no_pm(ctx, result);
}
@@ -1159,13 +1158,6 @@ static int rkvdec_probe(struct platform_device *pdev)
return ret;
}
- if (iommu_get_domain_for_dev(&pdev->dev)) {
- rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev);
-
- if (!rkvdec->empty_domain)
- dev_warn(rkvdec->dev, "cannot alloc new empty domain\n");
- }
-
vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
irq = platform_get_irq(pdev, 0);
@@ -1188,6 +1180,15 @@ static int rkvdec_probe(struct platform_device *pdev)
if (ret)
goto err_disable_runtime_pm;
+ if (iommu_get_domain_for_dev(&pdev->dev)) {
+ rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev);
+
+ if (IS_ERR(rkvdec->empty_domain)) {
+ rkvdec->empty_domain = NULL;
+ dev_warn(rkvdec->dev, "cannot alloc new empty domain\n");
+ }
+ }
+
return 0;
err_disable_runtime_pm:
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 8542238e0fb1..fa972effd4a2 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -89,7 +89,6 @@ static void hantro_job_finish(struct hantro_dev *vpu,
struct hantro_ctx *ctx,
enum vb2_buffer_state result)
{
- pm_runtime_mark_last_busy(vpu->dev);
pm_runtime_put_autosuspend(vpu->dev);
clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index bf6d8fa983bf..a6418ef782bc 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -48,10 +48,8 @@ static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
if (val >= 0)
ir_raw_event_store_edge(gpio_dev->rcdev, val == 1);
- if (pmdev) {
- pm_runtime_mark_last_busy(pmdev);
+ if (pmdev)
pm_runtime_put_autosuspend(pmdev);
- }
return IRQ_HANDLED;
}
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 7f3f47db4c98..e4275f8ee5db 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -555,7 +555,6 @@ EXPORT_SYMBOL(memstick_add_host);
*/
void memstick_remove_host(struct memstick_host *host)
{
- host->removing = 1;
flush_workqueue(workqueue);
mutex_lock(&host->lock);
if (host->card)
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 3878136227e4..5b5e9354fb2e 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -812,6 +812,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
int err;
host->eject = true;
+ msh->removing = true;
cancel_work_sync(&host->handle_req);
cancel_delayed_work_sync(&host->poll_card);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index a5f9241fa3f2..50bf3260f65d 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -965,7 +965,7 @@ static const struct gpio_chip gpio_chip_template = {
.ngpio = 32,
.direction_input = sm501_gpio_input,
.direction_output = sm501_gpio_output,
- .set_rv = sm501_gpio_set,
+ .set = sm501_gpio_set,
.get = sm501_gpio_get,
};
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 03bd5cd66798..8a144ec52201 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -620,7 +620,7 @@ static int tps65010_probe(struct i2c_client *client)
tps->chip.parent = &client->dev;
tps->chip.owner = THIS_MODULE;
- tps->chip.set_rv = tps65010_gpio_set;
+ tps->chip.set = tps65010_gpio_set;
tps->chip.direction_output = tps65010_output;
/* NOTE: only partial support for inputs; nyet IRQs */
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index fd71ba29f6b5..4b450d78a65f 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -570,7 +570,7 @@ static int ucb1x00_probe(struct mcp *mcp)
ucb->gpio.owner = THIS_MODULE;
ucb->gpio.base = pdata->gpio_base;
ucb->gpio.ngpio = 10;
- ucb->gpio.set_rv = ucb1x00_gpio_set;
+ ucb->gpio.set = ucb1x00_gpio_set;
ucb->gpio.get = ucb1x00_gpio_get;
ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index ff8f4404d10f..8eddbaa1fccd 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -438,7 +438,7 @@ static int pci1xxxx_gpio_setup(struct pci1xxxx_gpio *priv, int irq)
gchip->direction_output = pci1xxxx_gpio_direction_output;
gchip->get_direction = pci1xxxx_gpio_get_direction;
gchip->get = pci1xxxx_gpio_get;
- gchip->set_rv = pci1xxxx_gpio_set;
+ gchip->set = pci1xxxx_gpio_set;
gchip->set_config = pci1xxxx_gpio_set_config;
gchip->dbg_show = NULL;
gchip->base = -1;
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index c4e5e2c977be..1c156a3f845e 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -37,6 +37,8 @@
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
+#define COMMAND_ENABLE_DOORBELL BIT(6)
+#define COMMAND_DISABLE_DOORBELL BIT(7)
#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
@@ -48,6 +50,11 @@
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
+#define STATUS_DOORBELL_SUCCESS BIT(9)
+#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10)
+#define STATUS_DOORBELL_ENABLE_FAIL BIT(11)
+#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
+#define STATUS_DOORBELL_DISABLE_FAIL BIT(13)
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
@@ -62,6 +69,7 @@
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
#define PCI_ENDPOINT_TEST_FLAGS 0x2c
+
#define FLAG_USE_DMA BIT(0)
#define PCI_ENDPOINT_TEST_CAPS 0x30
@@ -70,6 +78,10 @@
#define CAP_MSIX BIT(2)
#define CAP_INTX BIT(3)
+#define PCI_ENDPOINT_TEST_DB_BAR 0x34
+#define PCI_ENDPOINT_TEST_DB_OFFSET 0x38
+#define PCI_ENDPOINT_TEST_DB_DATA 0x3c
+
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
@@ -100,6 +112,7 @@ enum pci_barno {
BAR_3,
BAR_4,
BAR_5,
+ NO_BAR = -1,
};
struct pci_endpoint_test {
@@ -841,6 +854,73 @@ static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
return 0;
}
+static int pci_endpoint_test_doorbell(struct pci_endpoint_test *test)
+{
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ int irq_type = test->irq_type;
+ enum pci_barno bar;
+ u32 data, status;
+ u32 addr;
+ int left;
+
+ if (irq_type < PCITEST_IRQ_TYPE_INTX ||
+ irq_type > PCITEST_IRQ_TYPE_MSIX) {
+ dev_err(dev, "Invalid IRQ type\n");
+ return -EINVAL;
+ }
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ COMMAND_ENABLE_DOORBELL);
+
+ left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
+
+ status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+ if (!left || (status & STATUS_DOORBELL_ENABLE_FAIL)) {
+ dev_err(dev, "Failed to enable doorbell\n");
+ return -EINVAL;
+ }
+
+ data = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_DATA);
+ addr = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_OFFSET);
+ bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0);
+
+ bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);
+
+ writel(data, test->bar[bar] + addr);
+
+ left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
+
+ status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+
+ if (!left || !(status & STATUS_DOORBELL_SUCCESS))
+ dev_err(dev, "Failed to trigger doorbell in endpoint\n");
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ COMMAND_DISABLE_DOORBELL);
+
+ wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));
+
+ status |= pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+
+ if (status & STATUS_DOORBELL_DISABLE_FAIL) {
+ dev_err(dev, "Failed to disable doorbell\n");
+ return -EINVAL;
+ }
+
+ if (!(status & STATUS_DOORBELL_SUCCESS))
+ return -EINVAL;
+
+ return 0;
+}
+
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -891,6 +971,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
case PCITEST_CLEAR_IRQ:
ret = pci_endpoint_test_clear_irq(test);
break;
+ case PCITEST_DOORBELL:
+ ret = pci_endpoint_test_doorbell(test);
+ break;
}
ret:
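The new PCITEST_DOORBELL command is exercised from user space through the pci_endpoint_test character device like the other commands in this ioctl switch. A minimal sketch of a caller, assuming the usual /dev/pci-endpoint-test.0 node and the PCITEST_DOORBELL definition from the UAPI pcitest header; the zero-on-success convention is inferred from the handler above:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/pcitest.h>      /* assumed to provide PCITEST_DOORBELL */

  int main(void)
  {
          int fd = open("/dev/pci-endpoint-test.0", O_RDWR);      /* node name may differ */
          int ret;

          if (fd < 0) {
                  perror("open");
                  return 1;
          }

          ret = ioctl(fd, PCITEST_DOORBELL);      /* non-zero indicates failure */
          printf("DOORBELL:\t%s\n", ret ? "NOT OKAY" : "OKAY");

          close(fd);
          return ret ? 1 : 0;
  }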
diff --git a/drivers/misc/ti_fpc202.c b/drivers/misc/ti_fpc202.c
index 0b1a6350c02b..7964e46c7448 100644
--- a/drivers/misc/ti_fpc202.c
+++ b/drivers/misc/ti_fpc202.c
@@ -333,7 +333,7 @@ static int fpc202_probe(struct i2c_client *client)
priv->gpio.base = -1;
priv->gpio.direction_input = fpc202_gpio_direction_input;
priv->gpio.direction_output = fpc202_gpio_direction_output;
- priv->gpio.set_rv = fpc202_gpio_set;
+ priv->gpio.set = fpc202_gpio_set;
priv->gpio.get = fpc202_gpio_get;
priv->gpio.ngpio = FPC202_GPIO_COUNT;
priv->gpio.parent = dev;
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 42878474e56e..60dbc815e501 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -99,6 +99,9 @@
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
+#define CD_STABLE_TIMEOUT_US 1000000
+#define CD_STABLE_MAX_SLEEP_US 10
+
/**
* struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map
*
@@ -206,12 +209,15 @@ struct sdhci_arasan_data {
* 19MHz instead
*/
#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
+/* Enable CD stable check before power-up */
+#define SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE BIT(3)
};
struct sdhci_arasan_of_data {
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
const struct sdhci_pltfm_data *pdata;
const struct sdhci_arasan_clk_ops *clk_ops;
+ u32 quirks;
};
static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
@@ -514,6 +520,24 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
+static void sdhci_arasan_set_power_and_bus_voltage(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ /*
+ * Ensure that the card detect logic has stabilized before powering up; this is
+ * necessary after a host controller reset.
+ */
+ if (mode == MMC_POWER_UP && sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE)
+ read_poll_timeout(sdhci_readl, reg, reg & SDHCI_CD_STABLE, CD_STABLE_MAX_SLEEP_US,
+ CD_STABLE_TIMEOUT_US, false, host, SDHCI_PRESENT_STATE);
+
+ sdhci_set_power_and_bus_voltage(host, mode, vdd);
+}
+
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -521,7 +545,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .set_power = sdhci_set_power_and_bus_voltage,
+ .set_power = sdhci_arasan_set_power_and_bus_voltage,
.hw_reset = sdhci_arasan_hw_reset,
};
@@ -570,7 +594,7 @@ static const struct sdhci_ops sdhci_arasan_cqe_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .set_power = sdhci_set_power_and_bus_voltage,
+ .set_power = sdhci_arasan_set_power_and_bus_voltage,
.irq = sdhci_arasan_cqhci_irq,
};
@@ -1447,6 +1471,7 @@ static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = {
.pdata = &sdhci_arasan_zynqmp_pdata,
.clk_ops = &zynqmp_clk_ops,
+ .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static const struct sdhci_arasan_clk_ops versal_clk_ops = {
@@ -1457,6 +1482,7 @@ static const struct sdhci_arasan_clk_ops versal_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_versal_data = {
.pdata = &sdhci_arasan_zynqmp_pdata,
.clk_ops = &versal_clk_ops,
+ .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static const struct sdhci_arasan_clk_ops versal_net_clk_ops = {
@@ -1467,6 +1493,7 @@ static const struct sdhci_arasan_clk_ops versal_net_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_versal_net_data = {
.pdata = &sdhci_arasan_versal_net_pdata,
.clk_ops = &versal_net_clk_ops,
+ .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
@@ -1937,6 +1964,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "rockchip,rk3399-sdhci-5.1"))
sdhci_arasan_update_clockmultiplier(host, 0x0);
+ sdhci_arasan->quirks |= data->quirks;
+
if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) {
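Editor's note: the quirk added above amounts to a bounded poll on a status bit before continuing. Below is a minimal standalone C sketch of that pattern, assuming hypothetical read_present_state()/CD_STABLE stand-ins rather than the kernel's sdhci_readl()/read_poll_timeout() helpers; the 10 us sleep and 1 s budget mirror the constants in the patch.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define CD_STABLE        (1u << 17)   /* hypothetical bit position for "card detect stable" */
#define POLL_SLEEP_US    10
#define POLL_TIMEOUT_US  1000000

/* Stand-in for the register read; a real driver reads SDHCI_PRESENT_STATE. */
static uint32_t read_present_state(void)
{
	static int calls;

	return (++calls > 3) ? CD_STABLE : 0;  /* pretend the bit settles after a few polls */
}

/* Poll until the bit is set or the time budget is spent. */
static int wait_cd_stable(void)
{
	uint64_t waited_us = 0;

	while (!(read_present_state() & CD_STABLE)) {
		if (waited_us >= POLL_TIMEOUT_US)
			return -1;  /* the kernel helper returns -ETIMEDOUT */
		usleep(POLL_SLEEP_US);
		waited_us += POLL_SLEEP_US;
	}

	return 0;
}

int main(void)
{
	printf("card detect stable: %s\n", wait_cd_stable() ? "timed out" : "ok");
	return 0;
}

In the driver the poll result is deliberately ignored, so a timeout only delays the power-up rather than failing it.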
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 4c2ae71770f7..3a1de477e9af 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -287,6 +287,20 @@
#define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */
+static void sdhci_gli_mask_replay_timer_timeout(struct pci_dev *pdev)
+{
+ int aer;
+ u32 value;
+
+ /* mask the replay timer timeout of AER */
+ aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (aer) {
+ pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
+ value |= PCI_ERR_COR_REP_TIMER;
+ pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
+ }
+}
+
static inline void gl9750_wt_on(struct sdhci_host *host)
{
u32 wt_value;
@@ -607,7 +621,6 @@ static void gl9750_hw_setting(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct pci_dev *pdev;
- int aer;
u32 value;
pdev = slot->chip->pdev;
@@ -626,12 +639,7 @@ static void gl9750_hw_setting(struct sdhci_host *host)
pci_set_power_state(pdev, PCI_D0);
/* mask the replay timer timeout of AER */
- aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (aer) {
- pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
- value |= PCI_ERR_COR_REP_TIMER;
- pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
- }
+ sdhci_gli_mask_replay_timer_timeout(pdev);
gl9750_wt_off(host);
}
@@ -806,7 +814,6 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
- int aer;
u32 value;
gl9755_wt_on(pdev);
@@ -841,12 +848,7 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
pci_set_power_state(pdev, PCI_D0);
/* mask the replay timer timeout of AER */
- aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (aer) {
- pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
- value |= PCI_ERR_COR_REP_TIMER;
- pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
- }
+ sdhci_gli_mask_replay_timer_timeout(pdev);
gl9755_wt_off(pdev);
}
@@ -1751,7 +1753,7 @@ cleanup:
return ret;
}
-static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
+static void gl9763e_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
@@ -1780,6 +1782,9 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5);
pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value);
+ /* mask the replay timer timeout of AER */
+ sdhci_gli_mask_replay_timer_timeout(pdev);
+
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
@@ -1923,7 +1928,7 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
gli_pcie_enable_msi(slot);
host->mmc_host_ops.hs400_enhanced_strobe =
gl9763e_hs400_enhanced_strobe;
- gli_set_gl9763e(slot);
+ gl9763e_hw_setting(slot);
sdhci_enable_v4_mode(host);
return 0;
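Editor's note: the helper factored out above is a plain read-modify-write on the AER Correctable Error Mask register. A small standalone C sketch of the same pattern, assuming a simulated config space and hypothetical offsets/bit values in place of the real pci_read_config_dword()/PCI_ERR_COR_* definitions:

#include <stdint.h>
#include <stdio.h>

static uint32_t cfg[1024];               /* simulated 4 KiB config space */

#define AER_CAP_OFF       0x100          /* hypothetical AER capability offset */
#define ERR_COR_MASK_OFF  0x14           /* correctable error mask within the capability */
#define REP_TIMER_BIT     (1u << 12)     /* replay timer timeout */

static uint32_t cfg_read(unsigned int off)          { return cfg[off / 4]; }
static void cfg_write(unsigned int off, uint32_t v) { cfg[off / 4] = v; }

/* Set the replay-timer mask bit without disturbing the other mask bits. */
static void mask_replay_timer_timeout(unsigned int aer)
{
	uint32_t value = cfg_read(aer + ERR_COR_MASK_OFF);

	value |= REP_TIMER_BIT;
	cfg_write(aer + ERR_COR_MASK_OFF, value);
}

int main(void)
{
	cfg_write(AER_CAP_OFF + ERR_COR_MASK_OFF, 0x1);  /* pretend one bit was already masked */
	mask_replay_timer_timeout(AER_CAP_OFF);
	printf("mask = 0x%x\n", (unsigned int)cfg_read(AER_CAP_OFF + ERR_COR_MASK_OFF));
	return 0;
}

Factoring the sequence into one helper keeps the GL9750, GL9755 and GL9763E call sites identical, which is the point of the refactor.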
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index e4fc345be7e5..17e62c61b6e6 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -156,6 +156,7 @@ struct sdhci_am654_data {
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
+#define SDHCI_AM654_QUIRK_DISABLE_HS400 BIT(2)
};
struct window {
@@ -765,6 +766,7 @@ static int sdhci_am654_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ struct device *dev = mmc_dev(host->mmc);
u32 ctl_cfg_2 = 0;
u32 mask;
u32 val;
@@ -820,6 +822,12 @@ static int sdhci_am654_init(struct sdhci_host *host)
if (ret)
goto err_cleanup_host;
+ if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_DISABLE_HS400 &&
+ host->mmc->caps2 & (MMC_CAP2_HS400 | MMC_CAP2_HS400_ES)) {
+ dev_info(dev, "HS400 mode not supported on this silicon revision, disabling it\n");
+ host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
+ }
+
ret = __sdhci_add_host(host);
if (ret)
goto err_cleanup_host;
@@ -883,6 +891,12 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
return 0;
}
+static const struct soc_device_attribute sdhci_am654_descope_hs400[] = {
+ { .family = "AM62PX", .revision = "SR1.0" },
+ { .family = "AM62PX", .revision = "SR1.1" },
+ { /* sentinel */ }
+};
+
static const struct of_device_id sdhci_am654_of_match[] = {
{
.compatible = "ti,am654-sdhci-5.1",
@@ -970,6 +984,10 @@ static int sdhci_am654_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "parsing dt failed\n");
+ soc = soc_device_match(sdhci_am654_descope_hs400);
+ if (soc)
+ sdhci_am654->quirks |= SDHCI_AM654_QUIRK_DISABLE_HS400;
+
host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
diff --git a/drivers/most/core.c b/drivers/most/core.c
index a635d5082ebb..da319d108ea1 100644
--- a/drivers/most/core.c
+++ b/drivers/most/core.c
@@ -538,8 +538,8 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
dev = bus_find_device_by_name(&mostbus, NULL, mdev);
if (!dev)
return NULL;
- put_device(dev);
iface = dev_get_drvdata(dev);
+ put_device(dev);
list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
if (!strcmp(dev_name(&c->dev), mdev_ch))
return c;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 2fca8e84ab10..4edc8e6b6b64 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -95,13 +95,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port, bool *update_slave_arr);
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
static void ad_tx_machine(struct port *port);
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
+static void ad_periodic_machine(struct port *port);
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
static void ad_agg_selection_logic(struct aggregator *aggregator,
bool *update_slave_arr);
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
-static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
static void ad_enable_collecting(struct port *port);
static void ad_disable_distributing(struct port *port,
bool *update_slave_arr);
@@ -1307,10 +1307,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
* case of EXPIRED even if LINK_DOWN didn't arrive for
* the port.
*/
- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
+ /* Based on IEEE 802.1AX-2014, Figure 6-18 - Receive
* machine state diagram, the state should be
+ * Partner_Oper_Port_State.Synchronization = FALSE;
+ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
+ * start current_while_timer(Short Timeout);
+ * Actor_Oper_Port_State.Expired = TRUE;
+ */
+ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
port->actor_oper_port_state |= LACP_STATE_EXPIRED;
port->sm_vars |= AD_PORT_CHURNED;
@@ -1417,11 +1423,10 @@ static void ad_tx_machine(struct port *port)
/**
* ad_periodic_machine - handle a port's periodic state machine
* @port: the port we're looking at
- * @bond_params: bond parameters we will use
*
* Turn the ntt flag on periodically to perform periodic transmission of lacpdu's.
*/
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
+static void ad_periodic_machine(struct port *port)
{
periodic_states_t last_state;
@@ -1430,8 +1435,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
- !bond_params->lacp_active) {
+ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
/* check if state machine should change state */
@@ -1955,16 +1959,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
/**
* ad_initialize_port - initialize a given port's parameters
* @port: the port we're looking at
- * @lacp_fast: boolean. whether fast periodic should be used
+ * @bond_params: bond parameters we will use
*/
-static void ad_initialize_port(struct port *port, int lacp_fast)
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
{
static const struct port_params tmpl = {
.system_priority = 0xffff,
.key = 1,
.port_number = 1,
.port_priority = 0xff,
- .port_state = 1,
+ .port_state = 0,
};
static const struct lacpdu lacpdu = {
.subtype = 0x01,
@@ -1982,12 +1986,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->actor_port_priority = 0xff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
- port->actor_admin_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = LACP_STATE_AGGREGATION;
+ port->actor_oper_port_state = LACP_STATE_AGGREGATION;
+ if (bond_params->lacp_active) {
+ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ }
- if (lacp_fast)
+ if (bond_params->lacp_fast)
port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
@@ -2201,7 +2207,7 @@ void bond_3ad_bind_slave(struct slave *slave)
/* port initialization */
port = &(SLAVE_AD_INFO(slave)->port);
- ad_initialize_port(port, bond->params.lacp_fast);
+ ad_initialize_port(port, &bond->params);
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
@@ -2513,7 +2519,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
ad_rx_machine(NULL, port);
- ad_periodic_machine(port, &bond->params);
+ ad_periodic_machine(port);
ad_port_selection_logic(port, &update_slave_arr);
ad_mux_machine(port, &update_slave_arr);
ad_tx_machine(port);
@@ -2883,6 +2889,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
spin_unlock_bh(&bond->mode_lock);
}
+/**
+ * bond_3ad_update_lacp_active - change the lacp active
+ * @bond: bonding struct
+ *
+ * Update actor_oper_port_state when lacp_active is modified.
+ */
+void bond_3ad_update_lacp_active(struct bonding *bond)
+{
+ struct port *port = NULL;
+ struct list_head *iter;
+ struct slave *slave;
+ int lacp_active;
+
+ lacp_active = bond->params.lacp_active;
+ spin_lock_bh(&bond->mode_lock);
+ bond_for_each_slave(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave)->port);
+ if (lacp_active)
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ else
+ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
+ }
+ spin_unlock_bh(&bond->mode_lock);
+}
+
size_t bond_3ad_stats_size(void)
{
return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
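Editor's note: the new bond_3ad_update_lacp_active() is essentially a walk over all ports that sets or clears one actor state bit under the mode lock. A standalone C sketch of that bit toggling, assuming a simplified port array instead of the bonding slave list (LACP_STATE_LACP_ACTIVITY is modeled here as bit 0):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LACP_STATE_LACP_ACTIVITY  0x01

struct port {
	uint8_t actor_oper_port_state;
};

/* Set or clear the activity bit on every port, mirroring the update above. */
static void update_lacp_active(struct port *ports, int n, bool lacp_active)
{
	for (int i = 0; i < n; i++) {
		if (lacp_active)
			ports[i].actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
		else
			ports[i].actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
	}
}

int main(void)
{
	struct port ports[2] = { { 0x05 }, { 0x04 } };

	update_lacp_active(ports, 2, false);
	printf("port0 0x%02x, port1 0x%02x\n",
	       ports[0].actor_oper_port_state, ports[1].actor_oper_port_state);
	return 0;
}

With the periodic machine no longer gated on lacp_active, clearing the activity bit is what makes a passive bond stop initiating LACPDU transmission while still responding to an active partner.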
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 1d639a3be6ba..3b6f815c55ff 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1660,6 +1660,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
newval->string, newval->value);
bond->params.lacp_active = newval->value;
+ bond_3ad_update_lacp_active(bond);
return 0;
}
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 5a95877b7419..313e1d241f01 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -607,8 +607,8 @@ static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
gpio->get_direction = mcp251x_gpio_get_direction;
gpio->get = mcp251x_gpio_get;
gpio->get_multiple = mcp251x_gpio_get_multiple;
- gpio->set_rv = mcp251x_gpio_set;
- gpio->set_multiple_rv = mcp251x_gpio_set_multiple;
+ gpio->set = mcp251x_gpio_set;
+ gpio->set_multiple = mcp251x_gpio_set_multiple;
gpio->base = -1;
gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
gpio->names = mcp251x_gpio_names;
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 9942fb6f7f4b..829b1f087e9e 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2078,7 +2078,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
/* Start search operation */
reg = ARL_SRCH_STDN;
- b53_write8(priv, offset, B53_ARL_SRCH_CTL, reg);
+ b53_write8(priv, B53_ARLIO_PAGE, offset, reg);
do {
ret = b53_arl_search_wait(priv);
diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c
index 76e490070e9c..c354abdafc1b 100644
--- a/drivers/net/dsa/microchip/ksz8.c
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -36,15 +36,14 @@
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
- regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
+ ksz_rmw8(dev, addr, bits, set ? bits : 0);
}
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bool set)
{
- regmap_update_bits(ksz_regmap_8(dev),
- dev->dev_ops->get_port_addr(port, offset),
- bits, set ? bits : 0);
+ ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits,
+ set ? bits : 0);
}
/**
@@ -1955,16 +1954,19 @@ int ksz8_setup(struct dsa_switch *ds)
ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
/* Enable aggressive back off algorithm in half duplex mode. */
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1,
- SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ if (ret)
+ return ret;
/*
* Make sure unicast VLAN boundary is set as default and
* enable no excessive collision drop.
*/
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_2,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ if (ret)
+ return ret;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 7292bfe2f7ca..9568cc391fe3 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1447,6 +1447,7 @@ static const struct regmap_range ksz8873_valid_regs[] = {
regmap_reg_range(0x3f, 0x3f),
/* advanced control registers */
+ regmap_reg_range(0x43, 0x43),
regmap_reg_range(0x60, 0x6f),
regmap_reg_range(0x70, 0x75),
regmap_reg_range(0x76, 0x78),
@@ -2456,6 +2457,12 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
+ /* HSR ports are set up once, so we need to use the assigned membership
+ * when the port is enabled.
+ */
+ if (!port_member && p->stp_state == BR_STATE_FORWARDING &&
+ (dev->hsr_ports & BIT(port)))
+ port_member = dev->hsr_ports;
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index e5bed4237ff4..548b85befbf4 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2187,7 +2187,7 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
gc->direction_input = mt7530_gpio_direction_input;
gc->direction_output = mt7530_gpio_direction_output;
gc->get = mt7530_gpio_get;
- gc->set_rv = mt7530_gpio_set;
+ gc->set = mt7530_gpio_set;
gc->base = -1;
gc->ngpio = 15;
gc->can_sleep = true;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 4f9687ab3b2b..9d31b8258268 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -2317,7 +2317,7 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.parent = vsc->dev;
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
- vsc->gc.set_rv = vsc73xx_gpio_set;
+ vsc->gc.set = vsc73xx_gpio_set;
vsc->gc.direction_input = vsc73xx_gpio_direction_input;
vsc->gc.direction_output = vsc73xx_gpio_direction_output;
vsc->gc.get_direction = vsc73xx_gpio_get_direction;
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 9ab964c536e1..a802f95df99d 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -579,6 +579,8 @@ static struct platform_driver airoha_npu_driver = {
};
module_platform_driver(airoha_npu_driver);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index c354d536bc66..88694b08afa1 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -508,9 +508,11 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}
-struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
- u32 hash)
+static struct airoha_foe_entry *
+airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
{
+ lockdep_assert_held(&ppe_lock);
+
if (hash < PPE_SRAM_NUM_ENTRIES) {
u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
struct airoha_eth *eth = ppe->eth;
@@ -537,6 +539,18 @@ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe;
+
+ spin_lock_bh(&ppe_lock);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ spin_unlock_bh(&ppe_lock);
+
+ return hwe;
+}
+
static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
struct airoha_foe_entry *hwe)
{
@@ -651,7 +665,7 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *f;
int type;
- hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
if (!hwe_p)
return -EINVAL;
@@ -703,7 +717,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
spin_lock_bh(&ppe_lock);
- hwe = airoha_ppe_foe_get_entry(ppe, hash);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
if (!hwe)
goto unlock;
@@ -722,10 +736,8 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
continue;
}
- if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
- e->hash = 0xffff;
+ if (!airoha_ppe_foe_compare_entry(e, hwe))
continue;
- }
airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
commit_done = true;
@@ -818,7 +830,7 @@ airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
u32 ib1, state;
int idle;
- hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
if (!hwe)
continue;
@@ -855,7 +867,7 @@ static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
if (e->hash == 0xffff)
goto unlock;
- hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
if (!hwe_p)
goto unlock;
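Editor's note: the split above follows the common locked/unlocked accessor pattern: internal callers that already hold ppe_lock use the _locked variant (guarded by lockdep_assert_held()), while the exported airoha_ppe_foe_get_entry() takes the lock itself. A minimal standalone C sketch of that shape, using a pthread mutex and a hypothetical table in place of the PPE FOE state:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ppe_lock = PTHREAD_MUTEX_INITIALIZER;
static int foe_table[16];

/* Callers must already hold ppe_lock (the kernel enforces this with lockdep). */
static int *foe_get_entry_locked(unsigned int hash)
{
	return &foe_table[hash % 16];
}

/* Public accessor: wraps the locked helper with the lock itself. */
static int *foe_get_entry(unsigned int hash)
{
	int *entry;

	pthread_mutex_lock(&ppe_lock);
	entry = foe_get_entry_locked(hash);
	pthread_mutex_unlock(&ppe_lock);

	return entry;
}

int main(void)
{
	*foe_get_entry(5) = 42;
	printf("entry 5 = %d\n", foe_table[5]);
	return 0;
}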
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5578ddcb465d..31e3d825b4bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -926,15 +926,21 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
+ unsigned int *offset,
gfp_t gfp)
{
netmem_ref netmem;
- netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
if (!netmem)
return 0;
- *mapping = page_pool_get_dma_addr_netmem(netmem);
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
return netmem;
}
@@ -1029,7 +1035,7 @@ static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_addr_t mapping;
netmem_ref netmem;
- netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
if (!netmem)
return -ENOMEM;
@@ -3819,7 +3825,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size / rx_size_fac;
pp.nid = numa_node;
- pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
@@ -3851,6 +3856,12 @@ err_destroy_pp:
return PTR_ERR(pool);
}
+static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
u16 mem_size;
@@ -3889,6 +3900,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
+ bnxt_enable_rx_page_pool(rxr);
rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
if (rc < 0)
@@ -5320,7 +5332,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
- netdev_assert_locked(bp->dev);
+ netdev_assert_locked_or_invisible(bp->dev);
/* Under netdev instance lock and all our NAPIs have been disabled.
* It's safe to delete the hash table.
@@ -8004,7 +8016,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
rx_rings = min_t(int, rx_rings, hwr.grp);
hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
- if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ if (bnxt_ulp_registered(bp->edev) &&
+ hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
hwr.cp = min_t(int, hwr.cp, hwr.stat);
rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
@@ -8012,6 +8025,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
hwr.rx = rx_rings << 1;
tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ if (hwr.tx != bp->tx_nr_rings) {
+ netdev_warn(bp->dev,
+ "Able to reserve only %d out of %d requested TX rings\n",
+ hwr.tx, bp->tx_nr_rings);
+ }
bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
@@ -12839,6 +12857,17 @@ static int bnxt_set_xps_mapping(struct bnxt *bp)
return rc;
}
+static int bnxt_tx_nr_rings(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
+ bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -12856,6 +12885,13 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (rc)
return rc;
+ /* Make adjustments if the reserved TX rings are fewer than requested */
+ bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+ if (bp->tx_nr_rings_xdp) {
+ bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
+ }
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -16031,6 +16067,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
goto err_reset;
}
+ bnxt_enable_rx_page_pool(rxr);
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
@@ -16312,7 +16349,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
bp->rx_nr_rings = bp->cp_nr_rings;
bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
}
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
@@ -16344,7 +16381,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bnxt_trim_dflt_sh_rings(bp);
else
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
@@ -16357,7 +16394,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
if (sh)
bnxt_trim_dflt_sh_rings(bp);
@@ -16366,7 +16403,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -16400,7 +16437,7 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
if (rc)
goto init_dflt_ring_err;
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
bnxt_set_dflt_rfs(bp);
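Editor's note: the two new helpers encode the relationship between the per-TC ring count and the total TX ring count, which matters once __bnxt_reserve_rings() can hand back fewer rings than requested. A standalone C sketch of that arithmetic with made-up numbers:

#include <stdio.h>

/* Total TX rings for a given per-TC count; num_tc == 0 means one traffic class. */
static int tx_nr_rings(int num_tc, int per_tc)
{
	return num_tc ? per_tc * num_tc : per_tc;
}

/* Recompute the per-TC count after the reservation possibly trimmed the total. */
static int tx_nr_rings_per_tc(int num_tc, int total)
{
	return num_tc ? total / num_tc : total;
}

int main(void)
{
	int num_tc = 3, per_tc = 4;
	int requested = tx_nr_rings(num_tc, per_tc);  /* 12 */
	int reserved = 9;                             /* pretend firmware granted fewer */

	printf("requested %d rings, %d per TC after trimming to %d reserved\n",
	       requested, tx_nr_rings_per_tc(num_tc, reserved), reserved);
	return 0;
}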
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ce95fad8cedd..16d28a8b3b56 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3090,7 +3090,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(p++) += ((u64)val) << 32;
+ *p += ((u64)val) << 32;
}
}
@@ -5113,7 +5113,8 @@ static const struct macb_config sama7g5_gem_config = {
static const struct macb_config sama7g5_emac_config = {
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
- MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -5398,19 +5399,16 @@ static void macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
+ unregister_netdev(dev);
phy_exit(bp->sgmii_phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
- unregister_netdev(dev);
+ device_set_wakeup_enable(&bp->pdev->dev, 0);
cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- if (!pm_runtime_suspended(&pdev->dev)) {
- macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
- bp->rx_clk, bp->tsu_clk);
- pm_runtime_set_suspended(&pdev->dev);
- }
+ pm_runtime_set_suspended(&pdev->dev);
phylink_destroy(bp->phylink);
free_netdev(dev);
}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index cc60ee454bf9..6bbf6e5584e5 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1099,7 +1099,7 @@ get_stats (struct net_device *dev)
dev->stats.rx_bytes += dr32(OctetRcvOk);
dev->stats.tx_bytes += dr32(OctetXmtOk);
- dev->stats.multicast = dr32(McstFramesRcvdOk);
+ dev->stats.multicast += dr32(McstFramesRcvdOk);
dev->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d730af4a50c7..bb5d2fa15736 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3856,8 +3856,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 5d0c0906878d..a863f7841210 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1750,16 +1750,17 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
- if (!netdev->phydev)
+ if (!phydev)
return;
- phy_disconnect(netdev->phydev);
+ phy_disconnect(phydev);
if (of_phy_is_fixed_link(priv->dev->of_node))
of_phy_deregister_fixed_link(priv->dev->of_node);
if (priv->use_ncsi)
- fixed_phy_unregister(netdev->phydev);
+ fixed_phy_unregister(phydev);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 0c588e03b15e..d09e456f14c0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -371,8 +371,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
of_node_put(ptp_node);
}
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
if (ptp)
info->phc_index = ptp->phc_index;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index f63a29e2e031..de0fb272c847 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -829,19 +829,29 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
struct platform_device *ierb_pdev;
struct device_node *ierb_node;
+ int ret;
ierb_node = of_find_compatible_node(NULL, NULL,
"fsl,ls1028a-enetc-ierb");
- if (!ierb_node || !of_device_is_available(ierb_node))
+ if (!ierb_node)
return -ENODEV;
+ if (!of_device_is_available(ierb_node)) {
+ of_node_put(ierb_node);
+ return -ENODEV;
+ }
+
ierb_pdev = of_find_device_by_node(ierb_node);
of_node_put(ierb_node);
if (!ierb_pdev)
return -EPROBE_DEFER;
- return enetc_ierb_register_pf(ierb_pdev, pdev);
+ ret = enetc_ierb_register_pf(ierb_pdev, pdev);
+
+ put_device(&ierb_pdev->dev);
+
+ return ret;
}
static const struct enetc_si_ops enetc_psi_ops = {
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 28f53cf2a174..5fd1f7327680 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1475,8 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev,
if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
of_node_put(ptp_node);
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
}
if (ptp)
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 1f411d7c4373..1be1b1ef31ee 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -2870,6 +2870,8 @@ static void gve_shutdown(struct pci_dev *pdev)
struct gve_priv *priv = netdev_priv(netdev);
bool was_up = netif_running(priv->dev);
+ netif_device_detach(netdev);
+
rtnl_lock();
netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index 503cfbfb4a8a..83cf75bf7a17 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -53,9 +53,11 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
{
int ret;
- ASSERT_RTNL();
+ if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
if (netif_running(priv->netdev)) {
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_warn(&priv->pdev->dev,
"failed to reset because port is up\n");
return -EBUSY;
@@ -64,7 +66,6 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
netif_device_detach(priv->netdev);
priv->reset_type = type;
- set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
if (ret) {
@@ -84,29 +85,26 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
type != priv->reset_type)
return 0;
- ASSERT_RTNL();
-
- clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
ret = hbg_rebuild(priv);
if (ret) {
priv->stats.reset_fail_cnt++;
set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
return ret;
}
netif_device_attach(priv->netdev);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_info(&priv->pdev->dev, "reset done\n");
return ret;
}
-/* must be protected by rtnl lock */
int hbg_reset(struct hbg_priv *priv)
{
int ret;
- ASSERT_RTNL();
ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
if (ret)
return ret;
@@ -171,7 +169,6 @@ static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct hbg_priv *priv = netdev_priv(netdev);
- rtnl_lock();
hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
}
@@ -181,7 +178,6 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev)
struct hbg_priv *priv = netdev_priv(netdev);
hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
- rtnl_unlock();
}
static const struct pci_error_handlers hbg_pci_err_handler = {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 8cca8316ba40..d0aa0661ecd4 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -12,6 +12,8 @@
#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000)
+#define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000)
+#define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000)
/* little endian or big endian.
* ctrl means packet description, data means skb packet data
*/
@@ -228,6 +230,9 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
+ u32 link_status;
+ int ret;
+
hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
@@ -239,8 +244,14 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
- if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
- HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
+ /* wait for the MAC link to come up */
+ ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
+ link_status,
+ FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B,
+ link_status),
+ HBG_MAC_LINK_WAIT_INTERVAL_US,
+ HBG_MAC_LINK_WAIT_TIMEOUT_US);
+ if (ret)
hbg_np_link_fail_task_schedule(priv);
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
index 2883a5899ae2..8b6110599e10 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
@@ -29,7 +29,12 @@ static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir)
static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring)
{
- return (ring->ntu + ring->len - ring->ntc) % ring->len;
+ u32 len = READ_ONCE(ring->len);
+
+ if (!len)
+ return 0;
+
+ return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len;
}
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);
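Editor's note: the hardened hbg_get_queue_used_num() is the classic circular-ring occupancy formula with a guard for a ring whose length has been torn down to zero (which would otherwise divide by zero). A standalone C sketch:

#include <stdint.h>
#include <stdio.h>

/* Entries in use: (producer + len - consumer) % len, with len == 0 treated as empty. */
static uint32_t ring_used(uint32_t ntu, uint32_t ntc, uint32_t len)
{
	if (!len)
		return 0;

	return (ntu + len - ntc) % len;
}

int main(void)
{
	printf("%u\n", (unsigned int)ring_used(3, 1, 8));  /* 2 entries in use */
	printf("%u\n", (unsigned int)ring_used(1, 6, 8));  /* wrapped producer: 3 entries in use */
	printf("%u\n", (unsigned int)ring_used(0, 0, 0));  /* torn-down ring reads as empty */
	return 0;
}

The READ_ONCE() annotations in the driver version additionally keep the three fields from being reloaded inconsistently while another context resets the ring.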
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2098f00b3cd3..8a8a01a4bb40 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -510,6 +510,7 @@ enum ice_pf_flags {
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
ICE_FLAG_UNPLUG_AUX_DEV,
+ ICE_FLAG_AUX_DEV_CREATED,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 9e4adc43e474..b53561c34708 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -13,16 +13,45 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
-static unsigned long ice_adapter_index(u64 dsn)
+#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63)
+
+#define ICE_ADAPTER_INDEX_E825C \
+ (ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
+
+static u64 ice_adapter_index(struct pci_dev *pdev)
{
+ switch (pdev->device) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ /* E825C devices have multiple NACs which are connected to the
+ * same clock source, and which must share the same
+ * ice_adapter structure. We can't use the serial number since
+ * each NAC has its own NVM generated with its own unique
+ * Device Serial Number. Instead, rely on the embedded nature
+ * of the E825C devices, and use a fixed index. This relies on
+ * the fact that all E825C physical functions in a given
+ * system are part of the same overall device.
+ */
+ return ICE_ADAPTER_INDEX_E825C;
+ default:
+ return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
+ }
+}
+
+static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
+{
+ u64 index = ice_adapter_index(pdev);
+
#if BITS_PER_LONG == 64
- return dsn;
+ return index;
#else
- return (u32)dsn ^ (u32)(dsn >> 32);
+ return (u32)index ^ (u32)(index >> 32);
#endif
}
-static struct ice_adapter *ice_adapter_new(u64 dsn)
+static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
{
struct ice_adapter *adapter;
@@ -30,7 +59,7 @@ static struct ice_adapter *ice_adapter_new(u64 dsn)
if (!adapter)
return NULL;
- adapter->device_serial_number = dsn;
+ adapter->index = ice_adapter_index(pdev);
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
spin_lock_init(&adapter->txq_ctx_lock);
refcount_set(&adapter->refcount, 1);
@@ -64,24 +93,23 @@ static void ice_adapter_free(struct ice_adapter *adapter)
*/
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
- u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
unsigned long index;
int err;
- index = ice_adapter_index(dsn);
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
if (err == -EBUSY) {
adapter = xa_load(&ice_adapters, index);
refcount_inc(&adapter->refcount);
- WARN_ON_ONCE(adapter->device_serial_number != dsn);
+ WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
return adapter;
}
if (err)
return ERR_PTR(err);
- adapter = ice_adapter_new(dsn);
+ adapter = ice_adapter_new(pdev);
if (!adapter)
return ERR_PTR(-ENOMEM);
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
@@ -100,11 +128,10 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
*/
void ice_adapter_put(struct pci_dev *pdev)
{
- u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
unsigned long index;
- index = ice_adapter_index(dsn);
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))
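Editor's note: two details carry the E825C change above: a fixed index (device ID plus a high flag bit) replaces the DSN for those parts, and on 32-bit builds the 64-bit index is folded into the xarray's unsigned long by XOR-ing its halves. A standalone C sketch of the folding, with hypothetical index values:

#include <stdint.h>
#include <stdio.h>

#define FIXED_INDEX_FLAG  (1ULL << 63)   /* mirrors ICE_ADAPTER_FIXED_INDEX */

/* Fold a 64-bit adapter index into whatever width unsigned long has. */
static unsigned long xa_index_from_u64(uint64_t index)
{
	if (sizeof(unsigned long) == 8)
		return (unsigned long)index;

	return (uint32_t)index ^ (uint32_t)(index >> 32);
}

int main(void)
{
	uint64_t dsn_index   = 0x0123456789abcdefULL & ~FIXED_INDEX_FLAG;  /* DSN-derived */
	uint64_t fixed_index = 0x579c | FIXED_INDEX_FLAG;                  /* hypothetical device id */

	printf("dsn-based index   -> %#lx\n", xa_index_from_u64(dsn_index));
	printf("fixed E825C index -> %#lx\n", xa_index_from_u64(fixed_index));
	return 0;
}

Because the DSN-derived indices have the top bit cleared, the flagged fixed index cannot alias them as a 64-bit value; the full value is kept in the adapter so the WARN_ON_ONCE() can still catch a fold collision on 32-bit systems.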
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index db66d03c9f96..e95266c7f20b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -33,7 +33,7 @@ struct ice_port_list {
* @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
- * @device_serial_number: DSN cached for collision detection on 32bit systems
+ * @index: 64-bit index cached for collision detection on 32bit systems
*/
struct ice_adapter {
refcount_t refcount;
@@ -44,7 +44,7 @@ struct ice_adapter {
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
- u64 device_serial_number;
+ u64 index;
};
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index e2a036ce76ca..3b2d9c436979 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2377,7 +2377,13 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
* The function will apply the new Tx topology from the package buffer
* if available.
*
- * Return: zero when update was successful, negative values otherwise.
+ * Return:
+ * * 0 - Successfully applied topology configuration.
+ * * -EBUSY - Failed to acquire global configuration lock.
+ * * -EEXIST - Topology configuration has already been applied.
+ * * -EIO - Unable to apply topology configuration.
+ * * -ENODEV - Failed to re-initialize device after applying configuration.
+ * * Other negative error codes indicate unexpected failures.
*/
int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
{
@@ -2410,7 +2416,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
- return status;
+ return -EIO;
}
/* Is default topology already applied ? */
@@ -2497,31 +2503,45 @@ update_topo:
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
- return status;
+ return -EBUSY;
}
/* Check if reset was triggered already. */
reg = rd32(hw, GLGEN_RSTAT);
if (reg & GLGEN_RSTAT_DEVSTATE_M) {
- /* Reset is in progress, re-init the HW again */
ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
ice_check_reset(hw);
- return 0;
+ /* Reset is in progress, re-init the HW again */
+ goto reinit_hw;
}
/* Set new topology */
status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
- return status;
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n",
+ ERR_PTR(status));
+ /* Only report -EIO here, as the caller checks the error value
+ * and prints an informational message stating that the driver
+ * failed to program the Tx topology.
+ */
+ status = -EIO;
}
- /* New topology is updated, delay 1 second before issuing the CORER */
+ /* Even if the Tx topology config failed, we need a CORE reset here to
+ * clear the global configuration lock. Delay 1 second to allow the
+ * hardware to settle, then issue a CORER.
+ */
msleep(1000);
ice_reset(hw, ICE_RESET_CORER);
- /* CORER will clear the global lock, so no explicit call
- * required for release.
- */
+ ice_check_reset(hw);
+
+reinit_hw:
+ /* Since we triggered a CORER, re-initialize hardware */
+ ice_deinit_hw(hw);
+ if (ice_init_hw(hw)) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n");
+ return -ENODEV;
+ }
- return 0;
+ return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 6ab53e430f91..420d45c2558b 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -336,6 +336,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
mutex_lock(&pf->adev_mutex);
cdev->adev = adev;
mutex_unlock(&pf->adev_mutex);
+ set_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags);
return 0;
}
@@ -347,15 +348,16 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
{
struct auxiliary_device *adev;
+ if (!test_and_clear_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags))
+ return;
+
mutex_lock(&pf->adev_mutex);
adev = pf->cdev_info->adev;
pf->cdev_info->adev = NULL;
mutex_unlock(&pf->adev_mutex);
- if (adev) {
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
- }
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8e0b06c1e02b..cae992d8f03c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4536,17 +4536,23 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
dev_info(dev, "Tx scheduling layers switching feature disabled\n");
else
dev_info(dev, "Tx scheduling layers switching feature enabled\n");
- /* if there was a change in topology ice_cfg_tx_topo triggered
- * a CORER and we need to re-init hw
+ return 0;
+ } else if (err == -ENODEV) {
+ /* If we failed to re-initialize the device, we can no longer
+ * continue loading.
*/
- ice_deinit_hw(hw);
- err = ice_init_hw(hw);
-
+ dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
return err;
} else if (err == -EIO) {
dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
+ return 0;
+ } else if (err == -EEXIST) {
+ return 0;
}
+ /* Do not treat this as a fatal error. */
+ dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
+ ERR_PTR(err));
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 29e0088ab6b2..d2871757ec94 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1352,7 +1352,7 @@ construct_skb:
skb = ice_construct_skb(rx_ring, xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
xdp_verdict = ICE_XDP_CONSUMED;
}
ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 555879b1248d..b19b462e0bb6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -180,6 +180,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
}
/**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ struct idpf_tx_buf *first, u16 idx)
+{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ /* clear dma mappings for failed tx_buf map */
+ for (;;) {
+ struct idpf_tx_buf *tx_buf;
+
+ tx_buf = &txq->tx_buf[idx];
+ libeth_tx_complete(tx_buf, &cp);
+ if (tx_buf == first)
+ break;
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ if (skb_is_gso(skb)) {
+ union idpf_tx_flex_desc *tx_desc;
+
+ /* If we failed a DMA mapping for a TSO packet, we will have
+ * used one additional descriptor for a context
+ * descriptor. Reset that here.
+ */
+ tx_desc = &txq->flex_tx[idx];
+ memset(tx_desc, 0, sizeof(*tx_desc));
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ /* Update tail in case netdev_xmit_more was previously true */
+ idpf_tx_buf_hw_update(txq, idx, false);
+}
+
+/**
* idpf_tx_singleq_map - Build the Tx base descriptor
* @tx_q: queue to send buffer on
* @first: first buffer info buffer to use
@@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+ return idpf_tx_singleq_dma_map_error(tx_q, skb,
+ first, i);
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -362,11 +415,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
+ u32 count, buf_count = 1;
int csum, tso, needed;
- unsigned int count;
__be16 protocol;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
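Editor's note: the new idpf_tx_singleq_dma_map_error() walks backwards through the descriptor ring from the failing slot to the packet's first buffer, unmapping as it goes and handling the wrap at index 0. A standalone C sketch of just the unwind walk, with printf standing in for libeth_tx_complete():

#include <stdio.h>

#define RING_SIZE 8

/* Release buffers from the failing index back to the packet's first buffer. */
static void unwind(int first, int failed, int ring_size)
{
	int idx = failed;

	for (;;) {
		printf("release buf %d\n", idx);   /* driver: libeth_tx_complete() */
		if (idx == first)
			break;
		if (idx == 0)
			idx = ring_size;
		idx--;
	}
}

int main(void)
{
	unwind(6, 1, RING_SIZE);  /* a wrapped packet: releases 1, 0, 7, 6 */
	return 0;
}

For GSO packets the driver then steps back one more slot to clear the context descriptor it had already written.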
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 66a1b040639d..eaad52a83b04 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -8,48 +8,13 @@
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
-struct idpf_tx_stash {
- struct hlist_node hlist;
- struct libeth_sqe buf;
-};
-
-#define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv)
+#define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv)
LIBETH_SQE_CHECK_PRIV(u32);
static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
unsigned int count);
/**
- * idpf_buf_lifo_push - push a buffer pointer onto stack
- * @stack: pointer to stack struct
- * @buf: pointer to buf to push
- *
- * Returns 0 on success, negative on failure
- **/
-static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
- struct idpf_tx_stash *buf)
-{
- if (unlikely(stack->top == stack->size))
- return -ENOSPC;
-
- stack->bufs[stack->top++] = buf;
-
- return 0;
-}
-
-/**
- * idpf_buf_lifo_pop - pop a buffer pointer from stack
- * @stack: pointer to stack struct
- **/
-static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
-{
- if (unlikely(!stack->top))
- return NULL;
-
- return stack->bufs[--stack->top];
-}
-
-/**
* idpf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
* @txqueue: TX queue
@@ -77,52 +42,22 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
struct libeth_sq_napi_stats ss = { };
- struct idpf_buf_lifo *buf_stack;
- struct idpf_tx_stash *stash;
struct libeth_cq_pp cp = {
.dev = txq->dev,
.ss = &ss,
};
- struct hlist_node *tmp;
- u32 i, tag;
+ u32 i;
/* Buffers already cleared, nothing to do */
if (!txq->tx_buf)
return;
/* Free all the Tx buffer sk_buffs */
- for (i = 0; i < txq->desc_count; i++)
+ for (i = 0; i < txq->buf_pool_size; i++)
libeth_tx_complete(&txq->tx_buf[i], &cp);
kfree(txq->tx_buf);
txq->tx_buf = NULL;
-
- if (!idpf_queue_has(FLOW_SCH_EN, txq))
- return;
-
- buf_stack = &txq->stash->buf_stack;
- if (!buf_stack->bufs)
- return;
-
- /*
- * If a Tx timeout occurred, there are potentially still bufs in the
- * hash table, free them here.
- */
- hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
- hlist) {
- if (!stash)
- continue;
-
- libeth_tx_complete(&stash->buf, &cp);
- hash_del(&stash->hlist);
- idpf_buf_lifo_push(buf_stack, stash);
- }
-
- for (i = 0; i < buf_stack->size; i++)
- kfree(buf_stack->bufs[i]);
-
- kfree(buf_stack->bufs);
- buf_stack->bufs = NULL;
}
/**
@@ -139,6 +74,9 @@ static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
if (!txq->desc_ring)
return;
+ if (txq->refillq)
+ kfree(txq->refillq->ring);
+
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
txq->desc_ring = NULL;
txq->next_to_use = 0;
@@ -195,41 +133,18 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
*/
static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
- struct idpf_buf_lifo *buf_stack;
- int buf_size;
- int i;
-
/* Allocate bookkeeping buffers only. Buffers to be supplied to HW
* are allocated by kernel network stack and received as part of skb
*/
- buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
- tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q))
+ tx_q->buf_pool_size = U16_MAX;
+ else
+ tx_q->buf_pool_size = tx_q->desc_count;
+ tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
+ GFP_KERNEL);
if (!tx_q->tx_buf)
return -ENOMEM;
- if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
- return 0;
-
- buf_stack = &tx_q->stash->buf_stack;
-
- /* Initialize tx buf stack for out-of-order completions if
- * flow scheduling offload is enabled
- */
- buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
- GFP_KERNEL);
- if (!buf_stack->bufs)
- return -ENOMEM;
-
- buf_stack->size = tx_q->desc_count;
- buf_stack->top = tx_q->desc_count;
-
- for (i = 0; i < tx_q->desc_count; i++) {
- buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
- GFP_KERNEL);
- if (!buf_stack->bufs[i])
- return -ENOMEM;
- }
-
return 0;
}
@@ -244,6 +159,7 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
struct idpf_tx_queue *tx_q)
{
struct device *dev = tx_q->dev;
+ struct idpf_sw_queue *refillq;
int err;
err = idpf_tx_buf_alloc_all(tx_q);
@@ -267,6 +183,31 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
tx_q->next_to_clean = 0;
idpf_queue_set(GEN_CHK, tx_q);
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+ return 0;
+
+ refillq = tx_q->refillq;
+ refillq->desc_count = tx_q->buf_pool_size;
+ refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
+ GFP_KERNEL);
+ if (!refillq->ring) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (unsigned int i = 0; i < refillq->desc_count; i++)
+ refillq->ring[i] =
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
+ idpf_queue_has(GEN_CHK, refillq));
+
+ /* Go ahead and flip the GEN bit since this counts as filling
+ * up the ring, i.e. the ring has already wrapped.
+ */
+ idpf_queue_change(GEN_CHK, refillq);
+
+ tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
+
return 0;
err_alloc:
@@ -317,8 +258,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
for (i = 0; i < vport->num_txq_grp; i++) {
for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
- u8 gen_bits = 0;
- u16 bufidx_mask;
err = idpf_tx_desc_alloc(vport, txq);
if (err) {
@@ -327,34 +266,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
i);
goto err_out;
}
-
- if (!idpf_is_queue_model_split(vport->txq_model))
- continue;
-
- txq->compl_tag_cur_gen = 0;
-
- /* Determine the number of bits in the bufid
- * mask and add one to get the start of the
- * generation bits
- */
- bufidx_mask = txq->desc_count - 1;
- while (bufidx_mask >> 1) {
- txq->compl_tag_gen_s++;
- bufidx_mask = bufidx_mask >> 1;
- }
- txq->compl_tag_gen_s++;
-
- gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
- txq->compl_tag_gen_s;
- txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
-
- /* Set bufid mask based on location of first
- * gen bit; it cannot simply be the descriptor
- * ring size-1 since we can have size values
- * where not all of those bits are set.
- */
- txq->compl_tag_bufid_m =
- GETMAXVAL(txq->compl_tag_gen_s);
}
if (!idpf_is_queue_model_split(vport->txq_model))
@@ -603,18 +514,18 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
}
/**
- * idpf_rx_post_buf_refill - Post buffer id to refill queue
+ * idpf_post_buf_refill - Post buffer id to refill queue
* @refillq: refill queue to post to
* @buf_id: buffer id to post
*/
-static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
+static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
- FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
- FIELD_PREP(IDPF_RX_BI_GEN_M,
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) {
@@ -995,6 +906,11 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++) {
+ if (flow_sch_en) {
+ kfree(txq_grp->txqs[j]->refillq);
+ txq_grp->txqs[j]->refillq = NULL;
+ }
+
kfree(txq_grp->txqs[j]);
txq_grp->txqs[j] = NULL;
}
@@ -1004,9 +920,6 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->complq);
txq_grp->complq = NULL;
-
- if (flow_sch_en)
- kfree(txq_grp->stashes);
}
kfree(vport->txq_grps);
vport->txq_grps = NULL;
@@ -1367,7 +1280,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_txq_stash *stashes;
int j;
tx_qgrp->vport = vport;
@@ -1380,15 +1292,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
goto err_alloc;
}
- if (split && flow_sch_en) {
- stashes = kcalloc(num_txq, sizeof(*stashes),
- GFP_KERNEL);
- if (!stashes)
- goto err_alloc;
-
- tx_qgrp->stashes = stashes;
- }
-
for (j = 0; j < tx_qgrp->num_txq; j++) {
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
@@ -1408,12 +1311,14 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
if (!flow_sch_en)
continue;
- if (split) {
- q->stash = &stashes[j];
- hash_init(q->stash->sched_buf_hash);
- }
-
idpf_queue_set(FLOW_SCH_EN, q);
+
+ q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
+ if (!q->refillq)
+ goto err_alloc;
+
+ idpf_queue_set(GEN_CHK, q->refillq);
+ idpf_queue_set(RFL_GEN_CHK, q->refillq);
}
if (!split)
@@ -1697,87 +1602,6 @@ static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
spin_unlock_bh(&tx_tstamp_caps->status_lock);
}
-/**
- * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
- * out of order completions
- * @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
- u16 compl_tag,
- struct libeth_sq_napi_stats *cleaned,
- int budget)
-{
- struct idpf_tx_stash *stash;
- struct hlist_node *tmp_buf;
- struct libeth_cq_pp cp = {
- .dev = txq->dev,
- .ss = cleaned,
- .napi = budget,
- };
-
- /* Buffer completion */
- hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
- hlist, compl_tag) {
- if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
- continue;
-
- hash_del(&stash->hlist);
-
- if (stash->buf.type == LIBETH_SQE_SKB &&
- (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS))
- idpf_tx_read_tstamp(txq, stash->buf.skb);
-
- libeth_tx_complete(&stash->buf, &cp);
-
- /* Push shadow buf back onto stack */
- idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
- }
-}
-
-/**
- * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a
- * later time (only relevant for flow scheduling mode)
- * @txq: Tx queue to clean
- * @tx_buf: buffer to store
- */
-static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
- struct idpf_tx_buf *tx_buf)
-{
- struct idpf_tx_stash *stash;
-
- if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
- return 0;
-
- stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
- if (unlikely(!stash)) {
- net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
- netdev_name(txq->netdev));
-
- return -ENOMEM;
- }
-
- /* Store buffer params in shadow buffer */
- stash->buf.skb = tx_buf->skb;
- stash->buf.bytes = tx_buf->bytes;
- stash->buf.packets = tx_buf->packets;
- stash->buf.type = tx_buf->type;
- stash->buf.nr_frags = tx_buf->nr_frags;
- dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
- dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
-
- /* Add buffer to buf_hash table to be freed later */
- hash_add(txq->stash->sched_buf_hash, &stash->hlist,
- idpf_tx_buf_compl_tag(&stash->buf));
-
- tx_buf->type = LIBETH_SQE_EMPTY;
-
- return 0;
-}
-
#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
do { \
if (unlikely(++(ntc) == (txq)->desc_count)) { \
@@ -1805,14 +1629,8 @@ do { \
* Separate packet completion events will be reported on the completion queue,
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
- *
- * Furthermore, in flow scheduling mode, check to make sure there are enough
- * reserve buffers to stash the packet. If there are not, return early, which
- * will leave next_to_clean pointing to the packet that failed to be stashed.
- *
- * Return: false in the scenario above, true otherwise.
*/
-static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
+static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
struct libeth_sq_napi_stats *cleaned,
bool descs_only)
@@ -1826,7 +1644,12 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
.napi = napi_budget,
};
struct idpf_tx_buf *tx_buf;
- bool clean_complete = true;
+
+ if (descs_only) {
+ /* Bump ring index to mark as cleaned. */
+ tx_q->next_to_clean = end;
+ return;
+ }
tx_desc = &tx_q->flex_tx[ntc];
next_pending_desc = &tx_q->flex_tx[end];
@@ -1846,136 +1669,61 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
break;
eop_idx = tx_buf->rs_idx;
+ libeth_tx_complete(tx_buf, &cp);
- if (descs_only) {
- if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
- clean_complete = false;
- goto tx_splitq_clean_out;
- }
-
- idpf_stash_flow_sch_buffers(tx_q, tx_buf);
+ /* unmap remaining buffers */
+ while (ntc != eop_idx) {
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+ tx_desc, tx_buf);
- while (ntc != eop_idx) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
- idpf_stash_flow_sch_buffers(tx_q, tx_buf);
- }
- } else {
+ /* unmap any remaining paged data */
libeth_tx_complete(tx_buf, &cp);
-
- /* unmap remaining buffers */
- while (ntc != eop_idx) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
-
- /* unmap any remaining paged data */
- libeth_tx_complete(tx_buf, &cp);
- }
}
fetch_next_txq_desc:
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
}
-tx_splitq_clean_out:
tx_q->next_to_clean = ntc;
-
- return clean_complete;
}
-#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
-do { \
- (buf)++; \
- (ntc)++; \
- if (unlikely((ntc) == (txq)->desc_count)) { \
- buf = (txq)->tx_buf; \
- ntc = 0; \
- } \
-} while (0)
-
/**
- * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
+ * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
* @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
+ * @buf_id: packet's starting buffer ID, from completion descriptor
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @budget: Used to determine if we are in netpoll
*
- * Cleans all buffers associated with the input completion tag either from the
- * TX buffer ring or from the hash table if the buffers were previously
- * stashed. Returns the byte/segment count for the cleaned packet associated
- * this completion tag.
+ * Clean all buffers associated with the packet starting at buf_id. The
+ * byte/segment counts for the cleaned packet are accumulated in @cleaned.
*/
-static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
- struct libeth_sq_napi_stats *cleaned,
- int budget)
+static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
+ struct libeth_sq_napi_stats *cleaned,
+ int budget)
{
- u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
struct libeth_cq_pp cp = {
.dev = txq->dev,
.ss = cleaned,
.napi = budget,
};
- u16 ntc, orig_idx = idx;
-
- tx_buf = &txq->tx_buf[idx];
-
- if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
- idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
- return false;
+ tx_buf = &txq->tx_buf[buf_id];
if (tx_buf->type == LIBETH_SQE_SKB) {
if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
idpf_tx_read_tstamp(txq, tx_buf->skb);
libeth_tx_complete(tx_buf, &cp);
+ idpf_post_buf_refill(txq->refillq, buf_id);
}
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ buf_id = idpf_tx_buf_next(tx_buf);
- while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
+ tx_buf = &txq->tx_buf[buf_id];
libeth_tx_complete(tx_buf, &cp);
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ idpf_post_buf_refill(txq->refillq, buf_id);
}
-
- /*
- * It's possible the packet we just cleaned was an out of order
- * completion, which means we can stash the buffers starting from
- * the original next_to_clean and reuse the descriptors. We need
- * to compare the descriptor ring next_to_clean packet's "first" buffer
- * to the "first" buffer of the packet we just cleaned to determine if
- * this is the case. Howevever, next_to_clean can point to either a
- * reserved buffer that corresponds to a context descriptor used for the
- * next_to_clean packet (TSO packet) or the "first" buffer (single
- * packet). The orig_idx from the packet we just cleaned will always
- * point to the "first" buffer. If next_to_clean points to a reserved
- * buffer, let's bump ntc once and start the comparison from there.
- */
- ntc = txq->next_to_clean;
- tx_buf = &txq->tx_buf[ntc];
-
- if (tx_buf->type == LIBETH_SQE_CTX)
- idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
-
- /*
- * If ntc still points to a different "first" buffer, clean the
- * descriptor ring and stash all of the buffers for later cleaning. If
- * we cannot stash all of the buffers, next_to_clean will point to the
- * "first" buffer of the packet that could not be stashed and cleaning
- * will start there next time.
- */
- if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
- !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
- true)))
- return true;
-
- /*
- * Otherwise, update next_to_clean to reflect the cleaning that was
- * done above.
- */
- txq->next_to_clean = idx;
-
- return true;
}
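With the stash hash table removed, the buffers of one packet are chained through the Tx buffer array by index: each buffer records the buf_id of the next one, terminated by IDPF_TXBUF_NULL (defined as U32_MAX in the header hunk below). A small stand-alone walk over such an index chain, using hypothetical types rather than the driver's idpf_tx_buf:

#include <limits.h>
#include <stdio.h>

#define BUF_NULL	UINT_MAX	/* stands in for IDPF_TXBUF_NULL (U32_MAX) */

/* Hypothetical stand-in for the driver's Tx buffer entry. */
struct demo_buf {
	unsigned int next;	/* index of this packet's next buffer */
	unsigned int len;
};

/* Walk one packet's buffers starting at buf_id, "completing" each one. */
static void clean_packet(const struct demo_buf *pool, unsigned int buf_id)
{
	for (;;) {
		unsigned int next = pool[buf_id].next;

		printf("clean buf %u (len %u)\n", buf_id, pool[buf_id].len);
		if (next == BUF_NULL)
			break;
		buf_id = next;
	}
}

int main(void)
{
	/* One packet spanning buffers 3 -> 7 -> 1, then terminated. */
	struct demo_buf pool[8] = {
		[3] = { .next = 7,        .len = 1500 },
		[7] = { .next = 1,        .len = 700  },
		[1] = { .next = BUF_NULL, .len = 60   },
	};

	clean_packet(pool, 3);
	return 0;
}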
/**
@@ -1994,22 +1742,17 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct libeth_sq_napi_stats *cleaned,
int budget)
{
- u16 compl_tag;
+ /* RS completion contains queue head for queue based scheduling or
+ * completion tag for flow based scheduling.
+ */
+ u16 rs_compl_val = le16_to_cpu(desc->q_head_compl_tag.q_head);
if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
- u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
-
- idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
return;
}
- compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
-
- /* If we didn't clean anything on the ring, this packet must be
- * in the hash table. Go clean it there.
- */
- if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
- idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
+ idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget);
}
/**
@@ -2126,8 +1869,7 @@ fetch_next_desc:
/* Update BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
- np->state != __IDPF_VPORT_UP ||
+ dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
@@ -2184,15 +1926,21 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
-/* Global conditions to tell whether the txq (and related resources)
- * has room to allow the use of "size" descriptors.
+/**
+ * idpf_tx_splitq_has_room - check if enough Tx splitq resources are available
+ * @tx_q: the queue to be checked
+ * @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of Tx buffers required for this packet
+ *
+ * Return: 0 if no room available, 1 otherwise
*/
-static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
+ u32 bufs_needed)
{
- if (IDPF_DESC_UNUSED(tx_q) < size ||
+ if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
- IDPF_TX_BUF_RSV_LOW(tx_q))
+ idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
return 0;
return 1;
}
@@ -2201,14 +1949,21 @@ static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of buffers needed for this packet
*
- * Returns 0 if stop is not needed
+ * Return: 0 if stop is not needed
*/
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
- unsigned int descs_needed)
+ u32 descs_needed,
+ u32 bufs_needed)
{
+ /* Since we have multiple resources to check for splitq, our
+ * start/stop thresholds become a boolean check instead of a count
+ * threshold.
+ */
if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
- idpf_txq_has_room(tx_q, descs_needed),
+ idpf_txq_has_room(tx_q, descs_needed,
+ bufs_needed),
1, 1))
return 0;
@@ -2250,14 +2005,16 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
}
/**
- * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
+ * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
* @txq: queue to send buffer on
* @skb: send buffer
+ * @bufs_needed: (output) number of buffers needed for this skb.
*
- * Returns number of data descriptors needed for this skb.
+ * Return: number of data descriptors and buffers needed for this skb.
*/
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
- struct sk_buff *skb)
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ u32 *bufs_needed)
{
const struct skb_shared_info *shinfo;
unsigned int count = 0, i;
@@ -2268,6 +2025,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
return count;
shinfo = skb_shinfo(skb);
+ *bufs_needed += shinfo->nr_frags;
for (i = 0; i < shinfo->nr_frags; i++) {
unsigned int size;
@@ -2297,71 +2055,89 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
}
/**
- * idpf_tx_dma_map_error - handle TX DMA map errors
- * @txq: queue to send buffer on
- * @skb: send buffer
- * @first: original first buffer info buffer for packet
- * @idx: starting point on ring to unwind
+ * idpf_tx_splitq_bump_ntu - adjust NTU and generation
+ * @txq: the tx ring to wrap
+ * @ntu: ring index to bump
*/
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 idx)
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
- struct libeth_sq_napi_stats ss = { };
- struct libeth_cq_pp cp = {
- .dev = txq->dev,
- .ss = &ss,
- };
+ ntu++;
- u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.dma_map_errs);
- u64_stats_update_end(&txq->stats_sync);
+ if (ntu == txq->desc_count)
+ ntu = 0;
- /* clear dma mappings for failed tx_buf map */
- for (;;) {
- struct idpf_tx_buf *tx_buf;
+ return ntu;
+}
- tx_buf = &txq->tx_buf[idx];
- libeth_tx_complete(tx_buf, &cp);
- if (tx_buf == first)
- break;
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
- }
+/**
+ * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
+ * @refillq: refill queue to get buffer ID from
+ * @buf_id: return buffer ID
+ *
+ * Return: true if a buffer ID was found, false if not
+ */
+static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
+ u32 *buf_id)
+{
+ u32 ntc = refillq->next_to_clean;
+ u32 refill_desc;
- if (skb_is_gso(skb)) {
- union idpf_tx_flex_desc *tx_desc;
+ refill_desc = refillq->ring[ntc];
- /* If we failed a DMA mapping for a TSO packet, we will have
- * used one additional descriptor for a context
- * descriptor. Reset that here.
- */
- tx_desc = &txq->flex_tx[idx];
- memset(tx_desc, 0, sizeof(*tx_desc));
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
+ if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RFL_BI_GEN_M)))
+ return false;
+
+ *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
+
+ if (unlikely(++ntc == refillq->desc_count)) {
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ ntc = 0;
}
- /* Update tail in case netdev_xmit_more was previously true */
- idpf_tx_buf_hw_update(txq, idx, false);
+ refillq->next_to_clean = ntc;
+
+ return true;
}
/**
- * idpf_tx_splitq_bump_ntu - adjust NTU and generation
- * @txq: the tx ring to wrap
- * @ntu: ring index to bump
+ * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
+ * @txq: Tx queue to unwind
+ * @params: pointer to splitq params struct
+ * @first: starting buffer for packet to unmap
*/
-static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
+static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
+ struct idpf_tx_splitq_params *params,
+ struct idpf_tx_buf *first)
{
- ntu++;
+ struct idpf_sw_queue *refillq = txq->refillq;
+ struct libeth_sq_napi_stats ss = { };
+ struct idpf_tx_buf *tx_buf = first;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
- if (ntu == txq->desc_count) {
- ntu = 0;
- txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ libeth_tx_complete(tx_buf, &cp);
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)];
+ libeth_tx_complete(tx_buf, &cp);
}
- return ntu;
+ /* Update tail in case netdev_xmit_more was previously true. */
+ idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
+
+ if (!refillq)
+ return;
+
+ /* Restore refillq state to avoid leaking tags. */
+ if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = params->prev_refill_ntc;
}
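The rollback handling above amounts to saving and restoring the refill queue's consumer state: next_to_clean and the RFL_GEN_CHK flag are recorded before mapping begins and put back on a DMA error, so the buffer IDs popped for the failed packet are not leaked. A tiny sketch of that save/restore idea with made-up types (demo_refillq is not a driver structure):

#include <stdbool.h>
#include <stdio.h>

struct demo_refillq {
	unsigned int next_to_clean;
	bool gen;
};

struct demo_rollback {
	unsigned int prev_ntc;
	bool prev_gen;
};

static void save_state(const struct demo_refillq *q, struct demo_rollback *rb)
{
	rb->prev_ntc = q->next_to_clean;
	rb->prev_gen = q->gen;
}

static void restore_state(struct demo_refillq *q, const struct demo_rollback *rb)
{
	/* Undo any pops done since save_state(), including a GEN flip. */
	q->next_to_clean = rb->prev_ntc;
	q->gen = rb->prev_gen;
}

int main(void)
{
	struct demo_refillq q = { .next_to_clean = 510, .gen = true };
	struct demo_rollback rb;

	save_state(&q, &rb);

	/* Pretend three buffer IDs were popped and the ring wrapped. */
	q.next_to_clean = 1;
	q.gen = false;

	restore_state(&q, &rb);		/* mapping failed: roll back */
	printf("ntc=%u gen=%d\n", q.next_to_clean, q.gen);
	return 0;
}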
/**
@@ -2385,6 +2161,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
struct netdev_queue *nq;
struct sk_buff *skb;
skb_frag_t *frag;
+ u32 next_buf_id;
u16 td_cmd = 0;
dma_addr_t dma;
@@ -2402,17 +2179,16 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
tx_buf = first;
first->nr_frags = 0;
- params->compl_tag =
- (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
-
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
first->nr_frags++;
- idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
@@ -2468,29 +2244,12 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
max_data);
if (unlikely(++i == tx_q->desc_count)) {
- tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen =
- IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
} else {
- tx_buf++;
tx_desc++;
}
- /* Since this packet has a buffer that is going to span
- * multiple descriptors, it's going to leave holes in
- * to the TX buffer ring. To ensure these holes do not
- * cause issues in the cleaning routines, we will clear
- * them of any stale data and assign them the same
- * completion tag as the current packet. Then when the
- * packet is being cleaned, the cleaning routines will
- * simply pass over these holes and finish cleaning the
- * rest of the packet.
- */
- tx_buf->type = LIBETH_SQE_EMPTY;
- idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
-
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
* max_data will be >= 12K and <= 16K-1. On any
@@ -2515,15 +2274,25 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
if (unlikely(++i == tx_q->desc_count)) {
- tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
} else {
- tx_buf++;
tx_desc++;
}
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &next_buf_id))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
+ } else {
+ next_buf_id = i;
+ }
+ idpf_tx_buf_next(tx_buf) = next_buf_id;
+ tx_buf = &tx_q->tx_buf[next_buf_id];
+
size = skb_frag_size(frag);
data_len -= size;
@@ -2538,6 +2307,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
/* write last descriptor with RS and EOP bits */
first->rs_idx = i;
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
@@ -2746,8 +2516,6 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
union idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- txq->tx_buf[i].type = LIBETH_SQE_CTX;
-
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
@@ -2841,6 +2609,21 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
#endif /* CONFIG_PTP_1588_CLOCK */
/**
+ * idpf_tx_splitq_need_re - check whether RE bit needs to be set
+ * @tx_q: pointer to Tx queue
+ *
+ * Return: true if RE bit needs to be set, false otherwise
+ */
+static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
+{
+ int gap = tx_q->next_to_use - tx_q->last_re;
+
+ gap += (gap < 0) ? tx_q->desc_count : 0;
+
+ return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
+}
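idpf_tx_splitq_need_re() above is a modular-distance check: how far next_to_use has advanced past the last descriptor that carried the RE bit, wrapping at the ring size. A stand-alone version with hypothetical indices, assuming the 64-entry IDPF_TX_SPLITQ_RE_MIN_GAP visible in the header below:

#include <stdbool.h>
#include <stdio.h>

#define RE_MIN_GAP	64	/* mirrors IDPF_TX_SPLITQ_RE_MIN_GAP */

static bool need_re(int next_to_use, int last_re, int desc_count)
{
	int gap = next_to_use - last_re;

	gap += (gap < 0) ? desc_count : 0;	/* handle ring wrap */

	return gap >= RE_MIN_GAP;
}

int main(void)
{
	/* 512-entry ring: 52 descriptors since the last RE -> not yet;
	 * 100 descriptors since the last RE -> set RE on this packet.
	 */
	printf("%d\n", need_re(10, 470, 512));	/* gap = 52  -> 0 */
	printf("%d\n", need_re(60, 472, 512));	/* gap = 100 -> 1 */
	return 0;
}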
+
+/**
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
* @skb: send buffer
* @tx_q: queue to send buffer on
@@ -2850,13 +2633,16 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q)
{
- struct idpf_tx_splitq_params tx_params = { };
+ struct idpf_tx_splitq_params tx_params = {
+ .prev_ntu = tx_q->next_to_use,
+ };
union idpf_flex_tx_ctx_desc *ctx_desc;
struct idpf_tx_buf *first;
- unsigned int count;
+ u32 count, buf_count = 1;
int tso, idx;
+ u32 buf_id;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
@@ -2866,7 +2652,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
/* Check for splitq specific TX resources */
count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
- if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
+ if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
return NETDEV_TX_BUSY;
@@ -2898,36 +2684,47 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
idpf_tx_set_tstamp_desc(ctx_desc, idx);
}
- /* record the location of the first descriptor for this packet */
- first = &tx_q->tx_buf[tx_q->next_to_use];
- first->skb = skb;
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ struct idpf_sw_queue *refillq = tx_q->refillq;
- if (tso) {
- first->packets = tx_params.offload.tso_segs;
- first->bytes = skb->len +
- ((first->packets - 1) * tx_params.offload.tso_hdr_len);
- } else {
- first->packets = 1;
- first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- }
+ /* Save refillq state in case of a packet rollback. Otherwise,
+ * the tags will be leaked since they will be popped from the
+ * refillq but never reposted during cleaning.
+ */
+ tx_params.prev_refill_gen =
+ idpf_queue_has(RFL_GEN_CHK, refillq);
+ tx_params.prev_refill_ntc = refillq->next_to_clean;
+
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &buf_id))) {
+ if (tx_params.prev_refill_gen !=
+ idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = tx_params.prev_refill_ntc;
+
+ tx_q->next_to_use = tx_params.prev_ntu;
+ return idpf_tx_drop_skb(tx_q, skb);
+ }
+ tx_params.compl_tag = buf_id;
- if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
- /* Set the RE bit to catch any packets that may have not been
- * stashed during RS completion cleaning. MIN_GAP is set to
- * MIN_RING size to ensure it will be set at least once each
- * time around the ring.
+ /* Set the RE bit to periodically "clean" the descriptor ring.
+ * MIN_GAP is set to MIN_RING size to ensure it will be set at
+ * least once each time around the ring.
*/
- if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
+ if (idpf_tx_splitq_need_re(tx_q)) {
tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
tx_q->txq_grp->num_completions_pending++;
+ tx_q->last_re = tx_q->next_to_use;
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
} else {
+ buf_id = tx_q->next_to_use;
+
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
@@ -2935,6 +2732,18 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
}
+ first = &tx_q->tx_buf[buf_id];
+ first->skb = skb;
+
+ if (tso) {
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
+ } else {
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ }
+
idpf_tx_splitq_map(tx_q, &tx_params, first);
return NETDEV_TX_OK;
@@ -3472,7 +3281,7 @@ payload:
skip_data:
rx_buf->netmem = 0;
- idpf_rx_post_buf_refill(refillq, buf_id);
+ idpf_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
/* skip if it is non EOP desc */
@@ -3580,10 +3389,10 @@ static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
bool failure;
if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
- !!(refill_desc & IDPF_RX_BI_GEN_M))
+ !!(refill_desc & IDPF_RFL_BI_GEN_M))
break;
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+ buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
if (failure)
break;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 281de655a813..52753dff381c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -108,8 +108,8 @@ do { \
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_GEN_M BIT(16)
-#define IDPF_RX_BI_BUFID_M GENMASK(15, 0)
+#define IDPF_RFL_BI_GEN_M BIT(16)
+#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
@@ -118,10 +118,6 @@ do { \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
-#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
-#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
- (txq)->desc_count >> 2)
-
#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
* completions that are expected to arrive on the TX completion queue.
@@ -131,11 +127,7 @@ do { \
0 : U32_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
-#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
-/* Adjust the generation for the completion tag and wrap if necessary */
-#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
- ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
- 0 : (txq)->compl_tag_cur_gen)
+#define IDPF_TXBUF_NULL U32_MAX
#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
@@ -153,18 +145,6 @@ union idpf_tx_flex_desc {
#define idpf_tx_buf libeth_sqe
/**
- * struct idpf_buf_lifo - LIFO for managing OOO completions
- * @top: Used to know how many buffers are left
- * @size: Total size of LIFO
- * @bufs: Backing array
- */
-struct idpf_buf_lifo {
- u16 top;
- u16 size;
- struct idpf_tx_stash **bufs;
-};
-
-/**
* struct idpf_tx_offload_params - Offload parameters for a given packet
* @tx_flags: Feature flags enabled for this packet
* @hdr_offsets: Offset parameter for single queue model
@@ -196,6 +176,9 @@ struct idpf_tx_offload_params {
* @compl_tag: Associated tag for completion
* @td_tag: Descriptor tunneling tag
* @offload: Offload parameters
+ * @prev_ntu: stored TxQ next_to_use in case of rollback
+ * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
+ * @prev_refill_gen: stored refillq generation bit in case of packet rollback
*/
struct idpf_tx_splitq_params {
enum idpf_tx_desc_dtype_value dtype;
@@ -206,6 +189,10 @@ struct idpf_tx_splitq_params {
};
struct idpf_tx_offload_params offload;
+
+ u16 prev_ntu;
+ u16 prev_refill_ntc;
+ bool prev_refill_gen;
};
enum idpf_tx_ctx_desc_eipt_offload {
@@ -468,17 +455,6 @@ struct idpf_tx_queue_stats {
#define IDPF_DIM_DEFAULT_PROFILE_IX 1
/**
- * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
- * @buf_stack: Stack of empty buffers to store buffer info for out of order
- * buffer completions. See struct idpf_buf_lifo
- * @sched_buf_hash: Hash table to store buffers
- */
-struct idpf_txq_stash {
- struct idpf_buf_lifo buf_stack;
- DECLARE_HASHTABLE(sched_buf_hash, 12);
-} ____cacheline_aligned;
-
-/**
* struct idpf_rx_queue - software structure representing a receive queue
* @rx: universal receive descriptor array
* @single_buf: buffer descriptor array in singleq
@@ -610,6 +586,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @netdev: &net_device corresponding to this queue
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
+ * @last_re: last descriptor index at which the RE bit was set
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
* the TX completion queue, it can be for any TXQ associated
* with that completion queue. This means we can clean up to
@@ -620,11 +598,7 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* only once at the end of the cleaning routine.
* @clean_budget: singleq only, queue cleaning budget
* @cleaned_pkts: Number of packets cleaned for the above said case
- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
- * @stash: Tx buffer stash for Flow-based scheduling mode
- * @compl_tag_bufid_m: Completion tag buffer id mask
- * @compl_tag_cur_gen: Used to keep track of current completion tag generation
- * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @refillq: Pointer to refill queue
* @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
* @tstamp_task: Work that handles Tx timestamp read
* @stats_sync: See struct u64_stats_sync
@@ -633,6 +607,7 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
* @q_vector: Backreference to associated vector
+ * @buf_pool_size: Total number of idpf_tx_buf
*/
struct idpf_tx_queue {
__cacheline_group_begin_aligned(read_mostly);
@@ -654,7 +629,6 @@ struct idpf_tx_queue {
u16 desc_count;
u16 tx_min_pkt_len;
- u16 compl_tag_gen_s;
struct net_device *netdev;
__cacheline_group_end_aligned(read_mostly);
@@ -662,6 +636,8 @@ struct idpf_tx_queue {
__cacheline_group_begin_aligned(read_write);
u16 next_to_use;
u16 next_to_clean;
+ u16 last_re;
+ u16 tx_max_bufs;
union {
u32 cleaned_bytes;
@@ -669,12 +645,7 @@ struct idpf_tx_queue {
};
u16 cleaned_pkts;
- u16 tx_max_bufs;
- struct idpf_txq_stash *stash;
-
- u16 compl_tag_bufid_m;
- u16 compl_tag_cur_gen;
- u16 compl_tag_gen_max;
+ struct idpf_sw_queue *refillq;
struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
struct work_struct *tstamp_task;
@@ -689,11 +660,12 @@ struct idpf_tx_queue {
dma_addr_t dma;
struct idpf_q_vector *q_vector;
+ u32 buf_pool_size;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
- 112 + sizeof(struct u64_stats_sync),
- 24);
+ 104 + sizeof(struct u64_stats_sync),
+ 32);
/**
* struct idpf_buf_queue - software structure representing a buffer queue
@@ -903,7 +875,6 @@ struct idpf_rxq_group {
* @vport: Vport back pointer
* @num_txq: Number of TX queues associated
* @txqs: Array of TX queue pointers
- * @stashes: array of OOO stashes for the queues
* @complq: Associated completion queue pointer, split queue only
* @num_completions_pending: Total number of completions pending for the
* completion queue, accumulated for all TX queues
@@ -918,7 +889,6 @@ struct idpf_txq_group {
u16 num_txq;
struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
- struct idpf_txq_stash *stashes;
struct idpf_compl_queue *complq;
@@ -1011,6 +981,17 @@ static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
reg->dyn_ctl);
}
+/**
+ * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
+ * @refillq: pointer to refillq containing buf_ids
+ */
+static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
+{
+ return (refillq->next_to_use > refillq->next_to_clean ?
+ 0 : refillq->desc_count) +
+ refillq->next_to_use - refillq->next_to_clean - 1;
+}
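The helper above is the standard ring-buffer occupancy formula with one slot kept unused to distinguish full from empty. A stand-alone check of the arithmetic with hypothetical index values (plain parameters stand in for the refillq fields):

#include <stdio.h>

/* Same expression as idpf_tx_splitq_get_free_bufs(), detached from the
 * driver: ntu is the producer (next_to_use), ntc the consumer
 * (next_to_clean) on a ring of desc_count entries.
 */
static unsigned int ring_free_entries(unsigned int ntu, unsigned int ntc,
				      unsigned int desc_count)
{
	return (ntu > ntc ? 0 : desc_count) + ntu - ntc - 1;
}

int main(void)
{
	/* 512-entry ring: producer at 10, consumer at 500 -> 21 entries
	 * (wrap case); producer at 500, consumer at 10 -> 489 entries.
	 */
	printf("%u\n", ring_free_entries(10, 500, 512));
	printf("%u\n", ring_free_entries(500, 10, 512));
	return 0;
}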
+
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -1038,10 +1019,8 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 ring_idx);
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
- struct sk_buff *skb);
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb, u32 *buf_count);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 458e5eaa92e5..e79b14d50b24 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7149,6 +7149,13 @@ static int igc_probe(struct pci_dev *pdev,
adapter->port_num = hw->bus.func;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
/* Disable ASPM L1.2 on I226 devices to avoid packet loss */
if (igc_is_device_id_i226(hw))
pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
@@ -7175,13 +7182,6 @@ static int igc_probe(struct pci_dev *pdev,
netdev->mem_start = pci_resource_start(pdev, 0);
netdev->mem_end = pci_resource_end(pdev, 0);
- /* PCI config space info */
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->revision_id = pdev->revision;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
-
/* Copy the default MAC and PHY function pointers */
memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
index 54f1b83dfe42..d227f4d2a2d1 100644
--- a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
@@ -543,6 +543,7 @@ int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = adapter->hw.bus.func;
+ attrs.no_phys_port_name = 1;
ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index d74116441d1c..bfeef5b0b99d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -3125,7 +3125,7 @@ static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
if (err)
return err;
- combo_ver = le32_to_cpu(civd.combo_ver);
+ combo_ver = get_unaligned_le32(&civd.combo_ver);
orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6122a0abb41f..80e6a2ef1350 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -968,10 +968,6 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
clear_bit(__IXGBE_HANG_CHECK_ARMED,
&adapter->tx_ring[i]->state);
-
- for (i = 0; i < adapter->num_xdp_queues; i++)
- clear_bit(__IXGBE_HANG_CHECK_ARMED,
- &adapter->xdp_ring[i]->state);
}
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
@@ -1214,7 +1210,7 @@ static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring,
struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
struct ixgbe_hw *hw = &adapter->hw;
- e_err(drv, "Detected Tx Unit Hang%s\n"
+ e_err(drv, "Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
" next_to_use <%x>\n"
@@ -1222,16 +1218,14 @@ static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring,
"tx_buffer_info[next_to_clean]\n"
" time_stamp <%lx>\n"
" jiffies <%lx>\n",
- ring_is_xdp(tx_ring) ? " (XDP)" : "",
tx_ring->queue_index,
IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
tx_ring->next_to_use, next,
tx_ring->tx_buffer_info[next].time_stamp, jiffies);
- if (!ring_is_xdp(tx_ring))
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
}
/**
@@ -1451,6 +1445,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes);
adapter->tx_ipsec += total_ipsec;
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
if (adapter->hw.mac.type == ixgbe_mac_e610)
ixgbe_handle_mdd_event(adapter, tx_ring);
@@ -1468,9 +1465,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
return true;
}
- if (ring_is_xdp(tx_ring))
- return !!budget;
-
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
@@ -7974,12 +7968,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
return;
/* Force detection of hung controller */
- if (netif_carrier_ok(adapter->netdev)) {
+ if (netif_carrier_ok(adapter->netdev))
for (i = 0; i < adapter->num_tx_queues; i++)
set_check_for_tx_hang(adapter->tx_ring[i]);
- for (i = 0; i < adapter->num_xdp_queues; i++)
- set_check_for_tx_hang(adapter->xdp_ring[i]);
- }
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/*
@@ -8199,13 +8190,6 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
return true;
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *ring = adapter->xdp_ring[i];
-
- if (ring->next_to_use != ring->next_to_clean)
- return true;
- }
-
return false;
}
@@ -11005,6 +10989,10 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return -ENETDOWN;
+ if (!netif_carrier_ok(adapter->netdev) ||
+ !netif_running(adapter->netdev))
+ return -ENETDOWN;
+
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index d2f22d8558f8..ff8d640a50b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -932,7 +932,7 @@ struct ixgbe_orom_civd_info {
__le32 combo_ver; /* Combo Image Version number */
u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
__le16 combo_name[32]; /* Unicode string representing the Combo Image version */
-};
+} __packed;
/* Function specific capabilities */
struct ixgbe_hw_func_caps {
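This __packed annotation pairs with the earlier ixgbe_e610.c change from le32_to_cpu() to get_unaligned_le32(): once the struct layout is packed, the __le32 member may land on an unaligned address and must be read byte-wise. A stand-alone sketch of the same pattern; the byte-assembly helper only mimics what the kernel's get_unaligned_le32() guarantees, and civd_like is an illustrative layout, not the real structure:

#include <stdint.h>
#include <stdio.h>

/* Packed layout analogous to ixgbe_orom_civd_info: the 32-bit field sits
 * at offset 1 and is therefore unaligned.
 */
struct civd_like {
	uint8_t  hdr;
	uint32_t combo_ver;	/* little-endian in the image */
} __attribute__((packed));

/* Byte-wise read, safe regardless of alignment. */
static uint32_t read_unaligned_le32(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	uint8_t raw[5] = { 0xaa, 0x04, 0x03, 0x02, 0x01 };
	const struct civd_like *civd = (const void *)raw;

	/* Prints 0x01020304: the field is assembled from its bytes. */
	printf("0x%08x\n", (unsigned int)read_unaligned_le32(&civd->combo_ver));
	return 0;
}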
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index ac58964b2f08..7b941505a9d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -398,7 +398,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
u32 cmd_type;
- while (budget-- > 0) {
+ while (likely(budget)) {
if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
work_done = false;
break;
@@ -433,6 +433,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
xdp_ring->next_to_use = 0;
+
+ budget--;
}
if (tx_desc) {
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index feab392ab2ee..476e73e502fe 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4610,7 +4610,7 @@ static int mvneta_stop(struct net_device *dev)
/* Inform that we are stopping so we don't want to setup the
* driver for new CPUs in the notifiers. The code of the
* notifier for CPU online is protected by the same spinlock,
- * so when we get the lock, the notifer work is done.
+ * so when we get the lock, the notifier work is done.
*/
spin_lock(&pp->lock);
pp->is_stopped = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 4ff19a04b23e..0c46ba8a5adc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1978,6 +1978,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ if (!is_cn20k(pdev) &&
+ !is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
+ dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n",
+ cgx->cgx_id);
+ goto err_release_regions;
+ }
+
cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
if (!cgx->lmac_count) {
dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 7ee1fdeb5295..18c7bb39dbc7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -783,6 +783,20 @@ static inline bool is_cn10kb(struct rvu *rvu)
return false;
}
+static inline bool is_cgx_mapped_to_nix(unsigned short id, u8 cgx_id)
+{
+ /* On CNF10KA and CNF10KB silicons only two CGX blocks are connected
+ * to NIX.
+ */
+ if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B)
+ return cgx_id <= 1;
+
+ return !(cgx_id && !(id == PCI_SUBSYS_DEVID_96XX ||
+ id == PCI_SUBSYS_DEVID_98XX ||
+ id == PCI_SUBSYS_DEVID_CN10K_A ||
+ id == PCI_SUBSYS_DEVID_CN10K_B));
+}
+
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
u64 npc_const3;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 1b765045aa63..b56395ac5a74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -606,8 +606,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
- if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
+ /* Allow extracting SPI field from AH and ESP headers at same offset */
+ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
(*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
*features |= BIT_ULL(NPC_IPSEC_SPI);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f674729124e6..aff17c37ddde 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -124,7 +124,9 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf)
dev_stats->rx_ucast_frames;
dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
- dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
+ dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP) +
+ (unsigned long)atomic_long_read(&dev_stats->tx_discards);
+
dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e3765b73c434..1c8a3c078a64 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -153,6 +153,7 @@ struct otx2_dev_stats {
u64 tx_bcast_frames;
u64 tx_mcast_frames;
u64 tx_drops;
+ atomic_long_t tx_discards;
};
/* Driver counted stats */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index b23585c5e5c2..5027fae0aa77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2220,6 +2220,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
int sq_idx;
@@ -2232,6 +2233,8 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
(!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ dev_stats = &pf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 5589fccd370b..7ebb6e656884 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -417,9 +417,19 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *vf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > vf->tx_max_pktlen)) {
+ dev_stats = &vf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
sq = &vf->qset.sq[qidx];
txq = netdev_get_tx_queue(netdev, qidx);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 25af98034e2e..b476733a0234 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -371,7 +371,8 @@ static void rvu_rep_get_stats(struct work_struct *work)
stats->rx_mcast_frames = rsp->rx.mcast;
stats->tx_bytes = rsp->tx.octs;
stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
- stats->tx_drops = rsp->tx.drop;
+ stats->tx_drops = rsp->tx.drop +
+ (unsigned long)atomic_long_read(&stats->tx_discards);
exit:
mutex_unlock(&priv->mbox.lock);
}
@@ -418,6 +419,16 @@ static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
struct otx2_nic *pf = rep->mdev;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
+ struct rep_stats *stats;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ stats = &rep->stats;
+ atomic_long_inc(&stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
sq = &pf->qset.sq[rep->rep_id];
txq = netdev_get_tx_queue(dev, 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
index 38446b3e4f13..5bc9e2c7d800 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
@@ -27,6 +27,7 @@ struct rep_stats {
u64 tx_bytes;
u64 tx_frames;
u64 tx_drops;
+ atomic_long_t tx_discards;
};
struct rep_dev {
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index c855fb799ce1..e9bd32741983 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
return -1;
+ rcu_read_lock();
err = dev_fill_forward_path(dev, addr, &stack);
+ rcu_read_unlock();
if (err)
return err;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 73c26fcfd85e..0a80d8f8cff7 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -2782,7 +2782,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
if (!pdev)
goto err_of_node_put;
- get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
goto err_put_device;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3ffa3fbacd16..2c0e0c16ca90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -160,7 +160,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- mlx5_unload_one_devl_locked(dev, true);
+ mlx5_sync_reset_unload_flow(dev, true);
err = mlx5_health_wait_pci_up(dev);
if (err)
NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index b59aee75de94..2c98a5299df3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -26,7 +26,6 @@ struct mlx5e_dcbx {
u8 cap;
/* Buffer configuration */
- bool manual_buffer;
u32 cable_len;
u32 xoff;
u16 port_buff_cell_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 5ae787656a7c..4720523813b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
/* Total shared buffer size is split in a ratio of 3:1 between
* lossy and lossless pools respectively.
*/
- lossy_epool_size = (shared_buffer_size / 4) * 3;
lossless_ipool_size = shared_buffer_size / 4;
+ lossy_epool_size = shared_buffer_size - lossless_ipool_size;
mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
lossy_epool_size);
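The reordering above is a rounding fix: computing both pools as independent fractions can drop up to three cells when the shared size is not a multiple of four, whereas giving the lossy pool the exact remainder accounts for every cell. A quick stand-alone check with a hypothetical size:

#include <stdio.h>

int main(void)
{
	unsigned int shared = 1003;	/* hypothetical size in cells */

	/* Old scheme: both pools rounded down independently. */
	unsigned int lossy_old = (shared / 4) * 3;	/* 750 */
	unsigned int lossless  = shared / 4;		/* 250 */

	/* New scheme: the lossy pool takes the exact remainder. */
	unsigned int lossy_new = shared - lossless;	/* 753 */

	printf("old: %u + %u = %u (loses %u cells)\n",
	       lossy_old, lossless, lossy_old + lossless,
	       shared - (lossy_old + lossless));
	printf("new: %u + %u = %u\n", lossy_new, lossless, lossy_new + lossless);
	return 0;
}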
@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
- u32 new_headroom_size = 0;
- u32 current_headroom_size;
+ u32 current_headroom_cells = 0;
+ u32 new_headroom_cells = 0;
void *in;
int err;
int i;
- current_headroom_size = port_buffer->headroom_size;
-
in = kzalloc(sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+ current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
+
u64 size = port_buffer->buffer[i].size;
u64 xoff = port_buffer->buffer[i].xoff;
u64 xon = port_buffer->buffer[i].xon;
- new_headroom_size += size;
do_div(size, port_buff_cell_sz);
+ new_headroom_cells += size;
do_div(xoff, port_buff_cell_sz);
do_div(xon, port_buff_cell_sz);
MLX5_SET(bufferx_reg, buffer, size, size);
@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
- new_headroom_size /= port_buff_cell_sz;
- current_headroom_size /= port_buff_cell_sz;
- err = port_update_shared_buffer(priv->mdev, current_headroom_size,
- new_headroom_size);
+ err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
+ new_headroom_cells);
if (err)
goto out;
@@ -577,7 +575,6 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
}
- priv->dcbx.xoff = xoff;
/* Apply the settings */
if (update_buffer) {
@@ -586,6 +583,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return err;
}
+ priv->dcbx.xoff = xoff;
+
if (update_prio2buffer)
err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
index f4a19ffbb641..66d276a1be83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -66,11 +66,23 @@ struct mlx5e_port_buffer {
struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER];
};
+#ifdef CONFIG_MLX5_CORE_EN_DCB
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
u32 *buffer_size,
u8 *prio2buffer);
+#else
+static inline int
+mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ u32 change, unsigned int mtu,
+ void *pfc,
+ u32 *buffer_size,
+ u8 *prio2buffer)
+{
+ return 0;
+}
+#endif
int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
index a4263137fef5..01d522b02947 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
@@ -173,6 +173,8 @@ static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
+ rule_actions[0].counter.offset =
+ attr->counter->id - attr->counter->bulk->base_id;
/* Modify header is special, it may require extra arguments outside the action itself. */
if (mh_action->mh_data) {
rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 5fe016e477b3..d166c0d5189e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -362,6 +362,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
+ u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 old_cable_len = priv->dcbx.cable_len;
@@ -389,7 +390,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
if (MLX5_BUFFER_SUPPORTED(mdev)) {
pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
- if (priv->dcbx.manual_buffer)
+ ret = mlx5_query_port_buffer_ownership(mdev,
+ &buffer_ownership);
+ if (ret)
+ netdev_err(dev,
+ "%s, Failed to get buffer ownership: %d\n",
+ __func__, ret);
+
+ if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
ret = mlx5e_port_manual_buffer_config(priv, changed,
dev->mtu, &pfc_new,
NULL, NULL);
@@ -982,7 +990,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
if (!changed)
return 0;
- priv->dcbx.manual_buffer = true;
err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
buffer_size, prio2buffer);
return err;
@@ -1252,7 +1259,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
- priv->dcbx.manual_buffer = false;
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
mlx5e_ets_init(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 21bb88c5d3dc..e680673ffb72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -49,6 +49,7 @@
#include "en.h"
#include "en/dim.h"
#include "en/txrx.h"
+#include "en/port_buffer.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
@@ -138,6 +139,8 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
+ mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+ NULL, NULL, NULL);
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
@@ -3040,9 +3043,11 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- u16 mtu;
+ u16 mtu, prev_mtu;
int err;
+ mlx5e_query_mtu(mdev, params, &prev_mtu);
+
err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
if (err)
return err;
@@ -3052,6 +3057,18 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
__func__, mtu, params->sw_mtu);
+ if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) {
+ err = mlx5e_port_manual_buffer_config(priv, 0, mtu,
+ NULL, NULL, NULL);
+ if (err) {
+ netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n",
+ __func__, mtu, err, prev_mtu);
+
+ mlx5e_set_mtu(mdev, params, prev_mtu);
+ return err;
+ }
+ }
+
params->sw_mtu = mtu;
return 0;
}
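The MTU path now records the previous port MTU, re-derives the Xon/Xoff buffer thresholds for the new MTU, and restores the old MTU if that derivation fails, so MTU and buffer configuration stay consistent. A minimal sketch of the set-then-rollback pattern (set_mtu and configure_buffers are hypothetical helpers, not the driver's functions):

#include <stdio.h>

static int set_mtu(unsigned int mtu)
{
        printf("port MTU set to %u\n", mtu);
        return 0;
}

/* Fails for oversized MTUs to exercise the rollback path. */
static int configure_buffers(unsigned int mtu)
{
        return mtu > 9000 ? -1 : 0;
}

static int change_mtu(unsigned int *cur_mtu, unsigned int new_mtu)
{
        unsigned int prev_mtu = *cur_mtu;
        int err;

        err = set_mtu(new_mtu);
        if (err)
                return err;

        err = configure_buffers(new_mtu);
        if (err) {
                fprintf(stderr, "buffer config failed, restoring MTU %u\n", prev_mtu);
                set_mtu(prev_mtu);      /* roll back to a consistent state */
                return err;
        }

        *cur_mtu = new_mtu;
        return 0;
}

int main(void)
{
        unsigned int mtu = 1500;

        change_mtu(&mtu, 9216);         /* fails and rolls back */
        change_mtu(&mtu, 4096);         /* succeeds */
        printf("final MTU: %u\n", mtu);
        return 0;
}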
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 218b1a09534c..b8c609d91d11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1574,6 +1574,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
+ skb_shinfo(skb)->gso_segs = lro_num_seg;
/* Subtract one since we already counted this as one
* "regular" packet in mlx5e_complete_rx_cqe()
*/
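Filling in gso_segs next to gso_size lets the stack see how many MSS-sized segments the aggregated LRO frame represents instead of re-deriving it from the byte count. A worked example of the same arithmetic (the numbers are illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int cqe_bcnt = 17434;     /* total bytes of the aggregated frame */
        unsigned int hdrlen   = 54;        /* Ethernet + IPv4 + TCP headers */
        unsigned int lro_num_seg = 12;     /* segments coalesced by the HW */

        unsigned int gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);

        printf("gso_size = %u, gso_segs = %u\n", gso_size, lro_num_seg);
        return 0;
}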
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index b7102e14d23d..c33accadae0f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
vport_num - 1, external);
} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
+ u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
+
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
- vport_num - 1, false);
+ vport_num - base_vport, false);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 91d863c8c152..8b4977650183 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -102,6 +102,8 @@ struct mlx5_esw_sched_node {
u8 level;
/* Valid only when this node represents a traffic class. */
u8 tc;
+ /* Valid only for a TC arbiter node or vport TC arbiter. */
+ u32 tc_bw[DEVLINK_RATE_TCS_MAX];
};
static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
@@ -462,6 +464,7 @@ static int
esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
struct netlink_ext_ack *extack)
{
+ struct mlx5_esw_sched_node *parent = vport_node->parent;
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = vport_node->esw->dev;
void *attr;
@@ -477,7 +480,7 @@ esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
- vport_node->parent->ix);
+ parent ? parent->ix : vport_node->esw->qos.root_tsar_ix);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
vport_node->max_rate);
@@ -608,10 +611,7 @@ static void
esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
u32 *tc_bw)
{
- struct mlx5_esw_sched_node *vports_tc_node;
-
- list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry)
- tc_bw[vports_tc_node->tc] = vports_tc_node->bw_share;
+ memcpy(tc_bw, tc_arbiter_node->tc_bw, sizeof(tc_arbiter_node->tc_bw));
}
static void
@@ -628,6 +628,7 @@ esw_qos_set_tc_arbiter_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
u8 tc = vports_tc_node->tc;
u32 bw_share;
+ tc_arbiter_node->tc_bw[tc] = tc_bw[tc];
bw_share = tc_bw[tc] * fw_max_bw_share;
bw_share = esw_qos_calc_bw_share(bw_share, divider,
fw_max_bw_share);
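Instead of walking the arbiter's children to recover the per-traffic-class shares, the node now caches the requested tc_bw values in a fixed array, written when the shares are programmed and copied back verbatim when they are queried. A small sketch of that cache, with a made-up MAX_TCS in place of DEVLINK_RATE_TCS_MAX:

#include <stdio.h>
#include <string.h>

#define MAX_TCS 8                      /* stand-in for DEVLINK_RATE_TCS_MAX */

struct tc_arbiter {
        unsigned int tc_bw[MAX_TCS];   /* last values requested by the user */
};

static void set_tc_bw(struct tc_arbiter *arb, const unsigned int *tc_bw)
{
        memcpy(arb->tc_bw, tc_bw, sizeof(arb->tc_bw));
        /* ...program the hardware shares from tc_bw here... */
}

static void get_tc_bw(const struct tc_arbiter *arb, unsigned int *tc_bw)
{
        memcpy(tc_bw, arb->tc_bw, sizeof(arb->tc_bw));
}

int main(void)
{
        unsigned int want[MAX_TCS] = { 50, 30, 20, 0, 0, 0, 0, 0 };
        unsigned int got[MAX_TCS];
        struct tc_arbiter arb = { 0 };

        set_tc_bw(&arb, want);
        get_tc_bw(&arb, got);
        for (int i = 0; i < MAX_TCS; i++)
                printf("tc%d: %u%%\n", i, got[i]);
        return 0;
}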
@@ -786,48 +787,15 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
return err;
}
- if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
- esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack);
- } else {
- /* The eswitch doesn't support scheduling nodes.
- * Create a software-only node0 using the root TSAR to attach vport QoS to.
- */
- if (!__esw_qos_alloc_node(esw,
- esw->qos.root_tsar_ix,
- SCHED_NODE_TYPE_VPORTS_TSAR,
- NULL))
- esw->qos.node0 = ERR_PTR(-ENOMEM);
- else
- list_add_tail(&esw->qos.node0->entry,
- &esw->qos.domain->nodes);
- }
- if (IS_ERR(esw->qos.node0)) {
- err = PTR_ERR(esw->qos.node0);
- esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err);
- goto err_node0;
- }
refcount_set(&esw->qos.refcnt, 1);
return 0;
-
-err_node0:
- if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
- esw->qos.root_tsar_ix))
- esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
-
- return err;
}
static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
int err;
- if (esw->qos.node0->ix != esw->qos.root_tsar_ix)
- __esw_qos_destroy_node(esw->qos.node0, NULL);
- else
- __esw_qos_free_node(esw->qos.node0);
- esw->qos.node0 = NULL;
-
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix);
@@ -990,13 +958,16 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
- int err, new_level, max_level;
+ struct mlx5_esw_sched_node *parent = vport_node->parent;
+ int err;
if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ int new_level, max_level;
+
/* Increase the parent's level by 2 to account for both the
* TC arbiter and the vports TC scheduling element.
*/
- new_level = vport_node->parent->level + 2;
+ new_level = (parent ? parent->level : 2) + 2;
max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
log_esw_max_sched_depth);
if (new_level > max_level) {
@@ -1033,9 +1004,7 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
err_sched_nodes:
if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
esw_qos_node_destroy_sched_element(vport_node, NULL);
- list_add_tail(&vport_node->entry,
- &vport_node->parent->children);
- vport_node->level = vport_node->parent->level + 1;
+ esw_qos_node_attach_to_parent(vport_node);
} else {
esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL);
}
@@ -1083,7 +1052,6 @@ err_out:
static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
- struct mlx5_esw_sched_node *parent = vport_node->parent;
enum sched_node_type curr_type = vport_node->type;
if (curr_type == SCHED_NODE_TYPE_VPORT)
@@ -1092,8 +1060,9 @@ static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_a
esw_qos_vport_tc_disable(vport, extack);
vport_node->bw_share = 0;
+ memset(vport_node->tc_bw, 0, sizeof(vport_node->tc_bw));
list_del_init(&vport_node->entry);
- esw_qos_normalize_min_rate(parent->esw, parent, extack);
+ esw_qos_normalize_min_rate(vport_node->esw, vport_node->parent, extack);
trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport);
}
@@ -1103,25 +1072,23 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
int err;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
- esw_qos_node_set_parent(vport->qos.sched_node, parent);
- if (type == SCHED_NODE_TYPE_VPORT) {
- err = esw_qos_vport_create_sched_element(vport->qos.sched_node,
- extack);
- } else {
+ esw_qos_node_set_parent(vport_node, parent);
+ if (type == SCHED_NODE_TYPE_VPORT)
+ err = esw_qos_vport_create_sched_element(vport_node, extack);
+ else
err = esw_qos_vport_tc_enable(vport, type, extack);
- }
if (err)
return err;
- vport->qos.sched_node->type = type;
- esw_qos_normalize_min_rate(parent->esw, parent, extack);
- trace_mlx5_esw_vport_qos_create(vport->dev, vport,
- vport->qos.sched_node->max_rate,
- vport->qos.sched_node->bw_share);
+ vport_node->type = type;
+ esw_qos_normalize_min_rate(vport_node->esw, parent, extack);
+ trace_mlx5_esw_vport_qos_create(vport->dev, vport, vport_node->max_rate,
+ vport_node->bw_share);
return 0;
}
@@ -1132,6 +1099,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
struct mlx5_esw_sched_node *sched_node;
+ struct mlx5_eswitch *parent_esw;
int err;
esw_assert_qos_lock_held(esw);
@@ -1139,10 +1107,14 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
if (err)
return err;
- parent = parent ?: esw->qos.node0;
- sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent);
- if (!sched_node)
+ parent_esw = parent ? parent->esw : esw;
+ sched_node = __esw_qos_alloc_node(parent_esw, 0, type, parent);
+ if (!sched_node) {
+ esw_qos_put(esw);
return -ENOMEM;
+ }
+ if (!parent)
+ list_add_tail(&sched_node->entry, &esw->qos.domain->nodes);
sched_node->max_rate = max_rate;
sched_node->min_rate = min_rate;
@@ -1150,6 +1122,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
vport->qos.sched_node = sched_node;
err = esw_qos_vport_enable(vport, type, parent, extack);
if (err) {
+ __esw_qos_free_node(sched_node);
esw_qos_put(esw);
vport->qos.sched_node = NULL;
}
@@ -1157,6 +1130,19 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
return err;
}
+static void mlx5_esw_qos_vport_disable_locked(struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw_assert_qos_lock_held(esw);
+ if (!vport->qos.sched_node)
+ return;
+
+ esw_qos_vport_disable(vport, NULL);
+ mlx5_esw_qos_vport_qos_free(vport);
+ esw_qos_put(esw);
+}
+
void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
@@ -1168,11 +1154,9 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
goto unlock;
parent = vport->qos.sched_node->parent;
- WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node");
+ WARN(parent, "Disabling QoS on port before detaching it from node");
- esw_qos_vport_disable(vport, NULL);
- mlx5_esw_qos_vport_qos_free(vport);
- esw_qos_put(esw);
+ mlx5_esw_qos_vport_disable_locked(vport);
unlock:
esw_qos_unlock(esw);
}
@@ -1262,13 +1246,13 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_sched_node *curr_parent = vport->qos.sched_node->parent;
- enum sched_node_type curr_type = vport->qos.sched_node->type;
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *curr_parent = vport_node->parent;
+ enum sched_node_type curr_type = vport_node->type;
u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
int err;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
- parent = parent ?: curr_parent;
if (curr_type == type && curr_parent == parent)
return 0;
@@ -1276,10 +1260,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
if (err)
return err;
- if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
- esw_qos_tc_arbiter_get_bw_shares(vport->qos.sched_node,
- curr_tc_bw);
- }
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type)
+ esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw);
esw_qos_vport_disable(vport, extack);
@@ -1290,8 +1272,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
}
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
- esw_qos_set_tc_arbiter_bw_shares(vport->qos.sched_node,
- curr_tc_bw, extack);
+ esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw,
+ extack);
}
return err;
@@ -1306,16 +1288,16 @@ static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw
esw_assert_qos_lock_held(esw);
curr_parent = vport->qos.sched_node->parent;
- parent = parent ?: esw->qos.node0;
if (curr_parent == parent)
return 0;
/* Set vport QoS type based on parent node type if different from
* default QoS; otherwise, use the vport's current QoS type.
*/
- if (parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
type = SCHED_NODE_TYPE_RATE_LIMITER;
- else if (curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ else if (curr_parent &&
+ curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
type = SCHED_NODE_TYPE_VPORT;
else
type = vport->qos.sched_node->type;
@@ -1654,9 +1636,10 @@ static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
u32 *tc_bw)
{
- struct mlx5_eswitch *esw = vport->qos.sched_node ?
- vport->qos.sched_node->parent->esw :
- vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *node = vport->qos.sched_node;
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw = (node && node->parent) ? node->parent->esw : esw;
return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
}
@@ -1673,6 +1656,21 @@ static bool esw_qos_tc_bw_disabled(u32 *tc_bw)
return true;
}
+static void esw_vport_qos_prune_empty(struct mlx5_vport *vport)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+ if (!vport_node)
+ return;
+
+ if (vport_node->parent || vport_node->max_rate ||
+ vport_node->min_rate || !esw_qos_tc_bw_disabled(vport_node->tc_bw))
+ return;
+
+ mlx5_esw_qos_vport_disable_locked(vport);
+}
+
int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
{
if (esw->qos.domain)
@@ -1706,6 +1704,10 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack);
+ if (err)
+ goto out;
+ esw_vport_qos_prune_empty(vport);
+out:
esw_qos_unlock(esw);
return err;
}
@@ -1727,6 +1729,10 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack);
+ if (err)
+ goto out;
+ esw_vport_qos_prune_empty(vport);
+out:
esw_qos_unlock(esw);
return err;
}
@@ -1763,7 +1769,8 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
if (disable) {
if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT,
- NULL, extack);
+ vport_node->parent, extack);
+ esw_vport_qos_prune_empty(vport);
goto unlock;
}
@@ -1775,7 +1782,7 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
} else {
err = esw_qos_vport_update(vport,
SCHED_NODE_TYPE_TC_ARBITER_TSAR,
- NULL, extack);
+ vport_node->parent, extack);
}
if (!err)
esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack);
@@ -1924,14 +1931,20 @@ int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
void *priv, void *parent_priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_sched_node *node;
+ struct mlx5_esw_sched_node *node = parent ? parent_priv : NULL;
struct mlx5_vport *vport = priv;
+ int err;
- if (!parent)
- return mlx5_esw_qos_vport_update_parent(vport, NULL, extack);
+ err = mlx5_esw_qos_vport_update_parent(vport, node, extack);
+ if (!err) {
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw_qos_lock(esw);
+ esw_vport_qos_prune_empty(vport);
+ esw_qos_unlock(esw);
+ }
- node = parent_priv;
- return mlx5_esw_qos_vport_update_parent(vport, node, extack);
+ return err;
}
static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)
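With the implicit node0 gone, a vport rate object that has no parent and no remaining settings no longer needs to keep its QoS state, so the new esw_vport_qos_prune_empty() tears it down once the last limit is cleared. A hedged sketch of the "prune when everything is back to defaults" check (the field names below are illustrative, not the driver's structures):

#include <stdbool.h>
#include <stdio.h>

struct vport_qos {
        void *parent;            /* non-NULL when grouped under a rate node */
        unsigned int max_rate;   /* 0 == unlimited */
        unsigned int min_rate;   /* 0 == no guarantee */
        unsigned int tc_bw[8];   /* all zero == TC arbitration disabled */
};

static bool tc_bw_disabled(const struct vport_qos *q)
{
        for (int i = 0; i < 8; i++)
                if (q->tc_bw[i])
                        return false;
        return true;
}

/* Drop the QoS object once no setting keeps it alive. */
static bool prune_if_empty(struct vport_qos *q)
{
        if (q->parent || q->max_rate || q->min_rate || !tc_bw_disabled(q))
                return false;
        printf("all settings are defaults: disabling vport QoS\n");
        return true;
}

int main(void)
{
        struct vport_qos q = { .max_rate = 1000 };

        printf("pruned: %d\n", prune_if_empty(&q));  /* kept, max_rate still set */
        q.max_rate = 0;
        printf("pruned: %d\n", prune_if_empty(&q));  /* now pruned */
        return 0;
}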
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b0b8ef3ec3c4..45506ad56847 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -373,11 +373,6 @@ struct mlx5_eswitch {
refcount_t refcnt;
u32 root_tsar_ix;
struct mlx5_qos_domain *domain;
- /* Contains all vports with QoS enabled but no explicit node.
- * Cannot be NULL if QoS is enabled, but may be a fake node
- * referencing the root TSAR if the esw doesn't support nodes.
- */
- struct mlx5_esw_sched_node *node0;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d87392360dbd..cb165085a4c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -3734,6 +3734,13 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
char *value = val.vstr;
u8 eswitch_mode;
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Changing fs mode is not supported when eswitch offloads enabled.");
+ return -EOPNOTSUPP;
+ }
+
if (!strcmp(value, "dmfs"))
return 0;
@@ -3759,14 +3766,6 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
return -EINVAL;
}
- eswitch_mode = mlx5_eswitch_mode(dev);
- if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
- NL_SET_ERR_MSG_FMT_MOD(extack,
- "Moving to %s is not supported when eswitch offloads enabled.",
- value);
- return -EOPNOTSUPP;
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 69933addd921..22995131824a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -6,13 +6,15 @@
#include "fw_reset.h"
#include "diag/fw_tracer.h"
#include "lib/tout.h"
+#include "sf/sf.h"
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
MLX5_FW_RESET_FLAGS_PENDING_COMP,
MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
- MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
+ MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED,
+ MLX5_FW_RESET_FLAGS_UNLOAD_EVENT,
};
struct mlx5_fw_reset {
@@ -219,7 +221,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
}
-static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
+static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
struct devlink *devlink = priv_to_devlink(dev);
@@ -228,8 +230,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- if (!unloaded)
- mlx5_unload_one(dev, false);
+ mlx5_sync_reset_unload_flow(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
@@ -272,7 +273,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_fw_reset_complete_reload(dev, false);
+ mlx5_fw_reset_complete_reload(dev);
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
@@ -428,6 +429,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
return false;
}
+ if (!mlx5_core_is_ecpf(dev) && !mlx5_sf_table_empty(dev)) {
+ mlx5_core_warn(dev, "SFs should be removed before reset\n");
+ return false;
+ }
+
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
err = mlx5_check_hotplug_interrupt(dev, bridge);
@@ -586,6 +592,65 @@ static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
return err;
}
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ unsigned long timeout;
+ int poll_freq = 20;
+ bool reset_action;
+ u8 rst_state;
+ int err;
+
+ if (locked)
+ mlx5_unload_one_devl_locked(dev, false);
+ else
+ mlx5_unload_one(dev, false);
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags))
+ return;
+
+ mlx5_set_fw_rst_ack(dev);
+ mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
+
+ reset_action = false;
+ timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
+ do {
+ rst_state = mlx5_get_fw_rst_state(dev);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
+ rst_state == MLX5_FW_RST_STATE_IDLE) {
+ reset_action = true;
+ break;
+ }
+ if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
+ mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
+ mlx5_set_fw_rst_ack(dev);
+ poll_freq = 1000;
+ }
+ msleep(poll_freq);
+ } while (!time_after(jiffies, timeout));
+
+ if (!reset_action) {
+ mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
+ rst_state);
+ fw_reset->ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n",
+ rst_state);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n",
+ err);
+ fw_reset->ret = err;
+ }
+ }
+
+done:
+ clear_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
+}
+
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -613,17 +678,13 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
mlx5_enter_error_state(dev, true);
done:
fw_reset->ret = err;
- mlx5_fw_reset_complete_reload(dev, false);
+ mlx5_fw_reset_complete_reload(dev);
}
static void mlx5_sync_reset_unload_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_dev *dev;
- unsigned long timeout;
- int poll_freq = 20;
- bool reset_action;
- u8 rst_state;
int err;
fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
@@ -632,6 +693,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
if (mlx5_sync_reset_clear_reset_requested(dev, false))
return;
+ set_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
err = mlx5_cmd_fast_teardown_hca(dev);
@@ -640,49 +702,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
else
mlx5_enter_error_state(dev, true);
- if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
- mlx5_unload_one_devl_locked(dev, false);
- else
- mlx5_unload_one(dev, false);
-
- mlx5_set_fw_rst_ack(dev);
- mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
-
- reset_action = false;
- timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
- do {
- rst_state = mlx5_get_fw_rst_state(dev);
- if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
- rst_state == MLX5_FW_RST_STATE_IDLE) {
- reset_action = true;
- break;
- }
- if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
- mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
- mlx5_set_fw_rst_ack(dev);
- poll_freq = 1000;
- }
- msleep(poll_freq);
- } while (!time_after(jiffies, timeout));
-
- if (!reset_action) {
- mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
- rst_state);
- fw_reset->ret = -ETIMEDOUT;
- goto done;
- }
-
- mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
- if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
- err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
- if (err) {
- mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
- fw_reset->ret = err;
- }
- }
-
-done:
- mlx5_fw_reset_complete_reload(dev, true);
+ mlx5_fw_reset_complete_reload(dev);
}
static void mlx5_sync_reset_abort_event(struct work_struct *work)
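The mlx5_sync_reset_unload_flow() factored out above keeps the original behaviour: acknowledge the unload, then poll the firmware reset state until a deadline, slowing the poll rate once the device enters drop mode. A user-space sketch of the same deadline-plus-adaptive-interval loop, with a fake state machine standing in for the firmware register (intervals and deadline are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

enum rst_state { RST_PENDING, RST_DROP_MODE, RST_TOGGLE_REQ };

/* Fake firmware: pending, then drop mode, then the reset request. */
static enum rst_state read_rst_state(void)
{
        static int calls;

        calls++;
        if (calls < 3)
                return RST_PENDING;
        return calls < 5 ? RST_DROP_MODE : RST_TOGGLE_REQ;
}

static double now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

static void sleep_ms(long ms)
{
        struct timespec ts = { .tv_sec = ms / 1000,
                               .tv_nsec = (ms % 1000) * 1000000L };
        nanosleep(&ts, NULL);
}

int main(void)
{
        double deadline = now_ms() + 2000;   /* 2 s budget, illustrative */
        long poll_ms = 20;                   /* start polling every 20 ms */
        bool got_action = false;

        do {
                enum rst_state st = read_rst_state();

                if (st == RST_TOGGLE_REQ) {
                        got_action = true;
                        break;
                }
                if (st == RST_DROP_MODE) {
                        printf("drop mode acked, polling more slowly\n");
                        poll_ms = 200;       /* back off once FW drops traffic */
                }
                sleep_ms(poll_ms);
        } while (now_ms() < deadline);

        puts(got_action ? "reset action received" : "timed out");
        return got_action ? 0 : 1;
}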
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index ea527d06a85f..d5b28525c960 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -12,6 +12,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked);
int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
struct netlink_ext_ack *extack);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b6d53db27cd5..9d3504f5abfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -367,6 +367,8 @@ int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
+ u8 *buffer_ownership);
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 549f1066d2a5..2d7adf7444ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -968,6 +968,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
return err;
}
+int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
+ u8 *buffer_ownership)
+{
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {};
+ int err;
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) {
+ *buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
+ return 0;
+ }
+
+ err = mlx5_query_pfcc_reg(mdev, out, sizeof(out));
+ if (err)
+ return err;
+
+ *buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership);
+
+ return 0;
+}
+
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
{
int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 0864ba625c07..3304f25cc805 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -518,3 +518,13 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
}
+
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return true;
+
+ return xa_empty(&table->function_ids);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 860f9ddb7107..89559a37997a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -17,6 +17,7 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
int mlx5_sf_table_init(struct mlx5_core_dev *dev);
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev);
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *add_attr,
@@ -61,6 +62,11 @@ static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
}
+static inline bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index 396804369b00..6b36a4a7d895 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -117,7 +117,7 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
ret = -EINVAL;
- goto unlock_and_out;
+ goto free_shared_stc;
}
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 92de4b761a83..adeccc588e5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -74,9 +74,9 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
static int
hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- bool move_error = false, poll_error = false, drain_error = false;
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ int drain_error = 0, move_error = 0, poll_error = 0;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
struct mlx5hws_rule_attr rule_attr;
struct mlx5hws_bwc_rule *bwc_rule;
@@ -84,6 +84,7 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
struct list_head *rules_list;
u32 pending_rules;
int i, ret = 0;
+ bool drain;
mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
@@ -99,23 +100,37 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
ret = mlx5hws_matcher_resize_rule_move(matcher,
bwc_rule->rule,
&rule_attr);
- if (unlikely(ret && !move_error)) {
- mlx5hws_err(ctx,
- "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
- ret);
- move_error = true;
+ if (unlikely(ret)) {
+ if (!move_error) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = ret;
+ }
+ /* Rule wasn't queued, no need to poll */
+ continue;
}
pending_rules++;
+ drain = pending_rules >=
+ hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
ret = mlx5hws_bwc_queue_poll(ctx,
rule_attr.queue_id,
&pending_rules,
- false);
- if (unlikely(ret && !poll_error)) {
- mlx5hws_err(ctx,
- "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n",
- ret);
- poll_error = true;
+ drain);
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: timeout polling for completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!poll_error) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = ret;
+ }
}
}
@@ -126,17 +141,30 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
rule_attr.queue_id,
&pending_rules,
true);
- if (unlikely(ret && !drain_error)) {
- mlx5hws_err(ctx,
- "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n",
- ret);
- drain_error = true;
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving bwc rule: timeout draining completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!drain_error) {
+ mlx5hws_err(ctx,
+ "Moving bwc rule: drain failed (%d), attempting to move rest of the rules\n",
+ ret);
+ drain_error = ret;
+ }
}
}
}
- if (move_error || poll_error || drain_error)
- ret = -EINVAL;
+ /* Return the first error that happened */
+ if (unlikely(move_error))
+ return move_error;
+ if (unlikely(poll_error))
+ return poll_error;
+ if (unlikely(drain_error))
+ return drain_error;
return ret;
}
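The reworked move loop distinguishes two failure classes: a timeout while polling completions aborts the rehash immediately, while other errors are recorded once and the loop keeps moving the remaining rules, returning the first recorded error instead of a generic -EINVAL. A condensed sketch of that error policy (process_item and its failure pattern are illustrative):

#include <errno.h>
#include <stdio.h>

/* Fake work: item 4 fails with -EIO; -ETIMEDOUT would abort the loop at once. */
static int process_item(int i)
{
        if (i == 4)
                return -EIO;
        return 0;
}

static int move_all(int nitems)
{
        int first_err = 0;

        for (int i = 0; i < nitems; i++) {
                int ret = process_item(i);

                if (!ret)
                        continue;
                if (ret == -ETIMEDOUT) {
                        fprintf(stderr, "item %d timed out, aborting\n", i);
                        return ret;           /* fatal: stop right away */
                }
                if (!first_err) {
                        fprintf(stderr, "item %d failed (%d), continuing\n",
                                i, ret);
                        first_err = ret;      /* remember only the first error */
                }
        }
        return first_err;                     /* 0 on full success */
}

int main(void)
{
        printf("move_all() = %d\n", move_all(8));
        return 0;
}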
@@ -1035,6 +1063,21 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
return 0; /* rule inserted successfully */
}
+ /* Rule insertion could fail due to queue being full, timeout, or
+ * matcher in resize. In such cases, no point in trying to rehash.
+ */
+ if (ret == -EBUSY || ret == -ETIMEDOUT || ret == -EAGAIN) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx,
+ "BWC rule insertion failed - %s (%d)\n",
+ ret == -EBUSY ? "queue is full" :
+ ret == -ETIMEDOUT ? "timeout" :
+ ret == -EAGAIN ? "matcher in resize" : "N/A",
+ ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
+ return ret;
+ }
+
/* At this point the rule wasn't added.
* It could be because there was collision, or some other problem.
* Try rehash by size and insert rule again - last chance.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index ca7501c57468..14e79579c719 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -1328,11 +1328,11 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
- bool move_error = false, poll_error = false;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
struct mlx5hws_bwc_rule *tmp_bwc_rule;
struct mlx5hws_rule_attr rule_attr;
struct mlx5hws_table *isolated_tbl;
+ int move_error = 0, poll_error = 0;
struct mlx5hws_rule *tmp_rule;
struct list_head *rules_list;
u32 expected_completions = 1;
@@ -1391,11 +1391,15 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
ret = mlx5hws_matcher_resize_rule_move(matcher,
tmp_rule,
&rule_attr);
- if (unlikely(ret && !move_error)) {
- mlx5hws_err(ctx,
- "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n",
- ret);
- move_error = true;
+ if (unlikely(ret)) {
+ if (!move_error) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = ret;
+ }
+ /* Rule wasn't queued, no need to poll */
+ continue;
}
expected_completions = 1;
@@ -1403,11 +1407,19 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
rule_attr.queue_id,
&expected_completions,
true);
- if (unlikely(ret && !poll_error)) {
- mlx5hws_err(ctx,
- "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n",
- ret);
- poll_error = true;
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: timeout polling for completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!poll_error) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = ret;
+ }
}
/* Done moving the rule to the new matcher,
@@ -1422,8 +1434,11 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
}
}
- if (move_error || poll_error)
- ret = -EINVAL;
+ /* Return the first error that happened */
+ if (unlikely(move_error))
+ return move_error;
+ if (unlikely(poll_error))
+ return poll_error;
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index 9c83753e4592..0bdcab2e5cf3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -55,6 +55,7 @@ int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index fa6bff210266..122ccc671628 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -36,6 +36,7 @@ struct mlx5hws_cmd_set_fte_attr {
struct mlx5hws_cmd_ft_create_attr {
u8 type;
u8 level;
+ u16 uid;
bool rtc_valid;
bool decap_en;
bool reformat_en;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index 57592b92e24b..131e74b2b774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -267,6 +267,7 @@ static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
tbl_attr.level = ft_attr->level;
+ tbl_attr.uid = ft_attr->uid;
tbl = mlx5hws_table_create(ctx, &tbl_attr);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index f3ea09caba2b..32f87fdf3213 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -85,6 +85,7 @@ static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
tbl,
+ 0,
&matcher->end_ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");
@@ -112,7 +113,9 @@ static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
if (mlx5hws_matcher_is_isolated(matcher))
ret = hws_matcher_create_end_ft_isolated(matcher);
else
- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl,
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ 0,
&matcher->end_ft_id);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index 59c14745ed0c..2498ceff2060 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -75,6 +75,7 @@ struct mlx5hws_context_attr {
struct mlx5hws_table_attr {
enum mlx5hws_table_type type;
u32 level;
+ u16 uid;
};
enum mlx5hws_matcher_flow_src {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index 51e4c551e0ef..d56271a9e4f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -279,7 +279,7 @@ int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
return ret;
clean_pattern:
- mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, *pattern_id);
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
out_unlock:
mutex_unlock(&ctx->pattern_cache->lock);
return ret;
@@ -527,7 +527,6 @@ int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
u32 *nop_locations, __be64 *new_pat)
{
u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD;
- u16 src_field, dst_field;
u8 action_type;
bool dependent;
size_t i, j;
@@ -539,6 +538,9 @@ int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
return 0;
for (i = 0, j = 0; i < num_actions; i++, j++) {
+ u16 src_field = INVALID_FIELD;
+ u16 dst_field = INVALID_FIELD;
+
if (j >= max_actions)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index 7e37d6e9eb83..7b5071c3df36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -124,6 +124,7 @@ static int hws_pool_buddy_init(struct mlx5hws_pool *pool)
mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n",
pool->type, pool->alloc_log_sz);
mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index c4b22be19a9b..b0595c9b09e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -964,7 +964,6 @@ static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
return -ENOMEM;
MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
- MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index 568f691733f3..6113383ae47b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -9,6 +9,7 @@ u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
}
static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+ u16 uid,
struct mlx5hws_cmd_ft_create_attr *ft_attr)
{
ft_attr->type = tbl->fw_ft_type;
@@ -16,7 +17,9 @@ static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
else
ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
+
ft_attr->rtc_valid = true;
+ ft_attr->uid = uid;
}
static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
@@ -119,12 +122,12 @@ static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32
int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
struct mlx5hws_table *tbl,
- u32 *ft_id)
+ u16 uid, u32 *ft_id)
{
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
int ret;
- hws_table_init_next_ft_attr(tbl, &ft_attr);
+ hws_table_init_next_ft_attr(tbl, uid, &ft_attr);
hws_table_set_cap_attr(tbl, &ft_attr);
ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
@@ -189,7 +192,10 @@ static int hws_table_init(struct mlx5hws_table *tbl)
}
mutex_lock(&ctx->ctrl_lock);
- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ tbl->uid,
+ &tbl->ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
mutex_unlock(&ctx->ctrl_lock);
@@ -239,6 +245,7 @@ struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
tbl->ctx = ctx;
tbl->type = attr->type;
tbl->level = attr->level;
+ tbl->uid = attr->uid;
ret = hws_table_init(tbl);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
index 0400cce0c317..1246f9bd8422 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
@@ -18,6 +18,7 @@ struct mlx5hws_table {
enum mlx5hws_table_type type;
u32 fw_ft_type;
u32 level;
+ u16 uid;
struct list_head matchers_list;
struct list_head tbl_list_node;
struct mlx5hws_default_miss default_miss;
@@ -47,7 +48,7 @@ u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
struct mlx5hws_table *tbl,
- u32 *ft_id);
+ u16 uid, u32 *ft_id);
void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
u32 ft_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 618957d65663..9a2d64a0a858 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2375,6 +2375,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 80ee5c4825dc..9962dc157901 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -94,6 +94,7 @@ enum {
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 7bd7812d9c06..40581550da1a 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -33,7 +33,7 @@ int __fbnic_open(struct fbnic_net *fbn)
dev_warn(fbd->dev,
"Error %d sending host ownership message to the firmware\n",
err);
- goto free_resources;
+ goto err_reset_queues;
}
err = fbnic_time_start(fbn);
@@ -52,11 +52,15 @@ int __fbnic_open(struct fbnic_net *fbn)
fbnic_bmc_rpc_init(fbd);
fbnic_rss_reinit(fbd, fbn);
+ phylink_resume(fbn->phylink);
+
return 0;
time_stop:
fbnic_time_stop(fbn);
release_ownership:
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+err_reset_queues:
+ fbnic_reset_netif_queues(fbn);
free_resources:
fbnic_free_resources(fbn);
free_napi_vectors:
@@ -82,6 +86,8 @@ static int fbnic_stop(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));
+
fbnic_down(fbn);
fbnic_pcs_free_irq(fbn->fbd);
@@ -420,15 +426,17 @@ static void fbnic_get_stats64(struct net_device *dev,
tx_packets = stats->packets;
tx_dropped = stats->dropped;
- stats64->tx_bytes = tx_bytes;
- stats64->tx_packets = tx_packets;
- stats64->tx_dropped = tx_dropped;
-
/* Record drops from Tx HW Datapath */
+ spin_lock(&fbd->hw_stats_lock);
tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
fbd->hw_stats.tti.cm_drop.frames.value +
fbd->hw_stats.tti.frame_drop.frames.value +
fbd->hw_stats.tti.tbi_drop.frames.value;
+ spin_unlock(&fbd->hw_stats_lock);
+
+ stats64->tx_bytes = tx_bytes;
+ stats64->tx_packets = tx_packets;
+ stats64->tx_dropped = tx_dropped;
for (i = 0; i < fbn->num_tx_queues; i++) {
struct fbnic_ring *txr = fbn->tx[i];
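The stats path now folds the hardware drop counters into tx_dropped while holding hw_stats_lock and only then publishes the totals, presumably so a concurrent hardware stats refresh cannot race with the read. A small pthread sketch of snapshotting counters under a lock before exporting them (the names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct hw_stats {
        pthread_mutex_t lock;
        unsigned long long tmi_drops;
        unsigned long long tti_drops;
};

static struct hw_stats stats = { .lock = PTHREAD_MUTEX_INITIALIZER,
                                 .tmi_drops = 3, .tti_drops = 7 };

/* Aggregate under the lock, publish outside it. */
static unsigned long long tx_dropped_snapshot(unsigned long long sw_dropped)
{
        unsigned long long total = sw_dropped;

        pthread_mutex_lock(&stats.lock);
        total += stats.tmi_drops + stats.tti_drops;
        pthread_mutex_unlock(&stats.lock);

        return total;
}

int main(void)
{
        printf("tx_dropped = %llu\n", tx_dropped_snapshot(1));
        return 0;
}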
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index b70e4cadb37b..28e23e3ffca8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -118,14 +118,12 @@ static void fbnic_service_task_start(struct fbnic_net *fbn)
struct fbnic_dev *fbd = fbn->fbd;
schedule_delayed_work(&fbd->service_task, HZ);
- phylink_resume(fbn->phylink);
}
static void fbnic_service_task_stop(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd));
cancel_delayed_work(&fbd->service_task);
}
@@ -443,11 +441,10 @@ static int __fbnic_pm_resume(struct device *dev)
/* Re-enable mailbox */
err = fbnic_fw_request_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
if (err)
goto err_free_irqs;
- devl_unlock(priv_to_devlink(fbd));
-
/* Only send log history if log buffer is empty to prevent duplicate
* log entries.
*/
@@ -464,20 +461,20 @@ static int __fbnic_pm_resume(struct device *dev)
rtnl_lock();
- if (netif_running(netdev)) {
+ if (netif_running(netdev))
err = __fbnic_open(fbn);
- if (err)
- goto err_free_mbx;
- }
rtnl_unlock();
+ if (err)
+ goto err_free_mbx;
return 0;
err_free_mbx:
fbnic_fw_log_disable(fbd);
- rtnl_unlock();
+ devl_lock(priv_to_devlink(fbd));
fbnic_fw_free_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
err_free_irqs:
fbnic_free_irqs(fbd);
err_invalidate_uc_addr:
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index ac11389a764c..f9543d03485f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -661,8 +661,8 @@ static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
{
struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
- page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
- rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
+ page_pool_fragment_page(page, FBNIC_PAGECNT_BIAS_MAX);
+ rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
rx_buf->page = page;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 2e361d6f03ff..34693596e5eb 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -91,10 +91,8 @@ struct fbnic_queue_stats {
struct u64_stats_sync syncp;
};
-/* Pagecnt bias is long max to reserve the last bit to catch overflow
- * cases where if we overcharge the bias it will flip over to be negative.
- */
-#define PAGECNT_BIAS_MAX LONG_MAX
+#define FBNIC_PAGECNT_BIAS_MAX PAGE_SIZE
+
struct fbnic_rx_buf {
struct page *page;
long pagecnt_bias;
diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
index dd436bdff0f8..84c41f193561 100644
--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
+++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
@@ -32,6 +32,10 @@
/* MAC Specific Addr 1 Top Reg */
#define LAN865X_REG_MAC_H_SADDR1 0x00010023
+/* MAC TSU Timer Increment Register */
+#define LAN865X_REG_MAC_TSU_TIMER_INCR 0x00010077
+#define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS 0x0028
+
struct lan865x_priv {
struct work_struct multicast_work;
struct net_device *netdev;
@@ -311,6 +315,8 @@ static int lan865x_net_open(struct net_device *netdev)
phy_start(netdev->phydev);
+ netif_start_queue(netdev);
+
return 0;
}
@@ -344,6 +350,21 @@ static int lan865x_probe(struct spi_device *spi)
goto free_netdev;
}
+ /* LAN865x Rev.B0/B1 configuration parameters from AN1760
+ * As per the Configuration Application Note AN1760 published in the
+ * link, https://www.microchip.com/en-us/application-notes/an1760
+ * Revision F (DS60001760G - June 2024), configure the MAC to set time
+ * stamping at the end of the Start of Frame Delimiter (SFD) and set the
+ * Timer Increment reg to 40 ns to be used as a 25 MHz internal clock.
+ */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR,
+ MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n",
+ ret);
+ goto oa_tc6_exit;
+ }
+
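As the AN1760 comment above notes, the register write programs the TSU timer increment to 0x0028, i.e. 40 decimal, so each tick of the 25 MHz internal clock advances the timestamp unit by 40 ns (1 / 25 MHz). A one-line check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int clk_hz = 25000000;              /* 25 MHz internal clock */
        unsigned int incr_ns = 1000000000u / clk_hz; /* period of one tick in ns */

        printf("timer increment = %u ns (0x%04x)\n", incr_ns, incr_ns);
        return 0;
}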
/* As per the point s3 in the below errata, SPI receive Ethernet frame
* transfer may halt when starting the next frame in the same data block
* (chunk) as the end of a previous frame. The RFA field should be
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index 20decdeb9fdb..b9209eb6ea73 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -241,7 +241,7 @@ union rtase_rx_desc {
#define RTASE_RX_RES BIT(20)
#define RTASE_RX_RUNT BIT(19)
#define RTASE_RX_RWT BIT(18)
-#define RTASE_RX_CRC BIT(16)
+#define RTASE_RX_CRC BIT(17)
#define RTASE_RX_V6F BIT(31)
#define RTASE_RX_V4F BIT(30)
#define RTASE_RX_UDPT BIT(29)
diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c
index 2258f854e5be..e872f926e438 100644
--- a/drivers/net/ethernet/sfc/tc_encap_actions.c
+++ b/drivers/net/ethernet/sfc/tc_encap_actions.c
@@ -442,7 +442,7 @@ static void efx_tc_update_encap(struct efx_nic *efx,
rule = container_of(acts, struct efx_tc_flow_rule, acts);
if (rule->fallback)
fallback = rule->fallback;
- else /* fallback: deliver to PF */
+ else /* fallback of the fallback: deliver to PF */
fallback = &efx->tc->facts.pf;
rc = efx_mae_update_rule(efx, fallback->fw_id,
rule->fw_id);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 09ae16e026eb..6c363f9b0ce2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -330,15 +330,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks);
+ ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &plat_dat->clks);
if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n");
+ return dev_err_probe(&pdev->dev, ret, "Failed to retrieve and enable all required clocks\n");
plat_dat->num_clks = ret;
- ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
-
plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat,
data->stmmac_clk_name);
@@ -346,7 +342,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
- clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
return ret;
}
@@ -370,15 +365,11 @@ remove:
static void dwc_eth_dwmac_remove(struct platform_device *pdev)
{
const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
- struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
if (data->remove)
data->remove(pdev);
-
- if (plat_dat)
- clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 79b92130a03f..f6687c2f30f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1765,11 +1765,15 @@ err_gmac_powerdown:
static void rk_gmac_remove(struct platform_device *pdev)
{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev));
+ struct rk_priv_data *bsp_priv = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
rk_gmac_powerdown(bsp_priv);
+
+ if (priv->plat->phy_node && bsp_priv->integrated_phy)
+ clk_put(bsp_priv->clk_phy);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index c72ee759aae5..6c6c49e4b66f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -152,7 +152,7 @@ static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
{
struct thead_dwmac *dwmac = plat->bsp_priv;
- u32 reg;
+ u32 reg, div;
switch (plat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
@@ -164,6 +164,13 @@ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
/* use pll */
+ div = clk_get_rate(plat->stmmac_clk) / rgmii_clock(SPEED_1000);
+ reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
+ FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
+
+ writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
+ writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
+
writel(GMAC_GTXCLK_SEL_PLL, dwmac->apb_base + GMAC_GTXCLK_SEL);
reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN |
GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN;
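For the RGMII modes the glue layer now derives the GTX clock divider from the PLL rate and the RGMII reference returned by rgmii_clock(SPEED_1000) (125 MHz for gigabit), clears the divider register, then programs the enable bit together with the divisor. A quick sketch of the divider arithmetic, assuming a hypothetical 1 GHz PLL:

#include <stdio.h>

int main(void)
{
        unsigned long pll_hz = 1000000000UL;   /* assumed PLL rate */
        unsigned long rgmii_hz = 125000000UL;  /* RGMII reference for 1G */
        unsigned long div = pll_hz / rgmii_hz; /* value written to the DIV_NUM field */

        printf("PLL divider = %lu (%lu Hz / %lu Hz)\n", div, pll_hz, rgmii_hz);
        return 0;
}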
@@ -211,6 +218,7 @@ static int thead_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct plat_stmmacenet_data *plat;
struct thead_dwmac *dwmac;
+ struct clk *apb_clk;
void __iomem *apb;
int ret;
@@ -224,6 +232,19 @@ static int thead_dwmac_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(plat),
"dt configuration failed\n");
+ /*
+ * The APB clock is essential for accessing glue registers. However,
+ * old devicetrees don't describe it correctly. We continue to probe
+ * and emit a warning if it isn't present.
+ */
+ apb_clk = devm_clk_get_enabled(&pdev->dev, "apb");
+ if (PTR_ERR(apb_clk) == -ENOENT)
+ dev_warn(&pdev->dev,
+ "cannot get apb clock, link may break after speed changes\n");
+ else if (IS_ERR(apb_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk),
+ "failed to get apb clock\n");
+
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
return -ENOMEM;
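The probe hunk above distinguishes -ENOENT ("clock not described by an old devicetree": warn and keep going) from any other error (fail the probe). A minimal sketch of that optional-clock pattern, with made-up names (example_get_optional_bus_clk and the "bus" clock id), might look like:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_optional_bus_clk(struct device *dev)
{
	struct clk *clk;

	/* devm_clk_get_enabled() both acquires and enables the clock */
	clk = devm_clk_get_enabled(dev, "bus");
	if (PTR_ERR(clk) == -ENOENT) {
		dev_warn(dev, "bus clock not described, running degraded\n");
		return 0;
	}
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get bus clock\n");

	return 0;
}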
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 6cadf8de4fdf..00e929bf280b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -49,6 +49,14 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
+static void dwxgmac2_update_caps(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.mbps_10_100)
+ priv->hw->link.caps &= ~(MAC_10 | MAC_100);
+ else if (!priv->dma_cap.half_duplex)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD);
+}
+
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -1424,6 +1432,7 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
+ .update_caps = dwxgmac2_update_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1532,8 +1541,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_1000FD | MAC_2500FD | MAC_5000FD |
- MAC_10000FD;
+ MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD | MAC_5000FD | MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 5dcc95bc0ad2..4d6bb995d8d8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -203,10 +203,6 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
}
writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
-
- /* Enable MTL RX overflow */
- value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
- writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -386,8 +382,11 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
struct dma_features *dma_cap)
{
+ struct stmmac_priv *priv;
u32 hw_cap;
+ priv = container_of(dma_cap, struct stmmac_priv, dma_cap);
+
/* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
@@ -410,6 +409,8 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
+ if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20)
+ dma_cap->mbps_10_100 = 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f1abf4242cd2..7b16d1207b80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2584,6 +2584,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2671,7 +2672,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdp_desc.len);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
@@ -4983,6 +4984,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
{
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -5034,7 +5036,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_set_desc_addr(priv, tx_desc, dma_addr);
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdpf->len);
tx_q->tx_count_frames++;
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 2a1c43316f46..d8c9fe1d98c4 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -621,7 +621,8 @@ exit:
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
- u32 val, cap, ret = 0;
+ u32 val, cap;
+ int ret = 0;
mutex_lock(&iep->ptp_clk_mutex);
@@ -685,10 +686,16 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
struct platform_device *pdev;
struct device_node *iep_np;
struct icss_iep *iep;
+ int ret;
iep_np = of_parse_phandle(np, "ti,iep", idx);
- if (!iep_np || !of_device_is_available(iep_np))
+ if (!iep_np)
+ return ERR_PTR(-ENODEV);
+
+ if (!of_device_is_available(iep_np)) {
+ of_node_put(iep_np);
return ERR_PTR(-ENODEV);
+ }
pdev = of_find_device_by_node(iep_np);
of_node_put(iep_np);
@@ -698,21 +705,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
return ERR_PTR(-EPROBE_DEFER);
iep = platform_get_drvdata(pdev);
- if (!iep)
- return ERR_PTR(-EPROBE_DEFER);
+ if (!iep) {
+ ret = -EPROBE_DEFER;
+ goto err_put_pdev;
+ }
device_lock(iep->dev);
if (iep->client_np) {
device_unlock(iep->dev);
dev_err(iep->dev, "IEP is already acquired by %s",
iep->client_np->name);
- return ERR_PTR(-EBUSY);
+ ret = -EBUSY;
+ goto err_put_pdev;
}
iep->client_np = np;
device_unlock(iep->dev);
- get_device(iep->dev);
return iep;
+
+err_put_pdev:
+ put_device(&pdev->dev);
+
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(icss_iep_get_idx);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 12f25cec6255..57e5f1c88f50 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -706,9 +706,9 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
struct page_pool *pool;
struct sk_buff *skb;
struct xdp_buff xdp;
+ int headroom, ret;
u32 *psdata;
void *pa;
- int ret;
*xdp_state = 0;
pool = rx_chn->pg_pool;
@@ -757,22 +757,23 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
*xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
- if (*xdp_state == ICSSG_XDP_PASS)
- skb = xdp_build_skb_from_buff(&xdp);
- else
+ if (*xdp_state != ICSSG_XDP_PASS)
goto requeue;
+ headroom = xdp.data - xdp.data_hard_start;
+ pkt_len = xdp.data_end - xdp.data;
} else {
- /* prepare skb and send to n/w stack */
- skb = napi_build_skb(pa, PAGE_SIZE);
+ headroom = PRUETH_HEADROOM;
}
+ /* prepare skb and send to n/w stack */
+ skb = napi_build_skb(pa, PAGE_SIZE);
if (!skb) {
ndev->stats.rx_dropped++;
page_pool_recycle_direct(pool, page);
goto requeue;
}
- skb_reserve(skb, PRUETH_HEADROOM);
+ skb_reserve(skb, headroom);
skb_put(skb, pkt_len);
skb->dev = ndev;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 2b973d6e2341..dadce6009791 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -50,6 +50,8 @@
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
+static void emac_adjust_link(struct net_device *ndev);
+
static int emac_get_tx_ts(struct prueth_emac *emac,
struct emac_tx_ts_response *rsp)
{
@@ -201,6 +203,44 @@ static void prueth_emac_stop(struct prueth *prueth)
}
}
+static void icssg_enable_fw_offload(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ int mac;
+
+ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+ emac = prueth->emac[mac];
+ if (prueth->is_hsr_offload_mode) {
+ if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
+ else
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
+ }
+
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
+ if (netif_running(emac->ndev)) {
+ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK,
+ true);
+ icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
+ BIT(emac->port_id) | DEFAULT_PORT_MASK,
+ BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
+ true);
+ if (prueth->is_hsr_offload_mode)
+ icssg_vtbl_modify(emac, DEFAULT_VID,
+ DEFAULT_PORT_MASK,
+ DEFAULT_UNTAG_MASK, true);
+ icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
+ if (prueth->is_switch_mode)
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+ }
+ }
+ }
+}
+
static int prueth_emac_common_start(struct prueth *prueth)
{
struct prueth_emac *emac;
@@ -229,6 +269,10 @@ static int prueth_emac_common_start(struct prueth *prueth)
ret = icssg_config(prueth, emac, slice);
if (ret)
goto disable_class;
+
+ mutex_lock(&emac->ndev->phydev->lock);
+ emac_adjust_link(emac->ndev);
+ mutex_unlock(&emac->ndev->phydev->lock);
}
ret = prueth_emac_start(prueth);
@@ -747,6 +791,7 @@ static int emac_ndo_open(struct net_device *ndev)
ret = prueth_emac_common_start(prueth);
if (ret)
goto free_rx_irq;
+ icssg_enable_fw_offload(prueth);
}
flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
@@ -1354,8 +1399,7 @@ static int prueth_emac_restart(struct prueth *prueth)
static void icssg_change_mode(struct prueth *prueth)
{
- struct prueth_emac *emac;
- int mac, ret;
+ int ret;
ret = prueth_emac_restart(prueth);
if (ret) {
@@ -1363,35 +1407,7 @@ static void icssg_change_mode(struct prueth *prueth)
return;
}
- for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
- emac = prueth->emac[mac];
- if (prueth->is_hsr_offload_mode) {
- if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
- else
- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
- }
-
- if (netif_running(emac->ndev)) {
- icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK,
- true);
- icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
- BIT(emac->port_id) | DEFAULT_PORT_MASK,
- BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
- true);
- if (prueth->is_hsr_offload_mode)
- icssg_vtbl_modify(emac, DEFAULT_VID,
- DEFAULT_PORT_MASK,
- DEFAULT_UNTAG_MASK, true);
- icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
- if (prueth->is_switch_mode)
- icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
- }
- }
+ icssg_enable_fw_offload(prueth);
}
static int prueth_netdevice_port_link(struct net_device *ndev,
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
index 5d48df7a849f..3023ea2732ef 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
@@ -192,7 +192,7 @@ void wx_setup_vfmrqc_vf(struct wx *wx)
u8 i, j;
/* Fill out hash function seeds */
- netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
+ netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE);
for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++)
wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 6011d7eae0c7..0d8a05fe541a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1160,6 +1160,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
struct axienet_local *lp = data;
struct sk_buff *skb;
u32 *app_metadata;
+ int i;
skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
skb = skbuf_dma->skb;
@@ -1178,7 +1179,10 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
u64_stats_add(&lp->rx_packets, 1);
u64_stats_add(&lp->rx_bytes, rx_len);
u64_stats_update_end(&lp->rx_stat_sync);
- axienet_rx_submit_desc(lp->ndev);
+
+ for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
+ RX_BUF_NUM_DEFAULT); i++)
+ axienet_rx_submit_desc(lp->ndev);
dma_async_issue_pending(lp->rx_chan);
}
@@ -1457,7 +1461,6 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
if (!skbuf_dma)
return;
- lp->rx_ring_head++;
skb = netdev_alloc_skb(ndev, lp->max_frm_size);
if (!skb)
return;
@@ -1482,6 +1485,7 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
skbuf_dma->desc = dma_rx_desc;
dma_rx_desc->callback_param = lp;
dma_rx_desc->callback_result = axienet_dma_rx_cb;
+ lp->rx_ring_head++;
dmaengine_submit(dma_rx_desc);
return;
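The callback change above refills the RX ring by submitting one descriptor per free slot, using CIRC_SPACE() from <linux/circ_buf.h> to count the space between rx_ring_head and rx_ring_tail. A small, self-contained illustration of the arithmetic (all values made up):

#include <linux/circ_buf.h>

/* For a 16-entry ring with head == 5 and tail == 2, CIRC_SPACE() reports
 * 12 free slots: one slot is always left empty so "full" and "empty"
 * remain distinguishable. */
static unsigned int example_free_rx_slots(void)
{
	unsigned int head = 5, tail = 2;

	return CIRC_SPACE(head, tail, 16); /* == 12 */
}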
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 0e0fe32d2da4..045c5177262e 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -138,7 +138,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
static inline int dev_is_ethdev(struct net_device *dev)
{
- return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+ return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
}
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index cb6f5482d203..7397c693f984 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -1061,6 +1061,7 @@ struct net_device_context {
struct net_device __rcu *vf_netdev;
struct netvsc_vf_pcpu_stats __percpu *vf_stats;
struct delayed_work vf_takeover;
+ struct delayed_work vfns_work;
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
@@ -1075,6 +1076,8 @@ struct net_device_context {
struct netvsc_device_info *saved_netvsc_dev_info;
};
+void netvsc_vfns_work(struct work_struct *w);
+
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
* packets. We can use ethtool to change UDP hash level when necessary.
*/
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 720104661d7f..60a4629fe6ba 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1812,6 +1812,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
/* Enable NAPI handler before init callbacks */
netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
+ napi_enable(&net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
+ &net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
+ &net_device->chan_table[0].napi);
/* Open the channel */
device->channel->next_request_id_callback = vmbus_next_request_id;
@@ -1831,12 +1836,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
/* Channel is opened */
netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
- napi_enable(&net_device->chan_table[0].napi);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
- &net_device->chan_table[0].napi);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
- &net_device->chan_table[0].napi);
-
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device, net_device, device_info);
if (ret != 0) {
@@ -1854,14 +1853,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
close:
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
- napi_disable(&net_device->chan_table[0].napi);
/* Now, we can close the channel safely */
vmbus_close(device->channel);
cleanup:
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+ napi_disable(&net_device->chan_table[0].napi);
netif_napi_del(&net_device->chan_table[0].napi);
cleanup2:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f44753756358..39c892e46cb0 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2522,6 +2522,7 @@ static int netvsc_probe(struct hv_device *dev,
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
+ INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work);
net_device_ctx->vf_stats
= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
@@ -2666,6 +2667,8 @@ static void netvsc_remove(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
+ cancel_delayed_work_sync(&ndev_ctx->vfns_work);
+
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev) {
cancel_work_sync(&nvdev->subchan_work);
@@ -2707,6 +2710,7 @@ static int netvsc_suspend(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
+ cancel_delayed_work_sync(&ndev_ctx->vfns_work);
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev == NULL) {
@@ -2800,6 +2804,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev)
}
}
+void netvsc_vfns_work(struct work_struct *w)
+{
+ struct net_device_context *ndev_ctx =
+ container_of(w, struct net_device_context, vfns_work.work);
+ struct net_device *ndev;
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&ndev_ctx->vfns_work, 1);
+ return;
+ }
+
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (!ndev)
+ goto out;
+
+ netvsc_event_set_vf_ns(ndev);
+
+out:
+ rtnl_unlock();
+}
+
/*
* On Hyper-V, every VF interface is matched with a corresponding
* synthetic interface. The synthetic interface is presented first
@@ -2810,10 +2835,12 @@ static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ struct net_device_context *ndev_ctx;
int ret = 0;
if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
- netvsc_event_set_vf_ns(event_dev);
+ ndev_ctx = netdev_priv(event_dev);
+ schedule_delayed_work(&ndev_ctx->vfns_work, 0);
return NOTIFY_DONE;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9e73959e61ee..c35f9685b6bf 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1252,17 +1252,26 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+ /* Enable napi before opening the vmbus channel to avoid races:
+ * data the host places on the host->guest ring could otherwise be
+ * missed if napi is not yet enabled.
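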
+ */
+ napi_enable(&nvchan->napi);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+ &nvchan->napi);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+ &nvchan->napi);
+
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
- if (ret == 0) {
- napi_enable(&nvchan->napi);
- netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
- &nvchan->napi);
- netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
- &nvchan->napi);
- } else {
+ if (ret != 0) {
netdev_notice(ndev, "sub channel open failed: %d\n", ret);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
+ napi_disable(&nvchan->napi);
}
if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 6782c2cbf542..01d219d3760c 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -5,7 +5,7 @@ config QCOM_IPA
depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
- select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_MDT_LOADER
select QCOM_SCM
select QCOM_QMI_HELPERS
help
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index a59bd215494c..a53e9e6f6cdf 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -37,8 +37,12 @@ static const char *ipa_version_string(struct ipa *ipa)
return "4.11";
case IPA_VERSION_5_0:
return "5.0";
+ case IPA_VERSION_5_1:
+ return "5.1";
+ case IPA_VERSION_5_5:
+ return "5.5";
default:
- return "0.0"; /* Won't happen (checked at probe time) */
+ return "0.0"; /* Should not happen */
}
}
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index b6e30bdf5325..7baab230008a 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -209,10 +209,9 @@ static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
if (ret)
return ret;
- if (!priv->clk)
+ rate = clk_get_rate(priv->clk);
+ if (!rate)
rate = 250000000;
- else
- rate = clk_get_rate(priv->clk);
div = (rate / (2 * priv->clk_freq)) - 1;
if (div & ~MDIO_CLK_DIV_MASK) {
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 39fe28af48b9..0178219f0db5 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -710,9 +710,13 @@ static struct nsim_rq *nsim_queue_alloc(void)
static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq)
{
hrtimer_cancel(&rq->napi_timer);
- local_bh_disable();
- dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
- local_bh_enable();
+
+ if (rq->skb_queue.qlen) {
+ local_bh_disable();
+ dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
+ local_bh_enable();
+ }
+
skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
kfree(rq);
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index fda2e27c1810..cad6ed3aa10b 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -91,6 +91,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
return -EINVAL;
+ gpiod_put(mdiodev->reset_gpio);
reset_control_put(mdiodev->reset_ctrl);
mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c
index 48dc4bf85125..f43973e73ea3 100644
--- a/drivers/net/phy/mdio_bus_provider.c
+++ b/drivers/net/phy/mdio_bus_provider.c
@@ -443,9 +443,6 @@ void mdiobus_unregister(struct mii_bus *bus)
if (!mdiodev)
continue;
- if (mdiodev->reset_gpio)
- gpiod_put(mdiodev->reset_gpio);
-
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
}
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 6a3d8a754eb8..2bfe314ef881 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -362,6 +362,13 @@ struct vsc85xx_hw_stat {
u16 mask;
};
+struct vsc8531_skb_cb {
+ u32 ns;
+};
+
+#define VSC8531_SKB_CB(skb) \
+ ((struct vsc8531_skb_cb *)((skb)->cb))
+
struct vsc8531_private {
int rate_magic;
u16 supp_led_modes;
@@ -410,6 +417,11 @@ struct vsc8531_private {
*/
struct mutex ts_lock;
struct mutex phc_lock;
+
+ /* list of skbs that were received and need timestamp information but
+ * haven't received it yet
+ */
+ struct sk_buff_head rx_skbs_list;
};
/* Shared structure between the PHYs of the same package.
@@ -469,6 +481,7 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev)
void vsc85xx_link_change_notify(struct phy_device *phydev);
void vsc8584_config_ts_intr(struct phy_device *phydev);
int vsc8584_ptp_init(struct phy_device *phydev);
+void vsc8584_ptp_deinit(struct phy_device *phydev);
int vsc8584_ptp_probe_once(struct phy_device *phydev);
int vsc8584_ptp_probe(struct phy_device *phydev);
irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev);
@@ -483,6 +496,9 @@ static inline int vsc8584_ptp_init(struct phy_device *phydev)
{
return 0;
}
+static inline void vsc8584_ptp_deinit(struct phy_device *phydev)
+{
+}
static inline int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
return 0;
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 7ed6522fb0ef..24c75903f535 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -2335,6 +2335,11 @@ static int vsc85xx_probe(struct phy_device *phydev)
return vsc85xx_dt_led_modes_get(phydev, default_mode);
}
+static void vsc85xx_remove(struct phy_device *phydev)
+{
+ vsc8584_ptp_deinit(phydev);
+}
+
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
@@ -2589,6 +2594,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8574_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
@@ -2614,6 +2620,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8574_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
@@ -2639,6 +2646,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
@@ -2662,6 +2670,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
@@ -2685,6 +2694,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index 6b800081eed5..72847320cb65 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -900,6 +900,7 @@ static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
get_unaligned_be32(ptp_multicast));
} else {
val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
+ val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
vsc85xx_ts_write_csr(phydev, blk,
MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
vsc85xx_ts_write_csr(phydev, blk,
@@ -1193,9 +1194,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
{
struct vsc8531_private *vsc8531 =
container_of(mii_ts, struct vsc8531_private, mii_ts);
- struct skb_shared_hwtstamps *shhwtstamps = NULL;
struct vsc85xx_ptphdr *ptphdr;
- struct timespec64 ts;
unsigned long ns;
if (!vsc8531->ptp->configured)
@@ -1205,27 +1204,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
type == PTP_CLASS_NONE)
return false;
- vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
-
ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
if (!ptphdr)
return false;
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
-
ns = ntohl(ptphdr->rsrvd2);
- /* nsec is in reserved field */
- if (ts.tv_nsec < ns)
- ts.tv_sec--;
+ VSC8531_SKB_CB(skb)->ns = ns;
+ skb_queue_tail(&vsc8531->rx_skbs_list, skb);
- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
- netif_rx(skb);
+ ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
return true;
}
+static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
+{
+ struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
+ struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ struct phy_device *phydev = ptp->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct sk_buff_head received;
+ struct sk_buff *rx_skb;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ __skb_queue_head_init(&received);
+ spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
+ skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
+ spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);
+
+ vsc85xx_gettime(info, &ts);
+ while ((rx_skb = __skb_dequeue(&received)) != NULL) {
+ shhwtstamps = skb_hwtstamps(rx_skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+
+ if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
+ ts.tv_sec--;
+
+ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
+ VSC8531_SKB_CB(rx_skb)->ns);
+ netif_rx(rx_skb);
+ }
+
+ return -1;
+}
+
static const struct ptp_clock_info vsc85xx_clk_caps = {
.owner = THIS_MODULE,
.name = "VSC85xx timer",
@@ -1239,6 +1263,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = {
.adjfine = &vsc85xx_adjfine,
.gettime64 = &vsc85xx_gettime,
.settime64 = &vsc85xx_settime,
+ .do_aux_work = &vsc85xx_do_aux_work,
};
static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
@@ -1273,7 +1298,6 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
static int __vsc8584_init_ptp(struct phy_device *phydev)
{
- struct vsc8531_private *vsc8531 = phydev->priv;
static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 };
u32 val;
@@ -1490,17 +1514,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev)
vsc85xx_ts_eth_cmp1_sig(phydev);
- vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
- vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
- vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
- vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
- phydev->mii_ts = &vsc8531->mii_ts;
-
- memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
-
- vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
- &phydev->mdio.dev);
- return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
+ return 0;
}
void vsc8584_config_ts_intr(struct phy_device *phydev)
@@ -1527,6 +1541,16 @@ int vsc8584_ptp_init(struct phy_device *phydev)
return 0;
}
+void vsc8584_ptp_deinit(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+
+ if (vsc8531->ptp->ptp_clock) {
+ ptp_clock_unregister(vsc8531->ptp->ptp_clock);
+ skb_queue_purge(&vsc8531->rx_skbs_list);
+ }
+}
+
irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
{
struct vsc8531_private *priv = phydev->priv;
@@ -1566,6 +1590,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
mutex_init(&vsc8531->phc_lock);
mutex_init(&vsc8531->ts_lock);
+ skb_queue_head_init(&vsc8531->rx_skbs_list);
/* Retrieve the shared load/save GPIO. Request it as non exclusive as
* the same GPIO can be requested by all the PHYs of the same package.
@@ -1586,7 +1611,16 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
vsc8531->ptp->phydev = phydev;
- return 0;
+ vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
+ vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
+ vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
+ vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
+ phydev->mii_ts = &vsc8531->mii_ts;
+
+ memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
+ vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
+ &phydev->mdio.dev);
+ return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
}
int vsc8584_ptp_probe_once(struct phy_device *phydev)
diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h
index da3465360e90..ae9ad925bfa8 100644
--- a/drivers/net/phy/mscc/mscc_ptp.h
+++ b/drivers/net/phy/mscc/mscc_ptp.h
@@ -98,6 +98,7 @@
#define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3)
#define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK GENMASK(22, 20)
#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST 0x400000
+#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST 0x200000
#define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR 0x100000
#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK GENMASK(17, 16)
#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST 0x020000
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 4c6d905f0a9f..87adb6508017 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1965,24 +1965,27 @@ static int nxp_c45_macsec_ability(struct phy_device *phydev)
return macsec_ability;
}
+static bool tja11xx_phy_id_compare(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
+{
+ u32 id = phydev->is_c45 ? phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] :
+ phydev->phy_id;
+
+ return phy_id_compare(id, phydrv->phy_id, phydrv->phy_id_mask);
+}
+
static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
- phydrv->phy_id_mask))
- return 0;
-
- return !nxp_c45_macsec_ability(phydev);
+ return tja11xx_phy_id_compare(phydev, phydrv) &&
+ !nxp_c45_macsec_ability(phydev);
}
static int tja11xx_macsec_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
- phydrv->phy_id_mask))
- return 0;
-
- return nxp_c45_macsec_ability(phydev);
+ return tja11xx_phy_id_compare(phydev, phydrv) &&
+ nxp_c45_macsec_ability(phydev);
}
static const struct nxp_c45_regmap tja1120_regmap = {
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 04e84ebb646c..070dc8c00835 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -427,7 +427,7 @@ static int qca807x_gpio(struct phy_device *phydev)
gc->get_direction = qca807x_gpio_get_direction;
gc->direction_output = qca807x_gpio_dir_out;
gc->get = qca807x_gpio_get;
- gc->set_rv = qca807x_gpio_set;
+ gc->set = qca807x_gpio_set;
return devm_gpiochip_add_data(dev, gc, priv);
}
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index b6489da5cfcd..48487149c225 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -785,6 +785,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* PHY_BASIC_FEATURES */
+ .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
/* basic functions */
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 8c98cbd4b06d..824c8dc4120b 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -33,6 +33,7 @@
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
+#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
@@ -1598,11 +1599,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
if (ppp->flags & SC_MULTILINK)
return -EOPNOTSUPP;
- if (list_empty(&ppp->channels))
+ pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist);
+ if (!pch)
+ return -ENODEV;
+
+ chan = READ_ONCE(pch->chan);
+ if (!chan)
return -ENODEV;
- pch = list_first_entry(&ppp->channels, struct channel, clist);
- chan = pch->chan;
if (!chan->ops->fill_forward_path)
return -EOPNOTSUPP;
@@ -2994,7 +2998,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
*/
down_write(&pch->chan_sem);
spin_lock_bh(&pch->downl);
- pch->chan = NULL;
+ WRITE_ONCE(pch->chan, NULL);
spin_unlock_bh(&pch->downl);
up_write(&pch->chan_sem);
ppp_disconnect_channel(pch);
@@ -3515,7 +3519,7 @@ ppp_connect_channel(struct channel *pch, int unit)
hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
if (hdrlen > ppp->dev->hard_header_len)
ppp->dev->hard_header_len = hdrlen;
- list_add_tail(&pch->clist, &ppp->channels);
+ list_add_tail_rcu(&pch->clist, &ppp->channels);
++ppp->n_channels;
pch->ppp = ppp;
refcount_inc(&ppp->file.refcnt);
@@ -3545,10 +3549,11 @@ ppp_disconnect_channel(struct channel *pch)
if (ppp) {
/* remove it from the ppp unit's list */
ppp_lock(ppp);
- list_del(&pch->clist);
+ list_del_rcu(&pch->clist);
if (--ppp->n_channels == 0)
wake_up_interruptible(&ppp->file.rwait);
ppp_unlock(ppp);
+ synchronize_net();
if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp);
err = 0;
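The ppp_generic.c conversion above moves the channel list to RCU: the writer adds and removes entries with list_add_tail_rcu()/list_del_rcu(), publishes pch->chan with WRITE_ONCE(), and waits in synchronize_net() before the entry can go away. A self-contained sketch of the matching reader side, using a hypothetical struct example_item rather than the driver's private types:

#include <linux/compiler.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct example_item {
	struct list_head node;
	void *payload;
};

/* Read the payload of the first list entry without taking the writer's lock;
 * valid only while the writer uses the RCU list helpers shown above. */
static void *example_first_payload(struct list_head *head)
{
	struct example_item *item;
	void *payload = NULL;

	rcu_read_lock();
	item = list_first_or_null_rcu(head, struct example_item, node);
	if (item)
		payload = READ_ONCE(item->payload); /* may be NULL after removal */
	rcu_read_unlock();

	return payload;
}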
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 5feaa70b5f47..90737cb71892 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -159,19 +159,17 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
int len;
unsigned char *data;
__u32 seq_recv;
-
-
struct rtable *rt;
struct net_device *tdev;
struct iphdr *iph;
int max_headroom;
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
- goto tx_error;
+ goto tx_drop;
rt = pptp_route_output(po, &fl4);
if (IS_ERR(rt))
- goto tx_error;
+ goto tx_drop;
tdev = rt->dst.dev;
@@ -179,16 +177,20 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
- if (!new_skb) {
- ip_rt_put(rt);
+
+ if (!new_skb)
goto tx_error;
- }
+
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
consume_skb(skb);
skb = new_skb;
}
+ /* Ensure we can safely access protocol field and LCP code */
+ if (!pskb_may_pull(skb, 3))
+ goto tx_error;
+
data = skb->data;
islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
@@ -262,6 +264,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
return 1;
tx_error:
+ ip_rt_put(rt);
+tx_drop:
kfree_skb(skb);
return 1;
}
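The new pskb_may_pull(skb, 3) above guarantees that the three bytes the islcp test dereferences are present in the linear head: two bytes of PPP protocol plus the LCP code byte. A hedged restatement of that test with a hypothetical helper name:

#include <linux/types.h>

static bool example_is_lcp_control(const unsigned char *data)
{
	/* data[0..1]: PPP protocol (0xc021 == PPP_LCP), data[2]: LCP code;
	 * codes 1..7 are the LCP control frames treated specially here. */
	return ((data[0] << 8) | data[1]) == 0xc021 &&
	       data[2] >= 1 && data[2] <= 7;
}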
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index 399ce9febda4..f4e91ba64a66 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -1041,6 +1041,10 @@ pd692x0_configure_managers(struct pd692x0_priv *priv, int nmanagers)
int pw_budget;
pw_budget = regulator_get_unclaimed_power_budget(supply);
+ if (!pw_budget)
+ /* Do nothing if no power budget */
+ continue;
+
/* Max power budget per manager */
if (pw_budget > 6000000)
pw_budget = 6000000;
@@ -1162,12 +1166,44 @@ pd692x0_write_ports_matrix(struct pd692x0_priv *priv,
return 0;
}
+static void pd692x0_of_put_managers(struct pd692x0_priv *priv,
+ struct pd692x0_manager *manager,
+ int nmanagers)
+{
+ int i, j;
+
+ for (i = 0; i < nmanagers; i++) {
+ for (j = 0; j < manager[i].nports; j++)
+ of_node_put(manager[i].port_node[j]);
+ of_node_put(manager[i].node);
+ }
+}
+
+static void pd692x0_managers_free_pw_budget(struct pd692x0_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < PD692X0_MAX_MANAGERS; i++) {
+ struct regulator *supply;
+
+ if (!priv->manager_reg[i] || !priv->manager_pw_budget[i])
+ continue;
+
+ supply = priv->manager_reg[i]->supply;
+ if (!supply)
+ continue;
+
+ regulator_free_power_budget(supply,
+ priv->manager_pw_budget[i]);
+ }
+}
+
static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
{
struct pd692x0_manager *manager __free(kfree) = NULL;
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS];
- int ret, i, j, nmanagers;
+ int ret, nmanagers;
/* Should we flash the port matrix */
if (priv->fw_state != PD692X0_FW_OK &&
@@ -1185,31 +1221,27 @@ static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
nmanagers = ret;
ret = pd692x0_register_managers_regulator(priv, manager, nmanagers);
if (ret)
- goto out;
+ goto err_of_managers;
ret = pd692x0_configure_managers(priv, nmanagers);
if (ret)
- goto out;
+ goto err_of_managers;
ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix);
if (ret)
- goto out;
+ goto err_managers_req_pw;
ret = pd692x0_write_ports_matrix(priv, port_matrix);
if (ret)
- goto out;
-
-out:
- for (i = 0; i < nmanagers; i++) {
- struct regulator *supply = priv->manager_reg[i]->supply;
+ goto err_managers_req_pw;
- regulator_free_power_budget(supply,
- priv->manager_pw_budget[i]);
+ pd692x0_of_put_managers(priv, manager, nmanagers);
+ return 0;
- for (j = 0; j < manager[i].nports; j++)
- of_node_put(manager[i].port_node[j]);
- of_node_put(manager[i].node);
- }
+err_managers_req_pw:
+ pd692x0_managers_free_pw_budget(priv);
+err_of_managers:
+ pd692x0_of_put_managers(priv, manager, nmanagers);
return ret;
}
@@ -1748,6 +1780,7 @@ static void pd692x0_i2c_remove(struct i2c_client *client)
{
struct pd692x0_priv *priv = i2c_get_clientdata(client);
+ pd692x0_managers_free_pw_budget(priv);
firmware_upload_unregister(priv->fwl);
}
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 9b0318fb50b5..792ddda1ad49 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -676,6 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
priv->mdio->read = &asix_mdio_bus_read;
priv->mdio->write = &asix_mdio_bus_write;
priv->mdio->name = "Asix MDIO Bus";
+ priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR));
/* mii bus name is usb-<usb bus number>-<usb device number> */
snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index ea0e5e276cd6..5d123df0a866 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -2087,6 +2087,13 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_info,
},
+ /* Intel modem (label from OEM reads Fibocom L850-GL) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* DisplayLink docking stations */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f5647ee0adde..11352d85475a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1355,12 +1355,16 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1034, 2)}, /* Telit LE910C4-WWX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1037, 4)}, /* Telit LE910C4-WWX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1038, 3)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1077, 2)}, /* Telit FN990A w/audio */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index a38ffbf4b3f0..511c4154cf74 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1120,6 +1120,9 @@ static void __handle_link_change(struct usbnet *dev)
if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
return;
+ if (test_and_clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags))
+ netif_carrier_on(dev->net);
+
if (!netif_carrier_ok(dev->net)) {
/* kill URBs for reading packets to save bus bandwidth */
unlink_urbs(dev, &dev->rxq);
@@ -1129,9 +1132,6 @@ static void __handle_link_change(struct usbnet *dev)
* tx queue is stopped by netcore after link becomes off
*/
} else {
- if (test_and_clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags))
- netif_carrier_on(dev->net);
-
/* submitting URBs for reading packets */
queue_work(system_bh_wq, &dev->bh_work);
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d14e6d602273..975bdc5dab84 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -5758,14 +5758,15 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
disable_rx_mode_work(vi);
flush_work(&vi->rx_mode_work);
- netif_tx_lock_bh(vi->dev);
- netif_device_detach(vi->dev);
- netif_tx_unlock_bh(vi->dev);
if (netif_running(vi->dev)) {
rtnl_lock();
virtnet_close(vi->dev);
rtnl_unlock();
}
+
+ netif_tx_lock_bh(vi->dev);
+ netif_device_detach(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
}
static int init_vqs(struct virtnet_info *vi);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 995a7207bdf8..f357a7ac70ac 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -81,7 +81,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
static __inline__ int dev_is_ethdev(struct net_device *dev)
{
- return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+ return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
}
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c
index eeecfa3d10c5..9656254c1c6c 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_trace.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c
@@ -51,8 +51,7 @@ static int ipc_trace_remove_buf_file_handler(struct dentry *dentry)
}
static int ipc_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
- void *prev_subbuf,
- size_t prev_padding)
+ void *prev_subbuf)
{
if (relay_buf_full(buf)) {
pr_err_ratelimited("Relay_buf full dropping traces");
diff --git a/drivers/net/wwan/t7xx/t7xx_port_trace.c b/drivers/net/wwan/t7xx/t7xx_port_trace.c
index 4ed8b4e29bf1..f16d3b01302c 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_trace.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c
@@ -33,7 +33,7 @@ static int t7xx_trace_remove_buf_file_handler(struct dentry *dentry)
}
static int t7xx_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
- void *prev_subbuf, size_t prev_padding)
+ void *prev_subbuf)
{
if (relay_buf_full(buf)) {
pr_err_ratelimited("Relay_buf full dropping traces");
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index f6ddbe553289..201fc8809a62 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -742,7 +742,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
"%s: qid %d failed to generate digest, error %d\n",
__func__, chap->qid, ret);
goto out_free_psk;
- };
+ }
dev_dbg(ctrl->device, "%s: generated digest %s\n",
__func__, digest);
ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
@@ -752,7 +752,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
"%s: qid %d failed to derive TLS psk, error %d\n",
__func__, chap->qid, ret);
goto out_free_digest;
- };
+ }
tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
ctrl->opts->host->nqn,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9d988f4cb87a..812c1565114f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3158,6 +3158,11 @@ static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
return ctrl->opts && ctrl->opts->discovery_nqn;
}
+static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->cntrltype == NVME_CTRL_ADMIN;
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -3670,6 +3675,17 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
if (ret)
return ret;
+ if (nvme_admin_ctrl(ctrl)) {
+ /*
+ * An admin controller has one admin queue, but no I/O queues.
+ * Override queue_count so it only creates an admin queue.
+ */
+ dev_dbg(ctrl->device,
+ "Subsystem %s is an administrative controller",
+ ctrl->subsys->subnqn);
+ ctrl->queue_count = 1;
+ }
+
ret = nvme_configure_apst(ctrl);
if (ret < 0)
return ret;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 08a5ea3e9383..3e12d4683ac7 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1363,7 +1363,7 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
* down, and the related FC-NVME Association ID and Connection IDs
* become invalid.
*
- * The behavior of the fc-nvme initiator is such that it's
+ * The behavior of the fc-nvme initiator is such that its
* understanding of the association and connections will implicitly
* be torn down. The action is implicit as it may be due to a loss of
* connectivity with the fc-nvme target, so you may never get a
@@ -2777,7 +2777,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
* as WRITE ZEROES will return a non-zero rq payload_bytes yet
* there is no actual payload to be transferred.
* To get it right, key data transmission on there being 1 or
- * more physical segments in the sg list. If there is no
+ * more physical segments in the sg list. If there are no
* physical segments, there is no payload.
*/
if (blk_rq_nr_phys_segments(rq)) {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 071efec25346..2c6d9506b172 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -935,7 +935,7 @@ static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
if (unlikely(iter->status))
- nvme_free_sgls(req);
+ nvme_unmap_data(req);
return iter->status;
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 9233f088fac8..c0fe8cfb7229 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2179,7 +2179,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
/*
* Only start IO queues for which we have allocated the tagset
- * and limitted it to the available queues. On reconnects, the
+ * and limited it to the available queues. On reconnects, the
* queue number might have changed.
*/
nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 884286f90688..0dd7bd99afa3 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1960,24 +1960,24 @@ static int __init nvmet_init(void)
if (!nvmet_wq)
goto out_free_buffered_work_queue;
- error = nvmet_init_discovery();
+ error = nvmet_init_debugfs();
if (error)
goto out_free_nvmet_work_queue;
- error = nvmet_init_debugfs();
+ error = nvmet_init_discovery();
if (error)
- goto out_exit_discovery;
+ goto out_exit_debugfs;
error = nvmet_init_configfs();
if (error)
- goto out_exit_debugfs;
+ goto out_exit_discovery;
return 0;
-out_exit_debugfs:
- nvmet_exit_debugfs();
out_exit_discovery:
nvmet_exit_discovery();
+out_exit_debugfs:
+ nvmet_exit_debugfs();
out_free_nvmet_work_queue:
destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
@@ -1992,8 +1992,8 @@ out_destroy_bvec_cache:
static void __exit nvmet_exit(void)
{
nvmet_exit_configfs();
- nvmet_exit_debugfs();
nvmet_exit_discovery();
+ nvmet_exit_debugfs();
ida_destroy(&cntlid_ida);
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 25598a46bf0d..a9b18c051f5b 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -459,7 +459,7 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
* down, and the related FC-NVME Association ID and Connection IDs
* become invalid.
*
- * The behavior of the fc-nvme target is such that it's
+ * The behavior of the fc-nvme target is such that its
* understanding of the association and connections will implicitly
* be torn down. The action is implicit as it may be due to a loss of
* connectivity with the fc-nvme host, so the target may never get a
@@ -2313,7 +2313,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
if (ret) {
/*
- * should be ok to set w/o lock as its in the thread of
+ * should be ok to set w/o lock as it's in the thread of
* execution (not an async timer routine) and doesn't
* contend with any clearing action
*/
@@ -2629,7 +2629,7 @@ transport_error:
* and the api of the FC LLDD which may issue a hw command to send the
* response, but the LLDD may not get the hw completion for that command
* and upcall the nvmet_fc layer before a new command may be
- * asynchronously received - its possible for a command to be received
+ * asynchronously received - it's possible for a command to be received
* before the LLDD and nvmet_fc have recycled the job structure. It gives
* the appearance of more commands received than fits in the sq.
* To alleviate this scenario, a temporary queue is maintained in the
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 3b4b0df8f879..0c361b1e3566 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -533,6 +533,8 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
case NVME_FEAT_HOST_ID:
req->execute = nvmet_execute_get_features;
return NVME_SC_SUCCESS;
+ case NVME_FEAT_FDP:
+ return nvmet_setup_passthru_command(req);
default:
return nvmet_passthru_get_set_features(req);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 67f61c67c167..0485e25ab797 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1731,7 +1731,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
* We registered an ib_client to handle device removal for queues,
* so we only need to handle the listening port cm_ids. In this case
* we nullify the priv to prevent double cm_id destruction and destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * the cm_id implicitly by returning a non-zero rc to the callout.
*/
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
struct nvmet_rdma_queue *queue)
@@ -1742,7 +1742,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
/*
* This is a queue cm_id. we have registered
* an ib_client to handle queues removal
- * so don't interfear and just return.
+ * so don't interfere and just return.
*/
return 0;
}
@@ -1760,7 +1760,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
/*
* We need to return 1 so that the core will destroy
- * it's own ID. What a great API design..
+ * its own ID. What a great API design..
*/
return 1;
}
diff --git a/drivers/of/device.c b/drivers/of/device.c
index c80426510ec2..f7e75e527667 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -17,8 +17,8 @@
/**
* of_match_device - Tell if a struct device matches an of_device_id list
- * @matches: array of of device match structures to search in
- * @dev: the of device structure to match against
+ * @matches: array of of_device_id match structures to search in
+ * @dev: the OF device structure to match against
*
* Used by a driver to check whether a platform_device present in the
* system is in its list of supported devices.
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 0aba760f7577..2eaaddcb0ec4 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -935,10 +935,15 @@ static int of_changeset_add_prop_helper(struct of_changeset *ocs,
return -ENOMEM;
ret = of_changeset_add_property(ocs, np, new_pp);
- if (ret)
+ if (ret) {
__of_prop_free(new_pp);
+ return ret;
+ }
- return ret;
+ new_pp->next = np->deadprops;
+ np->deadprops = new_pp;
+
+ return 0;
}
/**
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 77016c0cc296..2e9ea751ed2d 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -25,6 +25,7 @@
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
+#include <linux/dma-map-ops.h>
#include "of_private.h"
@@ -175,13 +176,17 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
- if (size &&
- early_init_dt_reserve_memory(base, size, nomap) == 0)
+ if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
+ /* Architecture specific contiguous memory fixup. */
+ if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
+ of_get_flat_dt_prop(node, "reusable", NULL))
+ dma_contiguous_early_fixup(base, size);
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
- else
+ } else {
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
+ }
len -= t_len;
}
@@ -472,7 +477,10 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
uname, (unsigned long)(size / SZ_1M));
return -ENOMEM;
}
-
+ /* Architecture specific contiguous memory fixup. */
+ if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
+ of_get_flat_dt_prop(node, "reusable", NULL))
+ dma_contiguous_early_fixup(base, size);
/* Save region in the reserved_mem array */
fdt_reserved_mem_save_node(node, uname, base, size);
return 0;
@@ -771,6 +779,7 @@ int of_reserved_mem_region_to_resource(const struct device_node *np,
return -EINVAL;
resource_set_range(res, rmem->base, rmem->size);
+ res->flags = IORESOURCE_MEM;
res->name = rmem->name;
return 0;
}
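
The of_reserved_mem_region_to_resource() hunk above now tags the returned
resource with IORESOURCE_MEM. A minimal consumer sketch, assuming an
index-based lookup into the node's "memory-region" list (only the function
name and the struct resource output come from the hunk; the index argument
and everything else here are illustrative):

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>

static int example_get_carveout(struct device *dev, struct resource *res)
{
	int ret;

	/* First entry referenced by the node's "memory-region" property;
	 * the index argument is an assumption, not taken from the patch. */
	ret = of_reserved_mem_region_to_resource(dev->of_node, 0, res);
	if (ret)
		return ret;

	/* With the hunk above, res->flags is IORESOURCE_MEM, so the region
	 * can be handed to generic resource helpers unchanged. */
	dev_dbg(dev, "reserved region %pR\n", res);
	return 0;
}
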
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 69048869ef1c..b77fd30bbfd9 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -341,7 +341,6 @@ void pci_bus_add_device(struct pci_dev *dev)
{
struct device_node *dn = dev->dev.of_node;
struct platform_device *pdev;
- int retval;
/*
* Can not put in pci_device_add yet because resources
@@ -372,9 +371,7 @@ void pci_bus_add_device(struct pci_dev *dev)
if (!dn || of_device_is_available(dn))
pci_dev_allow_binding(dev);
- retval = device_attach(&dev->dev);
- if (retval < 0 && retval != -EPROBE_DEFER)
- pci_warn(dev, "device attach failed (%d)\n", retval);
+ device_initial_probe(&dev->dev);
pci_dev_assign_added(dev);
}
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 886f6f43a895..41748d083b93 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -13,6 +13,7 @@ config PCI_AARDVARK
depends on OF
depends on PCI_MSI
select PCI_BRIDGE_EMUL
+ select IRQ_MSI_LIB
help
Add support for Aardvark 64bit PCIe Host Controller. This
controller is part of the South Bridge of the Marvel Armada
@@ -29,6 +30,7 @@ config PCIE_ALTERA_MSI
tristate "Altera PCIe MSI feature"
depends on PCIE_ALTERA
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want PCIe MSI support for the Altera FPGA.
This MSI driver supports Altera MSI to GIC controller IP.
@@ -62,6 +64,7 @@ config PCIE_BRCMSTB
BMIPS_GENERIC || COMPILE_TEST
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
default ARCH_BRCMSTB || BMIPS_GENERIC
help
Say Y here to enable PCIe host controller support for
@@ -98,6 +101,7 @@ config PCIE_IPROC_MSI
bool "Broadcom iProc PCIe MSI support"
depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
depends on PCI_MSI
+ select IRQ_MSI_LIB
default ARCH_BCM_IPROC
help
Say Y here if you want to enable MSI support for Broadcom's iProc
@@ -152,6 +156,7 @@ config PCI_IXP4XX
config VMD
depends on PCI_MSI && X86_64 && !UML
tristate "Intel Volume Management Device Driver"
+ select IRQ_MSI_LIB
help
Adds support for the Intel Volume Management Device (VMD). VMD is a
secondary PCI host bridge that allows PCI Express root ports,
@@ -191,6 +196,7 @@ config PCIE_MEDIATEK
depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
@@ -199,6 +205,7 @@ config PCIE_MEDIATEK_GEN3
tristate "MediaTek Gen3 PCIe controller"
depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed,
@@ -237,6 +244,7 @@ config PCIE_RCAR_HOST
bool "Renesas R-Car PCIe controller (host mode)"
depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want PCIe controller support on R-Car SoCs in host
mode.
@@ -315,6 +323,7 @@ config PCIE_XILINX
bool "Xilinx AXI PCIe controller"
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -324,6 +333,7 @@ config PCIE_XILINX_DMA_PL
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on PCI_MSI
select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
help
Say 'Y' here if you want kernel support for the Xilinx PL DMA
PCIe host bridge. The controller is a Soft IP which can act as
@@ -334,6 +344,7 @@ config PCIE_XILINX_NWL
bool "Xilinx NWL PCIe controller"
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say 'Y' here if you want kernel support for Xilinx
NWL PCIe controller. The controller can act as Root Port
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 8ab6cf70c18e..77c5a19b2ab1 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -353,7 +353,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
}
spin_unlock_irqrestore(&ep->lock, flags);
- offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
+ offset = CDNS_PCIE_NORMAL_MSG_ROUTING(PCIE_MSG_TYPE_R_LOCAL) |
CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
writel(0, ep->irq_cpu_addr + offset);
}
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index a149845d341a..1d81c4bf6c6d 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -250,26 +250,6 @@ struct cdns_pcie_rp_ib_bar {
struct cdns_pcie;
-enum cdns_pcie_msg_routing {
- /* Route to Root Complex */
- MSG_ROUTING_TO_RC,
-
- /* Use Address Routing */
- MSG_ROUTING_BY_ADDR,
-
- /* Use ID Routing */
- MSG_ROUTING_BY_ID,
-
- /* Route as Broadcast Message from Root Complex */
- MSG_ROUTING_BCAST,
-
- /* Local message; terminate at receiver (INTx messages) */
- MSG_ROUTING_LOCAL,
-
- /* Gather & route to Root Complex (PME_TO_Ack message) */
- MSG_ROUTING_GATHER,
-};
-
struct cdns_pcie_ops {
int (*start_link)(struct cdns_pcie *pcie);
void (*stop_link)(struct cdns_pcie *pcie);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index d9f0386396ed..ff6b6d9e18ec 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -19,6 +19,7 @@ config PCIE_DW_DEBUGFS
config PCIE_DW_HOST
bool
select PCIE_DW
+ select IRQ_MSI_LIB
config PCIE_DW_EP
bool
@@ -296,6 +297,7 @@ config PCIE_QCOM
select PCIE_DW_HOST
select CRC8
select PCIE_QCOM_COMMON
+ select PCI_HOST_COMMON
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -402,6 +404,16 @@ config PCIE_UNIPHIER_EP
Say Y here if you want PCIe endpoint controller support on
UniPhier SoCs. This driver supports Pro5 SoC.
+config PCIE_SOPHGO_DW
+ bool "Sophgo DesignWare PCIe controller (host mode)"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on PCI_MSI
+ depends on OF
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe host controller support on
+ Sophgo SoCs.
+
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
depends on ARCH_SPEAR13XX || COMPILE_TEST
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 908cb7f345db..6919d27798d1 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_SOPHGO_DW) += pcie-sophgo.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 5a38cfaf989b..80e48746bbaf 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -860,7 +860,6 @@ static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
{
reset_control_assert(imx_pcie->pciephy_reset);
- reset_control_assert(imx_pcie->apps_reset);
if (imx_pcie->drvdata->core_reset)
imx_pcie->drvdata->core_reset(imx_pcie, true);
@@ -872,7 +871,6 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
reset_control_deassert(imx_pcie->pciephy_reset);
- reset_control_deassert(imx_pcie->apps_reset);
if (imx_pcie->drvdata->core_reset)
imx_pcie->drvdata->core_reset(imx_pcie, false);
@@ -1063,7 +1061,10 @@ static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
data1 |= IMX95_PE0_LUT_VLD;
regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);
- data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ data2 = 0x7; /* In the EP mode, only 'Device ID' is required */
+ else
+ data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);
@@ -1096,18 +1097,14 @@ static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
}
}
-static int imx_pcie_enable_device(struct pci_host_bridge *bridge,
- struct pci_dev *pdev)
+static int imx_pcie_add_lut_by_rid(struct imx_pcie *imx_pcie, u32 rid)
{
- struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
- u32 sid_i, sid_m, rid = pci_dev_id(pdev);
+ struct device *dev = imx_pcie->pci->dev;
struct device_node *target;
- struct device *dev;
+ u32 sid_i, sid_m;
int err_i, err_m;
u32 sid = 0;
- dev = imx_pcie->pci->dev;
-
target = NULL;
err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
&target, &sid_i);
@@ -1182,6 +1179,13 @@ static int imx_pcie_enable_device(struct pci_host_bridge *bridge,
return imx_pcie_add_lut(imx_pcie, rid, sid);
}
+static int imx_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+
+ return imx_pcie_add_lut_by_rid(imx_pcie, pci_dev_id(pdev));
+}
+
static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
struct pci_dev *pdev)
{
@@ -1247,6 +1251,9 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
}
}
+ /* Make sure that PCIe LTSSM is cleared */
+ imx_pcie_ltssm_disable(dev);
+
ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret < 0) {
dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
@@ -1385,6 +1392,8 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
.msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_64K,
};
@@ -1465,9 +1474,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
pci_epc_init_notify(ep->epc);
- /* Start LTSSM. */
- imx_pcie_ltssm_enable(dev);
-
return 0;
}
@@ -1764,6 +1770,12 @@ static int imx_pcie_probe(struct platform_device *pdev)
ret = imx_add_pcie_ep(imx_pcie, pdev);
if (ret < 0)
return ret;
+
+ /*
+ * FIXME: Only a single Device (EPF) is supported due to the
+ * Endpoint framework limitation.
+ */
+ imx_pcie_add_lut_by_rid(imx_pcie, 0);
} else {
pci->pp.use_atu_msg = true;
ret = dw_pcie_host_init(&pci->pp);
@@ -1912,7 +1924,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.mode_off[1] = IOMUXC_GPR12,
.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- .epc_features = &imx8m_pcie_epc_features,
+ .epc_features = &imx8q_pcie_epc_features,
.init_phy = imx8mq_pcie_init_phy,
.enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
index c67601096c48..0fbf86c0b97e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-debugfs.c
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -814,14 +814,14 @@ static bool dw_pcie_ptm_context_update_visible(void *drvdata)
{
struct dw_pcie *pci = drvdata;
- return (pci->mode == DW_PCIE_EP_TYPE) ? true : false;
+ return pci->mode == DW_PCIE_EP_TYPE;
}
static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
{
struct dw_pcie *pci = drvdata;
- return (pci->mode == DW_PCIE_RC_TYPE) ? true : false;
+ return pci->mode == DW_PCIE_RC_TYPE;
}
static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
@@ -834,38 +834,38 @@ static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
{
struct dw_pcie *pci = drvdata;
- return (pci->mode == DW_PCIE_EP_TYPE) ? true : false;
+ return pci->mode == DW_PCIE_EP_TYPE;
}
static bool dw_pcie_ptm_t1_visible(void *drvdata)
{
struct dw_pcie *pci = drvdata;
- return (pci->mode == DW_PCIE_EP_TYPE) ? true : false;
+ return pci->mode == DW_PCIE_EP_TYPE;
}
static bool dw_pcie_ptm_t2_visible(void *drvdata)
{
struct dw_pcie *pci = drvdata;
- return (pci->mode == DW_PCIE_RC_TYPE) ? true : false;
+ return pci->mode == DW_PCIE_RC_TYPE;
}
static bool dw_pcie_ptm_t3_visible(void *drvdata)
{
struct dw_pcie *pci = drvdata;
- return (pci->mode == DW_PCIE_RC_TYPE) ? true : false;
+ return pci->mode == DW_PCIE_RC_TYPE;
}
static bool dw_pcie_ptm_t4_visible(void *drvdata)
{
struct dw_pcie *pci = drvdata;
- return (pci->mode == DW_PCIE_EP_TYPE) ? true : false;
+ return pci->mode == DW_PCIE_EP_TYPE;
}
-const struct pcie_ptm_ops dw_pcie_ptm_ops = {
+static const struct pcie_ptm_ops dw_pcie_ptm_ops = {
.check_capability = dw_pcie_ptm_check_capability,
.context_update_write = dw_pcie_ptm_context_update_write,
.context_update_read = dw_pcie_ptm_context_update_read,
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 906277f9ffaf..952f8594b501 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -10,6 +10,7 @@
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
@@ -23,35 +24,21 @@
static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;
-static void dw_msi_ack_irq(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void dw_msi_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void dw_msi_unmask_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip dw_pcie_msi_irq_chip = {
- .name = "PCI-MSI",
- .irq_ack = dw_msi_ack_irq,
- .irq_mask = dw_msi_mask_irq,
- .irq_unmask = dw_msi_unmask_irq,
-};
-
-static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
- MSI_FLAG_MULTI_PCI_MSI,
- .chip = &dw_pcie_msi_irq_chip,
+#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
+ .required_flags = DW_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "DW-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
/* MSI int handler */
@@ -227,30 +214,23 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_fwnode_handle(pci->dev->of_node);
-
- pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
- &dw_pcie_msi_domain_ops, pp);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pci->dev),
+ .ops = &dw_pcie_msi_domain_ops,
+ .size = pp->num_vectors,
+ .host_data = pp,
+ };
+
+ pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops);
if (!pp->irq_domain) {
dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
-
- pp->msi_domain = pci_msi_create_irq_domain(fwnode,
- &dw_pcie_msi_domain_info,
- pp->irq_domain);
- if (!pp->msi_domain) {
- dev_err(pci->dev, "Failed to create MSI domain\n");
- irq_domain_remove(pp->irq_domain);
- return -ENOMEM;
- }
-
return 0;
}
-static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
u32 ctrl;
@@ -260,22 +240,36 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
NULL, NULL);
}
- irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
}
+EXPORT_SYMBOL_GPL(dw_pcie_free_msi);
-static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
+ u32 ctrl, num_ctrls;
if (!pci_msi_enabled() || !pp->has_msi_ctrl)
return;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
+ }
+
/* Program the msi_data */
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
@@ -317,7 +311,7 @@ static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
return 0;
}
-static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
@@ -391,6 +385,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init);
static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
@@ -909,7 +904,7 @@ static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- u32 val, ctrl, num_ctrls;
+ u32 val;
int ret;
/*
@@ -920,20 +915,6 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
dw_pcie_setup(pci);
- if (pp->has_msi_ctrl) {
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- pp->irq_mask[ctrl]);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- ~0);
- }
- }
-
dw_pcie_msi_init(pp);
/* Setup RC BARs */
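
The host-side hunks above are a mechanical conversion from a per-driver
PCI-MSI irq_chip plus msi_domain_info to the shared MSI library: the driver
now only describes itself through msi_parent_ops and creates a single parent
domain. A condensed sketch of that pattern, assuming a driver-private inner
irq_domain_ops and host data (the example_* names are illustrative; the
calls and flags mirror the ones used in the hunk):

#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/property.h>

#define EXAMPLE_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS |	\
					 MSI_FLAG_USE_DEF_CHIP_OPS |	\
					 MSI_FLAG_NO_AFFINITY |		\
					 MSI_FLAG_PCI_MSI_MASK_PARENT)
#define EXAMPLE_MSI_FLAGS_SUPPORTED	(MSI_FLAG_MULTI_PCI_MSI |	\
					 MSI_FLAG_PCI_MSIX |		\
					 MSI_GENERIC_FLAGS_MASK)

static const struct msi_parent_ops example_msi_parent_ops = {
	.required_flags		= EXAMPLE_MSI_FLAGS_REQUIRED,
	.supported_flags	= EXAMPLE_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
	.prefix			= "EX-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static int example_allocate_domains(struct device *dev, unsigned int nvec,
				    const struct irq_domain_ops *inner_ops,
				    void *host_data,
				    struct irq_domain **out_domain)
{
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(dev),
		.ops		= inner_ops,
		.size		= nvec,
		.host_data	= host_data,
	};

	/* One call builds the inner domain and hooks it up as a PCI/MSI
	 * parent; no separate pci_msi_create_irq_domain() and no extra
	 * msi_domain pointer to track and tear down. */
	*out_domain = msi_create_parent_irq_domain(&info, &example_msi_parent_ops);
	return *out_domain ? 0 : -ENOMEM;
}

The same shape recurs below in the mobiveil and aardvark conversions.
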
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 4d794964fa0f..89aad5a08928 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -702,18 +702,26 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
int retries;
/* Check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ for (retries = 0; retries < PCIE_LINK_WAIT_MAX_RETRIES; retries++) {
if (dw_pcie_link_up(pci))
break;
- msleep(LINK_WAIT_SLEEP_MS);
+ msleep(PCIE_LINK_WAIT_SLEEP_MS);
}
- if (retries >= LINK_WAIT_MAX_RETRIES) {
+ if (retries >= PCIE_LINK_WAIT_MAX_RETRIES) {
dev_info(pci->dev, "Phy link never came up\n");
return -ETIMEDOUT;
}
+ /*
+ * As per PCIe r6.0, sec 6.6.1, for a Downstream Port that supports Link
+ * speeds greater than 5.0 GT/s, software must wait a minimum of 100 ms
+ * after Link training completes before sending a Configuration Request.
+ */
+ if (pci->max_link_speed > 2)
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index ce9e18554e42..00f52d472dcd 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -62,10 +62,6 @@
#define dw_pcie_cap_set(_pci, _cap) \
set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_SLEEP_MS 90
-
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 9
@@ -417,7 +413,6 @@ struct dw_pcie_rp {
const struct dw_pcie_host_ops *ops;
int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
- struct irq_domain *msi_domain;
dma_addr_t msi_data;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
@@ -759,6 +754,9 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
int dw_pcie_suspend_noirq(struct dw_pcie *pci);
int dw_pcie_resume_noirq(struct dw_pcie *pci);
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+void dw_pcie_msi_init(struct dw_pcie_rp *pp);
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_free_msi(struct dw_pcie_rp *pp);
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
int dw_pcie_host_init(struct dw_pcie_rp *pp);
void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
@@ -781,6 +779,17 @@ static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
return IRQ_NONE;
}
+static inline void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+{ }
+
+static inline int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ return -ENODEV;
+}
+
+static inline void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+{ }
+
static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 93171a392879..b5f5eee5a50e 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -58,6 +58,8 @@
/* Hot Reset Control Register */
#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_LTSSM_APP_DLY2_EN BIT(1)
+#define PCIE_LTSSM_APP_DLY2_DONE BIT(3)
#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
/* LTSSM Status Register */
@@ -458,6 +460,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
if (reg & PCIE_RDLH_LINK_UP_CHGED) {
if (rockchip_pcie_link_up(pci)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
/* Rescan the bus to enumerate endpoint devices */
pci_lock_rescan_remove();
@@ -474,7 +477,7 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
struct rockchip_pcie *rockchip = arg;
struct dw_pcie *pci = &rockchip->pci;
struct device *dev = pci->dev;
- u32 reg;
+ u32 reg, val;
reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
@@ -485,6 +488,10 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
if (reg & PCIE_LINK_REQ_RST_NOT_INT) {
dev_dbg(dev, "hot reset or link-down reset\n");
dw_pcie_ep_linkdown(&pci->ep);
+ /* Stop delaying link training. */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_APP_DLY2_DONE);
+ rockchip_pcie_writel_apb(rockchip, val,
+ PCIE_CLIENT_HOT_RESET_CTRL);
}
if (reg & PCIE_RDLH_LINK_UP_CHGED) {
@@ -566,8 +573,11 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
return ret;
}
- /* LTSSM enable control mode */
- val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ /*
+ * LTSSM enable control mode, and automatically delay link training on
+ * hot reset/link-down reset.
+ */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE,
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index c789e3f85655..294babe1816e 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -21,7 +21,9 @@
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
@@ -34,6 +36,7 @@
#include <linux/units.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#include "pcie-designware.h"
#include "pcie-qcom-common.h"
@@ -255,13 +258,21 @@ struct qcom_pcie_ops {
* @ops: qcom PCIe ops structure
* @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
* snooping
+ * @firmware_managed: Set if the Root Complex is firmware managed
*/
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
bool override_no_snoop;
+ bool firmware_managed;
bool no_l0s;
};
+struct qcom_pcie_port {
+ struct list_head list;
+ struct gpio_desc *reset;
+ struct phy *phy;
+};
+
struct qcom_pcie {
struct dw_pcie *pci;
void __iomem *parf; /* DT parf */
@@ -274,24 +285,37 @@ struct qcom_pcie {
struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
struct dentry *debugfs;
+ struct list_head ports;
bool suspended;
bool use_pm_opp;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
-static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
{
- gpiod_set_value_cansleep(pcie->reset, 1);
+ struct qcom_pcie_port *port;
+ int val = assert ? 1 : 0;
+
+ if (list_empty(&pcie->ports))
+ gpiod_set_value_cansleep(pcie->reset, val);
+ else
+ list_for_each_entry(port, &pcie->ports, list)
+ gpiod_set_value_cansleep(port->reset, val);
+
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ qcom_perst_assert(pcie, true);
+}
+
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
/* Ensure that PERST has been asserted for at least 100 ms */
msleep(PCIE_T_PVPERL_MS);
- gpiod_set_value_cansleep(pcie->reset, 0);
- usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+ qcom_perst_assert(pcie, false);
}
static int qcom_pcie_start_link(struct dw_pcie *pci)
@@ -1229,6 +1253,59 @@ static bool qcom_pcie_link_up(struct dw_pcie *pci)
return val & PCI_EXP_LNKSTA_DLLLA;
}
+static void qcom_pcie_phy_exit(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+
+ if (list_empty(&pcie->ports))
+ phy_exit(pcie->phy);
+ else
+ list_for_each_entry(port, &pcie->ports, list)
+ phy_exit(port->phy);
+}
+
+static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+
+ if (list_empty(&pcie->ports)) {
+ phy_power_off(pcie->phy);
+ } else {
+ list_for_each_entry(port, &pcie->ports, list)
+ phy_power_off(port->phy);
+ }
+}
+
+static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+ int ret = 0;
+
+ if (list_empty(&pcie->ports)) {
+ ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(pcie->phy);
+ if (ret)
+ return ret;
+ } else {
+ list_for_each_entry(port, &pcie->ports, list) {
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ qcom_pcie_phy_power_off(pcie);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -1241,11 +1318,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
- ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
- if (ret)
- goto err_deinit;
-
- ret = phy_power_on(pcie->phy);
+ ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
@@ -1268,7 +1341,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
err_assert_reset:
qcom_ep_reset_assert(pcie);
err_disable_phy:
- phy_power_off(pcie->phy);
+ qcom_pcie_phy_power_off(pcie);
err_deinit:
pcie->cfg->ops->deinit(pcie);
@@ -1281,7 +1354,7 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
qcom_ep_reset_assert(pcie);
- phy_power_off(pcie->phy);
+ qcom_pcie_phy_power_off(pcie);
pcie->cfg->ops->deinit(pcie);
}
@@ -1426,6 +1499,10 @@ static const struct qcom_pcie_cfg cfg_sc8280xp = {
.no_l0s = true,
};
+static const struct qcom_pcie_cfg cfg_fw_managed = {
+ .firmware_managed = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1564,6 +1641,7 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
/* Rescan the bus to enumerate endpoint devices */
pci_lock_rescan_remove();
@@ -1579,10 +1657,128 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
+static void qcom_pci_free_msi(void *ptr)
+{
+ struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;
+
+ if (pp && pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+}
+
+static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ int ret;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pp = &pci->pp;
+ pci->dbi_base = cfg->win;
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret)
+ return ret;
+
+ pp->has_msi_ctrl = true;
+ dw_pcie_msi_init(pp);
+
+ return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
+}
+
+static const struct pci_ecam_ops pci_qcom_ecam_ops = {
+ .init = qcom_pcie_ecam_host_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
+ int ret;
+
+ reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
+ "reset", GPIOD_OUT_HIGH, "PERST#");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = phy_init(phy);
+ if (ret)
+ return ret;
+
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port, *tmp;
+ int ret = -ENOENT;
+
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ ret = qcom_pcie_parse_port(pcie, of_port);
+ if (ret)
+ goto err_port_del;
+ }
+
+ return ret;
+
+err_port_del:
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ list_del(&port->list);
+
+ return ret;
+}
+
+static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ int ret;
+
+ pcie->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
+ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
unsigned long max_freq = ULONG_MAX;
+ struct qcom_pcie_port *port, *tmp;
struct device *dev = &pdev->dev;
struct dev_pm_opp *opp;
struct qcom_pcie *pcie;
@@ -1593,24 +1789,64 @@ static int qcom_pcie_probe(struct platform_device *pdev)
char *name;
pcie_cfg = of_device_get_match_data(dev);
- if (!pcie_cfg || !pcie_cfg->ops) {
- dev_err(dev, "Invalid platform data\n");
- return -EINVAL;
+ if (!pcie_cfg) {
+ dev_err(dev, "No platform data\n");
+ return -ENODATA;
}
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
-
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
- return -ENOMEM;
+ if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
+ dev_err(dev, "No platform ops\n");
+ return -ENODATA;
+ }
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_pm_runtime_put;
+ if (pcie_cfg->firmware_managed) {
+ struct pci_host_bridge *bridge;
+ struct pci_config_window *cfg;
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ /* Parse and map our ECAM configuration space area */
+ cfg = pci_host_common_ecam_create(dev, bridge,
+ &pci_qcom_ecam_ops);
+ if (IS_ERR(cfg)) {
+ ret = PTR_ERR(cfg);
+ goto err_pm_runtime_put;
+ }
+
+ bridge->sysdata = cfg;
+ bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
+ bridge->msi_domain = true;
+
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ return 0;
+ }
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ INIT_LIST_HEAD(&pcie->ports);
+
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1619,12 +1855,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->cfg = pcie_cfg;
- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
- if (IS_ERR(pcie->reset)) {
- ret = PTR_ERR(pcie->reset);
- goto err_pm_runtime_put;
- }
-
pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
if (IS_ERR(pcie->parf)) {
ret = PTR_ERR(pcie->parf);
@@ -1647,12 +1877,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
}
}
- pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy)) {
- ret = PTR_ERR(pcie->phy);
- goto err_pm_runtime_put;
- }
-
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(dev);
if (ret && ret != -ENODEV) {
@@ -1699,9 +1923,23 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->ops = &qcom_pcie_dw_ops;
- ret = phy_init(pcie->phy);
- if (ret)
- goto err_pm_runtime_put;
+ ret = qcom_pcie_parse_ports(pcie);
+ if (ret) {
+ if (ret != -ENOENT) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to parse Root Port: %d\n", ret);
+ goto err_pm_runtime_put;
+ }
+
+ /*
+ * In the case of properties not populated in the Root Port node,
+ * fall back to the legacy method of parsing the Host Bridge
+ * node. This is to maintain DT backwards compatibility.
+ */
+ ret = qcom_pcie_parse_legacy_binding(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
platform_set_drvdata(pdev, pcie);
@@ -1746,7 +1984,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)
err_host_deinit:
dw_pcie_host_deinit(pp);
err_phy_exit:
- phy_exit(pcie->phy);
+ qcom_pcie_phy_exit(pcie);
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ list_del(&port->list);
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1756,9 +1996,13 @@ err_pm_runtime_put:
static int qcom_pcie_suspend_noirq(struct device *dev)
{
- struct qcom_pcie *pcie = dev_get_drvdata(dev);
+ struct qcom_pcie *pcie;
int ret = 0;
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
+
/*
* Set minimum bandwidth required to keep data path functional during
* suspend.
@@ -1812,9 +2056,13 @@ static int qcom_pcie_suspend_noirq(struct device *dev)
static int qcom_pcie_resume_noirq(struct device *dev)
{
- struct qcom_pcie *pcie = dev_get_drvdata(dev);
+ struct qcom_pcie *pcie;
int ret;
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
+
if (pm_suspend_target_state != PM_SUSPEND_MEM) {
ret = icc_enable(pcie->icc_cpu);
if (ret) {
@@ -1849,6 +2097,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
diff --git a/drivers/pci/controller/dwc/pcie-sophgo.c b/drivers/pci/controller/dwc/pcie-sophgo.c
new file mode 100644
index 000000000000..ad4baaa34ffa
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-sophgo.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo DesignWare based PCIe host controller driver
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+
+#define to_sophgo_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_INT_SIGNAL 0xc48
+#define PCIE_INT_EN 0xca0
+
+#define PCIE_INT_SIGNAL_INTX GENMASK(8, 5)
+
+#define PCIE_INT_EN_INTX GENMASK(4, 1)
+#define PCIE_INT_EN_INT_MSI BIT(5)
+
+struct sophgo_pcie {
+ struct dw_pcie pci;
+ void __iomem *app_base;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct irq_domain *irq_domain;
+};
+
+static int sophgo_pcie_readl_app(struct sophgo_pcie *sophgo, u32 reg)
+{
+ return readl_relaxed(sophgo->app_base + reg);
+}
+
+static void sophgo_pcie_writel_app(struct sophgo_pcie *sophgo, u32 val, u32 reg)
+{
+ writel_relaxed(val, sophgo->app_base + reg);
+}
+
+static void sophgo_pcie_intx_handler(struct irq_desc *desc)
+{
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long hwirq, reg;
+
+ chained_irq_enter(chip, desc);
+
+ reg = sophgo_pcie_readl_app(sophgo, PCIE_INT_SIGNAL);
+ reg = FIELD_GET(PCIE_INT_SIGNAL_INTX, reg);
+
+ for_each_set_bit(hwirq, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(sophgo->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void sophgo_intx_irq_mask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val &= ~FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq));
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+};
+
+static void sophgo_intx_irq_unmask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val |= FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq));
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+};
+
+static struct irq_chip sophgo_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = sophgo_intx_irq_mask,
+ .irq_unmask = sophgo_intx_irq_unmask,
+};
+
+static int sophgo_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sophgo_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = sophgo_pcie_intx_map,
+};
+
+static int sophgo_pcie_init_irq_domain(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ struct device *dev = sophgo->pci.dev;
+ struct fwnode_handle *intc;
+ int irq;
+
+ intc = device_get_named_child_node(dev, "interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -ENODEV;
+ }
+
+ irq = fwnode_irq_get(intc, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to get INTx irq number\n");
+ fwnode_handle_put(intc);
+ return irq;
+ }
+
+ sophgo->irq_domain = irq_domain_create_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, pp);
+ fwnode_handle_put(intc);
+ if (!sophgo->irq_domain) {
+ dev_err(dev, "failed to get a INTx irq domain\n");
+ return -EINVAL;
+ }
+
+ return irq;
+}
+
+static void sophgo_pcie_msi_enable(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val |= PCIE_INT_EN_INT_MSI;
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static int sophgo_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ int irq;
+
+ irq = sophgo_pcie_init_irq_domain(pp);
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, sophgo_pcie_intx_handler, pp);
+
+ sophgo_pcie_msi_enable(pp);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops sophgo_pcie_host_ops = {
+ .init = sophgo_pcie_host_init,
+};
+
+static int sophgo_pcie_clk_init(struct sophgo_pcie *sophgo)
+{
+ struct device *dev = sophgo->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all_enabled(dev, &sophgo->clks);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
+
+ sophgo->clk_cnt = ret;
+
+ return 0;
+}
+
+static int sophgo_pcie_resource_get(struct platform_device *pdev,
+ struct sophgo_pcie *sophgo)
+{
+ sophgo->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(sophgo->app_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sophgo->app_base),
+ "failed to map app registers\n");
+
+ return 0;
+}
+
+static int sophgo_pcie_configure_rc(struct sophgo_pcie *sophgo)
+{
+ struct dw_pcie_rp *pp;
+
+ pp = &sophgo->pci.pp;
+ pp->ops = &sophgo_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int sophgo_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sophgo_pcie *sophgo;
+ int ret;
+
+ sophgo = devm_kzalloc(dev, sizeof(*sophgo), GFP_KERNEL);
+ if (!sophgo)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sophgo);
+
+ sophgo->pci.dev = dev;
+
+ ret = sophgo_pcie_resource_get(pdev, sophgo);
+ if (ret)
+ return ret;
+
+ ret = sophgo_pcie_clk_init(sophgo);
+ if (ret)
+ return ret;
+
+ return sophgo_pcie_configure_rc(sophgo);
+}
+
+static const struct of_device_id sophgo_pcie_of_match[] = {
+ { .compatible = "sophgo,sg2044-pcie" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sophgo_pcie_of_match);
+
+static struct platform_driver sophgo_pcie_driver = {
+ .driver = {
+ .name = "sophgo-pcie",
+ .of_match_table = sophgo_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = sophgo_pcie_probe,
+};
+builtin_platform_driver(sophgo_pcie_driver);
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
index 58ce034f701a..c50c4625937f 100644
--- a/drivers/pci/controller/mobiveil/Kconfig
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -9,6 +9,7 @@ config PCIE_MOBIVEIL
config PCIE_MOBIVEIL_HOST
bool
depends on PCI_MSI
+ select IRQ_MSI_LIB
select PCIE_MOBIVEIL
config PCIE_LAYERSCAPE_GEN4
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index a600f46ee3c3..dbc72c73fd0a 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
@@ -353,16 +354,19 @@ static const struct irq_domain_ops intx_domain_ops = {
.map = mobiveil_pcie_intx_map,
};
-static struct irq_chip mobiveil_msi_irq_chip = {
- .name = "Mobiveil PCIe MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define MOBIVEIL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define MOBIVEIL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
-static struct msi_domain_info mobiveil_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &mobiveil_msi_irq_chip,
+static const struct msi_parent_ops mobiveil_msi_parent_ops = {
+ .required_flags = MOBIVEIL_MSI_FLAGS_REQUIRED,
+ .supported_flags = MOBIVEIL_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "Mobiveil-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -435,23 +439,20 @@ static const struct irq_domain_ops msi_domain_ops = {
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_init(&msi->lock);
- msi->dev_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, pcie);
- if (!msi->dev_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &mobiveil_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &msi_domain_ops,
+ .host_data = pcie,
+ .size = msi->num_of_vectors,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &mobiveil_msi_parent_ops);
+ if (!msi->dev_domain) {
dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
return -ENOMEM;
}
@@ -464,9 +465,8 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
struct mobiveil_root_port *rp = &pcie->rp;
/* setup INTx */
- rp->intx_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), PCI_NUM_INTX,
- &intx_domain_ops, pcie);
-
+ rp->intx_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX, &intx_domain_ops,
+ pcie);
if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index 662f17f9bf65..7246de6a7176 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -135,7 +135,6 @@
struct mobiveil_msi { /* MSI information */
struct mutex lock; /* protect bitmap variable */
- struct irq_domain *msi_domain;
struct irq_domain *dev_domain;
phys_addr_t msi_pages_phys;
int num_of_vectors;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 7bac64533b14..e34bea1ff0ac 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -13,6 +13,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -278,7 +279,6 @@ struct advk_pcie {
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
raw_spinlock_t irq_lock;
- struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
raw_spinlock_t msi_irq_lock;
DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
@@ -1332,18 +1332,6 @@ static void advk_msi_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}
-static void advk_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void advk_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
static struct irq_chip advk_msi_bottom_irq_chip = {
.name = "MSI",
.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
@@ -1436,17 +1424,20 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static struct irq_chip advk_msi_irq_chip = {
- .name = "advk-MSI",
- .irq_mask = advk_msi_top_irq_mask,
- .irq_unmask = advk_msi_top_irq_unmask,
-};
-
-static struct msi_domain_info advk_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
- MSI_FLAG_PCI_MSIX,
- .chip = &advk_msi_irq_chip,
+#define ADVK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_NO_AFFINITY)
+#define ADVK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops advk_msi_parent_ops = {
+ .required_flags = ADVK_MSI_FLAGS_REQUIRED,
+ .supported_flags = ADVK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "advk-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
@@ -1456,26 +1447,22 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- pcie->msi_inner_domain = irq_domain_create_linear(NULL, MSI_IRQ_NUM,
- &advk_msi_domain_ops, pcie);
- if (!pcie->msi_inner_domain)
- return -ENOMEM;
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &advk_msi_domain_ops,
+ .host_data = pcie,
+ .size = MSI_IRQ_NUM,
+ };
- pcie->msi_domain =
- pci_msi_create_irq_domain(dev_fwnode(dev),
- &advk_msi_domain_info,
- pcie->msi_inner_domain);
- if (!pcie->msi_domain) {
- irq_domain_remove(pcie->msi_inner_domain);
+ pcie->msi_inner_domain = msi_create_parent_irq_domain(&info, &advk_msi_parent_ops);
+ if (!pcie->msi_inner_domain)
return -ENOMEM;
- }
return 0;
}
static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
- irq_domain_remove(pcie->msi_domain);
irq_domain_remove(pcie->msi_inner_domain);
}
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index b37052863847..810d1c8de24e 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -22,7 +22,7 @@ static void gen_pci_unmap_cfg(void *ptr)
pci_ecam_free((struct pci_config_window *)ptr);
}
-static struct pci_config_window *gen_pci_init(struct device *dev,
+struct pci_config_window *pci_host_common_ecam_create(struct device *dev,
struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops)
{
int err;
@@ -50,6 +50,7 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
return cfg;
}
+EXPORT_SYMBOL_GPL(pci_host_common_ecam_create);
int pci_host_common_init(struct platform_device *pdev,
const struct pci_ecam_ops *ops)
@@ -67,7 +68,7 @@ int pci_host_common_init(struct platform_device *pdev,
platform_set_drvdata(pdev, bridge);
/* Parse and map our Configuration Space windows */
- cfg = gen_pci_init(dev, bridge, ops);
+ cfg = pci_host_common_ecam_create(dev, bridge, ops);
if (IS_ERR(cfg))
return PTR_ERR(cfg);
diff --git a/drivers/pci/controller/pci-host-common.h b/drivers/pci/controller/pci-host-common.h
index 65bd9e032353..51c35ec0cf37 100644
--- a/drivers/pci/controller/pci-host-common.h
+++ b/drivers/pci/controller/pci-host-common.h
@@ -17,4 +17,6 @@ int pci_host_common_init(struct platform_device *pdev,
const struct pci_ecam_ops *ops);
void pci_host_common_remove(struct platform_device *pdev);
+struct pci_config_window *pci_host_common_ecam_create(struct device *dev,
+ struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops);
#endif
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index a4a2bac4f4b2..755651f33811 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1353,11 +1353,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
goto skip;
}
- ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
- if (ret < 0) {
- clk_put(port->clk);
+ ret = devm_add_action_or_reset(dev, mvebu_pcie_port_clk_put, port);
+ if (ret < 0)
goto err;
- }
return 1;
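
The mvebu change above is the usual devm_add_action() to
devm_add_action_or_reset() conversion: on registration failure the helper
runs the action itself, so the manual clk_put() in the error path goes away.
A generic sketch of that pattern (the names are illustrative, not from the
patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>

static void example_clk_put(void *data)
{
	clk_put(data);
}

static int example_get_port_clk(struct device *dev, struct device_node *np,
				struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* If registration fails, example_clk_put(clk) is invoked here, so
	 * no explicit clk_put() is needed on the error path. */
	ret = devm_add_action_or_reset(dev, example_clk_put, clk);
	if (ret)
		return ret;

	*out = clk;
	return 0;
}
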
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index b05ec8b0bb93..0a37a3f1809c 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -6,6 +6,7 @@
* Author: Tanmay Inamdar <tinamdar@apm.com>
* Duc Dang <dhdang@apm.com>
*/
+#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -22,31 +23,49 @@
#define IDX_PER_GROUP 8
#define IRQS_PER_IDX 16
#define NR_HW_IRQS 16
-#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+#define NR_MSI_BITS (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+#define NR_MSI_VEC (NR_MSI_BITS / num_possible_cpus())
-struct xgene_msi_group {
- struct xgene_msi *msi;
- int gic_irq;
- u32 msi_grp;
-};
+#define MSI_GROUP_MASK GENMASK(22, 19)
+#define MSI_INDEX_MASK GENMASK(18, 16)
+#define MSI_INTR_MASK GENMASK(19, 16)
+
+#define MSInRx_HWIRQ_MASK GENMASK(6, 4)
+#define DATA_HWIRQ_MASK GENMASK(3, 0)
struct xgene_msi {
- struct device_node *node;
struct irq_domain *inner_domain;
u64 msi_addr;
void __iomem *msi_regs;
unsigned long *bitmap;
struct mutex bitmap_lock;
- struct xgene_msi_group *msi_groups;
- int num_cpus;
+ unsigned int gic_irq[NR_HW_IRQS];
};
/* Global data */
-static struct xgene_msi xgene_msi_ctrl;
+static struct xgene_msi *xgene_msi_ctrl;
/*
- * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
- * n is group number (0..F), x is index of registers in each group (0..7)
+ * X-Gene v1 has 16 frames of MSI termination registers MSInIRx, where n is
+ * frame number (0..15), x is index of registers in each frame (0..7). Each
+ * 32b register is at the beginning of a 64kB region, each frame occupying
+ * 512kB (and the whole thing 8MB of PA space).
+ *
+ * Each register supports 16 MSI vectors (0..15) to generate interrupts. A
+ * write to the MSInIRx from the PCI side generates an interrupt. A read
+ * from the MSInIRx on the CPU side returns a bitmap of the pending MSIs in
+ * the lower 16 bits. A side effect of this read is that all pending
+ * interrupts are acknowledged and cleared.
+ *
+ * Additionally, each MSI termination frame has 1 MSIINTn register (n is
+ * 0..15) to indicate the MSI pending status caused by any of its 8
+ * termination registers, reported as a bitmap in the lower 8 bits. Each 32b
+ * register is at the beginning of a 64kB region (and overall occupying an
+ * extra 1MB).
+ *
+ * There is one GIC IRQ assigned for each MSI termination frame, 16 in
+ * total.
+ *
* The register layout is as follows:
* MSI0IR0 base_addr
* MSI0IR1 base_addr + 0x10000
@@ -67,107 +86,74 @@ static struct xgene_msi xgene_msi_ctrl;
* MSIINT1 base_addr + 0x810000
* ... ...
* MSIINTF base_addr + 0x8F0000
- *
- * Each index register supports 16 MSI vectors (0..15) to generate interrupt.
- * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination
- * registers.
- *
- * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate
- * the MSI pending status caused by 1 of its 8 index registers.
*/
/* MSInIRx read helper */
-static u32 xgene_msi_ir_read(struct xgene_msi *msi,
- u32 msi_grp, u32 msir_idx)
+static u32 xgene_msi_ir_read(struct xgene_msi *msi, u32 msi_grp, u32 msir_idx)
{
return readl_relaxed(msi->msi_regs + MSI_IR0 +
- (msi_grp << 19) + (msir_idx << 16));
+ (FIELD_PREP(MSI_GROUP_MASK, msi_grp) |
+ FIELD_PREP(MSI_INDEX_MASK, msir_idx)));
}
/* MSIINTn read helper */
static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
{
- return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
+ return readl_relaxed(msi->msi_regs + MSI_INT0 +
+ FIELD_PREP(MSI_INTR_MASK, msi_grp));
}
/*
- * With 2048 MSI vectors supported, the MSI message can be constructed using
- * following scheme:
- * - Divide into 8 256-vector groups
- * Group 0: 0-255
- * Group 1: 256-511
- * Group 2: 512-767
- * ...
- * Group 7: 1792-2047
- * - Each 256-vector group is divided into 16 16-vector groups
- * As an example: 16 16-vector groups for 256-vector group 0-255 is
- * Group 0: 0-15
- * Group 1: 16-32
- * ...
- * Group 15: 240-255
- * - The termination address of MSI vector in 256-vector group n and 16-vector
- * group x is the address of MSIxIRn
- * - The data for MSI vector in 16-vector group x is x
+ * In order to allow an MSI to be moved from one CPU to another without
+ * having to repaint both the address and the data (which cannot be done
+ * atomically), we statically partition the MSI frames between CPUs. Given
+ * that XGene-1 has 8 CPUs, each CPU gets two frames assigned to it.
+ *
+ * We adopt the convention that when an MSI is moved, it is configured to
+ * target the same register number in the congruent frame assigned to the
+ * new target CPU. This reserves a given MSI across all CPUs, and reduces
+ * the MSI capacity from 2048 to 256.
+ *
+ * Effectively, this amounts to:
+ * - hwirq[7]::cpu[2:0] is the target frame number (n in MSInIRx)
+ * - hwirq[6:4] is the register index in any given frame (x in MSInIRx)
+ * - hwirq[3:0] is the MSI data
*/
-static u32 hwirq_to_reg_set(unsigned long hwirq)
-{
- return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
-}
-
-static u32 hwirq_to_group(unsigned long hwirq)
-{
- return (hwirq % NR_HW_IRQS);
-}
-
-static u32 hwirq_to_msi_data(unsigned long hwirq)
+static irq_hw_number_t compute_hwirq(u8 frame, u8 index, u8 data)
{
- return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
+ return (FIELD_PREP(BIT(7), FIELD_GET(BIT(3), frame)) |
+ FIELD_PREP(MSInRx_HWIRQ_MASK, index) |
+ FIELD_PREP(DATA_HWIRQ_MASK, data));
}
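A worked decode of this scheme (an editorial sketch, not part of the patch; MSInRx_HWIRQ_MASK and DATA_HWIRQ_MASK are assumed to be GENMASK(6, 4) and GENMASK(3, 0), matching the bit layout described above):
/*
 * Hypothetical example (not part of the patch): frame 10 (0b1010, CPU 2's
 * second frame), register index 3, data 5:
 *
 *   compute_hwirq(10, 3, 5) = BIT(7) | (3 << 4) | 5 = 0xb5
 *
 * The CPU part of the frame (2) is deliberately not encoded; it is
 * recovered from the effective affinity in xgene_compose_msi_msg(), so the
 * same hwirq targets frame 0b1010 (10) while on CPU 2 and frame 0b1101
 * (13) after being moved to CPU 5, always register 3 with data 5.
 */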
static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
- u32 reg_set = hwirq_to_reg_set(data->hwirq);
- u32 group = hwirq_to_group(data->hwirq);
- u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
+ u64 target_addr;
+ u32 frame, msir;
+ int cpu;
- msg->address_hi = upper_32_bits(target_addr);
- msg->address_lo = lower_32_bits(target_addr);
- msg->data = hwirq_to_msi_data(data->hwirq);
-}
+ cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
+ msir = FIELD_GET(MSInRx_HWIRQ_MASK, data->hwirq);
+ frame = FIELD_PREP(BIT(3), FIELD_GET(BIT(7), data->hwirq)) | cpu;
-/*
- * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
- * the expected behaviour of .set_affinity for each MSI interrupt, the 16
- * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
- * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another
- * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a
- * consequence, the total MSI vectors that X-Gene v1 supports will be
- * reduced to 256 (2048/8) vectors.
- */
-static int hwirq_to_cpu(unsigned long hwirq)
-{
- return (hwirq % xgene_msi_ctrl.num_cpus);
-}
+ target_addr = msi->msi_addr;
+ target_addr += (FIELD_PREP(MSI_GROUP_MASK, frame) |
+ FIELD_PREP(MSI_INTR_MASK, msir));
-static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
-{
- return (hwirq - hwirq_to_cpu(hwirq));
+ msg->address_hi = upper_32_bits(target_addr);
+ msg->address_lo = lower_32_bits(target_addr);
+ msg->data = FIELD_GET(DATA_HWIRQ_MASK, data->hwirq);
}
static int xgene_msi_set_affinity(struct irq_data *irqdata,
const struct cpumask *mask, bool force)
{
int target_cpu = cpumask_first(mask);
- int curr_cpu;
-
- curr_cpu = hwirq_to_cpu(irqdata->hwirq);
- if (curr_cpu == target_cpu)
- return IRQ_SET_MASK_OK_DONE;
- /* Update MSI number to target the new CPU */
- irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;
+ irq_data_update_effective_affinity(irqdata, cpumask_of(target_cpu));
+ /* Force the core code to regenerate the message */
return IRQ_SET_MASK_OK;
}
@@ -181,25 +167,23 @@ static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
struct xgene_msi *msi = domain->host_data;
- int msi_irq;
+ irq_hw_number_t hwirq;
mutex_lock(&msi->bitmap_lock);
- msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
- msi->num_cpus, 0);
- if (msi_irq < NR_MSI_VEC)
- bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
- else
- msi_irq = -ENOSPC;
+ hwirq = find_first_zero_bit(msi->bitmap, NR_MSI_VEC);
+ if (hwirq < NR_MSI_VEC)
+ set_bit(hwirq, msi->bitmap);
mutex_unlock(&msi->bitmap_lock);
- if (msi_irq < 0)
- return msi_irq;
+ if (hwirq >= NR_MSI_VEC)
+ return -ENOSPC;
- irq_domain_set_info(domain, virq, msi_irq,
+ irq_domain_set_info(domain, virq, hwirq,
&xgene_msi_bottom_irq_chip, domain->host_data,
handle_simple_irq, NULL, NULL);
+ irqd_set_resend_when_in_progress(irq_get_irq_data(virq));
return 0;
}
@@ -209,12 +193,10 @@ static void xgene_irq_domain_free(struct irq_domain *domain,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
- u32 hwirq;
mutex_lock(&msi->bitmap_lock);
- hwirq = hwirq_to_canonical_hwirq(d->hwirq);
- bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
+ clear_bit(d->hwirq, msi->bitmap);
mutex_unlock(&msi->bitmap_lock);
@@ -235,10 +217,11 @@ static const struct msi_parent_ops xgene_msi_parent_ops = {
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-static int xgene_allocate_domains(struct xgene_msi *msi)
+static int xgene_allocate_domains(struct device_node *node,
+ struct xgene_msi *msi)
{
struct irq_domain_info info = {
- .fwnode = of_fwnode_handle(msi->node),
+ .fwnode = of_fwnode_handle(node),
.ops = &xgene_msi_domain_ops,
.size = NR_MSI_VEC,
.host_data = msi,
@@ -248,166 +231,111 @@ static int xgene_allocate_domains(struct xgene_msi *msi)
return msi->inner_domain ? 0 : -ENOMEM;
}
-static void xgene_free_domains(struct xgene_msi *msi)
+static int xgene_msi_init_allocator(struct device *dev)
{
- if (msi->inner_domain)
- irq_domain_remove(msi->inner_domain);
-}
-
-static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
-{
- xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL);
- if (!xgene_msi->bitmap)
+ xgene_msi_ctrl->bitmap = devm_bitmap_zalloc(dev, NR_MSI_VEC, GFP_KERNEL);
+ if (!xgene_msi_ctrl->bitmap)
return -ENOMEM;
- mutex_init(&xgene_msi->bitmap_lock);
-
- xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
- sizeof(struct xgene_msi_group),
- GFP_KERNEL);
- if (!xgene_msi->msi_groups)
- return -ENOMEM;
+ mutex_init(&xgene_msi_ctrl->bitmap_lock);
return 0;
}
static void xgene_msi_isr(struct irq_desc *desc)
{
+ unsigned int *irqp = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct xgene_msi_group *msi_groups;
- struct xgene_msi *xgene_msi;
- int msir_index, msir_val, hw_irq, ret;
- u32 intr_index, grp_select, msi_grp;
+ struct xgene_msi *xgene_msi = xgene_msi_ctrl;
+ unsigned long grp_pending;
+ int msir_idx;
+ u32 msi_grp;
chained_irq_enter(chip, desc);
- msi_groups = irq_desc_get_handler_data(desc);
- xgene_msi = msi_groups->msi;
- msi_grp = msi_groups->msi_grp;
-
- /*
- * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt
- * If bit x of this register is set (x is 0..7), one or more interrupts
- * corresponding to MSInIRx is set.
- */
- grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
- while (grp_select) {
- msir_index = ffs(grp_select) - 1;
- /*
- * Calculate MSInIRx address to read to check for interrupts
- * (refer to termination address and data assignment
- * described in xgene_compose_msi_msg() )
- */
- msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
- while (msir_val) {
- intr_index = ffs(msir_val) - 1;
- /*
- * Calculate MSI vector number (refer to the termination
- * address and data assignment described in
- * xgene_compose_msi_msg function)
- */
- hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
- NR_HW_IRQS) + msi_grp;
- /*
- * As we have multiple hw_irq that maps to single MSI,
- * always look up the virq using the hw_irq as seen from
- * CPU0
- */
- hw_irq = hwirq_to_canonical_hwirq(hw_irq);
- ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq);
+ msi_grp = irqp - xgene_msi->gic_irq;
+
+ grp_pending = xgene_msi_int_read(xgene_msi, msi_grp);
+
+ for_each_set_bit(msir_idx, &grp_pending, IDX_PER_GROUP) {
+ unsigned long msir;
+ int intr_idx;
+
+ msir = xgene_msi_ir_read(xgene_msi, msi_grp, msir_idx);
+
+ for_each_set_bit(intr_idx, &msir, IRQS_PER_IDX) {
+ irq_hw_number_t hwirq;
+ int ret;
+
+ hwirq = compute_hwirq(msi_grp, msir_idx, intr_idx);
+ ret = generic_handle_domain_irq(xgene_msi->inner_domain,
+ hwirq);
WARN_ON_ONCE(ret);
- msir_val &= ~(1 << intr_index);
- }
- grp_select &= ~(1 << msir_index);
-
- if (!grp_select) {
- /*
- * We handled all interrupts happened in this group,
- * resample this group MSI_INTx register in case
- * something else has been made pending in the meantime
- */
- grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
}
}
chained_irq_exit(chip, desc);
}
-static enum cpuhp_state pci_xgene_online;
-
static void xgene_msi_remove(struct platform_device *pdev)
{
- struct xgene_msi *msi = platform_get_drvdata(pdev);
-
- if (pci_xgene_online)
- cpuhp_remove_state(pci_xgene_online);
- cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
-
- kfree(msi->msi_groups);
-
- bitmap_free(msi->bitmap);
- msi->bitmap = NULL;
+ for (int i = 0; i < NR_HW_IRQS; i++) {
+ unsigned int irq = xgene_msi_ctrl->gic_irq[i];
+ if (!irq)
+ continue;
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ }
- xgene_free_domains(msi);
+ if (xgene_msi_ctrl->inner_domain)
+ irq_domain_remove(xgene_msi_ctrl->inner_domain);
}
-static int xgene_msi_hwirq_alloc(unsigned int cpu)
+static int xgene_msi_handler_setup(struct platform_device *pdev)
{
- struct xgene_msi *msi = &xgene_msi_ctrl;
- struct xgene_msi_group *msi_group;
- cpumask_var_t mask;
+ struct xgene_msi *xgene_msi = xgene_msi_ctrl;
int i;
- int err;
- for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
- msi_group = &msi->msi_groups[i];
- if (!msi_group->gic_irq)
- continue;
+ for (i = 0; i < NR_HW_IRQS; i++) {
+ u32 msi_val;
+ int irq, err;
- irq_set_chained_handler_and_data(msi_group->gic_irq,
- xgene_msi_isr, msi_group);
+ /*
+ * MSInIRx registers are read-to-clear; before registering
+ * interrupt handlers, read all of them to clear spurious
+ * interrupts that may occur before the driver is probed.
+ */
+ for (int msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
+ xgene_msi_ir_read(xgene_msi, i, msi_idx);
+
+ /* Read MSIINTn to confirm */
+ msi_val = xgene_msi_int_read(xgene_msi, i);
+ if (msi_val) {
+ dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ xgene_msi->gic_irq[i] = irq;
/*
* Statically allocate MSI GIC IRQs to each CPU core.
* With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
* to each core.
*/
- if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
- cpumask_clear(mask);
- cpumask_set_cpu(cpu, mask);
- err = irq_set_affinity(msi_group->gic_irq, mask);
- if (err)
- pr_err("failed to set affinity for GIC IRQ");
- free_cpumask_var(mask);
- } else {
- pr_err("failed to alloc CPU mask for affinity\n");
- err = -EINVAL;
- }
-
+ irq_set_status_flags(irq, IRQ_NO_BALANCING);
+ err = irq_set_affinity(irq, cpumask_of(i % num_possible_cpus()));
if (err) {
- irq_set_chained_handler_and_data(msi_group->gic_irq,
- NULL, NULL);
+ pr_err("failed to set affinity for GIC IRQ");
return err;
}
- }
-
- return 0;
-}
-
-static int xgene_msi_hwirq_free(unsigned int cpu)
-{
- struct xgene_msi *msi = &xgene_msi_ctrl;
- struct xgene_msi_group *msi_group;
- int i;
-
- for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
- msi_group = &msi->msi_groups[i];
- if (!msi_group->gic_irq)
- continue;
- irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
- NULL);
+ irq_set_chained_handler_and_data(irq, xgene_msi_isr,
+ &xgene_msi_ctrl->gic_irq[i]);
}
+
return 0;
}
@@ -419,14 +347,15 @@ static const struct of_device_id xgene_msi_match_table[] = {
static int xgene_msi_probe(struct platform_device *pdev)
{
struct resource *res;
- int rc, irq_index;
struct xgene_msi *xgene_msi;
- int virt_msir;
- u32 msi_val, msi_idx;
+ int rc;
- xgene_msi = &xgene_msi_ctrl;
+ xgene_msi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*xgene_msi_ctrl),
+ GFP_KERNEL);
+ if (!xgene_msi_ctrl)
+ return -ENOMEM;
- platform_set_drvdata(pdev, xgene_msi);
+ xgene_msi = xgene_msi_ctrl;
xgene_msi->msi_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(xgene_msi->msi_regs)) {
@@ -434,66 +363,26 @@ static int xgene_msi_probe(struct platform_device *pdev)
goto error;
}
xgene_msi->msi_addr = res->start;
- xgene_msi->node = pdev->dev.of_node;
- xgene_msi->num_cpus = num_possible_cpus();
- rc = xgene_msi_init_allocator(xgene_msi);
+ rc = xgene_msi_init_allocator(&pdev->dev);
if (rc) {
dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
goto error;
}
- rc = xgene_allocate_domains(xgene_msi);
+ rc = xgene_allocate_domains(dev_of_node(&pdev->dev), xgene_msi);
if (rc) {
dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
goto error;
}
- for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
- virt_msir = platform_get_irq(pdev, irq_index);
- if (virt_msir < 0) {
- rc = virt_msir;
- goto error;
- }
- xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
- xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
- xgene_msi->msi_groups[irq_index].msi = xgene_msi;
- }
-
- /*
- * MSInIRx registers are read-to-clear; before registering
- * interrupt handlers, read all of them to clear spurious
- * interrupts that may occur before the driver is probed.
- */
- for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
- for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
- xgene_msi_ir_read(xgene_msi, irq_index, msi_idx);
-
- /* Read MSIINTn to confirm */
- msi_val = xgene_msi_int_read(xgene_msi, irq_index);
- if (msi_val) {
- dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
- rc = -EINVAL;
- goto error;
- }
- }
-
- rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
- xgene_msi_hwirq_alloc, NULL);
- if (rc < 0)
- goto err_cpuhp;
- pci_xgene_online = rc;
- rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
- xgene_msi_hwirq_free);
+ rc = xgene_msi_handler_setup(pdev);
if (rc)
- goto err_cpuhp;
+ goto error;
dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
return 0;
-
-err_cpuhp:
- dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
xgene_msi_remove(pdev);
return rc;
@@ -507,9 +396,4 @@ static struct platform_driver xgene_msi_driver = {
.probe = xgene_msi_probe,
.remove = xgene_msi_remove,
};
-
-static int __init xgene_pcie_msi_init(void)
-{
- return platform_driver_register(&xgene_msi_driver);
-}
-subsys_initcall(xgene_pcie_msi_init);
+builtin_platform_driver(xgene_msi_driver);
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 1e2ebbfa36d1..b95afa35201d 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -12,6 +12,7 @@
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/init.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -53,11 +54,9 @@
#define XGENE_V1_PCI_EXP_CAP 0x40
/* PCIe IP version */
-#define XGENE_PCIE_IP_VER_UNKN 0
#define XGENE_PCIE_IP_VER_1 1
#define XGENE_PCIE_IP_VER_2 2
-#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
struct xgene_pcie {
struct device_node *node;
struct device *dev;
@@ -188,7 +187,6 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
-#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
static int xgene_get_csr_resource(struct acpi_device *adev,
@@ -279,7 +277,6 @@ const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
};
#endif
-#if defined(CONFIG_PCI_XGENE)
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr,
u32 flags, u64 size)
{
@@ -594,6 +591,24 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
+static bool xgene_check_pcie_msi_ready(void)
+{
+ struct device_node *np;
+ struct irq_domain *d;
+
+ if (!IS_ENABLED(CONFIG_PCI_XGENE_MSI))
+ return true;
+
+ np = of_find_compatible_node(NULL, NULL, "apm,xgene1-msi");
+ if (!np)
+ return true;
+
+ d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+ of_node_put(np);
+
+ return d && irq_domain_is_msi_parent(d);
+}
+
static int xgene_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -602,6 +617,10 @@ static int xgene_pcie_probe(struct platform_device *pdev)
struct pci_host_bridge *bridge;
int ret;
+ if (!xgene_check_pcie_msi_ready())
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
+ "MSI driver not ready\n");
+
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
if (!bridge)
return -ENOMEM;
@@ -610,10 +629,7 @@ static int xgene_pcie_probe(struct platform_device *pdev)
port->node = of_node_get(dn);
port->dev = dev;
-
- port->version = XGENE_PCIE_IP_VER_UNKN;
- if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
- port->version = XGENE_PCIE_IP_VER_1;
+ port->version = XGENE_PCIE_IP_VER_1;
ret = xgene_pcie_map_reg(port, pdev);
if (ret)
@@ -647,4 +663,3 @@ static struct platform_driver xgene_pcie_driver = {
.probe = xgene_pcie_probe,
};
builtin_platform_driver(xgene_pcie_driver);
-#endif
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index a43f21eb8fbb..ea2ca2e70f20 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -29,7 +30,6 @@ struct altera_msi {
DECLARE_BITMAP(used, MAX_MSI_VECTORS);
struct mutex lock; /* protect "used" bitmap */
struct platform_device *pdev;
- struct irq_domain *msi_domain;
struct irq_domain *inner_domain;
void __iomem *csr_base;
void __iomem *vector_base;
@@ -74,18 +74,20 @@ static void altera_msi_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static struct irq_chip altera_msi_irq_chip = {
- .name = "Altera PCIe MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define ALTERA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info altera_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &altera_msi_irq_chip,
-};
+#define ALTERA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+static const struct msi_parent_ops altera_msi_parent_ops = {
+ .required_flags = ALTERA_MSI_FLAGS_REQUIRED,
+ .supported_flags = ALTERA_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "Altera-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct altera_msi *msi = irq_data_get_irq_chip_data(data);
@@ -164,20 +166,16 @@ static const struct irq_domain_ops msi_domain_ops = {
static int altera_allocate_domains(struct altera_msi *msi)
{
- struct fwnode_handle *fwnode = of_fwnode_handle(msi->pdev->dev.of_node);
-
- msi->inner_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, msi);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(&msi->pdev->dev),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->num_of_vectors,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &altera_msi_parent_ops);
if (!msi->inner_domain) {
- dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &altera_msi_domain_info, msi->inner_domain);
- if (!msi->msi_domain) {
dev_err(&msi->pdev->dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
@@ -186,7 +184,6 @@ static int altera_allocate_domains(struct altera_msi *msi)
static void altera_free_domains(struct altera_msi *msi)
{
- irq_domain_remove(msi->msi_domain);
irq_domain_remove(msi->inner_domain);
}
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 0fc77176a52e..3dbb7adc421c 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -852,10 +852,9 @@ static void aglx_isr(struct irq_desc *desc)
static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), PCI_NUM_INTX,
+ pcie->irq_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 92887b394eb4..9afbd02ded35 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -46,6 +47,7 @@
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
#define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8
@@ -55,6 +57,9 @@
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+#define PCIE_RC_PL_REG_PHY_CTL_1 0x1804
+#define PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK 0x8
+
#define PCIE_RC_PL_PHY_CTL_15 0x184c
#define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000
#define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff
@@ -265,7 +270,6 @@ struct brcm_msi {
struct device *dev;
void __iomem *base;
struct device_node *np;
- struct irq_domain *msi_domain;
struct irq_domain *inner_domain;
struct mutex lock; /* guards the alloc/free operations */
u64 target_addr;
@@ -465,17 +469,20 @@ static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}
-static struct irq_chip brcm_msi_irq_chip = {
- .name = "BRCM STB PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define BRCM_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info brcm_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
- .chip = &brcm_msi_irq_chip,
+#define BRCM_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops brcm_msi_parent_ops = {
+ .required_flags = BRCM_MSI_FLAGS_REQUIRED,
+ .supported_flags = BRCM_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "BRCM-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void brcm_pcie_msi_isr(struct irq_desc *desc)
@@ -581,21 +588,18 @@ static const struct irq_domain_ops msi_domain_ops = {
static int brcm_allocate_domains(struct brcm_msi *msi)
{
- struct fwnode_handle *fwnode = of_fwnode_handle(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_create_linear(NULL, msi->nr, &msi_domain_ops, msi);
- if (!msi->inner_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(msi->np),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->nr,
+ };
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &brcm_msi_domain_info,
- msi->inner_domain);
- if (!msi->msi_domain) {
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &brcm_msi_parent_ops);
+ if (!msi->inner_domain) {
dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
@@ -604,7 +608,6 @@ static int brcm_allocate_domains(struct brcm_msi *msi)
static void brcm_free_domains(struct brcm_msi *msi)
{
- irq_domain_remove(msi->msi_domain);
irq_domain_remove(msi->inner_domain);
}
@@ -970,7 +973,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
*
* The PCIe host controller by design must set the inbound viewport to
* be a contiguous arrangement of all of the system's memory. In
- * addition, its size mut be a power of two. To further complicate
+ * addition, its size must be a power of two. To further complicate
* matters, the viewport must start on a pcie-address that is aligned
* on a multiple of its size. If a portion of the viewport does not
* represent system memory -- e.g. 3GB of memory requires a 4GB
@@ -1072,7 +1075,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
void __iomem *base = pcie->base;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
- u32 tmp, burst, aspm_support;
+ u32 tmp, burst, aspm_support, num_lanes, num_lanes_cap;
u8 num_out_wins = 0;
int num_inbound_wins = 0;
int memc, ret;
@@ -1180,6 +1183,27 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */
+ num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+ num_lanes = 0;
+
+ /*
+ * Use hardware negotiated Max Link Width value by default. If the
+ * "num-lanes" DT property is present, assume that the chip's default
+ * link width capability information is incorrect/undesired and use the
+ * specified value instead.
+ */
+ if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) &&
+ num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) {
+ u32p_replace_bits(&tmp, num_lanes,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1);
+ u32p_replace_bits(&tmp, 1,
+ PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK);
+ writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1);
+ }
+
/*
* For config space accesses on the RC, show the right class for
* a PCIe-PCIe bridge (the default setting is to be EP mode).
@@ -1333,11 +1357,7 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
if (ret)
return ret;
- /*
- * Wait for 100ms after PERST# deassertion; see PCIe CEM specification
- * sections 2.2, PCIe r5.0, 6.6.1.
- */
- msleep(100);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
/*
* Give the RC/EP even more time to wake up, before trying to
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index d2cb4c4f821a..9ba242ab9596 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -5,6 +5,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -81,7 +82,6 @@ struct iproc_msi_grp {
* @bitmap_lock: lock to protect access to the MSI bitmap
* @nr_msi_vecs: total number of MSI vectors
* @inner_domain: inner IRQ domain
- * @msi_domain: MSI IRQ domain
* @nr_eq_region: required number of 4K aligned memory region for MSI event
* queues
* @nr_msi_region: required number of 4K aligned address region for MSI posted
@@ -101,7 +101,6 @@ struct iproc_msi {
struct mutex bitmap_lock;
unsigned int nr_msi_vecs;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
unsigned int nr_eq_region;
unsigned int nr_msi_region;
void *eq_cpu;
@@ -165,16 +164,18 @@ static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
return eq * EQ_LEN * sizeof(u32);
}
-static struct irq_chip iproc_msi_irq_chip = {
- .name = "iProc-MSI",
+#define IPROC_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+#define IPROC_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static struct msi_parent_ops iproc_msi_parent_ops = {
+ .required_flags = IPROC_MSI_FLAGS_REQUIRED,
+ .supported_flags = IPROC_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "iProc-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-
-static struct msi_domain_info iproc_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
- .chip = &iproc_msi_irq_chip,
-};
-
/*
* In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
* dedicated event queue. Each MSI group can support up to 64 MSI vectors.
@@ -446,27 +447,22 @@ static void iproc_msi_disable(struct iproc_msi *msi)
static int iproc_msi_alloc_domains(struct device_node *node,
struct iproc_msi *msi)
{
- msi->inner_domain = irq_domain_create_linear(NULL, msi->nr_msi_vecs,
- &msi_domain_ops, msi);
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->nr_msi_vecs,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &iproc_msi_parent_ops);
if (!msi->inner_domain)
return -ENOMEM;
- msi->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node),
- &iproc_msi_domain_info,
- msi->inner_domain);
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
return 0;
}
static void iproc_msi_free_domains(struct iproc_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
-
if (msi->inner_domain)
irq_domain_remove(msi->inner_domain);
}
@@ -542,7 +538,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
msi->nr_cpus = num_possible_cpus();
if (msi->nr_cpus == 1)
- iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;
+ iproc_msi_parent_ops.supported_flags |= MSI_FLAG_MULTI_PCI_MSI;
msi->nr_irqs = of_irq_count(node);
if (!msi->nr_irqs) {
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index b55f5973414c..97147f43e41c 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
@@ -187,7 +188,6 @@ struct mtk_msi_set {
* @saved_irq_state: IRQ enable state saved at suspend time
* @irq_lock: lock protecting IRQ register access
* @intx_domain: legacy INTx IRQ domain
- * @msi_domain: MSI IRQ domain
* @msi_bottom_domain: MSI IRQ bottom domain
* @msi_sets: MSI sets information
* @lock: lock protecting IRQ bit map
@@ -210,7 +210,6 @@ struct mtk_gen3_pcie {
u32 saved_irq_state;
raw_spinlock_t irq_lock;
struct irq_domain *intx_domain;
- struct irq_domain *msi_domain;
struct irq_domain *msi_bottom_domain;
struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
struct mutex lock;
@@ -526,30 +525,22 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
return 0;
}
-static void mtk_pcie_msi_irq_mask(struct irq_data *data)
-{
- pci_msi_mask_irq(data);
- irq_chip_mask_parent(data);
-}
-
-static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
-{
- pci_msi_unmask_irq(data);
- irq_chip_unmask_parent(data);
-}
-
-static struct irq_chip mtk_msi_irq_chip = {
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = mtk_pcie_msi_irq_mask,
- .irq_unmask = mtk_pcie_msi_irq_unmask,
- .name = "MSI",
-};
-
-static struct msi_domain_info mtk_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
- MSI_FLAG_MULTI_PCI_MSI,
- .chip = &mtk_msi_irq_chip,
+#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops mtk_msi_parent_ops = {
+ .required_flags = MTK_MSI_FLAGS_REQUIRED,
+ .supported_flags = MTK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "MTK3-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -756,29 +747,23 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
/* Setup MSI */
mutex_init(&pcie->lock);
- pcie->msi_bottom_domain = irq_domain_create_linear(of_fwnode_handle(node),
- PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, pcie);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &mtk_msi_bottom_domain_ops,
+ .host_data = pcie,
+ .size = PCIE_MSI_IRQS_NUM,
+ };
+
+ pcie->msi_bottom_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops);
if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
goto err_msi_bottom_domain;
}
- pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
- &mtk_msi_domain_info,
- pcie->msi_bottom_domain);
- if (!pcie->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- ret = -ENODEV;
- goto err_msi_domain;
- }
-
of_node_put(intc_node);
return 0;
-err_msi_domain:
- irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
irq_domain_remove(pcie->intx_domain);
out_put_node:
@@ -793,9 +778,6 @@ static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
if (pcie->intx_domain)
irq_domain_remove(pcie->intx_domain);
- if (pcie->msi_domain)
- irq_domain_remove(pcie->msi_domain);
-
if (pcie->msi_bottom_domain)
irq_domain_remove(pcie->msi_bottom_domain);
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index e1934aa06c8d..24cc30a2ab6c 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -180,7 +181,6 @@ struct mtk_pcie_soc {
* @irq: GIC irq
* @irq_domain: legacy INTx IRQ domain
* @inner_domain: inner IRQ domain
- * @msi_domain: MSI IRQ domain
* @lock: protect the msi_irq_in_use bitmap
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
@@ -200,7 +200,6 @@ struct mtk_pcie_port {
int irq;
struct irq_domain *irq_domain;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
@@ -470,40 +469,39 @@ static const struct irq_domain_ops msi_domain_ops = {
.free = mtk_pcie_irq_domain_free,
};
-static struct irq_chip mtk_msi_irq_chip = {
- .name = "MTK PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
-static struct msi_domain_info mtk_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &mtk_msi_irq_chip,
+static const struct msi_parent_ops mtk_msi_parent_ops = {
+ .required_flags = MTK_MSI_FLAGS_REQUIRED,
+ .supported_flags = MTK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "MTK-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- struct fwnode_handle *fwnode = of_fwnode_handle(port->pcie->dev->of_node);
-
mutex_init(&port->lock);
- port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
- &msi_domain_ops, port);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(port->pcie->dev),
+ .ops = &msi_domain_ops,
+ .host_data = port,
+ .size = MTK_MSI_IRQS_NUM,
+ };
+
+ port->inner_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops);
if (!port->inner_domain) {
dev_err(port->pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
- port->inner_domain);
- if (!port->msi_domain) {
- dev_err(port->pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(port->inner_domain);
- return -ENOMEM;
- }
-
return 0;
}
@@ -532,8 +530,6 @@ static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
irq_domain_remove(port->irq_domain);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- if (port->msi_domain)
- irq_domain_remove(port->msi_domain);
if (port->inner_domain)
irq_domain_remove(port->inner_domain);
}
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index c32b803a47c7..fe288fd770c4 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -597,30 +598,6 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void rcar_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void rcar_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void rcar_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip rcar_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_ack = rcar_msi_top_irq_ack,
- .irq_mask = rcar_msi_top_irq_mask,
- .irq_unmask = rcar_msi_top_irq_unmask,
-};
-
static void rcar_msi_irq_ack(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
@@ -718,30 +695,36 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
.free = rcar_msi_domain_free,
};
-static struct msi_domain_info rcar_msi_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
- .chip = &rcar_msi_top_chip,
+#define RCAR_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define RCAR_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops rcar_msi_parent_ops = {
+ .required_flags = RCAR_MSI_FLAGS_REQUIRED,
+ .supported_flags = RCAR_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "RCAR-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int rcar_allocate_domains(struct rcar_msi *msi)
{
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &rcar_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pcie->dev),
+ .ops = &rcar_msi_domain_ops,
+ .host_data = msi,
+ .size = INT_PCI_MSI_NR,
+ };
+
+ msi->domain = msi_create_parent_irq_domain(&info, &rcar_msi_parent_ops);
if (!msi->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -750,10 +733,7 @@ static int rcar_allocate_domains(struct rcar_msi *msi)
static void rcar_free_domains(struct rcar_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 55416b8311dd..300cd85fa035 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -518,9 +518,9 @@ static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip)
{
u32 status;
- status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKCTL_RL;
- rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL);
}
static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip)
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index b9e7a8710cf0..ee1822ca01db 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -11,27 +11,19 @@
* ARM PCI Host generic driver.
*/
+#include <linux/bitfield.h>
#include <linux/bitrev.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
-#include <linux/regmap.h>
#include "../pci.h"
#include "pcie-rockchip.h"
@@ -40,18 +32,18 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
{
u32 status;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
}
static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
{
u32 status;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
}
static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
@@ -269,7 +261,7 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
scale = 3; /* 0.001x */
curr = curr / 1000; /* convert to mA */
power = (curr * 3300) / 1000; /* milliwatt */
- while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ while (power > FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL)) {
if (!scale) {
dev_warn(rockchip->dev, "invalid power supply\n");
return;
@@ -278,10 +270,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
power = power / 10;
}
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
- status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
- (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP);
+ status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, power);
+ status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, scale);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP);
}
/**
@@ -309,14 +301,14 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_set_power_limit(rockchip);
/* Set RC's clock architecture as common clock */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKSTA_SLC << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
/* Set RC's RCB to 128 */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKCTL_RCB;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
/* Enable Gen1 training */
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
@@ -325,7 +317,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(rockchip->perst_gpio, 1);
- msleep(PCIE_T_RRS_READY_MS);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
/* 500ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
@@ -341,9 +333,13 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
* Enable retrain for gen2. This should be configured only after
* gen1 finished.
*/
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2);
+ status &= ~PCI_EXP_LNKCTL2_TLS;
+ status |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKCTL_RL;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
status, PCIE_LINK_IS_GEN2(status), 20,
@@ -380,15 +376,15 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
/* Clear L0s from RC's link cap */
if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
- status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP);
+ status &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP);
}
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
- status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
- status |= PCIE_RC_CONFIG_DCSR_MPS_256;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
+ status &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ status |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
return 0;
err_power_off_phy:
@@ -439,7 +435,7 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
dev_dbg(dev, "malformed TLP received from the link\n");
if (sub_reg & PCIE_CORE_INT_UCR)
- dev_dbg(dev, "malformed TLP received from the link\n");
+ dev_dbg(dev, "Unexpected Completion received from the link\n");
if (sub_reg & PCIE_CORE_INT_FCE)
dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
@@ -489,7 +485,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
dev_dbg(dev, "fatal error interrupt received\n");
if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
- dev_dbg(dev, "no fatal error interrupt received\n");
+ dev_dbg(dev, "non fatal error interrupt received\n");
if (reg & PCIE_CLIENT_INT_CORR_ERR)
dev_dbg(dev, "correctable error interrupt received\n");
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 5864a20323f2..72a2c045f6fe 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -155,17 +155,7 @@
#define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00)
#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
-#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
-#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
-#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
-#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
-#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
-#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
-#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
-#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
-#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
-#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
-#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_CR (PCIE_RC_CONFIG_BASE + 0xc0)
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
@@ -215,20 +205,6 @@
#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
#define MAX_AXI_WRAPPER_REGION_NUM 33
-#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0
-#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1
-#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2
-#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3
-#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4
-#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27
#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5)
#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
(((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index dc9690a535e1..b037c8f315e4 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -90,7 +91,6 @@ struct xilinx_pl_dma_variant {
};
struct xilinx_msi {
- struct irq_domain *msi_domain;
unsigned long *bitmap;
struct irq_domain *dev_domain;
struct mutex lock; /* Protect bitmap variable */
@@ -373,20 +373,20 @@ static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct irq_chip xilinx_msi_irq_chip = {
- .name = "pl_dma:PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info xilinx_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
- .chip = &xilinx_msi_irq_chip,
-};
+#define XILINX_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+static const struct msi_parent_ops xilinx_msi_parent_ops = {
+ .required_flags = XILINX_MSI_FLAGS_REQUIRED,
+ .supported_flags = XILINX_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "pl_dma-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -458,11 +458,6 @@ static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
irq_domain_remove(msi->dev_domain);
msi->dev_domain = NULL;
}
-
- if (msi->msi_domain) {
- irq_domain_remove(msi->msi_domain);
- msi->msi_domain = NULL;
- }
}
static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
@@ -470,19 +465,17 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
struct device *dev = port->dev;
struct xilinx_msi *msi = &port->msi;
int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
- struct fwnode_handle *fwnode = of_fwnode_handle(port->dev->of_node);
-
- msi->dev_domain = irq_domain_create_linear(NULL, XILINX_NUM_MSI_IRQS,
- &dev_msi_domain_ops, port);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(port->dev),
+ .ops = &dev_msi_domain_ops,
+ .host_data = port,
+ .size = XILINX_NUM_MSI_IRQS,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops);
if (!msi->dev_domain)
goto out;
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &xilinx_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain)
- goto out;
-
mutex_init(&msi->lock);
msi->bitmap = kzalloc(size, GFP_KERNEL);
if (!msi->bitmap)
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index c8b05477b719..05b8c205493c 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -145,7 +146,6 @@
#define LINK_WAIT_USLEEP_MAX 100000
struct nwl_msi { /* MSI information */
- struct irq_domain *msi_domain;
DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR);
struct irq_domain *dev_domain;
struct mutex lock; /* protect bitmap variable */
@@ -418,19 +418,22 @@ static const struct irq_domain_ops intx_domain_ops = {
};
#ifdef CONFIG_PCI_MSI
-static struct irq_chip nwl_msi_irq_chip = {
- .name = "nwl_pcie:msi",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-static struct msi_domain_info nwl_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
- .chip = &nwl_msi_irq_chip,
+#define NWL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define NWL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops nwl_msi_parent_ops = {
+ .required_flags = NWL_MSI_FLAGS_REQUIRED,
+ .supported_flags = NWL_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "nwl-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
+
#endif
static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -495,22 +498,19 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
struct device *dev = pcie->dev;
- struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
-
- msi->dev_domain = irq_domain_create_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &dev_msi_domain_ops,
+ .host_data = pcie,
+ .size = INT_PCI_MSI_NR,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &nwl_msi_parent_ops);
if (!msi->dev_domain) {
dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
}
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &nwl_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create msi IRQ domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
#endif
return 0;
}
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index e36aa874bae9..937ea6ae1ac4 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -203,11 +204,6 @@ static void xilinx_msi_top_irq_ack(struct irq_data *d)
*/
}
-static struct irq_chip xilinx_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_ack = xilinx_msi_top_irq_ack,
-};
-
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -264,29 +260,42 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = {
.free = xilinx_msi_domain_free,
};
-static struct msi_domain_info xilinx_msi_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY,
- .chip = &xilinx_msi_top_chip,
+static bool xilinx_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ chip->irq_ack = xilinx_msi_top_irq_ack;
+ return true;
+}
+
+#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+static const struct msi_parent_ops xilinx_msi_parent_ops = {
+ .required_flags = XILINX_MSI_FLAGS_REQUIRED,
+ .supported_flags = MSI_GENERIC_FLAGS_MASK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "xilinx-",
+ .init_dev_msi_info = xilinx_init_dev_msi_info,
};
static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
{
- struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS,
- &xilinx_msi_domain_ops, pcie);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pcie->dev),
+ .ops = &xilinx_msi_domain_ops,
+ .host_data = pcie,
+ .size = XILINX_NUM_MSI_IRQS,
+ };
+
+ pcie->msi_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops);
if (!pcie->msi_domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
@@ -295,10 +304,7 @@ static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
static void xilinx_free_msi_domains(struct xilinx_pcie *pcie)
{
- struct irq_domain *parent = pcie->msi_domain->parent;
-
irq_domain_remove(pcie->msi_domain);
- irq_domain_remove(parent);
}
/* INTx Functions */
@@ -394,7 +400,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
- domain = pcie->msi_domain->parent;
+ domain = pcie->msi_domain;
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig
index c0e14146d7e4..62120101139c 100644
--- a/drivers/pci/controller/plda/Kconfig
+++ b/drivers/pci/controller/plda/Kconfig
@@ -5,6 +5,7 @@ menu "PLDA-based PCIe controllers"
config PCIE_PLDA_HOST
bool
+ select IRQ_MSI_LIB
config PCIE_MICROCHIP_HOST
tristate "Microchip AXI PCIe controller"
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index 3abedf723215..8e2db2e5b64b 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -11,6 +11,7 @@
#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
@@ -134,42 +135,41 @@ static const struct irq_domain_ops msi_domain_ops = {
.free = plda_irq_msi_domain_free,
};
-static struct irq_chip plda_msi_irq_chip = {
- .name = "PLDA PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info plda_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &plda_msi_irq_chip,
+#define PLDA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+#define PLDA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static const struct msi_parent_ops plda_msi_parent_ops = {
+ .required_flags = PLDA_MSI_FLAGS_REQUIRED,
+ .supported_flags = PLDA_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "PLDA-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
struct device *dev = port->dev;
- struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct plda_msi *msi = &port->msi;
mutex_init(&port->msi.lock);
- msi->dev_domain = irq_domain_create_linear(NULL, msi->num_vectors, &msi_domain_ops, port);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &msi_domain_ops,
+ .host_data = port,
+ .size = msi->num_vectors,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &plda_msi_parent_ops);
if (!msi->dev_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &plda_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
-
return 0;
}
@@ -563,7 +563,6 @@ static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);
- irq_domain_remove(pcie->msi.msi_domain);
irq_domain_remove(pcie->msi.dev_domain);
irq_domain_remove(pcie->intx_domain);
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
index 61ece26065ea..6b8665df7bf0 100644
--- a/drivers/pci/controller/plda/pcie-plda.h
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -164,7 +164,6 @@ struct plda_pcie_host_ops {
struct plda_msi {
struct mutex lock; /* Protect used bitmap */
- struct irq_domain *msi_domain;
struct irq_domain *dev_domain;
u32 num_vectors;
u64 vector_phy;
diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
index e73c1b7bc8ef..3caf53c6c082 100644
--- a/drivers/pci/controller/plda/pcie-starfive.c
+++ b/drivers/pci/controller/plda/pcie-starfive.c
@@ -368,7 +368,7 @@ static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
* of 100ms following exit from a conventional reset before
* sending a configuration request to the device.
*/
- msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
if (starfive_pcie_host_wait_for_link(pcie))
dev_info(dev, "port link down\n");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 8df064b62a2f..1bd5bf4a6097 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -174,58 +175,52 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
}
-/*
- * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
- */
static void vmd_irq_enable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
- unsigned long flags;
- raw_spin_lock_irqsave(&list_lock, flags);
- WARN_ON(vmdirq->enabled);
- list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
- vmdirq->enabled = true;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &list_lock) {
+ WARN_ON(vmdirq->enabled);
+ list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+ vmdirq->enabled = true;
+ }
+}
+static void vmd_pci_msi_enable(struct irq_data *data)
+{
+ vmd_irq_enable(data->parent_data);
data->chip->irq_unmask(data);
}
static void vmd_irq_disable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
- unsigned long flags;
-
- data->chip->irq_mask(data);
- raw_spin_lock_irqsave(&list_lock, flags);
- if (vmdirq->enabled) {
- list_del_rcu(&vmdirq->node);
- vmdirq->enabled = false;
+ scoped_guard(raw_spinlock_irqsave, &list_lock) {
+ if (vmdirq->enabled) {
+ list_del_rcu(&vmdirq->node);
+ vmdirq->enabled = false;
+ }
}
- raw_spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void vmd_pci_msi_disable(struct irq_data *data)
+{
+ data->chip->irq_mask(data);
+ vmd_irq_disable(data->parent_data);
}
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
- .irq_enable = vmd_irq_enable,
- .irq_disable = vmd_irq_disable,
.irq_compose_msi_msg = vmd_compose_msi_msg,
};
-static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
-{
- return 0;
-}
-
/*
* XXX: We can be even smarter selecting the best IRQ once we solve the
* affinity problem.
*/
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
- unsigned long flags;
int i, best;
if (vmd->msix_count == 1 + vmd->first_vec)
@@ -242,113 +237,128 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
return &vmd->irqs[vmd->first_vec];
}
- raw_spin_lock_irqsave(&list_lock, flags);
- best = vmd->first_vec + 1;
- for (i = best; i < vmd->msix_count; i++)
- if (vmd->irqs[i].count < vmd->irqs[best].count)
- best = i;
- vmd->irqs[best].count++;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ scoped_guard(raw_spinlock_irq, &list_lock) {
+ best = vmd->first_vec + 1;
+ for (i = best; i < vmd->msix_count; i++)
+ if (vmd->irqs[i].count < vmd->irqs[best].count)
+ best = i;
+ vmd->irqs[best].count++;
+ }
return &vmd->irqs[best];
}
-static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int virq, irq_hw_number_t hwirq,
- msi_alloc_info_t *arg)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+
+static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
{
- struct msi_desc *desc = arg->desc;
- struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
- struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ struct msi_desc *desc = ((msi_alloc_info_t *)arg)->desc;
+ struct vmd_dev *vmd = domain->host_data;
+ struct vmd_irq *vmdirq;
- if (!vmdirq)
- return -ENOMEM;
+ for (int i = 0; i < nr_irqs; ++i) {
+ vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ if (!vmdirq) {
+ vmd_msi_free(domain, virq, i);
+ return -ENOMEM;
+ }
- INIT_LIST_HEAD(&vmdirq->node);
- vmdirq->irq = vmd_next_irq(vmd, desc);
- vmdirq->virq = virq;
+ INIT_LIST_HEAD(&vmdirq->node);
+ vmdirq->irq = vmd_next_irq(vmd, desc);
+ vmdirq->virq = virq + i;
+
+ irq_domain_set_info(domain, virq + i, vmdirq->irq->virq,
+ &vmd_msi_controller, vmdirq,
+ handle_untracked_irq, vmd, NULL);
+ }
- irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
- handle_untracked_irq, vmd, NULL);
return 0;
}
-static void vmd_msi_free(struct irq_domain *domain,
- struct msi_domain_info *info, unsigned int virq)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
- struct vmd_irq *vmdirq = irq_get_chip_data(virq);
- unsigned long flags;
-
- synchronize_srcu(&vmdirq->irq->srcu);
-
- /* XXX: Potential optimization to rebalance */
- raw_spin_lock_irqsave(&list_lock, flags);
- vmdirq->irq->count--;
- raw_spin_unlock_irqrestore(&list_lock, flags);
-
- kfree(vmdirq);
-}
+ struct irq_data *irq_data;
+ struct vmd_irq *vmdirq;
-static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *arg)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+ for (int i = 0; i < nr_irqs; ++i) {
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+ vmdirq = irq_data->chip_data;
- if (nvec > vmd->msix_count)
- return vmd->msix_count;
+ synchronize_srcu(&vmdirq->irq->srcu);
- memset(arg, 0, sizeof(*arg));
- return 0;
-}
+ /* XXX: Potential optimization to rebalance */
+ scoped_guard(raw_spinlock_irq, &list_lock)
+ vmdirq->irq->count--;
-static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
-{
- arg->desc = desc;
+ kfree(vmdirq);
+ }
}
-static struct msi_domain_ops vmd_msi_domain_ops = {
- .get_hwirq = vmd_get_hwirq,
- .msi_init = vmd_msi_init,
- .msi_free = vmd_msi_free,
- .msi_prepare = vmd_msi_prepare,
- .set_desc = vmd_set_desc,
-};
-
-static struct msi_domain_info vmd_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .ops = &vmd_msi_domain_ops,
- .chip = &vmd_msi_controller,
+static const struct irq_domain_ops vmd_msi_domain_ops = {
+ .alloc = vmd_msi_alloc,
+ .free = vmd_msi_free,
};
-static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
+static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info)
{
- u16 reg;
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
- pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
- reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
- (reg | VMCONFIG_MSI_REMAP);
- pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+ info->chip->irq_enable = vmd_pci_msi_enable;
+ info->chip->irq_disable = vmd_pci_msi_disable;
+ return true;
}
+#define VMD_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX)
+#define VMD_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_NO_AFFINITY)
+
+static const struct msi_parent_ops vmd_msi_parent_ops = {
+ .supported_flags = VMD_MSI_FLAGS_SUPPORTED,
+ .required_flags = VMD_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_VMD_MSI,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .prefix = "VMD-",
+ .init_dev_msi_info = vmd_init_dev_msi_info,
+};
+
static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
- struct fwnode_handle *fn;
+ struct irq_domain_info info = {
+ .size = vmd->msix_count,
+ .ops = &vmd_msi_domain_ops,
+ .host_data = vmd,
+ };
- fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
- if (!fn)
+ info.fwnode = irq_domain_alloc_named_id_fwnode("VMD-MSI",
+ vmd->sysdata.domain);
+ if (!info.fwnode)
return -ENODEV;
- vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
+ vmd->irq_domain = msi_create_parent_irq_domain(&info,
+ &vmd_msi_parent_ops);
if (!vmd->irq_domain) {
- irq_domain_free_fwnode(fn);
+ irq_domain_free_fwnode(info.fwnode);
return -ENODEV;
}
return 0;
}
+static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
+{
+ u16 reg;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
+ reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
+ (reg | VMCONFIG_MSI_REMAP);
+ pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+}
+
static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
/*
@@ -387,29 +397,24 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
{
struct vmd_dev *vmd = vmd_from_bus(bus);
void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
- unsigned long flags;
- int ret = 0;
if (!addr)
return -EFAULT;
- raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
+ guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
switch (len) {
case 1:
*value = readb(addr);
- break;
+ return 0;
case 2:
*value = readw(addr);
- break;
+ return 0;
case 4:
*value = readl(addr);
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
- return ret;
}
/*
@@ -422,32 +427,27 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
{
struct vmd_dev *vmd = vmd_from_bus(bus);
void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
- unsigned long flags;
- int ret = 0;
if (!addr)
return -EFAULT;
- raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
+ guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
switch (len) {
case 1:
writeb(value, addr);
readb(addr);
- break;
+ return 0;
case 2:
writew(value, addr);
readw(addr);
- break;
+ return 0;
case 4:
writel(value, addr);
readl(addr);
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
- return ret;
}
static struct pci_ops vmd_ops = {
@@ -889,12 +889,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
ret = vmd_create_irq_domain(vmd);
if (ret)
return ret;
-
- /*
- * Override the IRQ domain bus token so the domain can be
- * distinguished from a regular PCI/MSI domain.
- */
- irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
} else {
vmd_set_msi_remapping(vmd, false);
}
@@ -1129,6 +1123,8 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, 0xb06f),
.driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb07f),
+ .driver_data = VMD_FEATS_CLIENT,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
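
The config-access rework in vmd_pci_read()/vmd_pci_write() above relies on the scope-based lock guards from <linux/cleanup.h>: the lock is dropped automatically when the guard goes out of scope, which is what makes the early returns in each switch case safe. A minimal sketch for reference, with illustrative cfg_read_* names that are not part of the driver:

#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/spinlock.h>

/* Explicit form: every exit path must pair with an unlock. */
static u32 cfg_read_explicit(raw_spinlock_t *lock, void __iomem *addr)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(lock, flags);
	val = readl(addr);
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}

/* guard() form: the lock is released when the guard goes out of scope,
 * so returning directly from each case, as vmd_pci_read() and
 * vmd_pci_write() now do, cannot leak the lock. */
static u32 cfg_read_guarded(raw_spinlock_t *lock, void __iomem *addr)
{
	guard(raw_spinlock_irqsave)(lock);
	return readl(addr);
}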
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index 1c5d82eb57d4..8dad291be8b8 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -28,6 +28,14 @@ config PCI_ENDPOINT_CONFIGFS
configure the endpoint function and used to bind the
function with an endpoint controller.
+config PCI_ENDPOINT_MSI_DOORBELL
+ bool "PCI Endpoint MSI Doorbell Support"
+ depends on PCI_ENDPOINT && GENERIC_MSI_IRQ
+ help
+ This enables the EP's MSI interrupt controller to function as a
+ doorbell. The RC can trigger a doorbell in the EP by writing data to
+ a dedicated BAR, which the EP maps to the controller's message address.
+
source "drivers/pci/endpoint/functions/Kconfig"
endmenu
diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile
index 95b2fe47e3b0..b4869d52053a 100644
--- a/drivers/pci/endpoint/Makefile
+++ b/drivers/pci/endpoint/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o
obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\
pci-epc-mem.o functions/
+obj-$(CONFIG_PCI_ENDPOINT_MSI_DOORBELL) += pci-ep-msi.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 50eb4106369f..e091193bd8a8 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -11,12 +11,14 @@
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include <linux/pci-ep-msi.h>
#include <linux/pci_regs.h>
#define IRQ_TYPE_INTX 0
@@ -29,6 +31,8 @@
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
+#define COMMAND_ENABLE_DOORBELL BIT(6)
+#define COMMAND_DISABLE_DOORBELL BIT(7)
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
@@ -39,6 +43,11 @@
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
+#define STATUS_DOORBELL_SUCCESS BIT(9)
+#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10)
+#define STATUS_DOORBELL_ENABLE_FAIL BIT(11)
+#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
+#define STATUS_DOORBELL_DISABLE_FAIL BIT(13)
#define FLAG_USE_DMA BIT(0)
@@ -66,6 +75,7 @@ struct pci_epf_test {
bool dma_supported;
bool dma_private;
const struct pci_epc_features *epc_features;
+ struct pci_epf_bar db_bar;
};
struct pci_epf_test_reg {
@@ -80,6 +90,9 @@ struct pci_epf_test_reg {
__le32 irq_number;
__le32 flags;
__le32 caps;
+ __le32 doorbell_bar;
+ __le32 doorbell_offset;
+ __le32 doorbell_data;
} __packed;
static struct pci_epf_header test_header = {
@@ -667,6 +680,115 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
}
}
+static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
+{
+ struct pci_epf_test *epf_test = data;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ u32 status = le32_to_cpu(reg->status);
+
+ status |= STATUS_DOORBELL_SUCCESS;
+ reg->status = cpu_to_le32(status);
+ pci_epf_test_raise_irq(epf_test, reg);
+
+ return IRQ_HANDLED;
+}
+
+static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
+{
+ struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
+ struct pci_epf *epf = epf_test->epf;
+
+ free_irq(epf->db_msg[0].virq, epf_test);
+ reg->doorbell_bar = cpu_to_le32(NO_BAR);
+
+ pci_epf_free_doorbell(epf);
+}
+
+static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ u32 status = le32_to_cpu(reg->status);
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+ struct msi_msg *msg;
+ enum pci_barno bar;
+ size_t offset;
+ int ret;
+
+ ret = pci_epf_alloc_doorbell(epf, 1);
+ if (ret)
+ goto set_status_err;
+
+ msg = &epf->db_msg[0].msg;
+ bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
+ if (bar < BAR_0)
+ goto err_doorbell_cleanup;
+
+ ret = request_irq(epf->db_msg[0].virq, pci_epf_test_doorbell_handler, 0,
+ "pci-ep-test-doorbell", epf_test);
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to request doorbell IRQ: %d\n",
+ epf->db_msg[0].virq);
+ goto err_doorbell_cleanup;
+ }
+
+ reg->doorbell_data = cpu_to_le32(msg->data);
+ reg->doorbell_bar = cpu_to_le32(bar);
+
+ msg = &epf->db_msg[0].msg;
+ ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
+ &epf_test->db_bar.phys_addr, &offset);
+
+ if (ret)
+ goto err_doorbell_cleanup;
+
+ reg->doorbell_offset = cpu_to_le32(offset);
+
+ epf_test->db_bar.barno = bar;
+ epf_test->db_bar.size = epf->bar[bar].size;
+ epf_test->db_bar.flags = epf->bar[bar].flags;
+
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
+ if (ret)
+ goto err_doorbell_cleanup;
+
+ status |= STATUS_DOORBELL_ENABLE_SUCCESS;
+ reg->status = cpu_to_le32(status);
+ return;
+
+err_doorbell_cleanup:
+ pci_epf_test_doorbell_cleanup(epf_test);
+set_status_err:
+ status |= STATUS_DOORBELL_ENABLE_FAIL;
+ reg->status = cpu_to_le32(status);
+}
+
+static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
+ u32 status = le32_to_cpu(reg->status);
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+
+ if (bar < BAR_0)
+ goto set_status_err;
+
+ pci_epf_test_doorbell_cleanup(epf_test);
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
+
+ status |= STATUS_DOORBELL_DISABLE_SUCCESS;
+ reg->status = cpu_to_le32(status);
+
+ return;
+
+set_status_err:
+ status |= STATUS_DOORBELL_DISABLE_FAIL;
+ reg->status = cpu_to_le32(status);
+}
+
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
u32 command;
@@ -714,6 +836,14 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
pci_epf_test_copy(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg);
break;
+ case COMMAND_ENABLE_DOORBELL:
+ pci_epf_test_enable_doorbell(epf_test, reg);
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
+ case COMMAND_DISABLE_DOORBELL:
+ pci_epf_test_disable_doorbell(epf_test, reg);
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
default:
dev_err(dev, "Invalid command 0x%x\n", command);
break;
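
For context, once the endpoint reports STATUS_DOORBELL_ENABLE_SUCCESS, the host side is expected to read doorbell_bar, doorbell_offset and doorbell_data from the test register BAR and write the data into the doorbell BAR. A hedged host-side sketch; the function and parameter names below are illustrative and not taken from an existing host driver:

/* Illustrative host-side helper: ring the endpoint's doorbell. The BAR
 * number, offset and data are the values the EPF exported through
 * pci_epf_test_reg above; pci_iomap()/writel() are standard host APIs. */
static int ring_ep_doorbell(struct pci_dev *pdev, int db_bar,
			    u32 db_offset, u32 db_data)
{
	void __iomem *base = pci_iomap(pdev, db_bar, 0);

	if (!base)
		return -ENOMEM;

	/* The endpoint mapped this BAR region onto its MSI controller's
	 * message address, so this MMIO write arrives as an MSI at the EP
	 * and pci_epf_test_doorbell_handler() runs there. */
	writel(db_data, base + db_offset);

	pci_iounmap(pdev, base);
	return 0;
}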
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index e4da3fdb0007..83e9ab10f9c4 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -70,9 +70,11 @@ static struct workqueue_struct *kpcintb_workqueue;
enum epf_ntb_bar {
BAR_CONFIG,
BAR_DB,
- BAR_MW0,
BAR_MW1,
BAR_MW2,
+ BAR_MW3,
+ BAR_MW4,
+ VNTB_BAR_NUM,
};
/*
@@ -132,7 +134,7 @@ struct epf_ntb {
bool linkup;
u32 spad_size;
- enum pci_barno epf_ntb_bar[6];
+ enum pci_barno epf_ntb_bar[VNTB_BAR_NUM];
struct epf_ntb_ctrl *reg;
@@ -510,7 +512,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
struct device *dev = &ntb->epf->dev;
int ret;
struct pci_epf_bar *epf_bar;
- void __iomem *mw_addr;
+ void *mw_addr;
enum pci_barno barno;
size_t size = sizeof(u32) * ntb->db_count;
@@ -576,7 +578,7 @@ static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
for (i = 0; i < ntb->num_mws; i++) {
size = ntb->mws_size[i];
- barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ barno = ntb->epf_ntb_bar[BAR_MW1 + i];
ntb->epf->bar[barno].barno = barno;
ntb->epf->bar[barno].size = size;
@@ -629,7 +631,7 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
int i;
for (i = 0; i < num_mws; i++) {
- barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ barno = ntb->epf_ntb_bar[BAR_MW1 + i];
pci_epc_clear_bar(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no,
@@ -654,6 +656,63 @@ static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
pci_epc_put(ntb->epf->epc);
}
+
+/**
+ * epf_ntb_is_bar_used() - Check if a BAR is used in the NTB configuration
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @barno: BAR number to check
+ *
+ * Returns: true if used, false if free.
+ */
+static bool epf_ntb_is_bar_used(struct epf_ntb *ntb,
+ enum pci_barno barno)
+{
+ int i;
+
+ for (i = 0; i < VNTB_BAR_NUM; i++) {
+ if (ntb->epf_ntb_bar[i] == barno)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * epf_ntb_find_bar() - Assign BAR number when no configuration is provided
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @epc_features: The features provided by the EPC specific to this EPF
+ * @bar: NTB BAR index
+ * @barno: BAR start index
+ *
+ * When no BAR configuration was provided through the userspace
+ * configuration, automatically assign BARs as this endpoint function
+ * has historically done.
+ *
+ * Returns: the BAR number from which to continue the search, or a negative
+ * value if no free BAR is available
+ */
+static int epf_ntb_find_bar(struct epf_ntb *ntb,
+ const struct pci_epc_features *epc_features,
+ enum epf_ntb_bar bar,
+ enum pci_barno barno)
+{
+ while (ntb->epf_ntb_bar[bar] < 0) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0)
+ break; /* No more BAR available */
+
+ /*
+ * Verify if the BAR found is not already assigned
+ * through the provided configuration
+ */
+ if (!epf_ntb_is_bar_used(ntb, barno))
+ ntb->epf_ntb_bar[bar] = barno;
+
+ barno += 1;
+ }
+
+ return barno;
+}
+
/**
* epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
* constructs (scratchpad region, doorbell, memorywindow)
@@ -676,23 +735,21 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
/* These are required BARs which are mandatory for NTB functionality */
- for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
- barno = pci_epc_get_next_free_bar(epc_features, barno);
+ for (bar = BAR_CONFIG; bar <= BAR_MW1; bar++) {
+ barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
if (barno < 0) {
dev_err(dev, "Fail to get NTB function BAR\n");
- return barno;
+ return -ENOENT;
}
- ntb->epf_ntb_bar[bar] = barno;
}
/* These are optional BARs which don't impact NTB functionality */
- for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
- barno = pci_epc_get_next_free_bar(epc_features, barno);
+ for (bar = BAR_MW1, i = 1; i < num_mws; bar++, i++) {
+ barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
if (barno < 0) {
ntb->num_mws = i;
dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
}
- ntb->epf_ntb_bar[bar] = barno;
}
return 0;
@@ -860,6 +917,37 @@ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
return len; \
}
+#define EPF_NTB_BAR_R(_name, _id) \
+ static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+ { \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->epf_ntb_bar[_id]); \
+ }
+
+#define EPF_NTB_BAR_W(_name, _id) \
+ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+ { \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ int val; \
+ int ret; \
+ \
+ ret = kstrtoint(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (val < NO_BAR || val > BAR_5) \
+ return -EINVAL; \
+ \
+ ntb->epf_ntb_bar[_id] = val; \
+ \
+ return len; \
+ }
+
static ssize_t epf_ntb_num_mws_store(struct config_item *item,
const char *page, size_t len)
{
@@ -899,6 +987,18 @@ EPF_NTB_MW_R(mw3)
EPF_NTB_MW_W(mw3)
EPF_NTB_MW_R(mw4)
EPF_NTB_MW_W(mw4)
+EPF_NTB_BAR_R(ctrl_bar, BAR_CONFIG)
+EPF_NTB_BAR_W(ctrl_bar, BAR_CONFIG)
+EPF_NTB_BAR_R(db_bar, BAR_DB)
+EPF_NTB_BAR_W(db_bar, BAR_DB)
+EPF_NTB_BAR_R(mw1_bar, BAR_MW1)
+EPF_NTB_BAR_W(mw1_bar, BAR_MW1)
+EPF_NTB_BAR_R(mw2_bar, BAR_MW2)
+EPF_NTB_BAR_W(mw2_bar, BAR_MW2)
+EPF_NTB_BAR_R(mw3_bar, BAR_MW3)
+EPF_NTB_BAR_W(mw3_bar, BAR_MW3)
+EPF_NTB_BAR_R(mw4_bar, BAR_MW4)
+EPF_NTB_BAR_W(mw4_bar, BAR_MW4)
CONFIGFS_ATTR(epf_ntb_, spad_count);
CONFIGFS_ATTR(epf_ntb_, db_count);
@@ -910,6 +1010,12 @@ CONFIGFS_ATTR(epf_ntb_, mw4);
CONFIGFS_ATTR(epf_ntb_, vbus_number);
CONFIGFS_ATTR(epf_ntb_, vntb_pid);
CONFIGFS_ATTR(epf_ntb_, vntb_vid);
+CONFIGFS_ATTR(epf_ntb_, ctrl_bar);
+CONFIGFS_ATTR(epf_ntb_, db_bar);
+CONFIGFS_ATTR(epf_ntb_, mw1_bar);
+CONFIGFS_ATTR(epf_ntb_, mw2_bar);
+CONFIGFS_ATTR(epf_ntb_, mw3_bar);
+CONFIGFS_ATTR(epf_ntb_, mw4_bar);
static struct configfs_attribute *epf_ntb_attrs[] = {
&epf_ntb_attr_spad_count,
@@ -922,6 +1028,12 @@ static struct configfs_attribute *epf_ntb_attrs[] = {
&epf_ntb_attr_vbus_number,
&epf_ntb_attr_vntb_pid,
&epf_ntb_attr_vntb_vid,
+ &epf_ntb_attr_ctrl_bar,
+ &epf_ntb_attr_db_bar,
+ &epf_ntb_attr_mw1_bar,
+ &epf_ntb_attr_mw2_bar,
+ &epf_ntb_attr_mw3_bar,
+ &epf_ntb_attr_mw4_bar,
NULL,
};
@@ -1048,7 +1160,7 @@ static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
struct device *dev;
dev = &ntb->ntb.dev;
- barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+ barno = ntb->epf_ntb_bar[BAR_MW1 + idx];
epf_bar = &ntb->epf->bar[barno];
epf_bar->phys_addr = addr;
epf_bar->barno = barno;
@@ -1379,6 +1491,7 @@ static int epf_ntb_probe(struct pci_epf *epf,
{
struct epf_ntb *ntb;
struct device *dev;
+ int i;
dev = &epf->dev;
@@ -1389,6 +1502,11 @@ static int epf_ntb_probe(struct pci_epf *epf,
epf->header = &epf_ntb_header;
ntb->epf = epf;
ntb->vbus_number = 0xff;
+
+ /* Initially, no bar is assigned */
+ for (i = 0; i < VNTB_BAR_NUM; i++)
+ ntb->epf_ntb_bar[i] = NO_BAR;
+
epf_set_drvdata(epf, ntb);
dev_info(dev, "pci-ep epf driver loaded\n");
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index d712c7a866d2..ef50c82e647f 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -691,6 +691,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group)
if (IS_ERR_OR_NULL(group))
return;
+ list_del(&group->group_entry);
configfs_unregister_default_group(group);
}
EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
new file mode 100644
index 000000000000..9ca89cbfec15
--- /dev/null
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Endpoint *Controller* (EPC) MSI library
+ *
+ * Copyright (C) 2025 NXP
+ * Author: Frank Li <Frank.Li@nxp.com>
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+#include <linux/pci-ep-msi.h>
+#include <linux/slab.h>
+
+static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ epc = pci_epc_get(dev_name(msi_desc_to_dev(desc)));
+ if (!epc)
+ return;
+
+ epf = list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list);
+
+ if (epf && epf->db_msg && desc->msi_index < epf->num_db)
+ memcpy(&epf->db_msg[desc->msi_index].msg, msg, sizeof(*msg));
+
+ pci_epc_put(epc);
+}
+
+int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
+{
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ struct irq_domain *domain;
+ void *msg;
+ int ret;
+ int i;
+
+ /* TODO: Multi-EPF support */
+ if (list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list) != epf) {
+ dev_err(dev, "MSI doorbell doesn't support multiple EPF\n");
+ return -EINVAL;
+ }
+
+ domain = of_msi_map_get_device_domain(epc->dev.parent, 0,
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (!domain) {
+ dev_err(dev, "Can't find MSI domain for EPC\n");
+ return -ENODEV;
+ }
+
+ if (!irq_domain_is_msi_parent(domain))
+ return -ENODEV;
+
+ if (!irq_domain_is_msi_immutable(domain)) {
+ dev_err(dev, "Mutable MSI controller not supported\n");
+ return -ENODEV;
+ }
+
+ dev_set_msi_domain(epc->dev.parent, domain);
+
+ msg = kcalloc(num_db, sizeof(struct pci_epf_doorbell_msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ epf->num_db = num_db;
+ epf->db_msg = msg;
+
+ ret = platform_device_msi_init_and_alloc_irqs(epc->dev.parent, num_db,
+ pci_epf_write_msi_msg);
+ if (ret) {
+ dev_err(dev, "Failed to allocate MSI\n");
+ kfree(msg);
+ return ret;
+ }
+
+ for (i = 0; i < num_db; i++)
+ epf->db_msg[i].virq = msi_get_virq(epc->dev.parent, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epf_alloc_doorbell);
+
+void pci_epf_free_doorbell(struct pci_epf *epf)
+{
+ platform_device_msi_free_irqs_all(epf->epc->dev.parent);
+
+ kfree(epf->db_msg);
+ epf->db_msg = NULL;
+ epf->num_db = 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_free_doorbell);
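
A minimal sketch of how an endpoint function driver consumes this API, mirroring what pci-epf-test does earlier in the series; the my_epf_* names are illustrative, and only pci_epf_alloc_doorbell(), epf->db_msg[] and pci_epf_free_doorbell() from this file are assumed:

#include <linux/interrupt.h>
#include <linux/pci-ep-msi.h>
#include <linux/pci-epf.h>

static irqreturn_t my_epf_doorbell_handler(int irq, void *data)
{
	/* Runs when the host writes into the BAR backing the doorbell. */
	return IRQ_HANDLED;
}

static int my_epf_setup_doorbell(struct pci_epf *epf, void *priv)
{
	int ret;

	ret = pci_epf_alloc_doorbell(epf, 1);
	if (ret)
		return ret;

	/* db_msg[0].virq is the Linux IRQ backing doorbell 0; db_msg[0].msg
	 * holds the MSI address/data to expose to the host. */
	ret = request_irq(epf->db_msg[0].virq, my_epf_doorbell_handler, 0,
			  "my-epf-doorbell", priv);
	if (ret)
		pci_epf_free_doorbell(epf);

	return ret;
}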
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 577a9e490115..d54e18872aef 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -338,7 +338,7 @@ static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
mutex_lock(&pci_epf_mutex);
list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
pci_ep_cfs_remove_epf_group(group);
- list_del(&driver->epf_group);
+ WARN_ON(!list_empty(&driver->epf_group));
mutex_unlock(&pci_epf_mutex);
}
@@ -477,6 +477,44 @@ struct pci_epf *pci_epf_create(const char *name)
}
EXPORT_SYMBOL_GPL(pci_epf_create);
+/**
+ * pci_epf_align_inbound_addr() - Align the given address based on the BAR
+ * alignment requirement
+ * @epf: the EPF device
+ * @bar: the BAR number corresponding to the given addr
+ * @addr: inbound address to be aligned
+ * @base: base address matching the @bar alignment requirement
+ * @off: offset to be added to the @base address
+ *
+ * Helper function to align input @addr based on BAR's alignment requirement.
+ * The aligned base address and offset are returned via @base and @off.
+ *
+ * NOTE: The pci_epf_alloc_space() function already accounts for alignment.
+ * This API is primarily intended for use with other memory regions not
+ * allocated by pci_epf_alloc_space(), such as peripheral register spaces or
+ * the message address of a platform MSI controller.
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+int pci_epf_align_inbound_addr(struct pci_epf *epf, enum pci_barno bar,
+ u64 addr, dma_addr_t *base, size_t *off)
+{
+ /*
+ * Most EP controllers require the BAR start address to be aligned to
+ * the BAR size, because they mask off the lower bits.
+ *
+ * Alignment to BAR size also works for controllers that support
+ * unaligned addresses.
+ */
+ u64 align = epf->bar[bar].size;
+
+ *base = round_down(addr, align);
+ *off = addr & (align - 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_align_inbound_addr);
+
static void pci_epf_dev_release(struct device *dev)
{
struct pci_epf *epf = to_pci_epf(dev);
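
A worked example of the rounding pci_epf_align_inbound_addr() performs, using assumed values (a 64 KiB BAR and an MSI message address of 0x40001234):

/* Worked example (illustrative values):
 *
 *   align = epf->bar[bar].size            = 0x10000
 *   *base = round_down(0x40001234, align) = 0x40000000
 *   *off  = 0x40001234 & (align - 1)      =     0x1234
 *
 * The EPF programs the BAR at *base and reports *off to the host
 * (reg->doorbell_offset in pci-epf-test), so the host's write at
 * BAR + 0x1234 lands exactly on the controller's message address.
 */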
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index 92e6e20e8595..7397374af171 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -2,10 +2,6 @@ Contributions are solicited in particular to remedy the following issues:
cpcihp:
-* There are no implementations of the ->hardware_test, ->get_power and
- ->set_power callbacks in struct cpci_hp_controller_ops. Why were they
- introduced? Can they be removed from the struct?
-
* Returned code from pci_hp_add_bridge() is not checked.
cpqphp:
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 91d2d92717d9..bcc51b26d03d 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -995,7 +995,7 @@ static inline int pcie_hotplug_depth(struct pci_dev *dev)
while (bus->parent) {
bus = bus->parent;
- if (bus->self && bus->self->is_hotplug_bridge)
+ if (bus->self && bus->self->is_pciehp)
depth++;
}
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 573a41869c15..c5345bff9a55 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -3,12 +3,15 @@
* PCI Hotplug Driver for PowerPC PowerNV platform.
*
* Copyright Gavin Shan, IBM Corporation 2016.
+ * Copyright (C) 2025 Raptor Engineering, LLC
+ * Copyright (C) 2025 Raptor Computing Systems, LLC
*/
#include <linux/bitfield.h>
#include <linux/libfdt.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/pci_hotplug.h>
#include <linux/of_fdt.h>
@@ -36,8 +39,10 @@ static void pnv_php_register(struct device_node *dn);
static void pnv_php_unregister_one(struct device_node *dn);
static void pnv_php_unregister(struct device_node *dn);
+static void pnv_php_enable_irq(struct pnv_php_slot *php_slot);
+
static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
- bool disable_device)
+ bool disable_device, bool disable_msi)
{
struct pci_dev *pdev = php_slot->pdev;
u16 ctrl;
@@ -53,19 +58,15 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
php_slot->irq = 0;
}
- if (php_slot->wq) {
- destroy_workqueue(php_slot->wq);
- php_slot->wq = NULL;
- }
-
- if (disable_device) {
+ if (disable_device || disable_msi) {
if (pdev->msix_enabled)
pci_disable_msix(pdev);
else if (pdev->msi_enabled)
pci_disable_msi(pdev);
+ }
+ if (disable_device)
pci_disable_device(pdev);
- }
}
static void pnv_php_free_slot(struct kref *kref)
@@ -74,7 +75,8 @@ static void pnv_php_free_slot(struct kref *kref)
struct pnv_php_slot, kref);
WARN_ON(!list_empty(&php_slot->children));
- pnv_php_disable_irq(php_slot, false);
+ pnv_php_disable_irq(php_slot, false, false);
+ destroy_workqueue(php_slot->wq);
kfree(php_slot->name);
kfree(php_slot);
}
@@ -391,6 +393,20 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
return 0;
}
+static int pcie_check_link_active(struct pci_dev *pdev)
+{
+ u16 lnk_status;
+ int ret;
+
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
+ return -ENODEV;
+
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+
+ return ret;
+}
+
static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
@@ -403,6 +419,19 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
*/
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
if (ret >= 0) {
+ if (pci_pcie_type(php_slot->pdev) == PCI_EXP_TYPE_DOWNSTREAM &&
+ presence == OPAL_PCI_SLOT_EMPTY) {
+ /*
+ * Similar to pciehp_hpc, check whether the Link Active
+ * bit is set to account for broken downstream bridges
+ * that don't properly assert Presence Detect State, as
+ * was observed on the Microsemi Switchtec PM8533 PFX
+ * [11f8:8533].
+ */
+ if (pcie_check_link_active(php_slot->pdev) > 0)
+ presence = OPAL_PCI_SLOT_PRESENT;
+ }
+
*state = presence;
ret = 0;
} else {
@@ -412,10 +441,23 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
return ret;
}
+static int pnv_php_get_raw_indicator_status(struct hotplug_slot *slot, u8 *state)
+{
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+ struct pci_dev *bridge = php_slot->pdev;
+ u16 status;
+
+ pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &status);
+ *state = (status & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
+ return 0;
+}
+
static int pnv_php_get_attention_state(struct hotplug_slot *slot, u8 *state)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+ pnv_php_get_raw_indicator_status(slot, &php_slot->attention_state);
*state = php_slot->attention_state;
return 0;
}
@@ -433,7 +475,7 @@ static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
mask = PCI_EXP_SLTCTL_AIC;
if (state)
- new = PCI_EXP_SLTCTL_ATTN_IND_ON;
+ new = FIELD_PREP(PCI_EXP_SLTCTL_AIC, state);
else
new = PCI_EXP_SLTCTL_ATTN_IND_OFF;
@@ -442,6 +484,61 @@ static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
return 0;
}
+static int pnv_php_activate_slot(struct pnv_php_slot *php_slot,
+ struct hotplug_slot *slot)
+{
+ int ret, i;
+
+ /*
+ * Issue initial slot activation command to firmware
+ *
+ * Firmware will power slot on, attempt to train the link, and
+ * discover any downstream devices. If this process fails, firmware
+ * will return an error code and an invalid device tree. Failure
+ * can be caused for multiple reasons, including a faulty
+ * downstream device, poor connection to the downstream device, or
+ * a previously latched PHB fence. On failure, issue fundamental
+ * reset up to three times before aborting.
+ */
+ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
+ if (ret) {
+ SLOT_WARN(
+ php_slot,
+ "PCI slot activation failed with error code %d, possible frozen PHB",
+ ret);
+ SLOT_WARN(
+ php_slot,
+ "Attempting complete PHB reset before retrying slot activation\n");
+ for (i = 0; i < 3; i++) {
+ /*
+ * Slot activation failed, PHB may be fenced from a
+ * prior device failure.
+ *
+ * Use the OPAL fundamental reset call to both try a
+ * device reset and clear any potentially active PHB
+ * fence / freeze.
+ */
+ SLOT_WARN(php_slot, "Try %d...\n", i + 1);
+ pci_set_pcie_reset_state(php_slot->pdev,
+ pcie_warm_reset);
+ msleep(250);
+ pci_set_pcie_reset_state(php_slot->pdev,
+ pcie_deassert_reset);
+
+ ret = pnv_php_set_slot_power_state(
+ slot, OPAL_PCI_SLOT_POWER_ON);
+ if (!ret)
+ break;
+ }
+
+ if (i >= 3)
+ SLOT_WARN(php_slot,
+ "Failed to bring slot online, aborting!\n");
+ }
+
+ return ret;
+}
+
static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
{
struct hotplug_slot *slot = &php_slot->slot;
@@ -504,7 +601,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
goto scan;
/* Power is off, turn it on and then scan the slot */
- ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
+ ret = pnv_php_activate_slot(php_slot, slot);
if (ret)
return ret;
@@ -561,8 +658,58 @@ static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe)
static int pnv_php_enable_slot(struct hotplug_slot *slot)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+ u32 prop32;
+ int ret;
+
+ ret = pnv_php_enable(php_slot, true);
+ if (ret)
+ return ret;
+
+ /* (Re-)enable interrupt if the slot supports surprise hotplug */
+ ret = of_property_read_u32(php_slot->dn, "ibm,slot-surprise-pluggable",
+ &prop32);
+ if (!ret && prop32)
+ pnv_php_enable_irq(php_slot);
+
+ return 0;
+}
+
+/*
+ * Disable any hotplug interrupts for all slots on the provided bus, as well as
+ * all downstream slots in preparation for a hot unplug.
+ */
+static int pnv_php_disable_all_irqs(struct pci_bus *bus)
+{
+ struct pci_bus *child_bus;
+ struct pci_slot *slot;
+
+ /* First go down child buses */
+ list_for_each_entry(child_bus, &bus->children, node)
+ pnv_php_disable_all_irqs(child_bus);
+
+ /* Disable IRQs for all pnv_php slots on this bus */
+ list_for_each_entry(slot, &bus->slots, list) {
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot->hotplug);
- return pnv_php_enable(php_slot, true);
+ pnv_php_disable_irq(php_slot, false, true);
+ }
+
+ return 0;
+}
+
+/*
+ * Disable any hotplug interrupts for all downstream slots on the provided
+ * bus in preparation for a hot unplug.
+ */
+static int pnv_php_disable_all_downstream_irqs(struct pci_bus *bus)
+{
+ struct pci_bus *child_bus;
+
+ /* Go down child buses, recursively deactivating their IRQs */
+ list_for_each_entry(child_bus, &bus->children, node)
+ pnv_php_disable_all_irqs(child_bus);
+
+ return 0;
}
static int pnv_php_disable_slot(struct hotplug_slot *slot)
@@ -579,6 +726,13 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
php_slot->state != PNV_PHP_STATE_REGISTERED)
return 0;
+ /*
+ * Free all IRQ resources from all child slots before remove.
+ * Note that we do not disable the root slot IRQ here as that
+ * would also deactivate the slot hot (re)plug interrupt!
+ */
+ pnv_php_disable_all_downstream_irqs(php_slot->bus);
+
/* Remove all devices behind the slot */
pci_lock_rescan_remove();
pci_hp_remove_devices(php_slot->bus);
@@ -647,6 +801,15 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
return NULL;
}
+ /* Allocate workqueue for this slot's interrupt handling */
+ php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
+ if (!php_slot->wq) {
+ SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
+ kfree(php_slot->name);
+ kfree(php_slot);
+ return NULL;
+ }
+
if (dn->child && PCI_DN(dn->child))
php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
else
@@ -745,16 +908,63 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
return entry.vector;
}
+static void
+pnv_php_detect_clear_surprise_removal_freeze(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ int i, rc;
+
+ /*
+ * When a device is surprise removed from a downstream bridge slot,
+ * the upstream bridge port can still end up frozen due to related EEH
+ * events, which will in turn block the MSI interrupts for slot hotplug
+ * detection.
+ *
+ * Detect and thaw any frozen upstream PE after slot deactivation.
+ */
+ edev = pci_dev_to_eeh_dev(pdev);
+ pe = edev ? edev->pe : NULL;
+ rc = eeh_pe_get_state(pe);
+ if ((rc == -ENODEV) || (rc == -ENOENT)) {
+ SLOT_WARN(
+ php_slot,
+ "Upstream bridge PE state unknown, hotplug detect may fail\n");
+ } else {
+ if (pe->state & EEH_PE_ISOLATED) {
+ SLOT_WARN(
+ php_slot,
+ "Upstream bridge PE %02x frozen, thawing...\n",
+ pe->addr);
+ for (i = 0; i < 3; i++)
+ if (!eeh_unfreeze_pe(pe))
+ break;
+ if (i >= 3)
+ SLOT_WARN(
+ php_slot,
+ "Unable to thaw PE %02x, hotplug detect will fail!\n",
+ pe->addr);
+ else
+ SLOT_WARN(php_slot,
+ "PE %02x thawed successfully\n",
+ pe->addr);
+ }
+ }
+}
+
static void pnv_php_event_handler(struct work_struct *work)
{
struct pnv_php_event *event =
container_of(work, struct pnv_php_event, work);
struct pnv_php_slot *php_slot = event->php_slot;
- if (event->added)
+ if (event->added) {
pnv_php_enable_slot(&php_slot->slot);
- else
+ } else {
pnv_php_disable_slot(&php_slot->slot);
+ pnv_php_detect_clear_surprise_removal_freeze(php_slot);
+ }
kfree(event);
}
@@ -843,14 +1053,6 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
u16 sts, ctrl;
int ret;
- /* Allocate workqueue */
- php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
- if (!php_slot->wq) {
- SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
- pnv_php_disable_irq(php_slot, true);
- return;
- }
-
/* Check PDC (Presence Detection Change) is broken or not */
ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc",
&broken_pdc);
@@ -869,7 +1071,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
php_slot->name, php_slot);
if (ret) {
- pnv_php_disable_irq(php_slot, true);
+ pnv_php_disable_irq(php_slot, true, true);
SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq);
return;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 10693b5d7eb6..ac4375954c94 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -7,11 +7,16 @@
* Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/log2.h>
#include <linux/pci.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
+#include <asm/div64.h>
#include "pci.h"
#define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */
@@ -150,7 +155,28 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
if (!dev->is_physfn)
return 0;
- return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
+ return dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)];
+}
+
+void pci_iov_resource_set_size(struct pci_dev *dev, int resno,
+ resource_size_t size)
+{
+ if (!pci_resource_is_iov(resno)) {
+ pci_warn(dev, "%s is not an IOV resource\n",
+ pci_resource_name(dev, resno));
+ return;
+ }
+
+ dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)] = size;
+}
+
+bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_CTRL, &cmd);
+
+ return cmd & PCI_SRIOV_CTRL_MSE;
}
static void pci_read_vf_config_common(struct pci_dev *virtfn)
@@ -341,12 +367,14 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
virtfn->multifunction = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+
+ res = &dev->resource[idx];
if (!res->parent)
continue;
virtfn->resource[i].name = pci_name(virtfn);
virtfn->resource[i].flags = res->flags;
- size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ size = pci_iov_resource_size(dev, idx);
resource_set_range(&virtfn->resource[i],
res->start + size * id, size);
rc = request_resource(res, &virtfn->resource[i]);
@@ -643,8 +671,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- bars |= (1 << (i + PCI_IOV_RESOURCES));
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+ resource_size_t vf_bar_sz = pci_iov_resource_size(dev, idx);
+
+ bars |= (1 << idx);
+ res = &dev->resource[idx];
+ if (vf_bar_sz * nr_virtfn > resource_size(res))
+ continue;
if (res->parent)
nres++;
}
@@ -810,8 +843,10 @@ found:
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
- res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES);
+ int idx = pci_resource_num_from_vf_bar(i);
+
+ res = &dev->resource[idx];
+ res_name = pci_resource_name(dev, idx);
/*
* If it is already FIXED, don't change it, something
@@ -850,6 +885,7 @@ found:
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
+ iov->vf_rebar_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VF_REBAR);
if (pdev)
iov->dev = pci_dev_get(pdev);
@@ -869,7 +905,7 @@ fail_max_buses:
dev->is_physfn = 0;
failed:
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ res = &dev->resource[pci_resource_num_from_vf_bar(i)];
res->flags = 0;
}
@@ -888,6 +924,30 @@ static void sriov_release(struct pci_dev *dev)
dev->sriov = NULL;
}
+static void sriov_restore_vf_rebar_state(struct pci_dev *dev)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ pos = pci_iov_vf_rebar_cap(dev);
+ if (!pos)
+ return;
+
+ pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
+ nbars = FIELD_GET(PCI_VF_REBAR_CTRL_NBAR_MASK, ctrl);
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ int bar_idx, size;
+
+ pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
+ bar_idx = FIELD_GET(PCI_VF_REBAR_CTRL_BAR_IDX, ctrl);
+ size = pci_rebar_bytes_to_size(dev->sriov->barsz[bar_idx]);
+ ctrl &= ~PCI_VF_REBAR_CTRL_BAR_SIZE;
+ ctrl |= FIELD_PREP(PCI_VF_REBAR_CTRL_BAR_SIZE, size);
+ pci_write_config_dword(dev, pos + PCI_VF_REBAR_CTRL, ctrl);
+ }
+}
+
static void sriov_restore_state(struct pci_dev *dev)
{
int i;
@@ -907,7 +967,7 @@ static void sriov_restore_state(struct pci_dev *dev)
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
- pci_update_resource(dev, i + PCI_IOV_RESOURCES);
+ pci_update_resource(dev, pci_resource_num_from_vf_bar(i));
pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
pci_iov_set_numvfs(dev, iov->num_VFs);
@@ -973,7 +1033,7 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
struct resource *res = pci_resource_n(dev, resno);
- int vf_bar = resno - PCI_IOV_RESOURCES;
+ int vf_bar = pci_resource_num_to_vf_bar(resno);
struct pci_bus_region region;
u16 cmd;
u32 new;
@@ -1047,8 +1107,10 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
*/
void pci_restore_iov_state(struct pci_dev *dev)
{
- if (dev->is_physfn)
+ if (dev->is_physfn) {
+ sriov_restore_vf_rebar_state(dev);
sriov_restore_state(dev);
+ }
}
/**
@@ -1255,3 +1317,72 @@ int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
return nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
+
+/**
+ * pci_iov_vf_bar_set_size - set a new size for a VF BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @size: new size as defined in the spec (0=1MB, 31=128TB)
+ *
+ * Set the new size of a VF BAR that supports VF resizable BAR capability.
+ * Unlike pci_resize_resource(), this does not cause the resource that
+ * reserves the MMIO space (originally up to total_VFs) to be resized, which
+ * means that following calls to pci_enable_sriov() can fail if the resources
+ * no longer fit.
+ *
+ * Return: 0 on success, or negative on failure.
+ */
+int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
+{
+ u32 sizes;
+ int ret;
+
+ if (!pci_resource_is_iov(resno))
+ return -EINVAL;
+
+ if (pci_iov_is_memory_decoding_enabled(dev))
+ return -EBUSY;
+
+ sizes = pci_rebar_get_possible_sizes(dev, resno);
+ if (!sizes)
+ return -ENOTSUPP;
+
+ if (!(sizes & BIT(size)))
+ return -EINVAL;
+
+ ret = pci_rebar_set_size(dev, resno, size);
+ if (ret)
+ return ret;
+
+ pci_iov_resource_set_size(dev, resno, pci_rebar_size_to_bytes(size));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_iov_vf_bar_set_size);
+
+/**
+ * pci_iov_vf_bar_get_sizes - get VF BAR sizes allowing to create up to num_vfs
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @num_vfs: number of VFs
+ *
+ * Get the sizes of a VF resizable BAR that can accommodate @num_vfs within
+ * the currently assigned size of the resource @resno.
+ *
+ * Return: A bitmask of sizes in format defined in the spec (bit 0=1MB,
+ * bit 31=128TB).
+ */
+u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
+{
+ u64 vf_len = pci_resource_len(dev, resno);
+ u32 sizes;
+
+ if (!num_vfs)
+ return 0;
+
+ do_div(vf_len, num_vfs);
+ sizes = (roundup_pow_of_two(vf_len + 1) - 1) >> ilog2(SZ_1M);
+
+ return sizes & pci_rebar_get_possible_sizes(dev, resno);
+}
+EXPORT_SYMBOL_GPL(pci_iov_vf_bar_get_sizes);
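
The bitmask arithmetic in pci_iov_vf_bar_get_sizes() is easier to follow with concrete numbers; a worked example under assumed values (an 8 MB IOV resource and three requested VFs):

/* Worked example (assumed values): pci_resource_len() = 8 MB, num_vfs = 3.
 *
 *   vf_len = 8 MB / 3                   = 0x2AAAAA (~2.66 MB per VF)
 *   roundup_pow_of_two(vf_len + 1) - 1  = 0x3FFFFF
 *   0x3FFFFF >> ilog2(SZ_1M)            = 0x3       (bits 0 and 1 set)
 *
 * i.e. per-VF sizes of 1 MB and 2 MB still fit (3 x 2 MB <= 8 MB), while
 * 4 MB per VF would not. The result is then ANDed with the sizes the
 * device actually advertises via pci_rebar_get_possible_sizes().
 */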
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 8d8de0ad2bb7..34d664139f48 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -943,7 +943,7 @@ int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
/*
* This is a horrible hack, but short of implementing a PCI
* specific interrupt chip callback and a huge pile of
- * infrastructure, this is the minor nuissance. It provides the
+ * infrastructure, this is the minor nuisance. It provides the
* protection against concurrent operations on this entry and keeps
* the control word cache in sync.
*/
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index af370628e583..ddb25960ea47 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -816,15 +816,10 @@ int pci_acpi_program_hp_params(struct pci_dev *dev)
bool pciehp_is_native(struct pci_dev *bridge)
{
const struct pci_host_bridge *host;
- u32 slot_cap;
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
- pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
- if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
- return false;
-
if (pcie_ports_native)
return true;
@@ -1002,7 +997,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
struct acpi_device *adev, *rpadev;
const union acpi_object *obj;
- if (acpi_pci_disabled || !dev->is_hotplug_bridge)
+ if (acpi_pci_disabled || !dev->is_pciehp)
return false;
adev = ACPI_COMPANION(&dev->dev);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index b853585cb1f8..63665240ae87 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1632,7 +1632,7 @@ static int pci_bus_num_vf(struct device *dev)
*/
static int pci_dma_configure(struct device *dev)
{
- struct pci_driver *driver = to_pci_driver(dev->driver);
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *bridge;
int ret = 0;
@@ -1649,8 +1649,8 @@ static int pci_dma_configure(struct device *dev)
pci_put_host_bridge_device(bridge);
- /* @driver may not be valid when we're called from the IOMMU layer */
- if (!ret && dev->driver && !driver->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_pci_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 9e42090fb108..b0f4d98036cd 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3030,8 +3030,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
* pci_bridge_d3_possible - Is it possible to put the bridge into D3
* @bridge: Bridge to check
*
- * This function checks if it is possible to move the bridge to D3.
* Currently we only allow D3 for some PCIe ports and for Thunderbolt.
+ *
+ * Return: Whether it is possible to move the bridge to D3.
+ *
+ * The return value is guaranteed to be constant across the entire lifetime
+ * of the bridge, including its hot-removal.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@@ -3046,10 +3050,14 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * Hotplug ports handled by firmware in System Management Mode
- * may not be put into D3 by the OS (Thunderbolt on non-Macs).
+ * Hotplug ports handled by platform firmware may not be put
+ * into D3 by the OS, e.g. ACPI slots ...
*/
- if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+ if (bridge->is_hotplug_bridge && !bridge->is_pciehp)
+ return false;
+
+ /* ... or PCIe hotplug ports not handled natively by the OS. */
+ if (bridge->is_pciehp && !pciehp_is_native(bridge))
return false;
if (pci_bridge_d3_force)
@@ -3068,7 +3076,7 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
* by vendors for runtime D3 at least until 2018 because there
* was no OS support.
*/
- if (bridge->is_hotplug_bridge)
+ if (bridge->is_pciehp)
return false;
if (dmi_check_system(bridge_d3_blacklist))
@@ -3205,7 +3213,6 @@ void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
void pci_pm_init(struct pci_dev *dev)
{
int pm;
- u16 status;
u16 pmc;
device_enable_async_suspend(&dev->dev);
@@ -3266,9 +3273,6 @@ void pci_pm_init(struct pci_dev *dev)
pci_pme_active(dev, false);
}
- pci_read_config_word(dev, PCI_STATUS, &status);
- if (status & PCI_STATUS_IMM_READY)
- dev->imm_ready = 1;
poweron:
pci_pm_power_up_and_verify_state(dev);
pm_runtime_forbid(&dev->dev);
@@ -3753,7 +3757,13 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pdev->rebar_cap;
+ if (pci_resource_is_iov(bar)) {
+ pos = pci_iov_vf_rebar_cap(pdev);
+ bar = pci_resource_num_to_vf_bar(bar);
+ } else {
+ pos = pdev->rebar_cap;
+ }
+
if (!pos)
return -ENOTSUPP;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 12215ee72afb..34f65d69662e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -36,13 +36,6 @@ struct pcie_tlp_log;
#define PCIE_T_PERST_CLK_US 100
/*
- * End of conventional reset (PERST# de-asserted) to first configuration
- * request (device able to respond with a "Request Retry Status" completion),
- * from PCIe r6.0, sec 6.6.1.
- */
-#define PCIE_T_RRS_READY_MS 100
-
-/*
* PCIe r6.0, sec 5.3.3.2.1 <PME Synchronization>
* Recommends 1ms to 10ms timeout to check L2 ready.
*/
@@ -61,7 +54,11 @@ struct pcie_tlp_log;
* completes before sending a Configuration Request to the device
* immediately below that Port."
*/
-#define PCIE_RESET_CONFIG_DEVICE_WAIT_MS 100
+#define PCIE_RESET_CONFIG_WAIT_MS 100
+
+/* Parameters for the wait-for-link-up routine */
+#define PCIE_LINK_WAIT_MAX_RETRIES 10
+#define PCIE_LINK_WAIT_SLEEP_MS 90
/* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
#define PCIE_MSG_TYPE_R_RC 0
@@ -391,12 +388,14 @@ void pci_bus_put(struct pci_bus *bus);
#define PCIE_LNKCAP_SLS2SPEED(lnkcap) \
({ \
- ((lnkcap) == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \
- (lnkcap) == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
+ u32 lnkcap_sls = (lnkcap) & PCI_EXP_LNKCAP_SLS; \
+ \
+ (lnkcap_sls == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
PCI_SPEED_UNKNOWN); \
})
@@ -411,13 +410,17 @@ void pci_bus_put(struct pci_bus *bus);
PCI_SPEED_UNKNOWN)
#define PCIE_LNKCTL2_TLS2SPEED(lnkctl2) \
- ((lnkctl2) == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \
- (lnkctl2) == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \
- PCI_SPEED_UNKNOWN)
+({ \
+ u16 lnkctl2_tls = (lnkctl2) & PCI_EXP_LNKCTL2_TLS; \
+ \
+ (lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \
+ PCI_SPEED_UNKNOWN); \
+})
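As an illustrative aside (an assumption, not part of the patch): with the SLS/TLS field masking now folded into the macros, a caller can pass the raw register value, for example:

static enum pci_bus_speed example_max_link_speed(struct pci_dev *pdev)
{
	u32 lnkcap;

	/* No need to mask PCI_EXP_LNKCAP_SLS first; the macro does it now */
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &lnkcap);
	return PCIE_LNKCAP_SLS2SPEED(lnkcap);
}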
/* PCIe speed to Mb/s reduced by encoding overhead */
#define PCIE_SPEED2MBS_ENC(speed) \
@@ -486,6 +489,7 @@ struct pci_sriov {
u16 subsystem_vendor; /* VF subsystem vendor */
u16 subsystem_device; /* VF subsystem device */
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
+ u16 vf_rebar_cap; /* VF Resizable BAR capability offset */
bool drivers_autoprobe; /* Auto probing of VFs by driver */
};
@@ -710,10 +714,28 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
+void pci_iov_resource_set_size(struct pci_dev *dev, int resno,
+ resource_size_t size);
+bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev);
+static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev)
+{
+ if (!dev->is_physfn)
+ return 0;
+
+ return dev->sriov->vf_rebar_cap;
+}
static inline bool pci_resource_is_iov(int resno)
{
return resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END;
}
+static inline int pci_resource_num_from_vf_bar(int resno)
+{
+ return resno + PCI_IOV_RESOURCES;
+}
+static inline int pci_resource_num_to_vf_bar(int resno)
+{
+ return resno - PCI_IOV_RESOURCES;
+}
extern const struct attribute_group sriov_pf_dev_attr_group;
extern const struct attribute_group sriov_vf_dev_attr_group;
#else
@@ -734,10 +756,30 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
{
return 0;
}
+static inline void pci_iov_resource_set_size(struct pci_dev *dev, int resno,
+ resource_size_t size) { }
+static inline bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev)
+{
+ return false;
+}
+static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev)
+{
+ return 0;
+}
static inline bool pci_resource_is_iov(int resno)
{
return false;
}
+static inline int pci_resource_num_from_vf_bar(int resno)
+{
+ WARN_ON_ONCE(1);
+ return -ENODEV;
+}
+static inline int pci_resource_num_to_vf_bar(int resno)
+{
+ WARN_ON_ONCE(1);
+ return -ENODEV;
+}
#endif /* CONFIG_PCI_IOV */
#ifdef CONFIG_PCIE_TPH
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 70ac66188367..e286c197d716 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -116,12 +116,12 @@ struct aer_info {
PCI_ERR_ROOT_MULTI_COR_RCV | \
PCI_ERR_ROOT_MULTI_UNCOR_RCV)
-static int pcie_aer_disable;
+static bool pcie_aer_disable;
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
void pci_no_aer(void)
{
- pcie_aer_disable = 1;
+ pcie_aer_disable = true;
}
bool pci_aer_available(void)
@@ -1039,7 +1039,8 @@ static int find_device_iter(struct pci_dev *dev, void *data)
/* List this device */
if (add_error_device(e_info, dev)) {
/* We cannot handle more... Stop iteration */
- /* TODO: Should print error message here? */
+ pci_err(dev, "Exceeded max supported (%d) devices with errors logged\n",
+ AER_MAX_MULTI_ERR_DEVICES);
return 1;
}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 29fcb0689a91..919a05b97647 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -245,7 +245,7 @@ struct pcie_link_state {
u32 clkpm_disable:1; /* Clock PM disabled */
};
-static int aspm_disabled, aspm_force;
+static bool aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
@@ -884,10 +884,9 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Configure the ASPM L1 substates. Caller must disable L1 first. */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
- u32 val;
+ u32 val = 0;
struct pci_dev *child = link->downstream, *parent = link->pdev;
- val = 0;
if (state & PCIE_LINK_STATE_L1_1)
val |= PCI_L1SS_CTL1_ASPM_L1_1;
if (state & PCIE_LINK_STATE_L1_2)
@@ -1712,11 +1711,11 @@ static int __init pcie_aspm_disable(char *str)
{
if (!strcmp(str, "off")) {
aspm_policy = POLICY_DEFAULT;
- aspm_disabled = 1;
+ aspm_disabled = true;
aspm_support_enabled = false;
pr_info("PCIe ASPM is disabled\n");
} else if (!strcmp(str, "force")) {
- aspm_force = 1;
+ aspm_force = true;
pr_info("PCIe ASPM is forcibly enabled\n");
}
return 1;
@@ -1734,7 +1733,7 @@ void pcie_no_aspm(void)
*/
if (!aspm_force) {
aspm_policy = POLICY_DEFAULT;
- aspm_disabled = 1;
+ aspm_disabled = true;
}
}
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index e8318fd5f6ed..d1b68c18444f 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -220,7 +220,7 @@ static int get_port_device_capability(struct pci_dev *dev)
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
int services = 0;
- if (dev->is_hotplug_bridge &&
+ if (dev->is_pciehp &&
(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) &&
(pcie_ports_native || host->native_pcie_hotplug)) {
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 4bd73f038ffb..65e4b008be00 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -507,7 +507,7 @@ struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
if (!ops->check_capability)
return NULL;
- /* Check for PTM capability before creating debugfs attrbutes */
+ /* Check for PTM capability before creating debugfs attributes */
ret = ops->check_capability(pdata);
if (!ret) {
dev_dbg(dev, "PTM capability not present\n");
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e6a34db77826..f41128f91ca7 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1678,7 +1678,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC)
- pdev->is_hotplug_bridge = 1;
+ pdev->is_hotplug_bridge = pdev->is_pciehp = 1;
}
static void set_pcie_thunderbolt(struct pci_dev *dev)
@@ -2602,6 +2602,15 @@ void pcie_report_downtraining(struct pci_dev *dev)
__pcie_print_link_status(dev, false);
}
+static void pci_imm_ready_init(struct pci_dev *dev)
+{
+ u16 status;
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+ if (status & PCI_STATUS_IMM_READY)
+ dev->imm_ready = 1;
+}
+
static void pci_init_capabilities(struct pci_dev *dev)
{
pci_ea_init(dev); /* Enhanced Allocation */
@@ -2611,6 +2620,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Buffers for saving PCIe and PCI-X capabilities */
pci_allocate_cap_save_buffers(dev);
+ pci_imm_ready_init(dev); /* Immediate Readiness */
pci_pm_init(dev); /* Power Management */
pci_vpd_init(dev); /* Vital Product Data */
pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index cf483d82572c..d97335a40193 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -105,13 +105,13 @@ int pcie_failed_link_retrain(struct pci_dev *dev)
!pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
return ret;
- pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
if (!(lnksta & PCI_EXP_LNKSTA_DLLLA) && pcie_lbms_seen(dev, lnksta)) {
- u16 oldlnkctl2 = lnkctl2;
+ u16 oldlnkctl2;
pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &oldlnkctl2);
ret = pcie_set_target_speed(dev, PCIE_SPEED_2_5GT, false);
if (ret) {
pci_info(dev, "retraining failed\n");
@@ -123,6 +123,8 @@ int pcie_failed_link_retrain(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
}
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
+
if ((lnksta & PCI_EXP_LNKSTA_DLLLA) &&
(lnkctl2 & PCI_EXP_LNKCTL2_TLS) == PCI_EXP_LNKCTL2_TLS_2_5GT &&
pci_match_id(ids, dev)) {
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 07c3d021a47e..7853ac6999e2 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1888,7 +1888,8 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
bool *unassigned = data;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+ struct resource *r = &dev->resource[idx];
struct pci_bus_region region;
/* Not assigned or rejected by kernel? */
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index c6657cdd06f6..d2b3ed51e880 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -423,13 +423,39 @@ void pci_release_resource(struct pci_dev *dev, int resno)
}
EXPORT_SYMBOL(pci_release_resource);
+static bool pci_resize_is_memory_decoding_enabled(struct pci_dev *dev,
+ int resno)
+{
+ u16 cmd;
+
+ if (pci_resource_is_iov(resno))
+ return pci_iov_is_memory_decoding_enabled(dev);
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+ return cmd & PCI_COMMAND_MEMORY;
+}
+
+static void pci_resize_resource_set_size(struct pci_dev *dev, int resno,
+ int size)
+{
+ resource_size_t res_size = pci_rebar_size_to_bytes(size);
+ struct resource *res = pci_resource_n(dev, resno);
+
+ if (!pci_resource_is_iov(resno)) {
+ resource_set_size(res, res_size);
+ } else {
+ resource_set_size(res, res_size * pci_sriov_get_totalvfs(dev));
+ pci_iov_resource_set_size(dev, resno, res_size);
+ }
+}
+
int pci_resize_resource(struct pci_dev *dev, int resno, int size)
{
struct resource *res = pci_resource_n(dev, resno);
struct pci_host_bridge *host;
int old, ret;
u32 sizes;
- u16 cmd;
/* Check if we must preserve the firmware's resource assignment */
host = pci_find_host_bridge(dev->bus);
@@ -440,8 +466,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
if (!(res->flags & IORESOURCE_UNSET))
return -EBUSY;
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- if (cmd & PCI_COMMAND_MEMORY)
+ if (pci_resize_is_memory_decoding_enabled(dev, resno))
return -EBUSY;
sizes = pci_rebar_get_possible_sizes(dev, resno);
@@ -459,7 +484,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
if (ret)
return ret;
- resource_set_size(res, pci_rebar_size_to_bytes(size));
+ pci_resize_resource_set_size(dev, resno, size);
/* Check if the new config works by trying to assign everything. */
if (dev->bus->self) {
@@ -471,7 +496,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
error_resize:
pci_rebar_set_size(dev, resno, old);
- resource_set_size(res, pci_rebar_size_to_bytes(old));
+ pci_resize_resource_set_size(dev, resno, old);
return ret;
}
EXPORT_SYMBOL(pci_resize_resource);
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 29b8fd4b9351..8873aed3a52a 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -754,7 +754,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
if (of_property_present(np, "usb0_vbus_power-supply")) {
- data->vbus_power_supply = devm_power_supply_get_by_phandle(dev,
+ data->vbus_power_supply = devm_power_supply_get_by_reference(dev,
"usb0_vbus_power-supply");
if (IS_ERR(data->vbus_power_supply)) {
dev_err(dev, "Couldn't get the VBUS power supply\n");
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
index 2eaa41f8fc70..67a6ae5ecba0 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
@@ -61,8 +61,6 @@ static int ns2_pci_phy_probe(struct mdio_device *mdiodev)
return PTR_ERR(provider);
}
- dev_info(dev, "%s PHY registered\n", dev_name(dev));
-
return 0;
}
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
index 36ad02c33ac5..8473fa574529 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -395,7 +395,6 @@ static int ns2_drd_phy_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, driver);
- dev_info(dev, "Registered NS2 DRD Phy device\n");
queue_delayed_work(system_power_efficient_wq, &driver->wq_extcon,
driver->debounce_jiffies);
diff --git a/drivers/phy/broadcom/phy-bcm-sr-pcie.c b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
index ff9b3862bf7a..706e1d83b4ce 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
@@ -277,8 +277,6 @@ static int sr_pcie_phy_probe(struct platform_device *pdev)
return PTR_ERR(provider);
}
- dev_info(dev, "Stingray PCIe PHY driver initialized\n");
-
return 0;
}
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 228100357054..d52dd065e862 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -832,7 +832,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
return PTR_ERR(provider);
}
- dev_info(dev, "registered %d port(s)\n", count);
+ dev_dbg(dev, "registered %d port(s)\n", count);
return 0;
}
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 45a5c00843bf..74613382ccb0 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -58,8 +58,11 @@
#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
#define SIERRA_CMN_PLLLC1_FBDIV_INT_PREG 0xC3
#define SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG 0xC5
+#define SIERRA_CMN_PLLLC1_MODE_PREG 0xC8
+#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE1_PREG 0xC9
#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
#define SIERRA_CMN_PLLLC1_CLK0_PREG 0xCE
+#define SIERRA_CMN_PLLLC1_BWCAL_MODE1_PREG 0xCF
#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
#define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2
@@ -1541,6 +1544,137 @@ static void cdns_sierra_phy_remove(struct platform_device *pdev)
cdns_sierra_clk_unregister(phy);
}
+/* USB refclk 100MHz, 20b, SuperSpeed opt2, ext ssc, PLL LC1, multilink */
+static const struct cdns_reg_pairs usb_100_ext_ssc_plllc1_cmn_regs[] = {
+ {0x002D, SIERRA_CMN_PLLLC1_FBDIV_INT_PREG},
+ {0x2086, SIERRA_CMN_PLLLC1_LF_COEFF_MODE1_PREG},
+ {0x2086, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
+ {0x1005, SIERRA_CMN_PLLLC1_CLK0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/* USB refclk 100MHz, 20b, SuperSpeed opt2, int ssc, PLL LC1, multilink */
+static const struct cdns_reg_pairs usb_100_int_ssc_plllc1_cmn_regs[] = {
+ {0x002D, SIERRA_CMN_PLLLC1_FBDIV_INT_PREG},
+ {0x000E, SIERRA_CMN_PLLLC1_MODE_PREG},
+ {0x1005, SIERRA_CMN_PLLLC1_CLK0_PREG}
+};
+
+static const struct cdns_reg_pairs usb_100_ml_ln_regs[] = {
+ {0xFE0A, SIERRA_DET_STANDEC_A_PREG},
+ {0x000F, SIERRA_DET_STANDEC_B_PREG},
+ {0x55A5, SIERRA_DET_STANDEC_C_PREG},
+ {0x69AD, SIERRA_DET_STANDEC_D_PREG},
+ {0x0241, SIERRA_DET_STANDEC_E_PREG},
+ {0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
+ {0x0014, SIERRA_PSM_A0IN_TMR_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x001F, SIERRA_PSC_TX_A0_PREG},
+ {0x0007, SIERRA_PSC_TX_A1_PREG},
+ {0x0003, SIERRA_PSC_TX_A2_PREG},
+ {0x0003, SIERRA_PSC_TX_A3_PREG},
+ {0x0FFF, SIERRA_PSC_RX_A0_PREG},
+ {0x0619, SIERRA_PSC_RX_A1_PREG},
+ {0x0003, SIERRA_PSC_RX_A2_PREG},
+ {0x0001, SIERRA_PSC_RX_A3_PREG},
+ {0x0606, SIERRA_PLLCTRL_FBDIV_MODE01_PREG},
+ {0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0003, SIERRA_PLLCTRL_GEN_A_PREG},
+ {0x0406, SIERRA_PLLCTRL_GEN_D_PREG},
+ {0x5211, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
+ {0x2512, SIERRA_DFE_BIASTRIM_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x823E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x023F, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x8452, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4121, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4121, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
+ {0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0xA9A9, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0014, SIERRA_DEQ_GLUT0},
+ {0x0014, SIERRA_DEQ_GLUT1},
+ {0x0014, SIERRA_DEQ_GLUT2},
+ {0x0014, SIERRA_DEQ_GLUT3},
+ {0x0014, SIERRA_DEQ_GLUT4},
+ {0x0014, SIERRA_DEQ_GLUT5},
+ {0x0014, SIERRA_DEQ_GLUT6},
+ {0x0014, SIERRA_DEQ_GLUT7},
+ {0x0014, SIERRA_DEQ_GLUT8},
+ {0x0014, SIERRA_DEQ_GLUT9},
+ {0x0014, SIERRA_DEQ_GLUT10},
+ {0x0014, SIERRA_DEQ_GLUT11},
+ {0x0014, SIERRA_DEQ_GLUT12},
+ {0x0014, SIERRA_DEQ_GLUT13},
+ {0x0014, SIERRA_DEQ_GLUT14},
+ {0x0014, SIERRA_DEQ_GLUT15},
+ {0x0014, SIERRA_DEQ_GLUT16},
+ {0x0BAE, SIERRA_DEQ_ALUT0},
+ {0x0AEB, SIERRA_DEQ_ALUT1},
+ {0x0A28, SIERRA_DEQ_ALUT2},
+ {0x0965, SIERRA_DEQ_ALUT3},
+ {0x08A2, SIERRA_DEQ_ALUT4},
+ {0x07DF, SIERRA_DEQ_ALUT5},
+ {0x071C, SIERRA_DEQ_ALUT6},
+ {0x0659, SIERRA_DEQ_ALUT7},
+ {0x0596, SIERRA_DEQ_ALUT8},
+ {0x0514, SIERRA_DEQ_ALUT9},
+ {0x0492, SIERRA_DEQ_ALUT10},
+ {0x0410, SIERRA_DEQ_ALUT11},
+ {0x038E, SIERRA_DEQ_ALUT12},
+ {0x030C, SIERRA_DEQ_ALUT13},
+ {0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG},
+ {0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG},
+ {0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
+ {0x0005, SIERRA_LFPSDET_SUPPORT_PREG},
+ {0x000F, SIERRA_LFPSFILT_NS_PREG},
+ {0x0009, SIERRA_LFPSFILT_RD_PREG},
+ {0x0001, SIERRA_LFPSFILT_MP_PREG},
+ {0x8013, SIERRA_SDFILT_H2L_A_PREG},
+ {0x8009, SIERRA_SDFILT_L2H_PREG},
+ {0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static const struct cdns_sierra_vals usb_100_ext_ssc_plllc1_cmn_vals = {
+ .reg_pairs = usb_100_ext_ssc_plllc1_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_100_ext_ssc_plllc1_cmn_regs),
+};
+
+static const struct cdns_sierra_vals usb_100_int_ssc_plllc1_cmn_vals = {
+ .reg_pairs = usb_100_int_ssc_plllc1_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_100_int_ssc_plllc1_cmn_regs),
+};
+
+static const struct cdns_sierra_vals usb_100_ml_ln_vals = {
+ .reg_pairs = usb_100_ml_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_100_ml_ln_regs),
+};
+
/* SGMII PHY PMA lane configuration */
static const struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = {
{0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
@@ -2513,6 +2647,11 @@ static const struct cdns_sierra_data cdns_map_sierra = {
[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
},
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
},
},
.pma_cmn_vals = {
@@ -2532,11 +2671,20 @@ static const struct cdns_sierra_data cdns_map_sierra = {
[EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
},
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
},
[TYPE_USB] = {
[TYPE_NONE] = {
[EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &usb_100_int_ssc_plllc1_cmn_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -2573,11 +2721,20 @@ static const struct cdns_sierra_data cdns_map_sierra = {
[EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
[INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
},
+ [TYPE_USB] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
},
[TYPE_USB] = {
[TYPE_NONE] = {
[EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
},
+ [TYPE_PCIE] = {
+ [EXTERNAL_SSC] = &usb_100_ml_ln_vals,
+ [INTERNAL_SSC] = &usb_100_ml_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -2620,6 +2777,11 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = {
[EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
},
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
},
},
.phy_pma_ln_vals = {
@@ -2655,11 +2817,20 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = {
[EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
},
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
},
[TYPE_USB] = {
[TYPE_NONE] = {
[EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &usb_100_int_ssc_plllc1_cmn_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_PCIE] = {
@@ -2693,11 +2864,20 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = {
[EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
[INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
},
+ [TYPE_USB] = {
+ [NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
+ },
},
[TYPE_USB] = {
[TYPE_NONE] = {
[EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
},
+ [TYPE_PCIE] = {
+ [EXTERNAL_SSC] = &usb_100_ml_ln_vals,
+ [INTERNAL_SSC] = &usb_100_ml_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_PCIE] = {
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index a281c0dfae97..37fa4bad6bd7 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -197,6 +197,7 @@
#define RX_SDCAL1_INIT_TMR 0x004CU
#define RX_SDCAL1_ITER_TMR 0x004DU
#define RX_CDRLF_CNFG 0x0080U
+#define RX_CDRLF_CNFG2 0x0081U
#define RX_CDRLF_CNFG3 0x0082U
#define RX_SIGDET_HL_FILT_TMR 0x0090U
#define RX_REE_GCSM1_CTRL 0x0108U
@@ -204,6 +205,8 @@
#define RX_REE_GCSM1_EQENM_PH2 0x010AU
#define RX_REE_GCSM2_CTRL 0x0110U
#define RX_REE_PERGCSM_CTRL 0x0118U
+#define RX_REE_PEAK_UTHR 0x0142U
+#define RX_REE_PEAK_LTHR 0x0143U
#define RX_REE_ATTEN_THR 0x0149U
#define RX_REE_TAP1_CLIP 0x0171U
#define RX_REE_TAP2TON_CLIP 0x0172U
@@ -212,6 +215,7 @@
#define RX_DIAG_DFE_CTRL 0x01E0U
#define RX_DIAG_DFE_AMP_TUNE_2 0x01E2U
#define RX_DIAG_DFE_AMP_TUNE_3 0x01E3U
+#define RX_DIAG_REE_DAC_CTRL 0x01E4U
#define RX_DIAG_NQST_CTRL 0x01E5U
#define RX_DIAG_SIGDET_TUNE 0x01E8U
#define RX_DIAG_PI_RATE 0x01F4U
@@ -295,6 +299,7 @@ enum cdns_torrent_phy_type {
TYPE_QSGMII,
TYPE_USB,
TYPE_USXGMII,
+ TYPE_PCIE_ML,
};
enum cdns_torrent_ref_clk {
@@ -693,6 +698,7 @@ static const char *cdns_torrent_get_phy_type(enum cdns_torrent_phy_type phy_type
case TYPE_DP:
return "DisplayPort";
case TYPE_PCIE:
+ case TYPE_PCIE_ML:
return "PCIe";
case TYPE_SGMII:
return "SGMII";
@@ -2478,6 +2484,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
enum cdns_torrent_ssc_mode ssc;
struct regmap *regmap;
u32 num_regs, num_protocols, protocol;
+ u32 num_pcie_links = 0;
num_protocols = hweight32(cdns_phy->protocol_bitmask);
/* Maximum 2 protocols are supported */
@@ -2510,6 +2517,44 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
phy_t1 = fns(cdns_phy->protocol_bitmask, 0);
phy_t2 = fns(cdns_phy->protocol_bitmask, 1);
+
+ /*
+ * PCIe Multilink configuration can be supported along with a
+ * non-PCIe protocol. The limitation of the standalone PCIe
+ * Multilink configuration still applies: only two links
+ * (subnodes) of PHY type PCIe can constitute the PCIe Multilink.
+ *
+ * Such configurations are handled by introducing a new protocol,
+ * TYPE_PCIE_ML. Both PCIe links whose protocol is TYPE_PCIE are
+ * treated as TYPE_PCIE_ML solely for the purpose of configuring
+ * the SERDES.
+ *
+ * PCIe Multilink configuration can be identified by checking if
+ * there are exactly two links with phy_type set to TYPE_PCIE.
+ * phy_t1 and phy_t2 are modified in such cases to support the
+ * PCIe Multilink configuration with a non-PCIe protocol.
+ */
+ for (node = 0; node < cdns_phy->nsubnodes; node++) {
+ if (cdns_phy->phys[node].phy_type == TYPE_PCIE)
+ num_pcie_links++;
+ }
+
+ if (num_pcie_links > 2) {
+ dev_err(dev, "cannot support PCIe Multilink with %u PCIe links\n",
+ num_pcie_links);
+ return -EINVAL;
+ } else if (num_pcie_links == 2) {
+ phy_t1 = TYPE_PCIE_ML;
+ for (node = 0; node < cdns_phy->nsubnodes; node++) {
+ if (cdns_phy->phys[node].phy_type == TYPE_PCIE) {
+ cdns_phy->phys[node].phy_type = TYPE_PCIE_ML;
+ continue;
+ }
+ phy_t2 = cdns_phy->phys[node].phy_type;
+ }
+ }
}
/**
@@ -2676,6 +2721,11 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
}
+ /* Restore TYPE_PCIE_ML to TYPE_PCIE to be compatible with suspend-resume */
+ for (node = 0; node < cdns_phy->nsubnodes; node++)
+ if (cdns_phy->phys[node].phy_type == TYPE_PCIE_ML)
+ cdns_phy->phys[node].phy_type = TYPE_PCIE;
+
/* Take the PHY out of reset */
ret = reset_control_deassert(cdns_phy->phy_rst);
if (ret)
@@ -3088,15 +3138,14 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
}
if (cdns_phy->nsubnodes > 1)
- dev_dbg(dev, "Multi-link: %s (%d lanes) & %s (%d lanes)",
- cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type),
- cdns_phy->phys[0].num_lanes,
- cdns_torrent_get_phy_type(cdns_phy->phys[1].phy_type),
- cdns_phy->phys[1].num_lanes);
+ dev_dbg(dev, "Multi link configuration:\n");
else
- dev_dbg(dev, "Single link: %s (%d lanes)",
- cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type),
- cdns_phy->phys[0].num_lanes);
+ dev_dbg(dev, "Single link configuration:\n");
+
+ for (i = 0; i < cdns_phy->nsubnodes; i++)
+ dev_dbg(dev, "%s (%d lanes)",
+ cdns_torrent_get_phy_type(cdns_phy->phys[i].phy_type),
+ cdns_phy->phys[i].num_lanes);
return 0;
@@ -3131,6 +3180,132 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev)
cdns_torrent_clk_cleanup(cdns_phy);
}
+/* Multilink PCIe and USB Same SSC link configuration */
+static const struct cdns_reg_pairs ml_pcie_usb_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG},
+ {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static const struct cdns_reg_pairs ml_pcie_usb_xcvr_diag_ln_regs[] = {
+ {0x0100, XCVR_DIAG_HSCLK_SEL},
+ {0x0013, XCVR_DIAG_HSCLK_DIV},
+ {0x0812, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static const struct cdns_reg_pairs usb_ml_pcie_xcvr_diag_ln_regs[] = {
+ {0x0041, XCVR_DIAG_PLLDRC_CTRL},
+};
+
+static const struct cdns_torrent_vals ml_pcie_usb_link_cmn_vals = {
+ .reg_pairs = ml_pcie_usb_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_usb_link_cmn_regs),
+};
+
+static const struct cdns_torrent_vals ml_pcie_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = ml_pcie_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_usb_xcvr_diag_ln_regs),
+};
+
+static const struct cdns_torrent_vals usb_ml_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = usb_ml_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_ml_pcie_xcvr_diag_ln_regs),
+};
+
+/* Multi link PCIe configuration */
+static const struct cdns_reg_pairs ml_pcie_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static const struct cdns_reg_pairs ml_pcie_xcvr_diag_ln_regs[] = {
+ {0x0100, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0812, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static const struct cdns_torrent_vals ml_pcie_link_cmn_vals = {
+ .reg_pairs = ml_pcie_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_link_cmn_regs),
+};
+
+static const struct cdns_torrent_vals ml_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = ml_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_xcvr_diag_ln_regs),
+};
+
+/* Multi link PCIe, 100 MHz Ref clk, no SSC */
+static const struct cdns_reg_pairs ml_pcie_100_no_ssc_cmn_regs[] = {
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static const struct cdns_reg_pairs ml_pcie_100_no_ssc_rx_ln_regs[] = {
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0008, RX_REE_PEAK_UTHR},
+ {0x018E, RX_CDRLF_CNFG},
+ {0x2E33, RX_CDRLF_CNFG2},
+ {0x0001, RX_DIAG_ACYA},
+ {0x0C21, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0002, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0005, RX_DIAG_REE_DAC_CTRL}
+};
+
+static const struct cdns_torrent_vals ml_pcie_100_no_ssc_cmn_vals = {
+ .reg_pairs = ml_pcie_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_cmn_regs),
+};
+
+static const struct cdns_torrent_vals ml_pcie_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = ml_pcie_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_rx_ln_regs),
+};
+
+/* Multi link PCIe, 100 MHz Ref clk, internal SSC */
+static const struct cdns_reg_pairs ml_pcie_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
+};
+
+static const struct cdns_torrent_vals ml_pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = ml_pcie_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_cmn_regs),
+};
+
/* SGMII and QSGMII link configuration */
static const struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = {
{0x0002, PHY_PLL_CFG}
@@ -4042,6 +4217,8 @@ static const struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
{0x0C02, RX_REE_ATTEN_THR},
{0x0330, RX_REE_SMGM_CTRL1},
{0x0300, RX_REE_SMGM_CTRL2},
+ {0x0000, RX_REE_PEAK_UTHR},
+ {0x01F5, RX_REE_PEAK_LTHR},
{0x0019, RX_REE_TAP1_CLIP},
{0x0019, RX_REE_TAP2TON_CLIP},
{0x1004, RX_DIAG_SIGDET_TUNE},
@@ -4531,7 +4708,7 @@ static const struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = {
.num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs),
};
-/* Multi link PCIe, 100 MHz Ref clk, internal SSC */
+/* For PCIe (with some other protocol), 100 MHz Ref clk, internal SSC */
static const struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
{0x0004, CMN_PLL0_DSM_DIAG_M0},
{0x0004, CMN_PLL0_DSM_DIAG_M1},
@@ -4670,12 +4847,15 @@ static const struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_PCIE), &ml_pcie_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USXGMII), &pcie_usxgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE_ML, TYPE_USB), &ml_pcie_usb_link_cmn_vals},
+
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_link_cmn_vals},
@@ -4690,6 +4870,7 @@ static const struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &pcie_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE_ML), &ml_pcie_usb_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_link_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_link_cmn_vals},
@@ -4706,12 +4887,15 @@ static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_PCIE), &ml_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USXGMII), &pcie_usxgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE_ML, TYPE_USB), &ml_pcie_usb_xcvr_diag_ln_vals},
+
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_xcvr_diag_ln_vals},
@@ -4726,6 +4910,7 @@ static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE_ML), &usb_ml_pcie_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_xcvr_diag_ln_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_xcvr_diag_ln_vals},
@@ -4739,6 +4924,7 @@ static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
static const struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE_ML), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_phy_pcs_cmn_vals},
{CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals},
@@ -4756,6 +4942,10 @@ static const struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), &ml_pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), &ml_pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), &ml_pcie_100_int_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
@@ -4770,6 +4960,10 @@ static const struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), &ml_pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), &ml_pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), &ml_pcie_100_int_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
@@ -4802,6 +4996,10 @@ static const struct cdns_torrent_vals_entry cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
@@ -4838,6 +5036,10 @@ static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), NULL},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
@@ -4852,6 +5054,10 @@ static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), NULL},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
@@ -4884,6 +5090,10 @@ static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
@@ -4920,6 +5130,10 @@ static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), &ml_pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), &ml_pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), &ml_pcie_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
@@ -4934,6 +5148,10 @@ static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), &ml_pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), &ml_pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), &ml_pcie_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
@@ -4966,6 +5184,10 @@ static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
@@ -5038,6 +5260,10 @@ static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), NULL},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
@@ -5052,6 +5278,10 @@ static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), NULL},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
@@ -5084,6 +5314,10 @@ static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
@@ -5154,6 +5388,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), &ml_pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), &ml_pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), &ml_pcie_100_int_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
@@ -5168,6 +5406,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), &ml_pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), &ml_pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), &ml_pcie_100_int_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
@@ -5200,6 +5442,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
@@ -5236,6 +5482,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), NULL},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
@@ -5250,6 +5500,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), NULL},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
@@ -5282,6 +5536,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
@@ -5318,6 +5576,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
@@ -5332,6 +5594,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
@@ -5364,6 +5630,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = {
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
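For readers following the table growth above: the Torrent driver resolves per-configuration register values through a composite key lookup. The sketch below is a self-contained, illustrative C model of that pattern only; the enum names echo the driver's, but the key packing, field widths and table contents are made up and are not the driver's actual CDNS_TORRENT_KEY() layout.

/*
 * Illustrative model of the keyed lookup these *_vals_entries tables feed:
 * a composite key packs (refclk0, refclk1, link type 0, link type 1, SSC)
 * and maps each supported combination to a register-value set, with NULL
 * meaning "nothing extra to program".
 */
#include <stddef.h>
#include <stdio.h>

#define KEY(c0, c1, t0, t1, ssc) \
    (((c0) << 16) | ((c1) << 12) | ((t0) << 8) | ((t1) << 4) | (ssc))

enum { CLK_100_MHZ = 0 };
enum { TYPE_NONE = 0, TYPE_PCIE, TYPE_USB, TYPE_SGMII };
enum { NO_SSC = 0, EXTERNAL_SSC, INTERNAL_SSC };

struct vals_entry {
    unsigned int key;
    const char *vals;        /* stands in for a struct of register writes */
};

static const struct vals_entry tbl[] = {
    { KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL },
    { KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), "usb_100_no_ssc_vals" },
};

static const char *lookup(unsigned int key)
{
    for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
        if (tbl[i].key == key)
            return tbl[i].vals;
    return NULL;        /* unsupported combination */
}

int main(void)
{
    const char *v = lookup(KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC));

    printf("%s\n", v ? v : "no extra values to program");
    return 0;
}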
diff --git a/drivers/phy/marvell/phy-pxa-usb.c b/drivers/phy/marvell/phy-pxa-usb.c
index 6c98eb9608e9..c0bb71f80c04 100644
--- a/drivers/phy/marvell/phy-pxa-usb.c
+++ b/drivers/phy/marvell/phy-pxa-usb.c
@@ -325,7 +325,6 @@ static int pxa_usb_phy_probe(struct platform_device *pdev)
phy_create_lookup(pxa_usb_phy->phy, "usb", "mv-otg");
}
- dev_info(dev, "Marvell PXA USB PHY");
return 0;
}
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 644a34bd2b0b..f6504e0ecd1a 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -210,8 +210,6 @@
#define P2F_USB_FM_VALID BIT(0)
#define P2F_RG_FRCK_EN BIT(8)
-#define U3P_REF_CLK 26 /* MHZ */
-#define U3P_SLEW_RATE_COEF 28
#define U3P_SR_COEF_DIVISOR 1000
#define U3P_FM_DET_CYCLE_CNT 1024
@@ -277,20 +275,24 @@ enum mtk_phy_version {
MTK_PHY_V3,
};
+/**
+ * struct mtk_phy_pdata - SoC specific platform data
+ * @avoid_rx_sen_degradation: Avoid RX sensitivity level degradation (MT6795/8173 only)
+ * @sw_pll_48m_to_26m: Workaround for V3 IP (MT8195) - switch the 48MHz PLL from
+ * fractional mode to integer mode so it outputs 26MHz for the U2PHY
+ * @sw_efuse_supported: Switch off eFuse auto-load in the PHY and apply values
+ * read from a different nvmem (usually a different eFuse array) that is
+ * referenced by this PHY's device tree node
+ * @slew_ref_clock_mhz: Default reference clock (in MHz) for slew rate calibration
+ * @slew_rate_coefficient: Coefficient for slew rate calibration
+ * @version: PHY IP Version
+ */
struct mtk_phy_pdata {
- /* avoid RX sensitivity level degradation only for mt8173 */
bool avoid_rx_sen_degradation;
- /*
- * workaround only for mt8195, HW fix it for others of V3,
- * u2phy should use integer mode instead of fractional mode of
- * 48M PLL, fix it by switching PLL to 26M from default 48M
- */
bool sw_pll_48m_to_26m;
- /*
- * Some SoCs (e.g. mt8195) drop a bit when use auto load efuse,
- * support sw way, also support it for v2/v3 optionally.
- */
bool sw_efuse_supported;
+ u8 slew_ref_clock_mhz;
+ u8 slew_rate_coefficient;
enum mtk_phy_version version;
};
@@ -686,12 +688,14 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
int fm_out;
u32 tmp;
- /* HW V3 doesn't support slew rate cal anymore */
- if (tphy->pdata->version == MTK_PHY_V3)
- return;
-
- /* use force value */
- if (instance->eye_src)
+ /*
+ * If a fixed HS slew rate (EYE) value was supplied, skip the
+ * calibration sequence and use that value instead. Likewise, if
+ * either the reference clock or the slew coefficient is missing,
+ * slew rate calibration is not supported on this PHY.
+ */
+ if (instance->eye_src || !tphy->src_ref_clk || !tphy->src_coef)
return;
/* enable USB ring oscillator */
@@ -1516,12 +1520,16 @@ static const struct phy_ops mtk_tphy_ops = {
static const struct mtk_phy_pdata tphy_v1_pdata = {
.avoid_rx_sen_degradation = false,
+ .slew_ref_clock_mhz = 26,
+ .slew_rate_coefficient = 28,
.version = MTK_PHY_V1,
};
static const struct mtk_phy_pdata tphy_v2_pdata = {
.avoid_rx_sen_degradation = false,
.sw_efuse_supported = true,
+ .slew_ref_clock_mhz = 26,
+ .slew_rate_coefficient = 28,
.version = MTK_PHY_V2,
};
@@ -1532,6 +1540,8 @@ static const struct mtk_phy_pdata tphy_v3_pdata = {
static const struct mtk_phy_pdata mt8173_pdata = {
.avoid_rx_sen_degradation = true,
+ .slew_ref_clock_mhz = 26,
+ .slew_rate_coefficient = 28,
.version = MTK_PHY_V1,
};
@@ -1561,7 +1571,7 @@ static int mtk_tphy_probe(struct platform_device *pdev)
struct resource *sif_res;
struct mtk_tphy *tphy;
struct resource res;
- int port;
+ int port, ret;
tphy = devm_kzalloc(dev, sizeof(*tphy), GFP_KERNEL);
if (!tphy)
@@ -1591,15 +1601,14 @@ static int mtk_tphy_probe(struct platform_device *pdev)
}
}
- if (tphy->pdata->version < MTK_PHY_V3) {
- tphy->src_ref_clk = U3P_REF_CLK;
- tphy->src_coef = U3P_SLEW_RATE_COEF;
- /* update parameters of slew rate calibrate if exist */
- device_property_read_u32(dev, "mediatek,src-ref-clk-mhz",
- &tphy->src_ref_clk);
- device_property_read_u32(dev, "mediatek,src-coef",
- &tphy->src_coef);
- }
+ /* Optional properties for slew calibration variation */
+ ret = device_property_read_u32(dev, "mediatek,src-ref-clk-mhz", &tphy->src_ref_clk);
+ if (ret)
+ tphy->src_ref_clk = tphy->pdata->slew_ref_clock_mhz;
+
+ ret = device_property_read_u32(dev, "mediatek,src-coef", &tphy->src_coef);
+ if (ret)
+ tphy->src_coef = tphy->pdata->slew_rate_coefficient;
port = 0;
for_each_child_of_node_scoped(np, child_np) {
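The hunk above moves the slew-calibration defaults from hard-coded constants into per-SoC platform data, with the optional DT properties acting as overrides and an all-zero default (V3 parts) disabling calibration altogether. A minimal, self-contained sketch of that fallback pattern follows; the helper names are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

struct pdata {
    unsigned int slew_ref_clock_mhz;      /* per-SoC default, 0 = unsupported */
    unsigned int slew_rate_coefficient;
};

/* Stand-in for an optional DT property read: returns 0 only when present. */
static int read_optional_u32(bool present, unsigned int dt_val, unsigned int *out)
{
    if (!present)
        return -1;
    *out = dt_val;
    return 0;
}

static void resolve(const struct pdata *pd,
                    bool has_clk, unsigned int dt_clk,
                    bool has_coef, unsigned int dt_coef)
{
    unsigned int ref_clk, coef;

    if (read_optional_u32(has_clk, dt_clk, &ref_clk))
        ref_clk = pd->slew_ref_clock_mhz;         /* fall back to pdata */
    if (read_optional_u32(has_coef, dt_coef, &coef))
        coef = pd->slew_rate_coefficient;

    if (!ref_clk || !coef)
        printf("slew rate calibration not supported\n");
    else
        printf("calibrate with %u MHz reference, coefficient %u\n", ref_clk, coef);
}

int main(void)
{
    const struct pdata v2 = { 26, 28 };   /* defaults the patch gives V1/V2 parts */
    const struct pdata v3 = { 0, 0 };     /* V3 leaves both unset */

    resolve(&v2, false, 0, false, 0);     /* board without the optional properties */
    resolve(&v3, false, 0, false, 0);     /* calibration disabled */
    return 0;
}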
diff --git a/drivers/phy/phy-snps-eusb2.c b/drivers/phy/phy-snps-eusb2.c
index 751b6d8ba2be..f90bf7e95463 100644
--- a/drivers/phy/phy-snps-eusb2.c
+++ b/drivers/phy/phy-snps-eusb2.c
@@ -256,7 +256,7 @@ static int exynos_eusb2_ref_clk_init(struct snps_eusb2_hsphy *phy)
}
if (!config) {
- dev_err(&phy->phy->dev, "unsupported ref_clk_freq:%lu\n", ref_clk_freq);
+ dev_err(&phy->phy->dev, "unsupported ref_clk_freq: %lu\n", ref_clk_freq);
return -EINVAL;
}
@@ -293,7 +293,7 @@ static int qcom_eusb2_ref_clk_init(struct snps_eusb2_hsphy *phy)
}
if (!config) {
- dev_err(&phy->phy->dev, "unsupported ref_clk_freq:%lu\n", ref_clk_freq);
+ dev_err(&phy->phy->dev, "unsupported ref_clk_freq: %lu\n", ref_clk_freq);
return -EINVAL;
}
@@ -392,7 +392,7 @@ static int qcom_snps_eusb2_hsphy_init(struct phy *p)
snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_1,
PHY_CFG_PLL_CPBIAS_CNTRL_MASK,
- FIELD_PREP(PHY_CFG_PLL_CPBIAS_CNTRL_MASK, 0x1));
+ FIELD_PREP(PHY_CFG_PLL_CPBIAS_CNTRL_MASK, 0x0));
snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_4,
PHY_CFG_PLL_INT_CNTRL_MASK,
@@ -437,6 +437,9 @@ static int qcom_snps_eusb2_hsphy_init(struct phy *p)
snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_HS_PHY_CTRL2,
USB2_SUSPEND_N_SEL, 0);
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG0,
+ CMN_CTRL_OVERRIDE_EN, 0);
+
return 0;
}
@@ -461,39 +464,40 @@ static int snps_eusb2_hsphy_init(struct phy *p)
ret = phy_init(phy->repeater);
if (ret) {
- dev_err(&p->dev, "repeater init failed. %d\n", ret);
+ dev_err(&p->dev, "repeater init failed: %d\n", ret);
goto disable_vreg;
}
ret = clk_bulk_prepare_enable(phy->data->num_clks, phy->clks);
if (ret) {
- dev_err(&p->dev, "failed to enable ref clock, %d\n", ret);
- goto disable_vreg;
+ dev_err(&p->dev, "failed to enable ref clock: %d\n", ret);
+ goto exit_repeater;
}
ret = reset_control_assert(phy->phy_reset);
if (ret) {
- dev_err(&p->dev, "failed to assert phy_reset, %d\n", ret);
- goto disable_ref_clk;
+ dev_err(&p->dev, "failed to assert phy_reset: %d\n", ret);
+ goto disable_clks;
}
usleep_range(100, 150);
ret = reset_control_deassert(phy->phy_reset);
if (ret) {
- dev_err(&p->dev, "failed to de-assert phy_reset, %d\n", ret);
- goto disable_ref_clk;
+ dev_err(&p->dev, "failed to de-assert phy_reset: %d\n", ret);
+ goto disable_clks;
}
ret = phy->data->phy_init(p);
if (ret)
- goto disable_ref_clk;
+ goto disable_clks;
return 0;
-disable_ref_clk:
+disable_clks:
clk_bulk_disable_unprepare(phy->data->num_clks, phy->clks);
-
+exit_repeater:
+ phy_exit(phy->repeater);
disable_vreg:
regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs);
@@ -504,7 +508,7 @@ static int snps_eusb2_hsphy_exit(struct phy *p)
{
struct snps_eusb2_hsphy *phy = phy_get_drvdata(p);
- clk_disable_unprepare(phy->ref_clk);
+ clk_bulk_disable_unprepare(phy->data->num_clks, phy->clks);
regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs);
@@ -551,7 +555,7 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev)
if (!phy->clks)
return -ENOMEM;
- for (int i = 0; i < phy->data->num_clks; ++i)
+ for (i = 0; i < phy->data->num_clks; ++i)
phy->clks[i].id = phy->data->clk_names[i];
ret = devm_clk_bulk_get(dev, phy->data->num_clks, phy->clks);
@@ -560,7 +564,7 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev)
"failed to get phy clock(s)\n");
phy->ref_clk = NULL;
- for (int i = 0; i < phy->data->num_clks; ++i) {
+ for (i = 0; i < phy->data->num_clks; ++i) {
if (!strcmp(phy->clks[i].id, "ref")) {
phy->ref_clk = phy->clks[i].clk;
break;
@@ -582,14 +586,14 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"failed to get regulator supplies\n");
- phy->repeater = devm_of_phy_optional_get(dev, np, 0);
+ phy->repeater = devm_of_phy_optional_get(dev, np, NULL);
if (IS_ERR(phy->repeater))
return dev_err_probe(dev, PTR_ERR(phy->repeater),
"failed to get repeater\n");
generic_phy = devm_phy_create(dev, NULL, &snps_eusb2_hsphy_ops);
if (IS_ERR(generic_phy)) {
- dev_err(dev, "failed to create phy %d\n", ret);
+ dev_err(dev, "failed to create phy: %d\n", ret);
return PTR_ERR(generic_phy);
}
@@ -600,8 +604,6 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev)
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
- dev_info(dev, "Registered Snps-eUSB2 phy\n");
-
return 0;
}
@@ -612,7 +614,9 @@ static const struct of_device_id snps_eusb2_hsphy_of_match_table[] = {
}, {
.compatible = "samsung,exynos2200-eusb2-phy",
.data = &exynos2200_snps_eusb2_phy,
- }, { },
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, snps_eusb2_hsphy_of_match_table);
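The init error path above now unwinds resources in strict reverse acquisition order (clocks, then the repeater, then the regulators). The sketch below models that goto-label unwind ordering in plain C; the resource names and return codes are illustrative only.

#include <stdio.h>

static int enable(const char *what, int fail)
{
    printf("enable %s\n", what);
    return fail ? -1 : 0;
}

static void disable(const char *what)
{
    printf("disable %s\n", what);
}

static int init_example(int fail_at)
{
    int ret;

    ret = enable("regulators", fail_at == 1);
    if (ret)
        return ret;

    ret = enable("repeater", fail_at == 2);
    if (ret)
        goto disable_vreg;

    ret = enable("clocks", fail_at == 3);
    if (ret)
        goto exit_repeater;

    ret = enable("phy", fail_at == 4);
    if (ret)
        goto disable_clks;

    return 0;

disable_clks:
    disable("clocks");
exit_repeater:
    disable("repeater");
disable_vreg:
    disable("regulators");
    return ret;
}

int main(void)
{
    return init_example(4) ? 1 : 0;   /* exercise the deepest unwind path */
}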
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index ef14f4e33973..60a0ead127fa 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -126,12 +126,12 @@ config PHY_QCOM_QUSB2
USB IPs on MSM SOCs.
config PHY_QCOM_EUSB2_REPEATER
- tristate "Qualcomm SNPS eUSB2 Repeater Driver"
+ tristate "Qualcomm PMIC eUSB2 Repeater Driver"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
select GENERIC_PHY
help
- Enable support for the USB high-speed SNPS eUSB2 repeater on Qualcomm
- PMICs. The repeater is paired with a Synopsys eUSB2 Phy
+ Enable support for the USB high-speed eUSB2 repeater on Qualcomm
+ PMICs. The repeater is paired with a Synopsys or M31 eUSB2 Phy
on Qualcomm SOCs.
config PHY_QCOM_M31_USB
@@ -158,6 +158,16 @@ config PHY_QCOM_UNIPHY_PCIE_28LP
handles PHY initialization, clock management required after
resetting the hardware and power management.
+config PHY_QCOM_M31_EUSB
+ tristate "Qualcomm M31 eUSB2 PHY driver support"
+ depends on USB && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable this to support M31 EUSB2 PHY transceivers on Qualcomm
+ chips with DWC3 USB core. It supports initializing and cleaning
+ up the associated USB repeater that is paired with the eUSB2
+ PHY.
+
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 3851e28a212d..b71a6a0bed3f 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o
obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_M31_USB) += phy-qcom-m31.o
+obj-$(CONFIG_PHY_QCOM_M31_EUSB) += phy-qcom-m31-eusb2.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
obj-$(CONFIG_PHY_QCOM_QMP_COMBO) += phy-qcom-qmp-combo.o phy-qcom-qmp-usbc.o
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index 6bd1b3c75c77..e0f2acc8109c 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -37,32 +37,13 @@
#define EUSB2_TUNE_EUSB_EQU 0x5A
#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
-enum eusb2_reg_layout {
- TUNE_EUSB_HS_COMP_CUR,
- TUNE_EUSB_EQU,
- TUNE_EUSB_SLEW,
- TUNE_USB2_HS_COMP_CUR,
- TUNE_USB2_PREEM,
- TUNE_USB2_EQU,
- TUNE_USB2_SLEW,
- TUNE_SQUELCH_U,
- TUNE_HSDISC,
- TUNE_RES_FSDIF,
- TUNE_IUSB2,
- TUNE_USB2_CROSSOVER,
- NUM_TUNE_FIELDS,
-
- FORCE_VAL_5 = NUM_TUNE_FIELDS,
- FORCE_EN_5,
-
- EN_CTL1,
-
- RPTR_STATUS,
- LAYOUT_SIZE,
+struct eusb2_repeater_init_tbl_reg {
+ unsigned int reg;
+ unsigned int value;
};
struct eusb2_repeater_cfg {
- const u32 *init_tbl;
+ const struct eusb2_repeater_init_tbl_reg *init_tbl;
int init_tbl_num;
const char * const *vreg_list;
int num_vregs;
@@ -82,16 +63,16 @@ static const char * const pm8550b_vreg_l[] = {
"vdd18", "vdd3",
};
-static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
- [TUNE_IUSB2] = 0x8,
- [TUNE_SQUELCH_U] = 0x3,
- [TUNE_USB2_PREEM] = 0x5,
+static const struct eusb2_repeater_init_tbl_reg pm8550b_init_tbl[] = {
+ { EUSB2_TUNE_IUSB2, 0x8 },
+ { EUSB2_TUNE_SQUELCH_U, 0x3 },
+ { EUSB2_TUNE_USB2_PREEM, 0x5 },
};
-static const u32 smb2360_init_tbl[NUM_TUNE_FIELDS] = {
- [TUNE_IUSB2] = 0x5,
- [TUNE_SQUELCH_U] = 0x3,
- [TUNE_USB2_PREEM] = 0x2,
+static const struct eusb2_repeater_init_tbl_reg smb2360_init_tbl[] = {
+ { EUSB2_TUNE_IUSB2, 0x5 },
+ { EUSB2_TUNE_SQUELCH_U, 0x3 },
+ { EUSB2_TUNE_USB2_PREEM, 0x2 },
};
static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
@@ -129,17 +110,10 @@ static int eusb2_repeater_init(struct phy *phy)
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
struct device_node *np = rptr->dev->of_node;
struct regmap *regmap = rptr->regmap;
- const u32 *init_tbl = rptr->cfg->init_tbl;
- u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM];
- u8 tune_hsdisc = init_tbl[TUNE_HSDISC];
- u8 tune_iusb2 = init_tbl[TUNE_IUSB2];
u32 base = rptr->base;
- u32 val;
+ u32 poll_val;
int ret;
-
- of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
- of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc);
- of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem);
+ u8 val;
ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
if (ret)
@@ -147,21 +121,24 @@ static int eusb2_repeater_init(struct phy *phy)
regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN);
- regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]);
- regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]);
- regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]);
- regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]);
- regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]);
- regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]);
- regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]);
- regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]);
- regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]);
-
- regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem);
- regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc);
- regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);
-
- ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5);
+ /* Write registers from init table */
+ for (int i = 0; i < rptr->cfg->init_tbl_num; i++)
+ regmap_write(regmap, base + rptr->cfg->init_tbl[i].reg,
+ rptr->cfg->init_tbl[i].value);
+
+ /* Override registers from devicetree values */
+ if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_IUSB2, val);
+
+ if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_HSDISC, val);
+
+ if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, val);
+
+ /* Wait for status OK */
+ ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, poll_val,
+ poll_val & RPTR_OK, 10, 5);
if (ret)
dev_err(rptr->dev, "initialization timed-out\n");
@@ -264,8 +241,6 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
- dev_info(dev, "Registered Qcom-eUSB2 repeater\n");
-
return 0;
}
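The repeater rework above replaces the fixed, enum-indexed tuning array with per-PMIC tables of {register, value} pairs, so each configuration programs only the registers it actually lists. Below is a self-contained sketch of that table-driven init; the register offsets and the regmap mock are hypothetical, not the PMIC's real map.

#include <stddef.h>
#include <stdio.h>

struct init_reg {
    unsigned int reg;       /* register offset relative to the block base */
    unsigned int value;
};

static unsigned int fake_regs[0x100];       /* mocked register space */

static void regmap_write_mock(unsigned int base, unsigned int reg, unsigned int val)
{
    fake_regs[base + reg] = val;
    printf("write 0x%02x = 0x%x\n", base + reg, val);
}

/* Hypothetical per-PMIC table: only the registers that need programming. */
static const struct init_reg example_tbl[] = {
    { 0x51, 0x8 },      /* e.g. an IUSB2-style amplitude setting */
    { 0x59, 0x3 },      /* e.g. a squelch threshold */
    { 0x57, 0x5 },      /* e.g. a pre-emphasis setting */
};

int main(void)
{
    for (size_t i = 0; i < sizeof(example_tbl) / sizeof(example_tbl[0]); i++)
        regmap_write_mock(0x00, example_tbl[i].reg, example_tbl[i].value);
    return 0;
}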
diff --git a/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c b/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c
new file mode 100644
index 000000000000..bf32572566c4
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <linux/regulator/consumer.h>
+
+#define USB_PHY_UTMI_CTRL0 (0x3c)
+#define SLEEPM BIT(0)
+
+#define USB_PHY_UTMI_CTRL5 (0x50)
+#define POR BIT(1)
+
+#define USB_PHY_HS_PHY_CTRL_COMMON0 (0x54)
+#define SIDDQ_SEL BIT(1)
+#define SIDDQ BIT(2)
+#define FSEL GENMASK(6, 4)
+#define FSEL_38_4_MHZ_VAL (0x6)
+
+#define USB_PHY_HS_PHY_CTRL2 (0x64)
+#define USB2_SUSPEND_N BIT(2)
+#define USB2_SUSPEND_N_SEL BIT(3)
+
+#define USB_PHY_CFG0 (0x94)
+#define UTMI_PHY_CMN_CTRL_OVERRIDE_EN BIT(1)
+
+#define USB_PHY_CFG1 (0x154)
+#define PLL_EN BIT(0)
+
+#define USB_PHY_FSEL_SEL (0xb8)
+#define FSEL_SEL BIT(0)
+
+#define USB_PHY_XCFGI_39_32 (0x16c)
+#define HSTX_PE GENMASK(3, 2)
+
+#define USB_PHY_XCFGI_71_64 (0x17c)
+#define HSTX_SWING GENMASK(3, 0)
+
+#define USB_PHY_XCFGI_31_24 (0x168)
+#define HSTX_SLEW GENMASK(2, 0)
+
+#define USB_PHY_XCFGI_7_0 (0x15c)
+#define PLL_LOCK_TIME GENMASK(1, 0)
+
+#define M31_EUSB_PHY_INIT_CFG(o, b, v) \
+{ \
+ .off = o, \
+ .mask = b, \
+ .val = v, \
+}
+
+struct m31_phy_tbl_entry {
+ u32 off;
+ u32 mask;
+ u32 val;
+};
+
+struct m31_eusb2_priv_data {
+ const struct m31_phy_tbl_entry *setup_seq;
+ unsigned int setup_seq_nregs;
+ const struct m31_phy_tbl_entry *override_seq;
+ unsigned int override_seq_nregs;
+ const struct m31_phy_tbl_entry *reset_seq;
+ unsigned int reset_seq_nregs;
+ unsigned int fsel;
+};
+
+static const struct m31_phy_tbl_entry m31_eusb2_setup_tbl[] = {
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_CFG0, UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 1),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_UTMI_CTRL5, POR, 1),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_CFG1, PLL_EN, 1),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_FSEL_SEL, FSEL_SEL, 1),
+};
+
+static const struct m31_phy_tbl_entry m31_eusb_phy_override_tbl[] = {
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_XCFGI_39_32, HSTX_PE, 0),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_XCFGI_71_64, HSTX_SWING, 7),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_XCFGI_31_24, HSTX_SLEW, 0),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_XCFGI_7_0, PLL_LOCK_TIME, 0),
+};
+
+static const struct m31_phy_tbl_entry m31_eusb_phy_reset_tbl[] = {
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL2, USB2_SUSPEND_N_SEL, 1),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL2, USB2_SUSPEND_N, 1),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_UTMI_CTRL0, SLEEPM, 1),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL_COMMON0, SIDDQ_SEL, 1),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL_COMMON0, SIDDQ, 0),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_UTMI_CTRL5, POR, 0),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL2, USB2_SUSPEND_N_SEL, 0),
+ M31_EUSB_PHY_INIT_CFG(USB_PHY_CFG0, UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0),
+};
+
+static const struct regulator_bulk_data m31_eusb_phy_vregs[] = {
+ { .supply = "vdd" },
+ { .supply = "vdda12" },
+};
+
+#define M31_EUSB_NUM_VREGS ARRAY_SIZE(m31_eusb_phy_vregs)
+
+struct m31eusb2_phy {
+ struct phy *phy;
+ void __iomem *base;
+ const struct m31_eusb2_priv_data *data;
+ enum phy_mode mode;
+
+ struct regulator_bulk_data *vregs;
+ struct clk *clk;
+ struct reset_control *reset;
+
+ struct phy *repeater;
+};
+
+static int m31eusb2_phy_write_readback(void __iomem *base, u32 offset,
+ const u32 mask, u32 val)
+{
+ u32 write_val;
+ u32 tmp;
+
+ tmp = readl(base + offset);
+ tmp &= ~mask;
+ write_val = tmp | val;
+
+ writel(write_val, base + offset);
+
+ tmp = readl(base + offset);
+ tmp &= mask;
+
+ if (tmp != val) {
+ pr_err("write: %x to offset: %x FAILED\n", val, offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int m31eusb2_phy_write_sequence(struct m31eusb2_phy *phy,
+ const struct m31_phy_tbl_entry *tbl,
+ int num)
+{
+ int i;
+ int ret;
+
+ for (i = 0 ; i < num; i++, tbl++) {
+ dev_dbg(&phy->phy->dev, "Offset:%x BitMask:%x Value:%x",
+ tbl->off, tbl->mask, tbl->val);
+
+ ret = m31eusb2_phy_write_readback(phy->base,
+ tbl->off, tbl->mask,
+ tbl->val << __ffs(tbl->mask));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int m31eusb2_phy_set_mode(struct phy *uphy, enum phy_mode mode, int submode)
+{
+ struct m31eusb2_phy *phy = phy_get_drvdata(uphy);
+
+ phy->mode = mode;
+
+ return phy_set_mode_ext(phy->repeater, mode, submode);
+}
+
+static int m31eusb2_phy_init(struct phy *uphy)
+{
+ struct m31eusb2_phy *phy = phy_get_drvdata(uphy);
+ const struct m31_eusb2_priv_data *data = phy->data;
+ int ret;
+
+ ret = regulator_bulk_enable(M31_EUSB_NUM_VREGS, phy->vregs);
+ if (ret) {
+ dev_err(&uphy->dev, "failed to enable regulator, %d\n", ret);
+ return ret;
+ }
+
+ ret = phy_init(phy->repeater);
+ if (ret) {
+ dev_err(&uphy->dev, "repeater init failed. %d\n", ret);
+ goto disable_vreg;
+ }
+
+ ret = clk_prepare_enable(phy->clk);
+ if (ret) {
+ dev_err(&uphy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+ goto disable_repeater;
+ }
+
+ /* Perform phy reset */
+ reset_control_assert(phy->reset);
+ udelay(5);
+ reset_control_deassert(phy->reset);
+
+ m31eusb2_phy_write_sequence(phy, data->setup_seq, data->setup_seq_nregs);
+ m31eusb2_phy_write_readback(phy->base,
+ USB_PHY_HS_PHY_CTRL_COMMON0, FSEL,
+ FIELD_PREP(FSEL, data->fsel));
+ m31eusb2_phy_write_sequence(phy, data->override_seq, data->override_seq_nregs);
+ m31eusb2_phy_write_sequence(phy, data->reset_seq, data->reset_seq_nregs);
+
+ return 0;
+
+disable_repeater:
+ phy_exit(phy->repeater);
+disable_vreg:
+ regulator_bulk_disable(M31_EUSB_NUM_VREGS, phy->vregs);
+
+ return ret;
+}
+
+static int m31eusb2_phy_exit(struct phy *uphy)
+{
+ struct m31eusb2_phy *phy = phy_get_drvdata(uphy);
+
+ clk_disable_unprepare(phy->clk);
+ regulator_bulk_disable(M31_EUSB_NUM_VREGS, phy->vregs);
+ phy_exit(phy->repeater);
+
+ return 0;
+}
+
+static const struct phy_ops m31eusb2_phy_gen_ops = {
+ .init = m31eusb2_phy_init,
+ .exit = m31eusb2_phy_exit,
+ .set_mode = m31eusb2_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int m31eusb2_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ const struct m31_eusb2_priv_data *data;
+ struct device *dev = &pdev->dev;
+ struct m31eusb2_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+ phy->data = data;
+
+ phy->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy->base))
+ return PTR_ERR(phy->base);
+
+ phy->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(phy->reset))
+ return PTR_ERR(phy->reset);
+
+ phy->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(phy->clk))
+ return dev_err_probe(dev, PTR_ERR(phy->clk),
+ "failed to get clk\n");
+
+ phy->phy = devm_phy_create(dev, NULL, &m31eusb2_phy_gen_ops);
+ if (IS_ERR(phy->phy))
+ return dev_err_probe(dev, PTR_ERR(phy->phy),
+ "failed to create phy\n");
+
+ ret = devm_regulator_bulk_get_const(dev, M31_EUSB_NUM_VREGS,
+ m31_eusb_phy_vregs, &phy->vregs);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
+
+ phy_set_drvdata(phy->phy, phy);
+
+ phy->repeater = devm_of_phy_get_by_index(dev, dev->of_node, 0);
+ if (IS_ERR(phy->repeater))
+ return dev_err_probe(dev, PTR_ERR(phy->repeater),
+ "failed to get repeater\n");
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct m31_eusb2_priv_data m31_eusb_v1_data = {
+ .setup_seq = m31_eusb2_setup_tbl,
+ .setup_seq_nregs = ARRAY_SIZE(m31_eusb2_setup_tbl),
+ .override_seq = m31_eusb_phy_override_tbl,
+ .override_seq_nregs = ARRAY_SIZE(m31_eusb_phy_override_tbl),
+ .reset_seq = m31_eusb_phy_reset_tbl,
+ .reset_seq_nregs = ARRAY_SIZE(m31_eusb_phy_reset_tbl),
+ .fsel = FSEL_38_4_MHZ_VAL,
+};
+
+static const struct of_device_id m31eusb2_phy_id_table[] = {
+ { .compatible = "qcom,sm8750-m31-eusb2-phy", .data = &m31_eusb_v1_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, m31eusb2_phy_id_table);
+
+static struct platform_driver m31eusb2_phy_driver = {
+ .probe = m31eusb2_phy_probe,
+ .driver = {
+ .name = "qcom-m31eusb2-phy",
+ .of_match_table = m31eusb2_phy_id_table,
+ },
+};
+
+module_platform_driver(m31eusb2_phy_driver);
+
+MODULE_AUTHOR("Wesley Cheng <quic_wcheng@quicinc.com>");
+MODULE_DESCRIPTION("eUSB2 Qualcomm M31 HSPHY driver");
+MODULE_LICENSE("GPL");
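The new driver's m31eusb2_phy_write_readback() applies each table value by shifting it into the field selected by the mask, then reads the register back to confirm the field took effect. The standalone sketch below mirrors that write/readback-verify pattern against a mocked register file; the FSEL example offset and value follow the defines above, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x200 / 4];        /* mocked register file */

static uint32_t rd(uint32_t off) { return regs[off / 4]; }
static void wr(uint32_t off, uint32_t v) { regs[off / 4] = v; }

/* Position of the mask's least significant set bit, like __ffs(); mask != 0. */
static unsigned int mask_shift(uint32_t mask)
{
    unsigned int pos = 0;

    while (!(mask & 1)) {
        mask >>= 1;
        pos++;
    }
    return pos;
}

static int write_readback(uint32_t off, uint32_t mask, uint32_t field_val)
{
    uint32_t val = field_val << mask_shift(mask);
    uint32_t tmp = (rd(off) & ~mask) | val;

    wr(off, tmp);

    if ((rd(off) & mask) != val) {
        fprintf(stderr, "write 0x%x to offset 0x%x failed\n", val, off);
        return -1;
    }
    return 0;
}

int main(void)
{
    /* Program a 3-bit FSEL-style field at bits [6:4] with value 0x6. */
    return write_readback(0x54, 0x7 << 4, 0x6) ? 1 : 0;
}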
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index 20d4c020a83c..168ea980fda0 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -58,14 +58,16 @@
#define USB2_0_TX_ENABLE BIT(2)
#define USB2PHY_USB_PHY_M31_XCFGI_4 0xc8
- #define HSTX_SLEW_RATE_565PS GENMASK(1, 0)
+ #define HSTX_SLEW_RATE_400PS GENMASK(2, 0)
#define PLL_CHARGING_PUMP_CURRENT_35UA GENMASK(4, 3)
#define ODT_VALUE_38_02_OHM GENMASK(7, 6)
#define USB2PHY_USB_PHY_M31_XCFGI_5 0xcc
- #define ODT_VALUE_45_02_OHM BIT(2)
#define HSTX_PRE_EMPHASIS_LEVEL_0_55MA BIT(0)
+#define USB2PHY_USB_PHY_M31_XCFGI_9 0xdc
+ #define HSTX_CURRENT_17_1MA_385MV BIT(1)
+
#define USB2PHY_USB_PHY_M31_XCFGI_11 0xe4
#define XCFG_COARSE_TUNE_NUM BIT(1)
#define XCFG_FINE_TUNE_NUM BIT(3)
@@ -164,7 +166,7 @@ static struct m31_phy_regs m31_ipq5332_regs[] = {
},
{
USB2PHY_USB_PHY_M31_XCFGI_4,
- HSTX_SLEW_RATE_565PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM,
+ HSTX_SLEW_RATE_400PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM,
0
},
{
@@ -174,10 +176,14 @@ static struct m31_phy_regs m31_ipq5332_regs[] = {
},
{
USB2PHY_USB_PHY_M31_XCFGI_5,
- ODT_VALUE_45_02_OHM | HSTX_PRE_EMPHASIS_LEVEL_0_55MA,
+ HSTX_PRE_EMPHASIS_LEVEL_0_55MA,
4
},
{
+ USB2PHY_USB_PHY_M31_XCFGI_9,
+ HSTX_CURRENT_17_1MA_385MV,
+ },
+ {
USB_PHY_UTMI_CTRL5,
0x0,
0
@@ -305,8 +311,6 @@ static int m31usb_phy_probe(struct platform_device *pdev)
phy_set_drvdata(qphy->phy, qphy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered M31 USB phy\n");
return PTR_ERR_OR_ZERO(phy_provider);
}
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index b09fa00e9fe7..f07d097b129f 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -32,6 +32,7 @@
#include "phy-qcom-qmp-pcs-usb-v4.h"
#include "phy-qcom-qmp-pcs-usb-v5.h"
#include "phy-qcom-qmp-pcs-usb-v6.h"
+#include "phy-qcom-qmp-pcs-usb-v8.h"
#include "phy-qcom-qmp-dp-com-v3.h"
@@ -212,6 +213,31 @@ static const unsigned int qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN,
};
+static const unsigned int qmp_v8_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V8_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V8_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V8_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V8_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V8_PCS_USB_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V8_PCS_USB_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V8_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V8_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V8_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V8_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V8_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V8_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V8_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V8_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V8_TX_TRANSCEIVER_BIAS_EN,
+};
+
static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
@@ -1471,6 +1497,139 @@ static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10),
};
+static const struct qmp_phy_init_tbl sm8750_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_STEP_SIZE1_MODE1, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_STEP_SIZE2_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP1_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP2_MODE1, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DEC_START_MODE1, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DEC_START_MSB_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START2_MODE1, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START3_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE1_MODE1, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_STEP_SIZE1_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP1_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DEC_START_MSB_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START2_MODE0, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START3_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE1_MODE0, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_PER1, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_SYSCLK_BUF_ENABLE, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP_CFG, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_1, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_2, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_3, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V8_COM_ADDITIONAL_MISC, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8750_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V8_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V8_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V8_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V8_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V8_TX_LANE_MODE_1, 0xf5),
+ QMP_PHY_INIT_CFG(QSERDES_V8_TX_LANE_MODE_3, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V8_TX_LANE_MODE_4, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V8_TX_LANE_MODE_5, 0x5f),
+ QMP_PHY_INIT_CFG(QSERDES_V8_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V8_TX_PI_QEC_CTRL, 0x21, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V8_TX_PI_QEC_CTRL, 0x05, 2),
+};
+
+static const struct qmp_phy_init_tbl sm8750_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SB2_GAIN2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_AUX_DATA_TCOARSE_TFINE, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_GM_CAL, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_IDAC_TSETTLE_LOW, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x27),
+
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_ENABLES, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_LOW, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_HIGH3, 0xdf),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_HIGH4, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_LOW, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_HIGH, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_HIGH2, 0x91),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_HIGH3, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_HIGH4, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_VTH_CODE, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_CAL_CTRL1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_CAL_TRIM, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sm8750_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_RX_SIGDET_LVL, 0x55),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8750_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_USB_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_USB_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_USB_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V8_PCS_USB_RCVR_DTCT_DLY_U3_H, 0x00),
+};
+
static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
@@ -1781,6 +1940,22 @@ static const struct qmp_combo_offsets qmp_combo_offsets_v5 = {
.dp_dp_phy = 0x2200,
};
+static const struct qmp_combo_offsets qmp_combo_offsets_v8 = {
+ .com = 0x0000,
+ .txa = 0x1400,
+ .rxa = 0x1600,
+ .txb = 0x1800,
+ .rxb = 0x1a00,
+ .usb3_serdes = 0x1000,
+ .usb3_pcs_misc = 0x1c00,
+ .usb3_pcs = 0x1e00,
+ .usb3_pcs_usb = 0x2100,
+ .dp_serdes = 0x3000,
+ .dp_txa = 0x3400,
+ .dp_txb = 0x3800,
+ .dp_dp_phy = 0x3c00,
+};
+
static const struct qmp_phy_cfg sar2130p_usb3dpphy_cfg = {
.offsets = &qmp_combo_offsets_v3,
@@ -2280,6 +2455,51 @@ static const struct qmp_phy_cfg sm8650_usb3dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
};
+static const struct qmp_phy_cfg sm8750_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v8,
+
+ .serdes_tbl = sm8750_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8750_usb3_serdes_tbl),
+ .tx_tbl = sm8750_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8750_usb3_tx_tbl),
+ .rx_tbl = sm8750_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8750_usb3_rx_tbl),
+ .pcs_tbl = sm8750_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8750_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8750_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8750_usb3_pcs_usb_tbl),
+
+ .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+ .dp_aux_init = qmp_v4_dp_aux_init,
+ .configure_dp_tx = qmp_v4_configure_dp_tx,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
+ .calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
+
+ .regs = qmp_v8_usb3phy_regs_layout,
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+};
+
static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp)
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
@@ -3916,6 +4136,10 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
.data = &sm8650_usb3dpphy_cfg,
},
{
+ .compatible = "qcom,sm8750-qmp-usb3-dp-phy",
+ .data = &sm8750_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,x1e80100-qmp-usb3-dp-phy",
.data = &x1e80100_usb3dpphy_cfg,
},
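The qmp_v8_usb3phy_regs_layout[] added above is another instance of the QMP regs-layout indirection: common sequencing code indexes registers by logical ID, and each PHY generation supplies its own offset table. A small illustrative model follows; the v8 offsets match the new header, while the v6 column and the start sequence itself are simplified stand-ins.

#include <stdio.h>

enum qphy_reg {
    QPHY_SW_RESET,
    QPHY_START_CTRL,
    QPHY_PCS_STATUS,
    QPHY_LAYOUT_SIZE,
};

/* Hypothetical offsets for an older generation. */
static const unsigned int v6_layout[QPHY_LAYOUT_SIZE] = {
    [QPHY_SW_RESET]   = 0x000,
    [QPHY_START_CTRL] = 0x044,
    [QPHY_PCS_STATUS] = 0x014,
};

/* Offsets matching the new V8 PCS header added by this series. */
static const unsigned int v8_layout[QPHY_LAYOUT_SIZE] = {
    [QPHY_SW_RESET]   = 0x000,
    [QPHY_START_CTRL] = 0x044,
    [QPHY_PCS_STATUS] = 0x014,
};

/* Version-agnostic sequence: only the layout table changes per generation. */
static void start_phy(const char *name, const unsigned int *layout)
{
    printf("%s: write SW_RESET   @ 0x%03x\n", name, layout[QPHY_SW_RESET]);
    printf("%s: write START_CTRL @ 0x%03x\n", name, layout[QPHY_START_CTRL]);
    printf("%s: poll  PCS_STATUS @ 0x%03x\n", name, layout[QPHY_PCS_STATUS]);
}

int main(void)
{
    start_phy("v6", v6_layout);
    start_phy("v8", v8_layout);
    return 0;
}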
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 461b9e0af610..95830dcfdec9 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -2639,29 +2639,29 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl[]
};
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rx_alt_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x9b),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xb0),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0xe4),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xf0),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B4, 0x42),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9b),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xfb),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xe4),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xec),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x43),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xdd),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0d),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xf3),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xb3),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xf8),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xec),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xd6),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x83),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xf5),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x5e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xe5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x8d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7e),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
@@ -2680,12 +2680,12 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rx_alt_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x08),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3, 0x04),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_CNTRL1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x08),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x7c),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
};
@@ -2699,6 +2699,8 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_tx_tbl[] = {
};
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V5_20_PCS_G3_RXEQEVAL_TIME, 0x27),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V5_20_PCS_G4_RXEQEVAL_TIME, 0x27),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
@@ -2711,11 +2713,19 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
};
-static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl[] = {
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_pcs_alt_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LOCK_DETECT_CONFIG1, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_ALIGN_DETECT_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_ALIGN_DETECT_CONFIG2, 0x50),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_ln_shrd_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_v5_LN_SHRD_UCDR_PI_CTRL2, 0x00),
};
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rx_alt_tbl[] = {
@@ -2739,27 +2749,27 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rx_alt_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x9b),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xb0),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0xd2),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xf0),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B4, 0x42),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9b),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xb6),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xd2),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xf0),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x43),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xdd),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0d),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xf3),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xb3),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xf6),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xee),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xe6),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x83),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xf9),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7e),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
@@ -2767,14 +2777,7 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rx_alt_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3, 0x04),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_CNTRL1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x08),
-};
-
-static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
- QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x06),
};
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_serdes_alt_tbl[] = {
@@ -3191,6 +3194,7 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = {
.rx = 0x0200,
.tx2 = 0x0800,
.rx2 = 0x0a00,
+ .ln_shrd = 0x0e00,
};
static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_30 = {
@@ -3398,8 +3402,8 @@ static const struct qmp_phy_cfg qcs8300_qmp_gen4x2_pciephy_cfg = {
.tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl),
.rx = qcs8300_qmp_gen4x2_pcie_rx_alt_tbl,
.rx_num = ARRAY_SIZE(qcs8300_qmp_gen4x2_pcie_rx_alt_tbl),
- .pcs = sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl,
- .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl),
+ .pcs = sa8775p_qmp_gen4_pcie_pcs_alt_tbl,
+ .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_alt_tbl),
.pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
},
@@ -4067,12 +4071,15 @@ static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = {
.tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl),
.rx = sa8775p_qmp_gen4x2_pcie_rx_alt_tbl,
.rx_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_rx_alt_tbl),
- .pcs = sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl,
- .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl),
- .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
+ .pcs = sa8775p_qmp_gen4_pcie_pcs_alt_tbl,
+ .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
.pcs_lane1 = sdx65_qmp_pcie_pcs_lane1_tbl,
.pcs_lane1_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_lane1_tbl),
+ .ln_shrd = sa8775p_qmp_gen4x2_pcie_ln_shrd_tbl,
+ .ln_shrd_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_ln_shrd_tbl),
+
},
.tbls_rc = &(const struct qmp_phy_cfg_tbls) {
@@ -4112,8 +4119,8 @@ static const struct qmp_phy_cfg sa8775p_qmp_gen4x4_pciephy_cfg = {
.tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl),
.rx = sa8775p_qmp_gen4x4_pcie_rx_alt_tbl,
.rx_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_rx_alt_tbl),
- .pcs = sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl,
- .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl),
+ .pcs = sa8775p_qmp_gen4_pcie_pcs_alt_tbl,
+ .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_alt_tbl),
.pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
},
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
index 283d63c81593..951de964dc12 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
@@ -13,6 +13,8 @@
#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090
#define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0
#define QPHY_V5_20_PCS_PCIE_PRESET_P10_POST 0x0e0
+#define QPHY_PCIE_V5_20_PCS_G3_RXEQEVAL_TIME 0x0f0
+#define QPHY_PCIE_V5_20_PCS_G4_RXEQEVAL_TIME 0x0f4
#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG2 0x0fc
#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
#define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h
new file mode 100644
index 000000000000..89ace8024bc0
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_USB_V8_H_
+#define QCOM_PHY_QMP_PCS_USB_V8_H_
+
+#define QPHY_V8_PCS_USB_POWER_STATE_CONFIG1 0x00
+#define QPHY_V8_PCS_USB_AUTONOMOUS_MODE_STATUS 0x04
+#define QPHY_V8_PCS_USB_AUTONOMOUS_MODE_CTRL 0x08
+#define QPHY_V8_PCS_USB_AUTONOMOUS_MODE_CTRL2 0x0c
+#define QPHY_V8_PCS_USB_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x10
+#define QPHY_V8_PCS_USB_LFPS_RXTERM_IRQ_CLEAR 0x14
+#define QPHY_V8_PCS_USB_LFPS_DET_HIGH_COUNT_VAL 0x18
+#define QPHY_V8_PCS_USB_LFPS_TX_ECSTART 0x1c
+#define QPHY_V8_PCS_USB_LFPS_PER_TIMER_VAL 0x20
+#define QPHY_V8_PCS_USB_LFPS_TX_END_CNT_U3_START 0x24
+#define QPHY_V8_PCS_USB_LFPS_CONFIG1 0x28
+#define QPHY_V8_PCS_USB_RXEQTRAINING_LOCK_TIME 0x2c
+#define QPHY_V8_PCS_USB_RXEQTRAINING_WAIT_TIME 0x30
+#define QPHY_V8_PCS_USB_RXEQTRAINING_CTLE_TIME 0x34
+#define QPHY_V8_PCS_USB_RXEQTRAINING_WAIT_TIME_S2 0x38
+#define QPHY_V8_PCS_USB_RXEQTRAINING_DFE_TIME_S2 0x3c
+#define QPHY_V8_PCS_USB_RCVR_DTCT_DLY_U3_L 0x40
+#define QPHY_V8_PCS_USB_RCVR_DTCT_DLY_U3_H 0x44
+#define QPHY_V8_PCS_USB_ARCVR_DTCT_EN_PERIOD 0x48
+#define QPHY_V8_PCS_USB_ARCVR_DTCT_CM_DLY 0x4c
+#define QPHY_V8_PCS_USB_TXONESZEROS_RUN_LENGTH 0x50
+#define QPHY_V8_PCS_USB_ALFPS_DEGLITCH_VAL 0x54
+#define QPHY_V8_PCS_USB_SIGDET_STARTUP_TIMER_VAL 0x58
+#define QPHY_V8_PCS_USB_TEST_CONTROL 0x5c
+#define QPHY_V8_PCS_USB_RXTERMINATION_DLY_SEL 0x60
+#define QPHY_V8_PCS_USB_POWER_STATE_CONFIG2 0x64
+#define QPHY_V8_PCS_USB_POWER_STATE_CONFIG3 0x68
+#define QPHY_V8_PCS_USB_POWER_STATE_CONFIG4 0x6c
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
index d3ad5b7f5425..bbee68df4e14 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h
@@ -8,8 +8,12 @@
#define QPHY_V5_20_PCS_INSIG_SW_CTRL7 0x060
#define QPHY_V5_20_PCS_INSIG_MX_CTRL7 0x07c
+#define QPHY_V5_20_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V5_20_PCS_LOCK_DETECT_CONFIG2 0x0c8
#define QPHY_V5_20_PCS_G3S2_PRE_GAIN 0x170
#define QPHY_V5_20_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V5_20_PCS_ALIGN_DETECT_CONFIG1 0x1b8
+#define QPHY_V5_20_PCS_ALIGN_DETECT_CONFIG2 0x1bc
#define QPHY_V5_20_PCS_EQ_CONFIG2 0x1d8
#define QPHY_V5_20_PCS_EQ_CONFIG4 0x1e0
#define QPHY_V5_20_PCS_EQ_CONFIG5 0x1e4
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h
new file mode 100644
index 000000000000..169fd5de7474
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V8_H_
+#define QCOM_PHY_QMP_PCS_V8_H_
+
+/* Only for QMP V8 PHY - USB/PCIe PCS registers */
+#define QPHY_V8_PCS_SW_RESET 0x000
+#define QPHY_V8_PCS_PCS_STATUS1 0x014
+#define QPHY_V8_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V8_PCS_START_CONTROL 0x044
+#define QPHY_V8_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V8_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V8_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V8_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V8_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V8_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V8_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V8_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V8_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_V8_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V8_PCS_CDR_RESET_TIME 0x1b0
+#define QPHY_V8_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V8_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V8_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V8_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V8_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V8_PCS_EQ_CONFIG5 0x1ec
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h
new file mode 100644
index 000000000000..d3b2292257bc
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_V8_H_
+#define QCOM_PHY_QMP_QSERDES_COM_V8_H_
+
+/* Only for QMP V8 PHY - QSERDES COM registers */
+#define QSERDES_V8_COM_SSC_STEP_SIZE1_MODE1 0x000
+#define QSERDES_V8_COM_SSC_STEP_SIZE2_MODE1 0x004
+#define QSERDES_V8_COM_SSC_STEP_SIZE3_MODE1 0x008
+#define QSERDES_V8_COM_CP_CTRL_MODE1 0x010
+#define QSERDES_V8_COM_PLL_RCTRL_MODE1 0x014
+#define QSERDES_V8_COM_PLL_CCTRL_MODE1 0x018
+#define QSERDES_V8_COM_CORECLK_DIV_MODE1 0x01c
+#define QSERDES_V8_COM_LOCK_CMP1_MODE1 0x020
+#define QSERDES_V8_COM_LOCK_CMP2_MODE1 0x024
+#define QSERDES_V8_COM_DEC_START_MODE1 0x028
+#define QSERDES_V8_COM_DEC_START_MSB_MODE1 0x02c
+#define QSERDES_V8_COM_DIV_FRAC_START1_MODE1 0x030
+#define QSERDES_V8_COM_DIV_FRAC_START2_MODE1 0x034
+#define QSERDES_V8_COM_DIV_FRAC_START3_MODE1 0x038
+#define QSERDES_V8_COM_HSCLK_SEL_1 0x03c
+#define QSERDES_V8_COM_VCO_TUNE1_MODE1 0x048
+#define QSERDES_V8_COM_VCO_TUNE2_MODE1 0x04c
+#define QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x050
+#define QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x054
+#define QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x058
+#define QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x05c
+#define QSERDES_V8_COM_SSC_STEP_SIZE1_MODE0 0x060
+#define QSERDES_V8_COM_SSC_STEP_SIZE2_MODE0 0x064
+#define QSERDES_V8_COM_CP_CTRL_MODE0 0x070
+#define QSERDES_V8_COM_PLL_RCTRL_MODE0 0x074
+#define QSERDES_V8_COM_PLL_CCTRL_MODE0 0x078
+#define QSERDES_V8_COM_LOCK_CMP1_MODE0 0x080
+#define QSERDES_V8_COM_LOCK_CMP2_MODE0 0x084
+#define QSERDES_V8_COM_DEC_START_MODE0 0x088
+#define QSERDES_V8_COM_DEC_START_MSB_MODE0 0x08c
+#define QSERDES_V8_COM_DIV_FRAC_START1_MODE0 0x090
+#define QSERDES_V8_COM_DIV_FRAC_START2_MODE0 0x094
+#define QSERDES_V8_COM_DIV_FRAC_START3_MODE0 0x098
+#define QSERDES_V8_COM_VCO_TUNE1_MODE0 0x0a8
+#define QSERDES_V8_COM_VCO_TUNE2_MODE0 0x0ac
+#define QSERDES_V8_COM_BG_TIMER 0x0bc
+#define QSERDES_V8_COM_SSC_EN_CENTER 0x0c0
+#define QSERDES_V8_COM_SSC_PER1 0x0cc
+#define QSERDES_V8_COM_SSC_PER2 0x0d0
+#define QSERDES_V8_COM_BIAS_EN_CLKBUFLR_EN 0x0dc
+#define QSERDES_V8_COM_SYSCLK_BUF_ENABLE 0x0e8
+#define QSERDES_V8_COM_SYSCLK_EN_SEL 0x110
+#define QSERDES_V8_COM_RESETSM_CNTRL 0x118
+#define QSERDES_V8_COM_LOCK_CMP_CFG 0x124
+#define QSERDES_V8_COM_VCO_TUNE_MAP 0x140
+#define QSERDES_V8_COM_CORE_CLK_EN 0x170
+#define QSERDES_V8_COM_CMN_CONFIG_1 0x174
+#define QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_1 0x1a4
+#define QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_2 0x1a8
+#define QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_3 0x1ac
+#define QSERDES_V8_COM_ADDITIONAL_MISC 0x1b4
+#define QSERDES_V8_COM_CMN_STATUS 0x2c8
+#define QSERDES_V8_COM_C_READY_STATUS 0x2f0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h
new file mode 100644
index 000000000000..68c38fdfc1d8
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2025, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_LN_SHRD_V5_H_
+#define QCOM_PHY_QMP_QSERDES_LN_SHRD_V5_H_
+
+#define QSERDES_v5_LN_SHRD_UCDR_PI_CTRL2 0x04c
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h
new file mode 100644
index 000000000000..4cb8b1708607
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V8_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V8_H_
+
+#define QSERDES_V8_TX_TX_EMP_POST1_LVL 0x00c
+#define QSERDES_V8_TX_TX_DRV_LVL 0x014
+#define QSERDES_V8_TX_RES_CODE_LANE_TX 0x034
+#define QSERDES_V8_TX_RES_CODE_LANE_RX 0x038
+#define QSERDES_V8_TX_RES_CODE_LANE_OFFSET_TX 0x03c
+#define QSERDES_V8_TX_RES_CODE_LANE_OFFSET_RX 0x040
+#define QSERDES_V8_TX_TRANSCEIVER_BIAS_EN 0x054
+#define QSERDES_V8_TX_HIGHZ_DRVR_EN 0x058
+#define QSERDES_V8_TX_TX_POL_INV 0x05c
+#define QSERDES_V8_TX_LANE_MODE_1 0x084
+#define QSERDES_V8_TX_LANE_MODE_2 0x088
+#define QSERDES_V8_TX_LANE_MODE_3 0x08c
+#define QSERDES_V8_TX_LANE_MODE_4 0x090
+#define QSERDES_V8_TX_LANE_MODE_5 0x094
+#define QSERDES_V8_TX_RCV_DETECT_LVL_2 0x0a4
+#define QSERDES_V8_TX_PI_QEC_CTRL 0x0e4
+
+#define QSERDES_V8_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V8_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V8_RX_UCDR_SVS_FO_GAIN 0x020
+#define QSERDES_V8_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V8_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V8_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V8_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
+#define QSERDES_V8_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V8_RX_UCDR_SB2_THRESH1 0x04c
+#define QSERDES_V8_RX_UCDR_SB2_THRESH2 0x050
+#define QSERDES_V8_RX_UCDR_SB2_GAIN1 0x054
+#define QSERDES_V8_RX_UCDR_SB2_GAIN2 0x058
+#define QSERDES_V8_RX_AUX_DATA_TCOARSE_TFINE 0x060
+#define QSERDES_V8_RX_VGA_CAL_CNTRL1 0x0d4
+#define QSERDES_V8_RX_VGA_CAL_CNTRL2 0x0d8
+#define QSERDES_V8_RX_GM_CAL 0x0dc
+#define QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V8_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V8_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V8_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
+#define QSERDES_V8_RX_SIGDET_ENABLES 0x118
+#define QSERDES_V8_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V8_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V8_RX_RX_MODE_00_LOW 0x15c
+#define QSERDES_V8_RX_RX_MODE_00_HIGH 0x160
+#define QSERDES_V8_RX_RX_MODE_00_HIGH2 0x164
+#define QSERDES_V8_RX_RX_MODE_00_HIGH3 0x168
+#define QSERDES_V8_RX_RX_MODE_00_HIGH4 0x16c
+#define QSERDES_V8_RX_RX_MODE_01_LOW 0x170
+#define QSERDES_V8_RX_RX_MODE_01_HIGH 0x174
+#define QSERDES_V8_RX_RX_MODE_01_HIGH2 0x178
+#define QSERDES_V8_RX_RX_MODE_01_HIGH3 0x17c
+#define QSERDES_V8_RX_RX_MODE_01_HIGH4 0x180
+#define QSERDES_V8_RX_DFE_EN_TIMER 0x1a0
+#define QSERDES_V8_RX_DFE_CTLE_POST_CAL_OFFSET 0x1a4
+#define QSERDES_V8_RX_DCC_CTRL1 0x1a8
+#define QSERDES_V8_RX_VTH_CODE 0x1b0
+#define QSERDES_V8_RX_SIGDET_CAL_CTRL1 0x1e4
+#define QSERDES_V8_RX_SIGDET_CAL_TRIM 0x1f8
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index b33e2e2b5014..9c69c77d10c8 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -1758,8 +1758,9 @@ static void qmp_ufs_init_registers(struct qmp_ufs *qmp, const struct qmp_phy_cfg
qmp_ufs_init_all(qmp, &cfg->tbls_hs_b);
}
-static int qmp_ufs_com_init(struct qmp_ufs *qmp)
+static int qmp_ufs_power_on(struct phy *phy)
{
+ struct qmp_ufs *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *pcs = qmp->pcs;
int ret;
@@ -1775,70 +1776,14 @@ static int qmp_ufs_com_init(struct qmp_ufs *qmp)
goto err_disable_regulators;
qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN);
-
return 0;
err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-
return ret;
}
-static int qmp_ufs_com_exit(struct qmp_ufs *qmp)
-{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
-
- reset_control_assert(qmp->ufs_reset);
-
- clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
-
- regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-
- return 0;
-}
-
-static int qmp_ufs_init(struct phy *phy)
-{
- struct qmp_ufs *qmp = phy_get_drvdata(phy);
- const struct qmp_phy_cfg *cfg = qmp->cfg;
- int ret;
- dev_vdbg(qmp->dev, "Initializing QMP phy\n");
-
- if (cfg->no_pcs_sw_reset) {
- /*
- * Get UFS reset, which is delayed until now to avoid a
- * circular dependency where UFS needs its PHY, but the PHY
- * needs this UFS reset.
- */
- if (!qmp->ufs_reset) {
- qmp->ufs_reset =
- devm_reset_control_get_exclusive(qmp->dev,
- "ufsphy");
-
- if (IS_ERR(qmp->ufs_reset)) {
- ret = PTR_ERR(qmp->ufs_reset);
- dev_err(qmp->dev,
- "failed to get UFS reset: %d\n",
- ret);
-
- qmp->ufs_reset = NULL;
- return ret;
- }
- }
-
- ret = reset_control_assert(qmp->ufs_reset);
- if (ret)
- return ret;
- }
-
- ret = qmp_ufs_com_init(qmp);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int qmp_ufs_power_on(struct phy *phy)
+static int qmp_ufs_phy_calibrate(struct phy *phy)
{
struct qmp_ufs *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
@@ -1847,6 +1792,10 @@ static int qmp_ufs_power_on(struct phy *phy)
unsigned int val;
int ret;
+ ret = reset_control_assert(qmp->ufs_reset);
+ if (ret)
+ return ret;
+
qmp_ufs_init_registers(qmp, cfg);
ret = reset_control_deassert(qmp->ufs_reset);
@@ -1876,54 +1825,17 @@ static int qmp_ufs_power_off(struct phy *phy)
struct qmp_ufs *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
- /* PHY reset */
- if (!cfg->no_pcs_sw_reset)
- qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
-
- /* stop SerDes */
- qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL], SERDES_START);
-
/* Put PHY into POWER DOWN state: active low */
qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
SW_PWRDN);
- return 0;
-}
-
-static int qmp_ufs_exit(struct phy *phy)
-{
- struct qmp_ufs *qmp = phy_get_drvdata(phy);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
- qmp_ufs_com_exit(qmp);
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
return 0;
}
-static int qmp_ufs_enable(struct phy *phy)
-{
- int ret;
-
- ret = qmp_ufs_init(phy);
- if (ret)
- return ret;
-
- ret = qmp_ufs_power_on(phy);
- if (ret)
- qmp_ufs_exit(phy);
-
- return ret;
-}
-
-static int qmp_ufs_disable(struct phy *phy)
-{
- int ret;
-
- ret = qmp_ufs_power_off(phy);
- if (ret)
- return ret;
- return qmp_ufs_exit(phy);
-}
-
static int qmp_ufs_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct qmp_ufs *qmp = phy_get_drvdata(phy);
@@ -1940,9 +1852,40 @@ static int qmp_ufs_set_mode(struct phy *phy, enum phy_mode mode, int submode)
return 0;
}
+static int qmp_ufs_phy_init(struct phy *phy)
+{
+ struct qmp_ufs *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ int ret;
+
+ if (!cfg->no_pcs_sw_reset)
+ return 0;
+
+ /*
+ * Get UFS reset, which is delayed until now to avoid a
+ * circular dependency where UFS needs its PHY, but the PHY
+ * needs this UFS reset.
+ */
+ if (!qmp->ufs_reset) {
+ qmp->ufs_reset =
+ devm_reset_control_get_exclusive(qmp->dev, "ufsphy");
+
+ if (IS_ERR(qmp->ufs_reset)) {
+ ret = PTR_ERR(qmp->ufs_reset);
+ dev_err(qmp->dev, "failed to get PHY reset: %d\n", ret);
+ qmp->ufs_reset = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static const struct phy_ops qcom_qmp_ufs_phy_ops = {
- .power_on = qmp_ufs_enable,
- .power_off = qmp_ufs_disable,
+ .init = qmp_ufs_phy_init,
+ .power_on = qmp_ufs_power_on,
+ .power_off = qmp_ufs_power_off,
+ .calibrate = qmp_ufs_phy_calibrate,
.set_mode = qmp_ufs_set_mode,
.owner = THIS_MODULE,
};
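
For context on the callback split above, here is a minimal sketch (not part of this patch) of how a PHY consumer such as the UFS host controller driver is expected to drive the reworked ops through the generic PHY API; the helper name and the error unwinding are illustrative assumptions only.

	#include <linux/phy/phy.h>

	/*
	 * Expected call order after the rework: .init grabs the late "ufsphy"
	 * reset, .power_on enables regulators/clocks and deasserts power-down,
	 * .calibrate programs the register tables and starts the SerDes, and
	 * .power_off undoes .power_on.
	 */
	static int example_ufs_phy_bring_up(struct phy *phy)
	{
		int ret;

		ret = phy_init(phy);		/* -> qmp_ufs_phy_init() */
		if (ret)
			return ret;

		ret = phy_power_on(phy);	/* -> qmp_ufs_power_on() */
		if (ret)
			goto err_exit;

		ret = phy_calibrate(phy);	/* -> qmp_ufs_phy_calibrate() */
		if (ret)
			goto err_power_off;

		return 0;

	err_power_off:
		phy_power_off(phy);		/* -> qmp_ufs_power_off() */
	err_exit:
		phy_exit(phy);
		return ret;
	}
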
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index d0f41e4aaa85..f58c82b2dd23 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -25,11 +25,15 @@
#include "phy-qcom-qmp-qserdes-txrx-v6.h"
#include "phy-qcom-qmp-qserdes-txrx-v6_20.h"
#include "phy-qcom-qmp-qserdes-txrx-v6_n4.h"
+#include "phy-qcom-qmp-qserdes-ln-shrd-v5.h"
#include "phy-qcom-qmp-qserdes-ln-shrd-v6.h"
#include "phy-qcom-qmp-qserdes-com-v7.h"
#include "phy-qcom-qmp-qserdes-txrx-v7.h"
+#include "phy-qcom-qmp-qserdes-com-v8.h"
+#include "phy-qcom-qmp-qserdes-txrx-v8.h"
+
#include "phy-qcom-qmp-qserdes-pll.h"
#include "phy-qcom-qmp-pcs-v2.h"
@@ -52,6 +56,8 @@
#include "phy-qcom-qmp-pcs-v7.h"
+#include "phy-qcom-qmp-pcs-v8.h"
+
/* QPHY_SW_RESET bit */
#define SW_RESET BIT(0)
/* QPHY_POWER_DOWN_CONTROL */
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 49c37c53b38e..b5514a32ff8f 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -1114,9 +1114,7 @@ static int qusb2_phy_probe(struct platform_device *pdev)
phy_set_drvdata(generic_phy, qphy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QUSB2 phy\n");
- else
+ if (IS_ERR(phy_provider))
pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
index bd44af36c67a..4e2dfd01adf2 100644
--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
@@ -30,9 +30,8 @@
#define PHY_CFG_ADDR_SHIFT 1
#define PHY_CFG_DATA_MASK 0xf
#define PHY_CFG_ADDR_MASK 0x3f
-#define PHY_CFG_RD_MASK 0x3ff
#define PHY_CFG_WR_ENABLE 1
-#define PHY_CFG_WR_DISABLE 1
+#define PHY_CFG_WR_DISABLE 0
#define PHY_CFG_WR_SHIFT 0
#define PHY_CFG_WR_MASK 1
#define PHY_CFG_PLL_LOCK 0x10
@@ -160,6 +159,12 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
guard(mutex)(&rk_phy->pcie_mutex);
+ regmap_write(rk_phy->reg_base,
+ rk_phy->phy_data->pcie_laneoff,
+ HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
+ PHY_LANE_IDLE_MASK,
+ PHY_LANE_IDLE_A_SHIFT + inst->index));
+
if (rk_phy->pwr_cnt++) {
return 0;
}
@@ -176,12 +181,6 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
PHY_CFG_ADDR_MASK,
PHY_CFG_ADDR_SHIFT));
- regmap_write(rk_phy->reg_base,
- rk_phy->phy_data->pcie_laneoff,
- HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
- PHY_LANE_IDLE_MASK,
- PHY_LANE_IDLE_A_SHIFT + inst->index));
-
/*
* No documented timeout value for phy operation below,
* so we make it large enough here. And we use loop-break
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index f6756a609a9a..be925508ed97 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -213,6 +213,55 @@ static const struct mipi_phy_device_desc exynos5433_mipi_phy = {
},
};
+static const struct mipi_phy_device_desc exynos7870_mipi_phy = {
+ .num_regmaps = 3,
+ .regmap_names = {
+ "samsung,pmu-syscon",
+ "samsung,disp-sysreg",
+ "samsung,cam0-sysreg"
+ },
+ .num_phys = 4,
+ .phys = {
+ {
+ /* EXYNOS_MIPI_PHY_ID_CSIS0 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS7870_MIPI_PHY_CONTROL0,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = BIT(0),
+ .resetn_reg = 0,
+ .resetn_map = EXYNOS_MIPI_REGMAP_CAM0,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_DSIM0 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS7870_MIPI_PHY_CONTROL0,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = BIT(0),
+ .resetn_reg = 0,
+ .resetn_map = EXYNOS_MIPI_REGMAP_DISP,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_CSIS1 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS7870_MIPI_PHY_CONTROL1,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = BIT(1),
+ .resetn_reg = 0,
+ .resetn_map = EXYNOS_MIPI_REGMAP_CAM0,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_CSIS2 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS7870_MIPI_PHY_CONTROL2,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = BIT(2),
+ .resetn_reg = 0,
+ .resetn_map = EXYNOS_MIPI_REGMAP_CAM0,
+ },
+ },
+};
+
struct exynos_mipi_video_phy {
struct regmap *regmaps[EXYNOS_MIPI_REGMAPS_NUM];
int num_phys;
@@ -351,6 +400,9 @@ static const struct of_device_id exynos_mipi_video_phy_of_match[] = {
}, {
.compatible = "samsung,exynos5433-mipi-video-phy",
.data = &exynos5433_mipi_phy,
+ }, {
+ .compatible = "samsung,exynos7870-mipi-video-phy",
+ .data = &exynos7870_mipi_phy,
},
{ /* sentinel */ },
};
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 917a76d584f0..dd660ebe8045 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -2025,6 +2025,35 @@ static const struct exynos5_usbdrd_phy_drvdata exynos850_usbdrd_phy = {
.n_regulators = ARRAY_SIZE(exynos5_regulator_names),
};
+static const struct exynos5_usbdrd_phy_tuning exynos990_tunes_utmi_postinit[] = {
+ PHY_TUNING_ENTRY_PHY(EXYNOS850_DRD_HSPPARACON,
+ (HSPPARACON_TXVREF |
+ HSPPARACON_TXPREEMPAMP | HSPPARACON_SQRX |
+ HSPPARACON_COMPDIS),
+ (FIELD_PREP_CONST(HSPPARACON_TXVREF, 7) |
+ FIELD_PREP_CONST(HSPPARACON_TXPREEMPAMP, 3) |
+ FIELD_PREP_CONST(HSPPARACON_SQRX, 5) |
+ FIELD_PREP_CONST(HSPPARACON_COMPDIS, 7))),
+ PHY_TUNING_ENTRY_LAST
+};
+
+static const struct exynos5_usbdrd_phy_tuning *exynos990_tunes[PTS_MAX] = {
+ [PTS_UTMI_POSTINIT] = exynos990_tunes_utmi_postinit,
+};
+
+static const struct exynos5_usbdrd_phy_drvdata exynos990_usbdrd_phy = {
+ .phy_cfg = phy_cfg_exynos850,
+ .phy_ops = &exynos850_usbdrd_phy_ops,
+ .phy_tunes = exynos990_tunes,
+ .pmu_offset_usbdrd0_phy = EXYNOS990_PHY_CTRL_USB20,
+ .clk_names = exynos5_clk_names,
+ .n_clks = ARRAY_SIZE(exynos5_clk_names),
+ .core_clk_names = exynos5_core_clk_names,
+ .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names),
+ .regulator_names = exynos5_regulator_names,
+ .n_regulators = ARRAY_SIZE(exynos5_regulator_names),
+};
+
static const struct exynos5_usbdrd_phy_config phy_cfg_gs101[] = {
{
.id = EXYNOS5_DRDPHY_UTMI,
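
The exynos990 UTMI post-init tuning entry added above packs several bitfields into one register value at build time. Below is a small self-contained sketch of the FIELD_PREP_CONST() pattern it relies on, using made-up masks rather than the driver's HSPPARACON_* definitions.

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/* Made-up fields standing in for the driver's HSPPARACON_* masks. */
	#define EXAMPLE_TXVREF	GENMASK(3, 0)
	#define EXAMPLE_SQRX	GENMASK(6, 4)

	/*
	 * FIELD_PREP_CONST() shifts a constant into place under its mask at
	 * compile time, so a whole tuning word can be expressed as a single
	 * constant initializer, as in the PHY_TUNING_ENTRY_PHY() entry above.
	 */
	static const u32 example_tune_value =
		FIELD_PREP_CONST(EXAMPLE_TXVREF, 7) |
		FIELD_PREP_CONST(EXAMPLE_SQRX, 5);
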
@@ -2228,6 +2257,9 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
}, {
.compatible = "samsung,exynos850-usbdrd-phy",
.data = &exynos850_usbdrd_phy
+ }, {
+ .compatible = "samsung,exynos990-usbdrd-phy",
+ .data = &exynos990_usbdrd_phy
},
{ },
};
diff --git a/drivers/phy/st/phy-stih407-usb.c b/drivers/phy/st/phy-stih407-usb.c
index ebb1d0858aa3..7a3e4584895c 100644
--- a/drivers/phy/st/phy-stih407-usb.c
+++ b/drivers/phy/st/phy-stih407-usb.c
@@ -139,8 +139,6 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
if (IS_ERR(phy_provider))
return PTR_ERR(phy_provider);
- dev_info(dev, "STiH407 USB Generic picoPHY driver probed!");
-
return 0;
}
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index b917cd413de7..27fe92f73f33 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -757,8 +757,8 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
}
version = readl_relaxed(usbphyc->base + STM32_USBPHYC_VERSION);
- dev_info(dev, "registered rev:%lu.%lu\n",
- FIELD_GET(MAJREV, version), FIELD_GET(MINREV, version));
+ dev_dbg(dev, "registered rev: %lu.%lu\n",
+ FIELD_GET(MAJREV, version), FIELD_GET(MINREV, version));
return 0;
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index 6f12b38cd894..a26aec3ab29e 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -784,7 +784,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(twl->dev);
- dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
return 0;
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index eb1b37af81fb..be1ca8e85754 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -206,6 +206,17 @@ config PINCTRL_DIGICOLOR
select PINMUX
select GENERIC_PINCONF
+config PINCTRL_EIC7700
+ tristate "EIC7700 PINCTRL driver"
+ depends on ARCH_ESWIN || COMPILE_TEST
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This driver supports the pin controller in ESWIN's EIC7700 SoC,
+ which provides pin multiplexing, pin configuration, and RGMII
+ voltage control.
+ Say Y here to enable the EIC7700 pinctrl driver.
+
config PINCTRL_EP93XX
bool
depends on ARCH_EP93XX || COMPILE_TEST
@@ -269,7 +280,8 @@ config PINCTRL_INGENIC
config PINCTRL_K210
bool "Pinctrl driver for the Canaan Kendryte K210 SoC"
- depends on RISCV && SOC_CANAAN_K210 && OF
+ depends on RISCV && SOC_CANAAN_K210 || COMPILE_TEST
+ depends on OF
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
select GPIOLIB
@@ -527,6 +539,7 @@ config PINCTRL_STMFX
tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
depends on I2C
depends on OF_GPIO
+ depends on HAS_IOMEM
select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
select MFD_STMFX
@@ -554,8 +567,8 @@ config PINCTRL_SX150X
- 16 bits: sx1509q, sx1506q
config PINCTRL_TB10X
- bool
- depends on OF && ARC_PLAT_TB10X
+ bool "Pinctrl for TB10X" if COMPILE_TEST
+ depends on OF && ARC_PLAT_TB10X || COMPILE_TEST
select GPIOLIB
config PINCTRL_TPS6594
@@ -590,7 +603,8 @@ config PINCTRL_TH1520
config PINCTRL_ZYNQ
bool "Pinctrl driver for Xilinx Zynq"
- depends on ARCH_ZYNQ
+ depends on ARCH_ZYNQ || COMPILE_TEST
+ depends on OF
select PINMUX
select GENERIC_PINCONF
help
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 65dac8e38798..909ab89a56d2 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_PINCTRL_CY8C95X0) += pinctrl-cy8c95x0.o
obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
+obj-$(CONFIG_PINCTRL_EIC7700) += pinctrl-eic7700.o
obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
obj-$(CONFIG_PINCTRL_EP93XX) += pinctrl-ep93xx.o
obj-$(CONFIG_PINCTRL_EYEQ5) += pinctrl-eyeq5.o
@@ -83,7 +84,7 @@ obj-y += sophgo/
obj-y += spacemit/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-y += sprd/
-obj-$(CONFIG_SOC_STARFIVE) += starfive/
+obj-y += starfive/
obj-$(CONFIG_PINCTRL_STM32) += stm32/
obj-y += sunplus/
obj-$(CONFIG_PINCTRL_SUNXI) += sunxi/
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 86f3d5c69e36..1f0ef4727ba7 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -962,7 +962,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
pctrl->chip.direction_input = owl_gpio_direction_input;
pctrl->chip.direction_output = owl_gpio_direction_output;
pctrl->chip.get = owl_gpio_get;
- pctrl->chip.set_rv = owl_gpio_set;
+ pctrl->chip.set = owl_gpio_set;
pctrl->chip.request = owl_gpio_request;
pctrl->chip.free = owl_gpio_free;
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
index 774f8d05142f..cb295856dda1 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
@@ -2653,7 +2653,7 @@ static const struct pinconf_ops aspeed_g4_conf_ops = {
.pin_config_group_set = aspeed_pin_config_group_set,
};
-static struct pinctrl_desc aspeed_g4_pinctrl_desc = {
+static const struct pinctrl_desc aspeed_g4_pinctrl_desc = {
.name = "aspeed-g4-pinctrl",
.pins = aspeed_g4_pins,
.npins = ARRAY_SIZE(aspeed_g4_pins),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 5bb8fd0d1e41..792089628362 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2845,7 +2845,7 @@ static const struct pinconf_ops aspeed_g5_conf_ops = {
.pin_config_group_set = aspeed_pin_config_group_set,
};
-static struct pinctrl_desc aspeed_g5_pinctrl_desc = {
+static const struct pinctrl_desc aspeed_g5_pinctrl_desc = {
.name = "aspeed-g5-pinctrl",
.pins = aspeed_g5_pins,
.npins = ARRAY_SIZE(aspeed_g5_pins),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 5a7cd0a88687..b0c7e4f6df9c 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -17,6 +17,7 @@
#include "../pinctrl-utils.h"
#include "pinctrl-aspeed.h"
+#define SCU040 0x040 /* Reset Control Set 1 */
#define SCU400 0x400 /* Multi-function Pin Control #1 */
#define SCU404 0x404 /* Multi-function Pin Control #2 */
#define SCU40C 0x40C /* Multi-function Pin Control #3 */
@@ -52,7 +53,7 @@
#define SCU6D0 0x6D0 /* Multi-function Pin Control #29 */
#define SCUC20 0xC20 /* PCIE configuration Setting Control */
-#define ASPEED_G6_NR_PINS 256
+#define ASPEED_G6_NR_PINS 258
#define M24 0
SIG_EXPR_LIST_DECL_SESG(M24, MDC3, MDIO3, SIG_DESC_SET(SCU410, 0));
@@ -1636,6 +1637,12 @@ FUNC_DECL_1(USB11BHID, USBB);
FUNC_DECL_1(USB2BD, USBB);
FUNC_DECL_1(USB2BH, USBB);
+#define D7 257
+SIG_EXPR_LIST_DECL_SESG(D7, RCRST, PCIERC1, SIG_DESC_SET(SCU040, 19),
+ SIG_DESC_SET(SCU500, 24));
+PIN_DECL_(D7, SIG_EXPR_LIST_PTR(D7, RCRST));
+FUNC_GROUP_DECL(PCIERC1, D7);
+
/* Pins, groups and functions are sort(1):ed alphabetically for sanity */
static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
@@ -1806,6 +1813,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(D4),
ASPEED_PINCTRL_PIN(D5),
ASPEED_PINCTRL_PIN(D6),
+ ASPEED_PINCTRL_PIN(D7),
ASPEED_PINCTRL_PIN(E1),
ASPEED_PINCTRL_PIN(E11),
ASPEED_PINCTRL_PIN(E12),
@@ -2073,6 +2081,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(SALT9G1),
ASPEED_PINCTRL_GROUP(SD1),
ASPEED_PINCTRL_GROUP(SD2),
+ ASPEED_PINCTRL_GROUP(PCIERC1),
ASPEED_PINCTRL_GROUP(EMMCG1),
ASPEED_PINCTRL_GROUP(EMMCG4),
ASPEED_PINCTRL_GROUP(EMMCG8),
@@ -2314,6 +2323,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(SPI2),
ASPEED_PINCTRL_FUNC(SPI2CS1),
ASPEED_PINCTRL_FUNC(SPI2CS2),
+ ASPEED_PINCTRL_FUNC(PCIERC1),
ASPEED_PINCTRL_FUNC(TACH0),
ASPEED_PINCTRL_FUNC(TACH1),
ASPEED_PINCTRL_FUNC(TACH10),
@@ -2763,7 +2773,7 @@ static const struct pinconf_ops aspeed_g6_conf_ops = {
.pin_config_group_set = aspeed_pin_config_group_set,
};
-static struct pinctrl_desc aspeed_g6_pinctrl_desc = {
+static const struct pinctrl_desc aspeed_g6_pinctrl_desc = {
.name = "aspeed-g6-pinctrl",
.pins = aspeed_g6_pins,
.npins = ARRAY_SIZE(aspeed_g6_pins),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 9c6ee46ac7a0..7e0ebf11af16 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -441,7 +441,7 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
}
int aspeed_pinctrl_probe(struct platform_device *pdev,
- struct pinctrl_desc *pdesc,
+ const struct pinctrl_desc *pdesc,
struct aspeed_pinctrl_data *pdata)
{
struct device *parent;
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
index 4dcde3bc29c8..28f3bde25081 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
@@ -102,7 +102,7 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int offset);
int aspeed_pinctrl_probe(struct platform_device *pdev,
- struct pinctrl_desc *pdesc,
+ const struct pinctrl_desc *pdesc,
struct aspeed_pinctrl_data *pdata);
int aspeed_pin_config_get(struct pinctrl_dev *pctldev, unsigned int offset,
unsigned long *config);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 826827800474..7dbf079739bc 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -397,7 +397,7 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.direction_output = bcm2835_gpio_direction_output,
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
- .set_rv = bcm2835_gpio_set,
+ .set = bcm2835_gpio_set,
.set_config = gpiochip_generic_config,
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
@@ -414,7 +414,7 @@ static const struct gpio_chip bcm2711_gpio_chip = {
.direction_output = bcm2835_gpio_direction_output,
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
- .set_rv = bcm2835_gpio_set,
+ .set = bcm2835_gpio_set,
.set_config = gpiochip_generic_config,
.base = -1,
.ngpio = BCM2711_NUM_GPIOS,
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm4908.c b/drivers/pinctrl/bcm/pinctrl-bcm4908.c
index f190e0997f1f..12f7a253ea4d 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm4908.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm4908.c
@@ -456,7 +456,7 @@ static const struct pinmux_ops bcm4908_pinctrl_pmxops = {
* Controller code
*/
-static struct pinctrl_desc bcm4908_pinctrl_desc = {
+static const struct pinctrl_desc bcm4908_pinctrl_desc = {
.name = "bcm4908-pinctrl",
.pctlops = &bcm4908_pinctrl_ops,
.pmxops = &bcm4908_pinctrl_pmxops,
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
index bf9597800954..e9aa99f85e05 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -903,6 +903,7 @@ static struct pinctrl_desc cygnus_pinctrl_desc = {
.name = "cygnus-pinmux",
.pctlops = &cygnus_pinctrl_ops,
.pmxops = &cygnus_pinmux_ops,
+ .npins = ARRAY_SIZE(cygnus_pins),
};
static int cygnus_mux_log_init(struct cygnus_pinctrl *pinctrl)
@@ -935,7 +936,6 @@ static int cygnus_pinmux_probe(struct platform_device *pdev)
struct cygnus_pinctrl *pinctrl;
int i, ret;
struct pinctrl_pin_desc *pins;
- unsigned num_pins = ARRAY_SIZE(cygnus_pins);
pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
if (!pinctrl)
@@ -963,11 +963,12 @@ static int cygnus_pinmux_probe(struct platform_device *pdev)
return ret;
}
- pins = devm_kcalloc(&pdev->dev, num_pins, sizeof(*pins), GFP_KERNEL);
+ pins = devm_kcalloc(&pdev->dev, ARRAY_SIZE(cygnus_pins), sizeof(*pins),
+ GFP_KERNEL);
if (!pins)
return -ENOMEM;
- for (i = 0; i < num_pins; i++) {
+ for (i = 0; i < ARRAY_SIZE(cygnus_pins); i++) {
pins[i].number = cygnus_pins[i].pin;
pins[i].name = cygnus_pins[i].name;
pins[i].drv_data = &cygnus_pins[i].gpio_mux;
@@ -978,7 +979,6 @@ static int cygnus_pinmux_probe(struct platform_device *pdev)
pinctrl->functions = cygnus_pin_functions;
pinctrl->num_functions = ARRAY_SIZE(cygnus_pin_functions);
cygnus_pinctrl_desc.pins = pins;
- cygnus_pinctrl_desc.npins = num_pins;
pinctrl->pctl = devm_pinctrl_register(&pdev->dev, &cygnus_pinctrl_desc,
pinctrl);
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 1d08b8d4cdd7..8c353676f2af 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -865,7 +865,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
gc->direction_input = iproc_gpio_direction_input;
gc->direction_output = iproc_gpio_direction_output;
gc->get_direction = iproc_gpio_get_direction;
- gc->set_rv = iproc_gpio_set;
+ gc->set = iproc_gpio_set;
gc->get = iproc_gpio_get;
chip->pinmux_is_supported = of_property_read_bool(dev->of_node,
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index 6bb2b461950b..03bd01b4a945 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -192,7 +192,7 @@ static const struct pinmux_ops ns_pinctrl_pmxops = {
* Controller code
*/
-static struct pinctrl_desc ns_pinctrl_desc = {
+static const struct pinctrl_desc ns_pinctrl_desc = {
.name = "pinctrl-ns",
.pctlops = &ns_pinctrl_ops,
.pmxops = &ns_pinctrl_pmxops,
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 04f4fca854cc..23ab3ab064b6 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -971,6 +971,7 @@ static struct pinctrl_desc ns2_pinctrl_desc = {
.pctlops = &ns2_pinctrl_ops,
.pmxops = &ns2_pinmux_ops,
.confops = &ns2_pinconf_ops,
+ .npins = ARRAY_SIZE(ns2_pins),
};
static int ns2_mux_log_init(struct ns2_pinctrl *pinctrl)
@@ -1026,7 +1027,6 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
struct resource *res;
int i, ret;
struct pinctrl_pin_desc *pins;
- unsigned int num_pins = ARRAY_SIZE(ns2_pins);
pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
if (!pinctrl)
@@ -1060,11 +1060,12 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
return ret;
}
- pins = devm_kcalloc(&pdev->dev, num_pins, sizeof(*pins), GFP_KERNEL);
+ pins = devm_kcalloc(&pdev->dev, ARRAY_SIZE(ns2_pins), sizeof(*pins),
+ GFP_KERNEL);
if (!pins)
return -ENOMEM;
- for (i = 0; i < num_pins; i++) {
+ for (i = 0; i < ARRAY_SIZE(ns2_pins); i++) {
pins[i].number = ns2_pins[i].pin;
pins[i].name = ns2_pins[i].name;
pins[i].drv_data = &ns2_pins[i];
@@ -1075,7 +1076,6 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
pinctrl->functions = ns2_pin_functions;
pinctrl->num_functions = ARRAY_SIZE(ns2_pin_functions);
ns2_pinctrl_desc.pins = pins;
- ns2_pinctrl_desc.npins = num_pins;
pinctrl->pctl = pinctrl_register(&ns2_pinctrl_desc, &pdev->dev,
pinctrl);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index b08f8480ddc6..b425ecacd1b0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -656,7 +656,7 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
gc->get_direction = nsp_gpio_get_direction;
- gc->set_rv = nsp_gpio_set;
+ gc->set = nsp_gpio_set;
gc->get = nsp_gpio_get;
/* optional GPIO interrupt support */
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index eb6298507c1d..9b716c0d2b94 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -525,6 +525,7 @@ static struct pinctrl_desc nsp_pinctrl_desc = {
.name = "nsp-pinmux",
.pctlops = &nsp_pinctrl_ops,
.pmxops = &nsp_pinmux_ops,
+ .npins = ARRAY_SIZE(nsp_pins),
};
static int nsp_mux_log_init(struct nsp_pinctrl *pinctrl)
@@ -556,7 +557,6 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
struct resource *res;
int i, ret;
struct pinctrl_pin_desc *pins;
- unsigned int num_pins = ARRAY_SIZE(nsp_pins);
pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
if (!pinctrl)
@@ -589,11 +589,12 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
return ret;
}
- pins = devm_kcalloc(&pdev->dev, num_pins, sizeof(*pins), GFP_KERNEL);
+ pins = devm_kcalloc(&pdev->dev, ARRAY_SIZE(nsp_pins), sizeof(*pins),
+ GFP_KERNEL);
if (!pins)
return -ENOMEM;
- for (i = 0; i < num_pins; i++) {
+ for (i = 0; i < ARRAY_SIZE(nsp_pins); i++) {
pins[i].number = nsp_pins[i].pin;
pins[i].name = nsp_pins[i].name;
pins[i].drv_data = &nsp_pins[i].gpio_select;
@@ -604,7 +605,6 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
pinctrl->functions = nsp_pin_functions;
pinctrl->num_functions = ARRAY_SIZE(nsp_pin_functions);
nsp_pinctrl_desc.pins = pins;
- nsp_pinctrl_desc.npins = num_pins;
pinctrl->pctl = devm_pinctrl_register(&pdev->dev, &nsp_pinctrl_desc,
pinctrl);
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index c372a2a24be4..8afcfa4e5694 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -204,6 +204,7 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
const struct berlin_desc_group *desc_group;
const struct berlin_desc_function *desc_function;
int i, max_functions = 0;
+ struct pinfunction *new_functions;
pctrl->nfunctions = 0;
@@ -229,12 +230,15 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
}
}
- pctrl->functions = krealloc(pctrl->functions,
+ new_functions = krealloc(pctrl->functions,
pctrl->nfunctions * sizeof(*pctrl->functions),
GFP_KERNEL);
- if (!pctrl->functions)
+ if (!new_functions) {
+ kfree(pctrl->functions);
return -ENOMEM;
+ }
+ pctrl->functions = new_functions;
/* map functions to theirs groups */
for (i = 0; i < pctrl->desc->ngroups; i++) {
desc_group = pctrl->desc->groups + i;
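
The hunk above fixes the classic krealloc() pitfall of assigning the result back to the only pointer that still references the old buffer. A generic sketch of the safe pattern, with hypothetical names, is:

	#include <linux/slab.h>
	#include <linux/types.h>

	/*
	 * Keep the old pointer until krealloc() is known to have succeeded;
	 * on failure krealloc() returns NULL but leaves the original buffer
	 * allocated, so it must be freed (or kept) explicitly.
	 */
	static int example_grow_buffer(u32 **buf, size_t count)
	{
		u32 *tmp;

		tmp = krealloc(*buf, count * sizeof(**buf), GFP_KERNEL);
		if (!tmp) {
			kfree(*buf);	/* matches the error path added above */
			*buf = NULL;
			return -ENOMEM;
		}

		*buf = tmp;
		return 0;
	}
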
@@ -283,7 +287,7 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
return 0;
}
-static struct pinctrl_desc berlin_pctrl_desc = {
+static const struct pinctrl_desc berlin_pctrl_desc = {
.name = "berlin-pinctrl",
.pctlops = &berlin_pinctrl_ops,
.pmxops = &berlin_pinmux_ops,
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
index 628b60ccc2b0..68abb6d6cecd 100644
--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
@@ -448,7 +448,7 @@ static const struct pinconf_ops cs42l43_pin_conf_ops = {
.pin_config_group_set = cs42l43_pin_config_group_set,
};
-static struct pinctrl_desc cs42l43_pin_desc = {
+static const struct pinctrl_desc cs42l43_pin_desc = {
.name = "cs42l43-pinctrl",
.owner = THIS_MODULE,
@@ -483,7 +483,8 @@ static int cs42l43_gpio_get(struct gpio_chip *chip, unsigned int offset)
return ret;
}
-static void cs42l43_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int cs42l43_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct cs42l43_pin *priv = gpiochip_get_data(chip);
unsigned int shift = offset + CS42L43_GPIO1_LVL_SHIFT;
@@ -493,23 +494,27 @@ static void cs42l43_gpio_set(struct gpio_chip *chip, unsigned int offset, int va
offset + 1, str_high_low(value));
ret = pm_runtime_resume_and_get(priv->dev);
- if (ret) {
- dev_err(priv->dev, "Failed to resume for set: %d\n", ret);
- return;
- }
+ if (ret)
+ return ret;
ret = regmap_update_bits(priv->regmap, CS42L43_GPIO_CTRL1,
BIT(shift), value << shift);
if (ret)
- dev_err(priv->dev, "Failed to set gpio%d: %d\n", offset + 1, ret);
+ return ret;
pm_runtime_put(priv->dev);
+
+ return 0;
}
static int cs42l43_gpio_direction_out(struct gpio_chip *chip,
unsigned int offset, int value)
{
- cs42l43_gpio_set(chip, offset, value);
+ int ret;
+
+ ret = cs42l43_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
return pinctrl_gpio_direction_output(chip, offset);
}
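
The cs42l43 conversion above is one instance of gpio_chip setters now returning int, so bus errors reach gpiolib instead of being logged and dropped. A minimal hypothetical sketch of the callback shape (the register offset and private struct are invented for illustration):

	#include <linux/bits.h>
	#include <linux/gpio/driver.h>
	#include <linux/regmap.h>

	#define EXAMPLE_OUT_REG	0x10	/* invented register offset */

	struct example_gpio {
		struct regmap *regmap;
	};

	/* .set now propagates an error code rather than returning void. */
	static int example_gpio_set(struct gpio_chip *chip, unsigned int offset,
				    int value)
	{
		struct example_gpio *priv = gpiochip_get_data(chip);

		return regmap_update_bits(priv->regmap, EXAMPLE_OUT_REG,
					  BIT(offset), value ? BIT(offset) : 0);
	}
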
diff --git a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
index 0f32866a4aef..ca6ae566082b 100644
--- a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
+++ b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
@@ -1058,13 +1058,12 @@ static const struct pinctrl_desc lochnagar_pin_desc = {
.confops = &lochnagar_pin_conf_ops,
};
-static void lochnagar_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int lochnagar_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
{
struct lochnagar_pin_priv *priv = gpiochip_get_data(chip);
struct lochnagar *lochnagar = priv->lochnagar;
const struct lochnagar_pin *pin = priv->pins[offset].drv_data;
- int ret;
value = !!value;
@@ -1075,29 +1074,31 @@ static void lochnagar_gpio_set(struct gpio_chip *chip,
case LN_PTYPE_MUX:
value |= LN2_OP_GPIO;
- ret = lochnagar_pin_set_mux(priv, pin, value);
+ return lochnagar_pin_set_mux(priv, pin, value);
break;
case LN_PTYPE_GPIO:
if (pin->invert)
value = !value;
- ret = regmap_update_bits(lochnagar->regmap, pin->reg,
- BIT(pin->shift), value << pin->shift);
+ return regmap_update_bits(lochnagar->regmap, pin->reg,
+ BIT(pin->shift),
+ value << pin->shift);
break;
default:
- ret = -EINVAL;
break;
}
- if (ret < 0)
- dev_err(chip->parent, "Failed to set %s value: %d\n",
- pin->name, ret);
+ return -EINVAL;
}
static int lochnagar_gpio_direction_out(struct gpio_chip *chip,
unsigned int offset, int value)
{
- lochnagar_gpio_set(chip, offset, value);
+ int ret;
+
+ ret = lochnagar_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
return pinctrl_gpio_direction_output(chip, offset);
}
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index 73ec5b9beb49..d19ef13224cc 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -1061,8 +1061,9 @@ static int madera_pin_probe(struct platform_device *pdev)
/* if the configuration is provided through pdata, apply it */
if (pdata->gpio_configs) {
- ret = pinctrl_register_mappings(pdata->gpio_configs,
- pdata->n_gpio_configs);
+ ret = devm_pinctrl_register_mappings(priv->dev,
+ pdata->gpio_configs,
+ pdata->n_gpio_configs);
if (ret)
return dev_err_probe(priv->dev, ret,
"Failed to register pdata mappings\n");
@@ -1081,17 +1082,8 @@ static int madera_pin_probe(struct platform_device *pdev)
return 0;
}
-static void madera_pin_remove(struct platform_device *pdev)
-{
- struct madera_pin_private *priv = platform_get_drvdata(pdev);
-
- if (priv->madera->pdata.gpio_configs)
- pinctrl_unregister_mappings(priv->madera->pdata.gpio_configs);
-}
-
static struct platform_driver madera_pin_driver = {
.probe = madera_pin_probe,
- .remove = madera_pin_remove,
.driver = {
.name = "madera-pinctrl",
},
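
With the devm_ variant used above, the mappings are dropped automatically when the device is unbound, which is why the .remove callback can go away. A hedged sketch of the registration pattern, with invented device, group, and function names:

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/pinctrl/machine.h>

	/* All names below are placeholders, not taken from the madera driver. */
	static const struct pinctrl_map example_maps[] = {
		PIN_MAP_MUX_GROUP("example-codec", PINCTRL_STATE_DEFAULT,
				  "madera-pinctrl", "aif1-group", "aif1"),
	};

	static int example_register_maps(struct device *dev)
	{
		/* Unregistered automatically when @dev is unbound. */
		return devm_pinctrl_register_mappings(dev, example_maps,
						      ARRAY_SIZE(example_maps));
	}
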
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 9046292d1360..73b78d6eac67 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2062,7 +2062,7 @@ static int pinctrl_check_ops(struct pinctrl_dev *pctldev)
* @driver_data: private pin controller data for this pin controller
*/
static struct pinctrl_dev *
-pinctrl_init_controller(struct pinctrl_desc *pctldesc, struct device *dev,
+pinctrl_init_controller(const struct pinctrl_desc *pctldesc, struct device *dev,
void *driver_data)
{
struct pinctrl_dev *pctldev;
@@ -2132,7 +2132,8 @@ out_err:
return ERR_PTR(ret);
}
-static void pinctrl_uninit_controller(struct pinctrl_dev *pctldev, struct pinctrl_desc *pctldesc)
+static void pinctrl_uninit_controller(struct pinctrl_dev *pctldev,
+ const struct pinctrl_desc *pctldesc)
{
pinctrl_free_pindescs(pctldev, pctldesc->pins,
pctldesc->npins);
@@ -2209,7 +2210,7 @@ EXPORT_SYMBOL_GPL(pinctrl_enable);
* struct pinctrl_dev handle. To avoid issues later on, please use the
* new pinctrl_register_and_init() below instead.
*/
-struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+struct pinctrl_dev *pinctrl_register(const struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data)
{
struct pinctrl_dev *pctldev;
@@ -2239,7 +2240,7 @@ EXPORT_SYMBOL_GPL(pinctrl_register);
* Note that pinctrl_enable() still needs to be manually called after
* this once the driver is ready.
*/
-int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
+int pinctrl_register_and_init(const struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data,
struct pinctrl_dev **pctldev)
{
@@ -2330,7 +2331,7 @@ static int devm_pinctrl_dev_match(struct device *dev, void *res, void *data)
* The pinctrl device will be automatically released when the device is unbound.
*/
struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
- struct pinctrl_desc *pctldesc,
+ const struct pinctrl_desc *pctldesc,
void *driver_data)
{
struct pinctrl_dev **ptr, *pctldev;
@@ -2364,7 +2365,7 @@ EXPORT_SYMBOL_GPL(devm_pinctrl_register);
* The pinctrl device will be automatically released when the device is unbound.
*/
int devm_pinctrl_register_and_init(struct device *dev,
- struct pinctrl_desc *pctldesc,
+ const struct pinctrl_desc *pctldesc,
void *driver_data,
struct pinctrl_dev **pctldev)
{
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index d6c24978e708..fc513a9cdd4f 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -51,7 +51,7 @@ struct pinctrl_state;
*/
struct pinctrl_dev {
struct list_head node;
- struct pinctrl_desc *desc;
+ const struct pinctrl_desc *desc;
struct radix_tree_root pin_desc_tree;
#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
struct radix_tree_root pin_group_tree;
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 969137c4cb06..5fd107a00ef8 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1045,7 +1045,7 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BYT_LEVEL);
}
-static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg;
@@ -1053,7 +1053,7 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
if (!reg)
- return;
+ return -EINVAL;
guard(raw_spinlock_irqsave)(&byt_lock);
@@ -1062,6 +1062,8 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
writel(old_val | BYT_LEVEL, reg);
else
writel(old_val & ~BYT_LEVEL, reg);
+
+ return 0;
}
static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 69b18ce0f685..f81f7929cd3b 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1112,7 +1112,7 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(ctrl0 & CHV_PADCTRL0_GPIORXSTATE);
}
-static void chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
u32 ctrl0;
@@ -1127,6 +1127,8 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
ctrl0 &= ~CHV_PADCTRL0_GPIOTXSTATE;
chv_writel(pctrl, offset, CHV_PADCTRL0, ctrl0);
+
+ return 0;
}
static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index d889c7c878e2..d68cef4ec52a 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/cleanup.h>
+#include <linux/export.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
@@ -1033,8 +1034,8 @@ static int intel_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(padcfg0 & PADCFG0_GPIORXSTATE);
}
-static void intel_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int intel_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
void __iomem *reg;
@@ -1043,11 +1044,11 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned int offset,
pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
if (pin < 0)
- return;
+ return -EINVAL;
reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
- return;
+ return -EINVAL;
guard(raw_spinlock_irqsave)(&pctrl->lock);
@@ -1057,6 +1058,8 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned int offset,
else
padcfg0 &= ~PADCFG0_GPIOTXSTATE;
writel(padcfg0, reg);
+
+ return 0;
}
static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -1094,7 +1097,12 @@ static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offse
static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
- intel_gpio_set(chip, offset, value);
+ int ret;
+
+ ret = intel_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
+
return pinctrl_gpio_direction_output(chip, offset);
}
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index ac5459a4c63e..3fb628309fb2 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -503,7 +503,7 @@ static int lp_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(ioread32(reg) & IN_LVL_BIT);
}
-static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct intel_pinctrl *lg = gpiochip_get_data(chip);
void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
@@ -514,6 +514,8 @@ static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
iowrite32(ioread32(reg) | OUT_LVL_BIT, reg);
else
iowrite32(ioread32(reg) & ~OUT_LVL_BIT, reg);
+
+ return 0;
}
static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 2d15af6be276..5b191e12a8aa 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -259,6 +259,18 @@ config PINCTRL_MT8188
In MTK platform, we support virtual gpio and use it to
map specific eint which doesn't have real gpio pin.
+config PINCTRL_MT8189
+ bool "MediaTek MT8189 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+ help
+ Say yes here to support pin controller and gpio driver
+ on MediaTek MT8189 SoC.
+ In MTK platform, we support virtual gpio and use it to
+ map specific eint which doesn't have real gpio pin.
+
config PINCTRL_MT8192
bool "MediaTek MT8192 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 7518980fba59..5d4646939ba3 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
obj-$(CONFIG_PINCTRL_MT8186) += pinctrl-mt8186.o
obj-$(CONFIG_PINCTRL_MT8188) += pinctrl-mt8188.o
+obj-$(CONFIG_PINCTRL_MT8189) += pinctrl-mt8189.o
obj-$(CONFIG_PINCTRL_MT8192) += pinctrl-mt8192.o
obj-$(CONFIG_PINCTRL_MT8195) += pinctrl-mt8195.o
obj-$(CONFIG_PINCTRL_MT8196) += pinctrl-mt8196.o
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index d906a5e4101f..9f175c73613f 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -561,8 +561,8 @@ int mtk_eint_do_init(struct mtk_eint *eint, struct mtk_eint_pin *eint_pin)
goto err_eint;
}
- eint->domain = irq_domain_create_linear(of_fwnode_handle(eint->dev->of_node),
- eint->hw->ap_num, &irq_domain_simple_ops, NULL);
+ eint->domain = irq_domain_create_linear(dev_fwnode(eint->dev), eint->hw->ap_num,
+ &irq_domain_simple_ops, NULL);
if (!eint->domain)
goto err_eint;
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index b97b28ebb37a..1b2f132d76f0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -2418,7 +2418,7 @@ static int airoha_pinctrl_add_gpiochip(struct airoha_pinctrl *pinctrl,
gc->free = gpiochip_generic_free;
gc->direction_input = pinctrl_gpio_direction_input;
gc->direction_output = airoha_gpio_direction_output;
- gc->set_rv = airoha_gpio_set;
+ gc->set = airoha_gpio_set;
gc->get = airoha_gpio_get;
gc->base = -1;
gc->ngpio = AIROHA_NUM_PINS;
@@ -2696,7 +2696,7 @@ static int airoha_pinconf_get(struct pinctrl_dev *pctrl_dev,
arg = 1;
break;
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
*config = pinconf_to_config_packed(param, arg);
@@ -2788,7 +2788,7 @@ static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
break;
}
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
}
@@ -2805,10 +2805,10 @@ static int airoha_pinconf_group_get(struct pinctrl_dev *pctrl_dev,
if (airoha_pinconf_get(pctrl_dev,
airoha_pinctrl_groups[group].pins[i],
config))
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
if (i && cur_config != *config)
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
cur_config = *config;
}
@@ -2852,7 +2852,7 @@ static const struct pinctrl_ops airoha_pctlops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-static struct pinctrl_desc airoha_pinctrl_desc = {
+static const struct pinctrl_desc airoha_pinctrl_desc = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
.pctlops = &airoha_pctlops,
@@ -2907,11 +2907,9 @@ static int airoha_pinctrl_probe(struct platform_device *pdev)
const struct airoha_pinctrl_func *func;
func = &airoha_pinctrl_funcs[i];
- err = pinmux_generic_add_function(pinctrl->ctrl,
- func->desc.func.name,
- func->desc.func.groups,
- func->desc.func.ngroups,
- (void *)func);
+ err = pinmux_generic_add_pinfunction(pinctrl->ctrl,
+ &func->desc.func,
+ (void *)func);
if (err < 0) {
dev_err(dev, "Failed to register function %s\n",
func->desc.func.name);
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index 827d0f191031..6e4f6c07a509 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -569,7 +569,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->direction_input = pinctrl_gpio_direction_input;
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
- chip->set_rv = mtk_gpio_set;
+ chip->set = mtk_gpio_set;
chip->to_irq = mtk_gpio_to_irq;
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
@@ -625,9 +625,8 @@ static int mtk_build_functions(struct mtk_pinctrl *hw)
const struct function_desc *function = hw->soc->funcs + i;
const struct pinfunction *func = &function->func;
- err = pinmux_generic_add_function(hw->pctrl, func->name,
- func->groups, func->ngroups,
- function->data);
+ err = pinmux_generic_add_pinfunction(hw->pctrl, func,
+ function->data);
if (err < 0) {
dev_err(hw->dev, "Failed to register function %s\n",
func->name);
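
The moore (and airoha) hunks above switch from passing name/groups/ngroups separately to handing the core a struct pinfunction directly. A brief sketch of the new helper's use, assuming the signature shown by these call sites and using invented group data:

	#include <linux/kernel.h>
	#include <linux/pinctrl/pinctrl.h>
	#include "pinmux.h"	/* drivers/pinctrl internal header */

	static const char * const example_groups[] = { "i2c0_grp" };

	/* struct pinfunction bundles name/groups/ngroups in one place. */
	static const struct pinfunction example_func =
		PINCTRL_PINFUNCTION("i2c0", example_groups,
				    ARRAY_SIZE(example_groups));

	static int example_add_function(struct pinctrl_dev *pctldev)
	{
		/* Returns the new function selector or a negative errno. */
		return pinmux_generic_add_pinfunction(pctldev, &example_func,
						      NULL);
	}
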
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8189.c b/drivers/pinctrl/mediatek/pinctrl-mt8189.c
new file mode 100644
index 000000000000..7028aff55ae5
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8189.c
@@ -0,0 +1,1700 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 MediaTek Inc.
+ * Author: Lei Xue <lei.xue@mediatek.com>
+ * Cathy Xu <ot_cathy.xu@mediatek.com>
+ */
+
+#include "pinctrl-mtk-mt8189.h"
+#include "pinctrl-paris.h"
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
+static const struct mtk_pin_field_calc mt8189_pin_mode_range[] = {
+ PIN_FIELD(0, 182, 0x0300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_dir_range[] = {
+ PIN_FIELD(0, 182, 0x0000, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_di_range[] = {
+ PIN_FIELD(0, 182, 0x0200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_do_range[] = {
+ PIN_FIELD(0, 182, 0x0100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 7, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(2, 2, 8, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(3, 3, 8, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(4, 4, 8, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(5, 5, 8, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(6, 6, 7, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(7, 7, 7, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(8, 8, 7, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(9, 9, 7, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(10, 10, 7, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(11, 11, 7, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 3, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(16, 16, 2, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(17, 17, 2, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(18, 18, 7, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(19, 19, 7, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 7, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 7, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 9, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 9, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 9, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 4, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(26, 26, 4, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(29, 29, 4, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x00f0, 0x10, 19, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x00c0, 0x10, 30, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x00f0, 0x10, 21, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x00f0, 0x10, 20, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x00f0, 0x10, 23, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x00f0, 0x10, 22, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x00f0, 0x10, 25, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x00f0, 0x10, 24, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x00f0, 0x10, 3, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x00e0, 0x10, 20, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x00e0, 0x10, 21, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x00e0, 0x10, 22, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x00e0, 0x10, 23, 1),
+ PIN_FIELD_BASE(48, 48, 4, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(49, 49, 4, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(50, 50, 4, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(52, 52, 8, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(53, 53, 8, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(54, 54, 8, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(55, 55, 4, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(56, 56, 4, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(59, 59, 2, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(60, 60, 2, 0x00e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(61, 61, 2, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(62, 62, 2, 0x00e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x00e0, 0x10, 20, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x00f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(69, 69, 2, 0x00e0, 0x10, 22, 1),
+ PIN_FIELD_BASE(70, 70, 2, 0x00e0, 0x10, 21, 1),
+ PIN_FIELD_BASE(71, 71, 2, 0x00e0, 0x10, 24, 1),
+ PIN_FIELD_BASE(72, 72, 2, 0x00e0, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 2, 0x00e0, 0x10, 26, 1),
+ PIN_FIELD_BASE(74, 74, 2, 0x00e0, 0x10, 25, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(76, 76, 2, 0x00e0, 0x10, 27, 1),
+ PIN_FIELD_BASE(77, 77, 8, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(78, 78, 8, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(79, 79, 8, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(80, 80, 8, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(81, 81, 2, 0x00e0, 0x10, 29, 1),
+ PIN_FIELD_BASE(82, 82, 2, 0x00e0, 0x10, 28, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x00e0, 0x10, 30, 1),
+ PIN_FIELD_BASE(84, 84, 7, 0x00e0, 0x10, 24, 1),
+ PIN_FIELD_BASE(85, 85, 7, 0x00e0, 0x10, 25, 1),
+ PIN_FIELD_BASE(86, 86, 7, 0x00e0, 0x10, 26, 1),
+ PIN_FIELD_BASE(87, 87, 7, 0x00e0, 0x10, 27, 1),
+ PIN_FIELD_BASE(88, 88, 5, 0x0120, 0x10, 20, 1),
+ PIN_FIELD_BASE(89, 89, 5, 0x0120, 0x10, 19, 1),
+ PIN_FIELD_BASE(90, 90, 5, 0x0120, 0x10, 22, 1),
+ PIN_FIELD_BASE(91, 91, 5, 0x0120, 0x10, 21, 1),
+ PIN_FIELD_BASE(92, 92, 5, 0x0120, 0x10, 16, 1),
+ PIN_FIELD_BASE(93, 93, 5, 0x0120, 0x10, 17, 1),
+ PIN_FIELD_BASE(94, 94, 5, 0x0120, 0x10, 23, 1),
+ PIN_FIELD_BASE(95, 95, 5, 0x0120, 0x10, 15, 1),
+ PIN_FIELD_BASE(96, 96, 5, 0x0120, 0x10, 18, 1),
+ PIN_FIELD_BASE(97, 97, 5, 0x0120, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 5, 0x0120, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 5, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(100, 100, 5, 0x0120, 0x10, 4, 1),
+ PIN_FIELD_BASE(101, 101, 5, 0x0120, 0x10, 1, 1),
+ PIN_FIELD_BASE(102, 102, 5, 0x0120, 0x10, 2, 1),
+ PIN_FIELD_BASE(103, 103, 7, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(104, 104, 7, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(105, 105, 7, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 7, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(107, 107, 7, 0x00e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(108, 108, 7, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(109, 109, 7, 0x00e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(110, 110, 7, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 7, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(112, 112, 8, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(113, 113, 8, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(114, 114, 8, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(119, 119, 1, 0x00c0, 0x10, 26, 1),
+ PIN_FIELD_BASE(120, 120, 1, 0x00c0, 0x10, 25, 1),
+ PIN_FIELD_BASE(121, 121, 1, 0x00c0, 0x10, 24, 1),
+ PIN_FIELD_BASE(122, 122, 1, 0x00c0, 0x10, 23, 1),
+ PIN_FIELD_BASE(123, 123, 1, 0x00c0, 0x10, 19, 1),
+ PIN_FIELD_BASE(124, 124, 1, 0x00c0, 0x10, 18, 1),
+ PIN_FIELD_BASE(125, 125, 1, 0x00c0, 0x10, 17, 1),
+ PIN_FIELD_BASE(126, 126, 1, 0x00c0, 0x10, 16, 1),
+ PIN_FIELD_BASE(127, 127, 1, 0x00c0, 0x10, 22, 1),
+ PIN_FIELD_BASE(128, 128, 1, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(129, 129, 1, 0x00c0, 0x10, 20, 1),
+ PIN_FIELD_BASE(130, 130, 1, 0x00c0, 0x10, 27, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x00c0, 0x10, 28, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x00c0, 0x10, 21, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 2, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(138, 138, 2, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(150, 150, 3, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(151, 151, 1, 0x00c0, 0x10, 29, 1),
+ PIN_FIELD_BASE(152, 152, 3, 0x00f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(153, 153, 3, 0x00f0, 0x10, 16, 1),
+ PIN_FIELD_BASE(154, 154, 3, 0x00f0, 0x10, 17, 1),
+ PIN_FIELD_BASE(155, 155, 3, 0x00f0, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 5, 0x0120, 0x10, 12, 1),
+ PIN_FIELD_BASE(157, 157, 5, 0x0120, 0x10, 11, 1),
+ PIN_FIELD_BASE(158, 158, 5, 0x0120, 0x10, 10, 1),
+ PIN_FIELD_BASE(159, 159, 6, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x0120, 0x10, 14, 1),
+ PIN_FIELD_BASE(161, 161, 5, 0x0120, 0x10, 7, 1),
+ PIN_FIELD_BASE(162, 162, 5, 0x0120, 0x10, 6, 1),
+ PIN_FIELD_BASE(163, 163, 6, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 5, 0x0120, 0x10, 9, 1),
+ PIN_FIELD_BASE(165, 165, 5, 0x0120, 0x10, 8, 1),
+ PIN_FIELD_BASE(166, 166, 6, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(167, 167, 5, 0x0120, 0x10, 13, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x00f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(174, 174, 9, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(175, 175, 9, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(176, 176, 9, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(177, 177, 9, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(178, 178, 9, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(179, 179, 9, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(180, 180, 5, 0x0120, 0x10, 24, 1),
+ PIN_FIELD_BASE(181, 181, 5, 0x0120, 0x10, 25, 1),
+ PIN_FIELD_BASE(182, 182, 9, 0x00f0, 0x10, 3, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 7, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(2, 2, 8, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(3, 3, 8, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(4, 4, 8, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(5, 5, 8, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(6, 6, 7, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(7, 7, 7, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(8, 8, 7, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(9, 9, 7, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(10, 10, 7, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(11, 11, 7, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 3, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(16, 16, 2, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(17, 17, 2, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(18, 18, 7, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(19, 19, 7, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 7, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 7, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 9, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 9, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 9, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 4, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(26, 26, 4, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(29, 29, 4, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0050, 0x10, 30, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x0050, 0x10, 24, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(48, 48, 4, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(49, 49, 4, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(50, 50, 4, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(52, 52, 8, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(53, 53, 8, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(54, 54, 8, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(55, 55, 4, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(56, 56, 4, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(59, 59, 2, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(60, 60, 2, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(61, 61, 2, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(62, 62, 2, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(69, 69, 2, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(70, 70, 2, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(71, 71, 2, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(72, 72, 2, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 2, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(74, 74, 2, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(76, 76, 2, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(77, 77, 8, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(78, 78, 8, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(79, 79, 8, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(80, 80, 8, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(81, 81, 2, 0x0070, 0x10, 29, 1),
+ PIN_FIELD_BASE(82, 82, 2, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x0070, 0x10, 30, 1),
+ PIN_FIELD_BASE(84, 84, 7, 0x0050, 0x10, 24, 1),
+ PIN_FIELD_BASE(85, 85, 7, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(86, 86, 7, 0x0050, 0x10, 26, 1),
+ PIN_FIELD_BASE(87, 87, 7, 0x0050, 0x10, 27, 1),
+ PIN_FIELD_BASE(88, 88, 5, 0x0060, 0x10, 20, 1),
+ PIN_FIELD_BASE(89, 89, 5, 0x0060, 0x10, 19, 1),
+ PIN_FIELD_BASE(90, 90, 5, 0x0060, 0x10, 22, 1),
+ PIN_FIELD_BASE(91, 91, 5, 0x0060, 0x10, 21, 1),
+ PIN_FIELD_BASE(92, 92, 5, 0x0060, 0x10, 16, 1),
+ PIN_FIELD_BASE(93, 93, 5, 0x0060, 0x10, 17, 1),
+ PIN_FIELD_BASE(94, 94, 5, 0x0060, 0x10, 23, 1),
+ PIN_FIELD_BASE(95, 95, 5, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(96, 96, 5, 0x0060, 0x10, 18, 1),
+ PIN_FIELD_BASE(97, 97, 5, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 5, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 5, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(100, 100, 5, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(101, 101, 5, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(102, 102, 5, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(103, 103, 7, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(104, 104, 7, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(105, 105, 7, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 7, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(107, 107, 7, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(108, 108, 7, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(109, 109, 7, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(110, 110, 7, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 7, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(112, 112, 8, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(113, 113, 8, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(114, 114, 8, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(119, 119, 1, 0x0050, 0x10, 26, 1),
+ PIN_FIELD_BASE(120, 120, 1, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(121, 121, 1, 0x0050, 0x10, 24, 1),
+ PIN_FIELD_BASE(122, 122, 1, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(123, 123, 1, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(124, 124, 1, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(125, 125, 1, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(126, 126, 1, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(127, 127, 1, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(128, 128, 1, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(129, 129, 1, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(130, 130, 1, 0x0050, 0x10, 27, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0050, 0x10, 28, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 2, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(138, 138, 2, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(150, 150, 3, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(151, 151, 1, 0x0050, 0x10, 29, 1),
+ PIN_FIELD_BASE(152, 152, 3, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(153, 153, 3, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(154, 154, 3, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(155, 155, 3, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 5, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(157, 157, 5, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(158, 158, 5, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(159, 159, 6, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(161, 161, 5, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(162, 162, 5, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(163, 163, 6, 0x0020, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 5, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(165, 165, 5, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(166, 166, 6, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(167, 167, 5, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(174, 174, 9, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(175, 175, 9, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(176, 176, 9, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(177, 177, 9, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(178, 178, 9, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(179, 179, 9, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(180, 180, 5, 0x0060, 0x10, 24, 1),
+ PIN_FIELD_BASE(181, 181, 5, 0x0060, 0x10, 25, 1),
+ PIN_FIELD_BASE(182, 182, 9, 0x0040, 0x10, 3, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_tdsel_range[] = {
+ PIN_FIELD_BASE(0, 0, 7, 0x00f0, 0x10, 0, 4),
+ PIN_FIELD_BASE(1, 1, 8, 0x00d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(2, 2, 8, 0x00d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(3, 3, 8, 0x00d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(4, 4, 8, 0x00d0, 0x10, 12, 4),
+ PIN_FIELD_BASE(5, 5, 8, 0x00d0, 0x10, 16, 4),
+ PIN_FIELD_BASE(6, 6, 7, 0x00f0, 0x10, 4, 4),
+ PIN_FIELD_BASE(7, 7, 7, 0x00f0, 0x10, 8, 4),
+ PIN_FIELD_BASE(8, 8, 7, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(9, 9, 7, 0x00f0, 0x10, 16, 4),
+ PIN_FIELD_BASE(10, 10, 7, 0x00f0, 0x10, 20, 4),
+ PIN_FIELD_BASE(11, 11, 7, 0x00f0, 0x10, 24, 4),
+ PIN_FIELD_BASE(12, 12, 2, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(13, 13, 2, 0x00f0, 0x10, 16, 4),
+ PIN_FIELD_BASE(14, 14, 3, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(15, 15, 3, 0x0110, 0x10, 4, 4),
+ PIN_FIELD_BASE(16, 16, 2, 0x00f0, 0x10, 20, 4),
+ PIN_FIELD_BASE(17, 17, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(18, 18, 7, 0x0100, 0x10, 28, 4),
+ PIN_FIELD_BASE(19, 19, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(20, 20, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(21, 21, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(22, 22, 9, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(23, 23, 9, 0x0110, 0x10, 4, 4),
+ PIN_FIELD_BASE(24, 24, 9, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(25, 25, 4, 0x00d0, 0x10, 12, 4),
+ PIN_FIELD_BASE(26, 26, 4, 0x00d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(27, 27, 2, 0x00f0, 0x10, 4, 4),
+ PIN_FIELD_BASE(28, 28, 2, 0x00f0, 0x10, 8, 4),
+ PIN_FIELD_BASE(29, 29, 4, 0x00d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(30, 30, 2, 0x00f0, 0x10, 0, 4),
+ PIN_FIELD_BASE(31, 31, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(32, 32, 1, 0x00f0, 0x10, 16, 4),
+ PIN_FIELD_BASE(33, 33, 3, 0x0120, 0x10, 16, 4),
+ PIN_FIELD_BASE(34, 34, 3, 0x0120, 0x10, 4, 4),
+ PIN_FIELD_BASE(35, 35, 3, 0x0120, 0x10, 0, 4),
+ PIN_FIELD_BASE(36, 36, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(37, 37, 3, 0x0120, 0x10, 4, 4),
+ PIN_FIELD_BASE(38, 38, 3, 0x0120, 0x10, 4, 4),
+ PIN_FIELD_BASE(39, 39, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(40, 40, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(41, 41, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(42, 42, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(43, 43, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(44, 44, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(45, 45, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(46, 46, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(47, 47, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(48, 48, 4, 0x00d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(49, 49, 4, 0x00d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(50, 50, 4, 0x00d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(51, 51, 8, 0x00d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(52, 52, 8, 0x00d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(53, 53, 8, 0x00d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(54, 54, 8, 0x00d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(55, 55, 4, 0x00d0, 0x10, 12, 4),
+ PIN_FIELD_BASE(56, 56, 4, 0x00d0, 0x10, 12, 4),
+ PIN_FIELD_BASE(57, 57, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(58, 58, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(59, 59, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(60, 60, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(61, 61, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(62, 62, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(63, 63, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(64, 64, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(65, 65, 9, 0x0120, 0x10, 4, 4),
+ PIN_FIELD_BASE(66, 66, 9, 0x0120, 0x10, 4, 4),
+ PIN_FIELD_BASE(67, 67, 9, 0x0120, 0x10, 4, 4),
+ PIN_FIELD_BASE(68, 68, 9, 0x0120, 0x10, 4, 4),
+ PIN_FIELD_BASE(69, 69, 2, 0x0100, 0x10, 4, 4),
+ PIN_FIELD_BASE(70, 70, 2, 0x0100, 0x10, 0, 4),
+ PIN_FIELD_BASE(71, 71, 2, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(72, 72, 2, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(73, 73, 2, 0x0100, 0x10, 20, 4),
+ PIN_FIELD_BASE(74, 74, 2, 0x0100, 0x10, 16, 4),
+ PIN_FIELD_BASE(75, 75, 3, 0x0120, 0x10, 12, 4),
+ PIN_FIELD_BASE(76, 76, 2, 0x0100, 0x10, 24, 4),
+ PIN_FIELD_BASE(77, 77, 8, 0x00d0, 0x10, 28, 4),
+ PIN_FIELD_BASE(78, 78, 8, 0x00d0, 0x10, 24, 4),
+ PIN_FIELD_BASE(79, 79, 8, 0x00e0, 0x10, 4, 4),
+ PIN_FIELD_BASE(80, 80, 8, 0x00e0, 0x10, 0, 4),
+ PIN_FIELD_BASE(81, 81, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(82, 82, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(83, 83, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(84, 84, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(85, 85, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(86, 86, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(87, 87, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(88, 88, 5, 0x0140, 0x10, 4, 4),
+ PIN_FIELD_BASE(89, 89, 5, 0x0140, 0x10, 4, 4),
+ PIN_FIELD_BASE(90, 90, 5, 0x0140, 0x10, 4, 4),
+ PIN_FIELD_BASE(91, 91, 5, 0x0140, 0x10, 4, 4),
+ PIN_FIELD_BASE(92, 92, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(93, 93, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(94, 94, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(95, 95, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(96, 96, 5, 0x0140, 0x10, 12, 4),
+ PIN_FIELD_BASE(97, 97, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(98, 98, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(99, 99, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(100, 100, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(101, 101, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(102, 102, 5, 0x0140, 0x10, 8, 4),
+ PIN_FIELD_BASE(103, 103, 7, 0x0100, 0x10, 8, 4),
+ PIN_FIELD_BASE(104, 104, 7, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(105, 105, 7, 0x0100, 0x10, 4, 4),
+ PIN_FIELD_BASE(106, 106, 7, 0x0100, 0x10, 0, 4),
+ PIN_FIELD_BASE(107, 107, 7, 0x0100, 0x10, 24, 4),
+ PIN_FIELD_BASE(108, 108, 7, 0x0100, 0x10, 12, 4),
+ PIN_FIELD_BASE(109, 109, 7, 0x0100, 0x10, 20, 4),
+ PIN_FIELD_BASE(110, 110, 7, 0x0100, 0x10, 16, 4),
+ PIN_FIELD_BASE(111, 111, 7, 0x0110, 0x10, 0, 4),
+ PIN_FIELD_BASE(112, 112, 8, 0x00d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(113, 113, 8, 0x00d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(114, 114, 8, 0x00d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(115, 115, 2, 0x00f0, 0x10, 24, 4),
+ PIN_FIELD_BASE(116, 116, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(117, 117, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(118, 118, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(119, 119, 1, 0x00e0, 0x10, 24, 4),
+ PIN_FIELD_BASE(120, 120, 1, 0x00e0, 0x10, 20, 4),
+ PIN_FIELD_BASE(121, 121, 1, 0x00e0, 0x10, 16, 4),
+ PIN_FIELD_BASE(122, 122, 1, 0x00e0, 0x10, 12, 4),
+ PIN_FIELD_BASE(123, 123, 1, 0x00d0, 0x10, 28, 4),
+ PIN_FIELD_BASE(124, 124, 1, 0x00d0, 0x10, 24, 4),
+ PIN_FIELD_BASE(125, 125, 1, 0x00d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(126, 126, 1, 0x00d0, 0x10, 16, 4),
+ PIN_FIELD_BASE(127, 127, 1, 0x00e0, 0x10, 8, 4),
+ PIN_FIELD_BASE(128, 128, 1, 0x00d0, 0x10, 12, 4),
+ PIN_FIELD_BASE(129, 129, 1, 0x00e0, 0x10, 0, 4),
+ PIN_FIELD_BASE(130, 130, 1, 0x00e0, 0x10, 28, 4),
+ PIN_FIELD_BASE(131, 131, 1, 0x00d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(132, 132, 1, 0x00d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(133, 133, 1, 0x00f0, 0x10, 0, 4),
+ PIN_FIELD_BASE(134, 134, 1, 0x00e0, 0x10, 4, 4),
+ PIN_FIELD_BASE(135, 135, 1, 0x00d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(136, 136, 1, 0x00f0, 0x10, 4, 4),
+ PIN_FIELD_BASE(137, 137, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(138, 138, 2, 0x00f0, 0x10, 28, 4),
+ PIN_FIELD_BASE(139, 139, 1, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(140, 140, 1, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(141, 141, 1, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(142, 142, 1, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(143, 143, 1, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(144, 144, 1, 0x00f0, 0x10, 12, 4),
+ PIN_FIELD_BASE(145, 145, 1, 0x00f0, 0x10, 8, 4),
+ PIN_FIELD_BASE(146, 146, 1, 0x00f0, 0x10, 8, 4),
+ PIN_FIELD_BASE(147, 147, 1, 0x00f0, 0x10, 8, 4),
+ PIN_FIELD_BASE(148, 148, 1, 0x00f0, 0x10, 8, 4),
+ PIN_FIELD_BASE(149, 149, 1, 0x00f0, 0x10, 8, 4),
+ PIN_FIELD_BASE(150, 150, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(151, 151, 1, 0x00f0, 0x10, 16, 4),
+ PIN_FIELD_BASE(152, 152, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(153, 153, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(154, 154, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(155, 155, 3, 0x0120, 0x10, 8, 4),
+ PIN_FIELD_BASE(156, 156, 5, 0x0130, 0x10, 24, 4),
+ PIN_FIELD_BASE(157, 157, 5, 0x0130, 0x10, 20, 4),
+ PIN_FIELD_BASE(158, 158, 5, 0x0130, 0x10, 16, 4),
+ PIN_FIELD_BASE(159, 159, 6, 0x00a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(160, 160, 5, 0x0140, 0x10, 0, 4),
+ PIN_FIELD_BASE(161, 161, 5, 0x0130, 0x10, 4, 4),
+ PIN_FIELD_BASE(162, 162, 5, 0x0130, 0x10, 0, 4),
+ PIN_FIELD_BASE(163, 163, 6, 0x00a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(164, 164, 5, 0x0130, 0x10, 12, 4),
+ PIN_FIELD_BASE(165, 165, 5, 0x0130, 0x10, 8, 4),
+ PIN_FIELD_BASE(166, 166, 6, 0x00a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(167, 167, 5, 0x0130, 0x10, 28, 4),
+ PIN_FIELD_BASE(168, 168, 3, 0x0110, 0x10, 12, 4),
+ PIN_FIELD_BASE(169, 169, 3, 0x0110, 0x10, 8, 4),
+ PIN_FIELD_BASE(170, 170, 3, 0x0110, 0x10, 16, 4),
+ PIN_FIELD_BASE(171, 171, 3, 0x0110, 0x10, 20, 4),
+ PIN_FIELD_BASE(172, 172, 3, 0x0110, 0x10, 24, 4),
+ PIN_FIELD_BASE(173, 173, 3, 0x0110, 0x10, 28, 4),
+ PIN_FIELD_BASE(174, 174, 9, 0x0110, 0x10, 16, 4),
+ PIN_FIELD_BASE(175, 175, 9, 0x0110, 0x10, 12, 4),
+ PIN_FIELD_BASE(176, 176, 9, 0x0110, 0x10, 20, 4),
+ PIN_FIELD_BASE(177, 177, 9, 0x0110, 0x10, 24, 4),
+ PIN_FIELD_BASE(178, 178, 9, 0x0110, 0x10, 28, 4),
+ PIN_FIELD_BASE(179, 179, 9, 0x0120, 0x10, 0, 4),
+ PIN_FIELD_BASE(180, 180, 5, 0x0140, 0x10, 16, 4),
+ PIN_FIELD_BASE(181, 181, 5, 0x0140, 0x10, 20, 4),
+ PIN_FIELD_BASE(182, 182, 9, 0x0120, 0x10, 8, 4),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_rdsel_range[] = {
+ PIN_FIELD_BASE(0, 0, 7, 0x00d0, 0x10, 0, 2),
+ PIN_FIELD_BASE(1, 1, 8, 0x00a0, 0x10, 0, 2),
+ PIN_FIELD_BASE(2, 2, 8, 0x00a0, 0x10, 2, 2),
+ PIN_FIELD_BASE(3, 3, 8, 0x00a0, 0x10, 4, 2),
+ PIN_FIELD_BASE(4, 4, 8, 0x00a0, 0x10, 6, 2),
+ PIN_FIELD_BASE(5, 5, 8, 0x00a0, 0x10, 8, 2),
+ PIN_FIELD_BASE(6, 6, 7, 0x00d0, 0x10, 2, 2),
+ PIN_FIELD_BASE(7, 7, 7, 0x00d0, 0x10, 4, 2),
+ PIN_FIELD_BASE(8, 8, 7, 0x00d0, 0x10, 6, 2),
+ PIN_FIELD_BASE(9, 9, 7, 0x00d0, 0x10, 8, 2),
+ PIN_FIELD_BASE(10, 10, 7, 0x00d0, 0x10, 10, 2),
+ PIN_FIELD_BASE(11, 11, 7, 0x00d0, 0x10, 12, 2),
+ PIN_FIELD_BASE(12, 12, 2, 0x00c0, 0x10, 6, 2),
+ PIN_FIELD_BASE(13, 13, 2, 0x00c0, 0x10, 8, 2),
+ PIN_FIELD_BASE(14, 14, 3, 0x00d0, 0x10, 0, 2),
+ PIN_FIELD_BASE(15, 15, 3, 0x00d0, 0x10, 2, 2),
+ PIN_FIELD_BASE(16, 16, 2, 0x00c0, 0x10, 10, 2),
+ PIN_FIELD_BASE(17, 17, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(18, 18, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(19, 19, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(20, 20, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(21, 21, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(22, 22, 9, 0x00c0, 0x10, 0, 2),
+ PIN_FIELD_BASE(23, 23, 9, 0x00c0, 0x10, 2, 2),
+ PIN_FIELD_BASE(24, 24, 9, 0x00c0, 0x10, 4, 2),
+ PIN_FIELD_BASE(25, 25, 4, 0x00a0, 0x10, 6, 2),
+ PIN_FIELD_BASE(26, 26, 4, 0x00a0, 0x10, 4, 2),
+ PIN_FIELD_BASE(27, 27, 2, 0x00c0, 0x10, 2, 2),
+ PIN_FIELD_BASE(28, 28, 2, 0x00c0, 0x10, 4, 2),
+ PIN_FIELD_BASE(29, 29, 4, 0x00a0, 0x10, 4, 2),
+ PIN_FIELD_BASE(30, 30, 2, 0x00c0, 0x10, 0, 2),
+ PIN_FIELD_BASE(31, 31, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(32, 32, 1, 0x00b0, 0x10, 8, 2),
+ PIN_FIELD_BASE(33, 33, 3, 0x00e0, 0x10, 20, 2),
+ PIN_FIELD_BASE(34, 34, 3, 0x00e0, 0x10, 14, 2),
+ PIN_FIELD_BASE(35, 35, 3, 0x00e0, 0x10, 12, 2),
+ PIN_FIELD_BASE(36, 36, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(37, 37, 3, 0x00e0, 0x10, 14, 2),
+ PIN_FIELD_BASE(38, 38, 3, 0x00e0, 0x10, 14, 2),
+ PIN_FIELD_BASE(39, 39, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(40, 40, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(41, 41, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(42, 42, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(43, 43, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(44, 44, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(45, 45, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(46, 46, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(47, 47, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(48, 48, 4, 0x00a0, 0x10, 4, 2),
+ PIN_FIELD_BASE(49, 49, 4, 0x00a0, 0x10, 2, 2),
+ PIN_FIELD_BASE(50, 50, 4, 0x00a0, 0x10, 0, 2),
+ PIN_FIELD_BASE(51, 51, 8, 0x00a0, 0x10, 10, 2),
+ PIN_FIELD_BASE(52, 52, 8, 0x00a0, 0x10, 10, 2),
+ PIN_FIELD_BASE(53, 53, 8, 0x00a0, 0x10, 10, 2),
+ PIN_FIELD_BASE(54, 54, 8, 0x00a0, 0x10, 10, 2),
+ PIN_FIELD_BASE(55, 55, 4, 0x00a0, 0x10, 6, 2),
+ PIN_FIELD_BASE(56, 56, 4, 0x00a0, 0x10, 6, 2),
+ PIN_FIELD_BASE(57, 57, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(58, 58, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(59, 59, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(60, 60, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(61, 61, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(62, 62, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(63, 63, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(64, 64, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(65, 65, 9, 0x00d0, 0x10, 12, 2),
+ PIN_FIELD_BASE(66, 66, 9, 0x00d0, 0x10, 12, 2),
+ PIN_FIELD_BASE(67, 67, 9, 0x00d0, 0x10, 12, 2),
+ PIN_FIELD_BASE(68, 68, 9, 0x00d0, 0x10, 12, 2),
+ PIN_FIELD_BASE(69, 69, 2, 0x00c0, 0x10, 16, 2),
+ PIN_FIELD_BASE(70, 70, 2, 0x00c0, 0x10, 14, 2),
+ PIN_FIELD_BASE(71, 71, 2, 0x00c0, 0x10, 20, 2),
+ PIN_FIELD_BASE(72, 72, 2, 0x00c0, 0x10, 18, 2),
+ PIN_FIELD_BASE(73, 73, 2, 0x00c0, 0x10, 24, 2),
+ PIN_FIELD_BASE(74, 74, 2, 0x00c0, 0x10, 22, 2),
+ PIN_FIELD_BASE(75, 75, 3, 0x00e0, 0x10, 18, 2),
+ PIN_FIELD_BASE(76, 76, 2, 0x00c0, 0x10, 26, 2),
+ PIN_FIELD_BASE(77, 77, 8, 0x00a0, 0x10, 14, 2),
+ PIN_FIELD_BASE(78, 78, 8, 0x00a0, 0x10, 12, 2),
+ PIN_FIELD_BASE(79, 79, 8, 0x00a0, 0x10, 18, 2),
+ PIN_FIELD_BASE(80, 80, 8, 0x00a0, 0x10, 16, 2),
+ PIN_FIELD_BASE(81, 81, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(82, 82, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(83, 83, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(84, 84, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(85, 85, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(86, 86, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(87, 87, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(88, 88, 5, 0x00f0, 0x10, 24, 2),
+ PIN_FIELD_BASE(89, 89, 5, 0x00f0, 0x10, 24, 2),
+ PIN_FIELD_BASE(90, 90, 5, 0x00f0, 0x10, 24, 2),
+ PIN_FIELD_BASE(91, 91, 5, 0x00f0, 0x10, 24, 2),
+ PIN_FIELD_BASE(92, 92, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(93, 93, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(94, 94, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(95, 95, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(96, 96, 5, 0x00f0, 0x10, 28, 2),
+ PIN_FIELD_BASE(97, 97, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(98, 98, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(99, 99, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(100, 100, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(101, 101, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(102, 102, 5, 0x00f0, 0x10, 26, 2),
+ PIN_FIELD_BASE(103, 103, 7, 0x00d0, 0x10, 20, 2),
+ PIN_FIELD_BASE(104, 104, 7, 0x00d0, 0x10, 14, 2),
+ PIN_FIELD_BASE(105, 105, 7, 0x00d0, 0x10, 18, 2),
+ PIN_FIELD_BASE(106, 106, 7, 0x00d0, 0x10, 16, 2),
+ PIN_FIELD_BASE(107, 107, 7, 0x00d0, 0x10, 28, 2),
+ PIN_FIELD_BASE(108, 108, 7, 0x00d0, 0x10, 22, 2),
+ PIN_FIELD_BASE(109, 109, 7, 0x00d0, 0x10, 26, 2),
+ PIN_FIELD_BASE(110, 110, 7, 0x00d0, 0x10, 24, 2),
+ PIN_FIELD_BASE(111, 111, 7, 0x00d0, 0x10, 30, 2),
+ PIN_FIELD_BASE(112, 112, 8, 0x00a0, 0x10, 10, 2),
+ PIN_FIELD_BASE(113, 113, 8, 0x00a0, 0x10, 10, 2),
+ PIN_FIELD_BASE(114, 114, 8, 0x00a0, 0x10, 10, 2),
+ PIN_FIELD_BASE(115, 115, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(116, 116, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(117, 117, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(118, 118, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(119, 119, 1, 0x00a0, 0x10, 28, 2),
+ PIN_FIELD_BASE(120, 120, 1, 0x00a0, 0x10, 26, 2),
+ PIN_FIELD_BASE(121, 121, 1, 0x00a0, 0x10, 24, 2),
+ PIN_FIELD_BASE(122, 122, 1, 0x00a0, 0x10, 22, 2),
+ PIN_FIELD_BASE(123, 123, 1, 0x00a0, 0x10, 14, 2),
+ PIN_FIELD_BASE(124, 124, 1, 0x00a0, 0x10, 12, 2),
+ PIN_FIELD_BASE(125, 125, 1, 0x00a0, 0x10, 10, 2),
+ PIN_FIELD_BASE(126, 126, 1, 0x00a0, 0x10, 8, 2),
+ PIN_FIELD_BASE(127, 127, 1, 0x00a0, 0x10, 20, 2),
+ PIN_FIELD_BASE(128, 128, 1, 0x00a0, 0x10, 6, 2),
+ PIN_FIELD_BASE(129, 129, 1, 0x00a0, 0x10, 16, 2),
+ PIN_FIELD_BASE(130, 130, 1, 0x00a0, 0x10, 30, 2),
+ PIN_FIELD_BASE(131, 131, 1, 0x00a0, 0x10, 2, 2),
+ PIN_FIELD_BASE(132, 132, 1, 0x00a0, 0x10, 4, 2),
+ PIN_FIELD_BASE(133, 133, 1, 0x00b0, 0x10, 0, 2),
+ PIN_FIELD_BASE(134, 134, 1, 0x00a0, 0x10, 18, 2),
+ PIN_FIELD_BASE(135, 135, 1, 0x00a0, 0x10, 0, 2),
+ PIN_FIELD_BASE(136, 136, 1, 0x00b0, 0x10, 2, 2),
+ PIN_FIELD_BASE(137, 137, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(138, 138, 2, 0x00c0, 0x10, 12, 2),
+ PIN_FIELD_BASE(139, 139, 1, 0x00b0, 0x10, 6, 2),
+ PIN_FIELD_BASE(140, 140, 1, 0x00b0, 0x10, 6, 2),
+ PIN_FIELD_BASE(141, 141, 1, 0x00b0, 0x10, 6, 2),
+ PIN_FIELD_BASE(142, 142, 1, 0x00b0, 0x10, 6, 2),
+ PIN_FIELD_BASE(143, 143, 1, 0x00b0, 0x10, 6, 2),
+ PIN_FIELD_BASE(144, 144, 1, 0x00b0, 0x10, 6, 2),
+ PIN_FIELD_BASE(145, 145, 1, 0x00b0, 0x10, 4, 2),
+ PIN_FIELD_BASE(146, 146, 1, 0x00b0, 0x10, 4, 2),
+ PIN_FIELD_BASE(147, 147, 1, 0x00b0, 0x10, 4, 2),
+ PIN_FIELD_BASE(148, 148, 1, 0x00b0, 0x10, 4, 2),
+ PIN_FIELD_BASE(149, 149, 1, 0x00b0, 0x10, 4, 2),
+ PIN_FIELD_BASE(150, 150, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(151, 151, 1, 0x00b0, 0x10, 8, 2),
+ PIN_FIELD_BASE(152, 152, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(153, 153, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(154, 154, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(155, 155, 3, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(156, 156, 5, 0x00f0, 0x10, 6, 6),
+ PIN_FIELD_BASE(157, 157, 5, 0x00f0, 0x10, 0, 6),
+ PIN_FIELD_BASE(158, 158, 5, 0x00e0, 0x10, 24, 6),
+ PIN_FIELD_BASE(159, 159, 6, 0x0080, 0x10, 12, 6),
+ PIN_FIELD_BASE(160, 160, 5, 0x00f0, 0x10, 18, 6),
+ PIN_FIELD_BASE(161, 161, 5, 0x00e0, 0x10, 6, 6),
+ PIN_FIELD_BASE(162, 162, 5, 0x00e0, 0x10, 0, 6),
+ PIN_FIELD_BASE(163, 163, 6, 0x0080, 0x10, 6, 6),
+ PIN_FIELD_BASE(164, 164, 5, 0x00e0, 0x10, 18, 6),
+ PIN_FIELD_BASE(165, 165, 5, 0x00e0, 0x10, 12, 6),
+ PIN_FIELD_BASE(166, 166, 6, 0x0080, 0x10, 0, 6),
+ PIN_FIELD_BASE(167, 167, 5, 0x00f0, 0x10, 12, 6),
+ PIN_FIELD_BASE(168, 168, 3, 0x00d0, 0x10, 10, 6),
+ PIN_FIELD_BASE(169, 169, 3, 0x00d0, 0x10, 4, 6),
+ PIN_FIELD_BASE(170, 170, 3, 0x00d0, 0x10, 16, 6),
+ PIN_FIELD_BASE(171, 171, 3, 0x00d0, 0x10, 22, 6),
+ PIN_FIELD_BASE(172, 172, 3, 0x00e0, 0x10, 0, 6),
+ PIN_FIELD_BASE(173, 173, 3, 0x00e0, 0x10, 6, 6),
+ PIN_FIELD_BASE(174, 174, 9, 0x00c0, 0x10, 12, 6),
+ PIN_FIELD_BASE(175, 175, 9, 0x00c0, 0x10, 6, 6),
+ PIN_FIELD_BASE(176, 176, 9, 0x00c0, 0x10, 18, 6),
+ PIN_FIELD_BASE(177, 177, 9, 0x00c0, 0x10, 24, 6),
+ PIN_FIELD_BASE(178, 178, 9, 0x00d0, 0x10, 0, 6),
+ PIN_FIELD_BASE(179, 179, 9, 0x00d0, 0x10, 6, 6),
+ PIN_FIELD_BASE(180, 180, 5, 0x00f0, 0x10, 30, 2),
+ PIN_FIELD_BASE(181, 181, 5, 0x0100, 0x10, 0, 2),
+ PIN_FIELD_BASE(182, 182, 9, 0x00d0, 0x10, 14, 2),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_pupd_range[] = {
+ PIN_FIELD_BASE(44, 44, 7, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(156, 156, 5, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(157, 157, 5, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(158, 158, 5, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(159, 159, 6, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(161, 161, 5, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(162, 162, 5, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(163, 163, 6, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 5, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(165, 165, 5, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 6, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(167, 167, 5, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(174, 174, 9, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(175, 175, 9, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(176, 176, 9, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(177, 177, 9, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(178, 178, 9, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(179, 179, 9, 0x0080, 0x10, 5, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_r0_range[] = {
+ PIN_FIELD_BASE(44, 44, 7, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(156, 156, 5, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(157, 157, 5, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(158, 158, 5, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(159, 159, 6, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(161, 161, 5, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(162, 162, 5, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(163, 163, 6, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 5, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(165, 165, 5, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 6, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(167, 167, 5, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(174, 174, 9, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(175, 175, 9, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(176, 176, 9, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(177, 177, 9, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(178, 178, 9, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(179, 179, 9, 0x00a0, 0x10, 5, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_r1_range[] = {
+ PIN_FIELD_BASE(44, 44, 7, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(156, 156, 5, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(157, 157, 5, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(158, 158, 5, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(159, 159, 6, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(161, 161, 5, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(162, 162, 5, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(163, 163, 6, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 5, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(165, 165, 5, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 6, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(167, 167, 5, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(174, 174, 9, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(175, 175, 9, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(176, 176, 9, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(177, 177, 9, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(178, 178, 9, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(179, 179, 9, 0x00b0, 0x10, 5, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_pu_range[] = {
+ PIN_FIELD_BASE(0, 0, 7, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(2, 2, 8, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(3, 3, 8, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(4, 4, 8, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(5, 5, 8, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(6, 6, 7, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(7, 7, 7, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(8, 8, 7, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(9, 9, 7, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(10, 10, 7, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(11, 11, 7, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 3, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(16, 16, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(17, 17, 2, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(18, 18, 7, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(19, 19, 7, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 7, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 7, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 9, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 9, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 9, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 4, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(26, 26, 4, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(29, 29, 4, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0090, 0x10, 30, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(48, 48, 4, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(49, 49, 4, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(50, 50, 4, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(52, 52, 8, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(53, 53, 8, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(54, 54, 8, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(55, 55, 4, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(56, 56, 4, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(59, 59, 2, 0x00b0, 0x10, 14, 1),
+ PIN_FIELD_BASE(60, 60, 2, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(61, 61, 2, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(62, 62, 2, 0x00b0, 0x10, 19, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x00b0, 0x10, 20, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(69, 69, 2, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(70, 70, 2, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(71, 71, 2, 0x00b0, 0x10, 24, 1),
+ PIN_FIELD_BASE(72, 72, 2, 0x00b0, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 2, 0x00b0, 0x10, 26, 1),
+ PIN_FIELD_BASE(74, 74, 2, 0x00b0, 0x10, 25, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 2, 0x00b0, 0x10, 27, 1),
+ PIN_FIELD_BASE(77, 77, 8, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(78, 78, 8, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(79, 79, 8, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(80, 80, 8, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(81, 81, 2, 0x00b0, 0x10, 29, 1),
+ PIN_FIELD_BASE(82, 82, 2, 0x00b0, 0x10, 28, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x00b0, 0x10, 30, 1),
+ PIN_FIELD_BASE(84, 84, 7, 0x00a0, 0x10, 22, 1),
+ PIN_FIELD_BASE(85, 85, 7, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(86, 86, 7, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(87, 87, 7, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(88, 88, 5, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(89, 89, 5, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(90, 90, 5, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(91, 91, 5, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(92, 92, 5, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(93, 93, 5, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(94, 94, 5, 0x00b0, 0x10, 14, 1),
+ PIN_FIELD_BASE(95, 95, 5, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(96, 96, 5, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(97, 97, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 5, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 5, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(100, 100, 5, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(101, 101, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(102, 102, 5, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(103, 103, 7, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(104, 104, 7, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(105, 105, 7, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 7, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(107, 107, 7, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(108, 108, 7, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(109, 109, 7, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(110, 110, 7, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 7, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(112, 112, 8, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(113, 113, 8, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(114, 114, 8, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(119, 119, 1, 0x0090, 0x10, 26, 1),
+ PIN_FIELD_BASE(120, 120, 1, 0x0090, 0x10, 25, 1),
+ PIN_FIELD_BASE(121, 121, 1, 0x0090, 0x10, 24, 1),
+ PIN_FIELD_BASE(122, 122, 1, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(123, 123, 1, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(124, 124, 1, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(125, 125, 1, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(126, 126, 1, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(127, 127, 1, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(128, 128, 1, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(129, 129, 1, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(130, 130, 1, 0x0090, 0x10, 27, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0090, 0x10, 28, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 2, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(138, 138, 2, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(150, 150, 3, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(151, 151, 1, 0x0090, 0x10, 29, 1),
+ PIN_FIELD_BASE(152, 152, 3, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(153, 153, 3, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(154, 154, 3, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(155, 155, 3, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(180, 180, 5, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(181, 181, 5, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(182, 182, 9, 0x0090, 0x10, 3, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_pd_range[] = {
+ PIN_FIELD_BASE(0, 0, 7, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(2, 2, 8, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(3, 3, 8, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(4, 4, 8, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(5, 5, 8, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(6, 6, 7, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(7, 7, 7, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(8, 8, 7, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(9, 9, 7, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(10, 10, 7, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(11, 11, 7, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(12, 12, 2, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(13, 13, 2, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 3, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(16, 16, 2, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(17, 17, 2, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(18, 18, 7, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(19, 19, 7, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 7, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 7, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 9, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(23, 23, 9, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(24, 24, 9, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(25, 25, 4, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(26, 26, 4, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(29, 29, 4, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0080, 0x10, 30, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 3, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(48, 48, 4, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(49, 49, 4, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(50, 50, 4, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(52, 52, 8, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(53, 53, 8, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(54, 54, 8, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(55, 55, 4, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(56, 56, 4, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(57, 57, 2, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(58, 58, 2, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(59, 59, 2, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(60, 60, 2, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(61, 61, 2, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(62, 62, 2, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(63, 63, 2, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(64, 64, 2, 0x00a0, 0x10, 20, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(69, 69, 2, 0x00a0, 0x10, 22, 1),
+ PIN_FIELD_BASE(70, 70, 2, 0x00a0, 0x10, 21, 1),
+ PIN_FIELD_BASE(71, 71, 2, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(72, 72, 2, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 2, 0x00a0, 0x10, 26, 1),
+ PIN_FIELD_BASE(74, 74, 2, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(76, 76, 2, 0x00a0, 0x10, 27, 1),
+ PIN_FIELD_BASE(77, 77, 8, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(78, 78, 8, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(79, 79, 8, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(80, 80, 8, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(81, 81, 2, 0x00a0, 0x10, 29, 1),
+ PIN_FIELD_BASE(82, 82, 2, 0x00a0, 0x10, 28, 1),
+ PIN_FIELD_BASE(83, 83, 2, 0x00a0, 0x10, 30, 1),
+ PIN_FIELD_BASE(84, 84, 7, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(85, 85, 7, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(86, 86, 7, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(87, 87, 7, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(88, 88, 5, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(89, 89, 5, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(90, 90, 5, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(91, 91, 5, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(92, 92, 5, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(93, 93, 5, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(94, 94, 5, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(95, 95, 5, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(96, 96, 5, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(97, 97, 5, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(98, 98, 5, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 5, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(100, 100, 5, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(101, 101, 5, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(102, 102, 5, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(103, 103, 7, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(104, 104, 7, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(105, 105, 7, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 7, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(107, 107, 7, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(108, 108, 7, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(109, 109, 7, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(110, 110, 7, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(111, 111, 7, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(112, 112, 8, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(113, 113, 8, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(114, 114, 8, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(119, 119, 1, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(120, 120, 1, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(121, 121, 1, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(122, 122, 1, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(123, 123, 1, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(124, 124, 1, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(125, 125, 1, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(126, 126, 1, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(127, 127, 1, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(128, 128, 1, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(129, 129, 1, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(130, 130, 1, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(131, 131, 1, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(132, 132, 1, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(133, 133, 1, 0x0080, 0x10, 28, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 1, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(137, 137, 2, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(138, 138, 2, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(139, 139, 1, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(140, 140, 1, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(141, 141, 1, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 1, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(150, 150, 3, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(151, 151, 1, 0x0080, 0x10, 29, 1),
+ PIN_FIELD_BASE(152, 152, 3, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(153, 153, 3, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(154, 154, 3, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(155, 155, 3, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(180, 180, 5, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(181, 181, 5, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(182, 182, 9, 0x0070, 0x10, 3, 1),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 7, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(1, 1, 8, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(2, 2, 8, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(3, 3, 8, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(4, 4, 8, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(5, 5, 8, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(6, 6, 7, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(7, 7, 7, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(8, 8, 7, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(9, 9, 7, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(10, 10, 7, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(11, 11, 7, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(12, 12, 2, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(13, 13, 2, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(14, 14, 3, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(15, 15, 3, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(16, 16, 2, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(17, 17, 2, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(18, 18, 7, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(19, 19, 7, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(20, 20, 7, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(21, 21, 7, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(22, 22, 9, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(23, 23, 9, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(24, 24, 9, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(25, 25, 4, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(26, 26, 4, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(27, 27, 2, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(28, 28, 2, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(29, 29, 4, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(30, 30, 2, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(31, 31, 3, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(32, 32, 1, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(33, 33, 3, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(34, 34, 3, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(35, 35, 3, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(36, 36, 3, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(37, 37, 3, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(38, 38, 3, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(39, 39, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(40, 40, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(41, 41, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(42, 42, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(43, 43, 3, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(44, 44, 7, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(45, 45, 7, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(46, 46, 7, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(47, 47, 7, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(48, 48, 4, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(49, 49, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(50, 50, 4, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(51, 51, 8, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(52, 52, 8, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(53, 53, 8, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(54, 54, 8, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(55, 55, 4, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(56, 56, 4, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(57, 57, 2, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(58, 58, 2, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(59, 59, 2, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(60, 60, 2, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(61, 61, 2, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(62, 62, 2, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(63, 63, 2, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(64, 64, 2, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(65, 65, 9, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 9, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(67, 67, 9, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 9, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(69, 69, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(70, 70, 2, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(71, 71, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(72, 72, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(73, 73, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(74, 74, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(75, 75, 3, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(76, 76, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(77, 77, 8, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(78, 78, 8, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(79, 79, 8, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(80, 80, 8, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(81, 81, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(82, 82, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(83, 83, 2, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(84, 84, 7, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(85, 85, 7, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(86, 86, 7, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(87, 87, 7, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(88, 88, 5, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(89, 89, 5, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(90, 90, 5, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(91, 91, 5, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(92, 92, 5, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(93, 93, 5, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(94, 94, 5, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(95, 95, 5, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(96, 96, 5, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(97, 97, 5, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(98, 98, 5, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(99, 99, 5, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(100, 100, 5, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(101, 101, 5, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(102, 102, 5, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(103, 103, 7, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(104, 104, 7, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(105, 105, 7, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(106, 106, 7, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(107, 107, 7, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(108, 108, 7, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(109, 109, 7, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(110, 110, 7, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(111, 111, 7, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(112, 112, 8, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(113, 113, 8, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(114, 114, 8, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(115, 115, 2, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(116, 116, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(117, 117, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(118, 118, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(119, 119, 1, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(120, 120, 1, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(121, 121, 1, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(122, 122, 1, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(123, 123, 1, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(124, 124, 1, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(125, 125, 1, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(126, 126, 1, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(127, 127, 1, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(128, 128, 1, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(129, 129, 1, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(130, 130, 1, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(131, 131, 1, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(132, 132, 1, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(133, 133, 1, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(134, 134, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(135, 135, 1, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(136, 136, 1, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(137, 137, 2, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(138, 138, 2, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(139, 139, 1, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(140, 140, 1, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(141, 141, 1, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(142, 142, 1, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(143, 143, 1, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(144, 144, 1, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(145, 145, 1, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(146, 146, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(147, 147, 1, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(148, 148, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(149, 149, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(150, 150, 3, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(151, 151, 1, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(152, 152, 3, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(153, 153, 3, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(154, 154, 3, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(155, 155, 3, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(156, 156, 5, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(157, 157, 5, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(158, 158, 5, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(159, 159, 6, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(160, 160, 5, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(161, 161, 5, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(162, 162, 5, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(163, 163, 6, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(164, 164, 5, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(165, 165, 5, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(166, 166, 6, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(167, 167, 5, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(168, 168, 3, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(169, 169, 3, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(170, 170, 3, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(171, 171, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(172, 172, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(173, 173, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(174, 174, 9, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(175, 175, 9, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(176, 176, 9, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(177, 177, 9, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(178, 178, 9, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(179, 179, 9, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(180, 180, 5, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(181, 181, 5, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(182, 182, 9, 0x0000, 0x10, 9, 3),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(51, 51, 8, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(52, 52, 8, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(53, 53, 8, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(54, 54, 8, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(55, 55, 4, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(56, 56, 4, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(57, 57, 2, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(58, 58, 2, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(59, 59, 2, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(60, 60, 2, 0x0040, 0x10, 15, 3),
+ PIN_FIELD_BASE(61, 61, 2, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(62, 62, 2, 0x0040, 0x10, 18, 3),
+ PIN_FIELD_BASE(63, 63, 2, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(64, 64, 2, 0x0040, 0x10, 21, 3),
+ PIN_FIELD_BASE(65, 65, 9, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 9, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(67, 67, 9, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 9, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(180, 180, 5, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(181, 181, 5, 0x0030, 0x10, 3, 3),
+};
+
+static const struct mtk_pin_field_calc mt8189_pin_rsel_range[] = {
+ PIN_FIELD_BASE(51, 51, 8, 0x00b0, 0x10, 0, 3),
+ PIN_FIELD_BASE(52, 52, 8, 0x00b0, 0x10, 6, 3),
+ PIN_FIELD_BASE(53, 53, 8, 0x00b0, 0x10, 3, 3),
+ PIN_FIELD_BASE(54, 54, 8, 0x00b0, 0x10, 9, 3),
+ PIN_FIELD_BASE(55, 55, 4, 0x00b0, 0x10, 0, 3),
+ PIN_FIELD_BASE(56, 56, 4, 0x00b0, 0x10, 3, 3),
+ PIN_FIELD_BASE(57, 57, 2, 0x00d0, 0x10, 0, 3),
+ PIN_FIELD_BASE(58, 58, 2, 0x00d0, 0x10, 12, 3),
+ PIN_FIELD_BASE(59, 59, 2, 0x00d0, 0x10, 3, 3),
+ PIN_FIELD_BASE(60, 60, 2, 0x00d0, 0x10, 15, 3),
+ PIN_FIELD_BASE(61, 61, 2, 0x00d0, 0x10, 6, 3),
+ PIN_FIELD_BASE(62, 62, 2, 0x00d0, 0x10, 18, 3),
+ PIN_FIELD_BASE(63, 63, 2, 0x00d0, 0x10, 9, 3),
+ PIN_FIELD_BASE(64, 64, 2, 0x00d0, 0x10, 21, 3),
+ PIN_FIELD_BASE(65, 65, 9, 0x00e0, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 9, 0x00e0, 0x10, 6, 3),
+ PIN_FIELD_BASE(67, 67, 9, 0x00e0, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 9, 0x00e0, 0x10, 9, 3),
+ PIN_FIELD_BASE(180, 180, 5, 0x0110, 0x10, 0, 3),
+ PIN_FIELD_BASE(181, 181, 5, 0x0110, 0x10, 3, 3),
+};
+
+static const struct mtk_pin_rsel mt8189_pin_rsel_val_range[] = {
+ PIN_RSEL(51, 68, 0x0, 75000, 75000),
+ PIN_RSEL(51, 68, 0x1, 10000, 5000),
+ PIN_RSEL(51, 68, 0x2, 5000, 75000),
+ PIN_RSEL(51, 68, 0x3, 4000, 5000),
+ PIN_RSEL(51, 68, 0x4, 3000, 75000),
+ PIN_RSEL(51, 68, 0x5, 2000, 5000),
+ PIN_RSEL(51, 68, 0x6, 1500, 75000),
+ PIN_RSEL(51, 68, 0x7, 1000, 5000),
+ PIN_RSEL(180, 181, 0x0, 75000, 75000),
+ PIN_RSEL(180, 181, 0x1, 10000, 5000),
+ PIN_RSEL(180, 181, 0x2, 5000, 75000),
+ PIN_RSEL(180, 181, 0x3, 4000, 5000),
+ PIN_RSEL(180, 181, 0x4, 3000, 75000),
+ PIN_RSEL(180, 181, 0x5, 2000, 5000),
+ PIN_RSEL(180, 181, 0x6, 1500, 75000),
+ PIN_RSEL(180, 181, 0x7, 1000, 5000),
+};
+
+static const unsigned int mt8189_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE, /*0*/
+ MTK_PULL_PU_PD_TYPE, /*1*/
+ MTK_PULL_PU_PD_TYPE, /*2*/
+ MTK_PULL_PU_PD_TYPE, /*3*/
+ MTK_PULL_PU_PD_TYPE, /*4*/
+ MTK_PULL_PU_PD_TYPE, /*5*/
+ MTK_PULL_PU_PD_TYPE, /*6*/
+ MTK_PULL_PU_PD_TYPE, /*7*/
+ MTK_PULL_PU_PD_TYPE, /*8*/
+ MTK_PULL_PU_PD_TYPE, /*9*/
+ MTK_PULL_PU_PD_TYPE, /*10*/
+ MTK_PULL_PU_PD_TYPE, /*11*/
+ MTK_PULL_PU_PD_TYPE, /*12*/
+ MTK_PULL_PU_PD_TYPE, /*13*/
+ MTK_PULL_PU_PD_TYPE, /*14*/
+ MTK_PULL_PU_PD_TYPE, /*15*/
+ MTK_PULL_PU_PD_TYPE, /*16*/
+ MTK_PULL_PU_PD_TYPE, /*17*/
+ MTK_PULL_PU_PD_TYPE, /*18*/
+ MTK_PULL_PU_PD_TYPE, /*19*/
+ MTK_PULL_PU_PD_TYPE, /*20*/
+ MTK_PULL_PU_PD_TYPE, /*21*/
+ MTK_PULL_PU_PD_TYPE, /*22*/
+ MTK_PULL_PU_PD_TYPE, /*23*/
+ MTK_PULL_PU_PD_TYPE, /*24*/
+ MTK_PULL_PU_PD_TYPE, /*25*/
+ MTK_PULL_PU_PD_TYPE, /*26*/
+ MTK_PULL_PU_PD_TYPE, /*27*/
+ MTK_PULL_PU_PD_TYPE, /*28*/
+ MTK_PULL_PU_PD_TYPE, /*29*/
+ MTK_PULL_PU_PD_TYPE, /*30*/
+ MTK_PULL_PU_PD_TYPE, /*31*/
+ MTK_PULL_PU_PD_TYPE, /*32*/
+ MTK_PULL_PU_PD_TYPE, /*33*/
+ MTK_PULL_PU_PD_TYPE, /*34*/
+ MTK_PULL_PU_PD_TYPE, /*35*/
+ MTK_PULL_PU_PD_TYPE, /*36*/
+ MTK_PULL_PU_PD_TYPE, /*37*/
+ MTK_PULL_PU_PD_TYPE, /*38*/
+ MTK_PULL_PU_PD_TYPE, /*39*/
+ MTK_PULL_PU_PD_TYPE, /*40*/
+ MTK_PULL_PU_PD_TYPE, /*41*/
+ MTK_PULL_PU_PD_TYPE, /*42*/
+ MTK_PULL_PU_PD_TYPE, /*43*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*44*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*45*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*46*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*47*/
+ MTK_PULL_PU_PD_TYPE, /*48*/
+ MTK_PULL_PU_PD_TYPE, /*49*/
+ MTK_PULL_PU_PD_TYPE, /*50*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*51*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*52*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*53*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*54*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*55*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*56*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*57*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*58*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*59*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*60*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*61*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*62*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*63*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*64*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*65*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*66*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*67*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*68*/
+ MTK_PULL_PU_PD_TYPE, /*69*/
+ MTK_PULL_PU_PD_TYPE, /*70*/
+ MTK_PULL_PU_PD_TYPE, /*71*/
+ MTK_PULL_PU_PD_TYPE, /*72*/
+ MTK_PULL_PU_PD_TYPE, /*73*/
+ MTK_PULL_PU_PD_TYPE, /*74*/
+ MTK_PULL_PU_PD_TYPE, /*75*/
+ MTK_PULL_PU_PD_TYPE, /*76*/
+ MTK_PULL_PU_PD_TYPE, /*77*/
+ MTK_PULL_PU_PD_TYPE, /*78*/
+ MTK_PULL_PU_PD_TYPE, /*79*/
+ MTK_PULL_PU_PD_TYPE, /*80*/
+ MTK_PULL_PU_PD_TYPE, /*81*/
+ MTK_PULL_PU_PD_TYPE, /*82*/
+ MTK_PULL_PU_PD_TYPE, /*83*/
+ MTK_PULL_PU_PD_TYPE, /*84*/
+ MTK_PULL_PU_PD_TYPE, /*85*/
+ MTK_PULL_PU_PD_TYPE, /*86*/
+ MTK_PULL_PU_PD_TYPE, /*87*/
+ MTK_PULL_PU_PD_TYPE, /*88*/
+ MTK_PULL_PU_PD_TYPE, /*89*/
+ MTK_PULL_PU_PD_TYPE, /*90*/
+ MTK_PULL_PU_PD_TYPE, /*91*/
+ MTK_PULL_PU_PD_TYPE, /*92*/
+ MTK_PULL_PU_PD_TYPE, /*93*/
+ MTK_PULL_PU_PD_TYPE, /*94*/
+ MTK_PULL_PU_PD_TYPE, /*95*/
+ MTK_PULL_PU_PD_TYPE, /*96*/
+ MTK_PULL_PU_PD_TYPE, /*97*/
+ MTK_PULL_PU_PD_TYPE, /*98*/
+ MTK_PULL_PU_PD_TYPE, /*99*/
+ MTK_PULL_PU_PD_TYPE, /*100*/
+ MTK_PULL_PU_PD_TYPE, /*101*/
+ MTK_PULL_PU_PD_TYPE, /*102*/
+ MTK_PULL_PU_PD_TYPE, /*103*/
+ MTK_PULL_PU_PD_TYPE, /*104*/
+ MTK_PULL_PU_PD_TYPE, /*105*/
+ MTK_PULL_PU_PD_TYPE, /*106*/
+ MTK_PULL_PU_PD_TYPE, /*107*/
+ MTK_PULL_PU_PD_TYPE, /*108*/
+ MTK_PULL_PU_PD_TYPE, /*109*/
+ MTK_PULL_PU_PD_TYPE, /*110*/
+ MTK_PULL_PU_PD_TYPE, /*111*/
+ MTK_PULL_PU_PD_TYPE, /*112*/
+ MTK_PULL_PU_PD_TYPE, /*113*/
+ MTK_PULL_PU_PD_TYPE, /*114*/
+ MTK_PULL_PU_PD_TYPE, /*115*/
+ MTK_PULL_PU_PD_TYPE, /*116*/
+ MTK_PULL_PU_PD_TYPE, /*117*/
+ MTK_PULL_PU_PD_TYPE, /*118*/
+ MTK_PULL_PU_PD_TYPE, /*119*/
+ MTK_PULL_PU_PD_TYPE, /*120*/
+ MTK_PULL_PU_PD_TYPE, /*121*/
+ MTK_PULL_PU_PD_TYPE, /*122*/
+ MTK_PULL_PU_PD_TYPE, /*123*/
+ MTK_PULL_PU_PD_TYPE, /*124*/
+ MTK_PULL_PU_PD_TYPE, /*125*/
+ MTK_PULL_PU_PD_TYPE, /*126*/
+ MTK_PULL_PU_PD_TYPE, /*127*/
+ MTK_PULL_PU_PD_TYPE, /*128*/
+ MTK_PULL_PU_PD_TYPE, /*129*/
+ MTK_PULL_PU_PD_TYPE, /*130*/
+ MTK_PULL_PU_PD_TYPE, /*131*/
+ MTK_PULL_PU_PD_TYPE, /*132*/
+ MTK_PULL_PU_PD_TYPE, /*133*/
+ MTK_PULL_PU_PD_TYPE, /*134*/
+ MTK_PULL_PU_PD_TYPE, /*135*/
+ MTK_PULL_PU_PD_TYPE, /*136*/
+ MTK_PULL_PU_PD_TYPE, /*137*/
+ MTK_PULL_PU_PD_TYPE, /*138*/
+ MTK_PULL_PU_PD_TYPE, /*139*/
+ MTK_PULL_PU_PD_TYPE, /*140*/
+ MTK_PULL_PU_PD_TYPE, /*141*/
+ MTK_PULL_PU_PD_TYPE, /*142*/
+ MTK_PULL_PU_PD_TYPE, /*143*/
+ MTK_PULL_PU_PD_TYPE, /*144*/
+ MTK_PULL_PU_PD_TYPE, /*145*/
+ MTK_PULL_PU_PD_TYPE, /*146*/
+ MTK_PULL_PU_PD_TYPE, /*147*/
+ MTK_PULL_PU_PD_TYPE, /*148*/
+ MTK_PULL_PU_PD_TYPE, /*149*/
+ MTK_PULL_PU_PD_TYPE, /*150*/
+ MTK_PULL_PU_PD_TYPE, /*151*/
+ MTK_PULL_PU_PD_TYPE, /*152*/
+ MTK_PULL_PU_PD_TYPE, /*153*/
+ MTK_PULL_PU_PD_TYPE, /*154*/
+ MTK_PULL_PU_PD_TYPE, /*155*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*156*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*157*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*158*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*159*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*160*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*161*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*162*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*163*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*164*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*165*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*166*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*167*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*168*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*169*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*170*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*171*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*172*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*173*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*174*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*175*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*176*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*177*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*178*/
+ MTK_PULL_PUPD_R1R0_TYPE, /*179*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*180*/
+ MTK_PULL_PU_PD_RSEL_TYPE, /*181*/
+ MTK_PULL_PU_PD_TYPE, /*182*/
+};
+
+static const struct mtk_pin_reg_calc mt8189_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8189_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8189_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8189_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8189_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8189_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8189_pin_ies_range),
+ [PINCTRL_PIN_REG_TDSEL] = MTK_RANGE(mt8189_pin_tdsel_range),
+ [PINCTRL_PIN_REG_RDSEL] = MTK_RANGE(mt8189_pin_rdsel_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8189_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8189_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8189_pin_r1_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8189_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt8189_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8189_pin_drv_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8189_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8189_pin_rsel_range),
+};
+
+static const char * const mt8189_pinctrl_register_base_names[] = {
+ "gpio_base", "iocfg_bm0_base", "iocfg_bm1_base", "iocfg_bm2_base", "iocfg_lm_base",
+ "iocfg_lt0_base", "iocfg_lt1_base", "iocfg_rb0_base", "iocfg_rb1_base",
+ "iocfg_rt_base"
+};
+
+static const struct mtk_eint_hw mt8189_eint_hw = {
+ .port_mask = 0xf,
+ .ports = 3,
+ .ap_num = 210,
+ .db_cnt = 32,
+ .db_time = debounce_time_mt6765,
+};
+
+static const struct mtk_pin_soc mt8189_data = {
+ .reg_cal = mt8189_reg_cals,
+ .pins = mtk_pins_mt8189,
+ .npins = ARRAY_SIZE(mtk_pins_mt8189),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt8189),
+ .eint_pin = eint_pins_mt8189,
+ .eint_hw = &mt8189_eint_hw,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .base_names = mt8189_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt8189_pinctrl_register_base_names),
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .pull_type = mt8189_pull_type,
+ .pin_rsel = mt8189_pin_rsel_val_range,
+ .npin_rsel = ARRAY_SIZE(mt8189_pin_rsel_val_range),
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+};
+
+static const struct of_device_id mt8189_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt8189-pinctrl", .data = &mt8189_data },
+ { /* sentinel */ }
+};
+
+static struct platform_driver mt8189_pinctrl_driver = {
+ .driver = {
+ .name = "mt8189-pinctrl",
+ .of_match_table = mt8189_pinctrl_of_match,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
+ },
+ .probe = mtk_paris_pinctrl_probe,
+};
+
+static int __init mt8189_pinctrl_init(void)
+{
+ return platform_driver_register(&mt8189_pinctrl_driver);
+}
+arch_initcall(mt8189_pinctrl_init);
+
+MODULE_DESCRIPTION("MediaTek MT8189 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index a4cb6d511fcd..d10306024111 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -898,7 +898,7 @@ static const struct gpio_chip mtk_gpio_chip = {
.direction_input = pinctrl_gpio_direction_input,
.direction_output = mtk_gpio_direction_output,
.get = mtk_gpio_get,
- .set_rv = mtk_gpio_set,
+ .set = mtk_gpio_set,
.to_irq = mtk_gpio_to_irq,
.set_config = mtk_gpio_set_config,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8189.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8189.h
new file mode 100644
index 000000000000..771efb3da73f
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8189.h
@@ -0,0 +1,2452 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 MediaTek Inc.
+ * Author: Lei Xue <lei.xue@mediatek.com>
+ * Cathy Xu <ot_cathy.xu@mediatek.com>
+ */
+
+#ifndef __PINCTRL_MTK_MT8189_H
+#define __PINCTRL_MTK_MT8189_H
+
+#include "pinctrl-paris.h"
+
+static const struct mtk_pin_desc mtk_pins_mt8189[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "TP_GPIO0_AO"),
+ MTK_FUNCTION(2, "SPIM3_A_CSB"),
+ MTK_FUNCTION(3, "I2SOUT0_MCK"),
+ MTK_FUNCTION(4, "SCP_SPI0_CS"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS6"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "TP_GPIO1_AO"),
+ MTK_FUNCTION(2, "SPIM3_A_CLK"),
+ MTK_FUNCTION(3, "I2SOUT0_BCK"),
+ MTK_FUNCTION(4, "SCP_SPI0_CK"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS7"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "TP_GPIO2_AO"),
+ MTK_FUNCTION(2, "SPIM3_A_MO"),
+ MTK_FUNCTION(3, "I2SOUT0_LRCK"),
+ MTK_FUNCTION(4, "SCP_SPI0_MO"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS8"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "TP_GPIO3_AO"),
+ MTK_FUNCTION(2, "SPIM3_A_MI"),
+ MTK_FUNCTION(3, "I2SOUT0_DO"),
+ MTK_FUNCTION(4, "SCP_SPI0_MI"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS9"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "TP_GPIO4_AO"),
+ MTK_FUNCTION(2, "SPIM4_A_CSB"),
+ MTK_FUNCTION(3, "I2SIN0_DI"),
+ MTK_FUNCTION(4, "SCP_SPI1_CS"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS10"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "TP_GPIO5_AO"),
+ MTK_FUNCTION(2, "SPIM4_A_CLK"),
+ MTK_FUNCTION(3, "I2SIN0_BCK"),
+ MTK_FUNCTION(4, "SCP_SPI1_CK"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS11_OLAT0"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "TP_GPIO6_AO"),
+ MTK_FUNCTION(2, "SPIM4_A_MO"),
+ MTK_FUNCTION(3, "I2SIN0_LRCK"),
+ MTK_FUNCTION(4, "SCP_SPI1_MO"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS12_OLAT1"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "TP_GPIO7_AO"),
+ MTK_FUNCTION(2, "SPIM4_A_MI"),
+ MTK_FUNCTION(3, "I2SIN0_MCK"),
+ MTK_FUNCTION(4, "SCP_SPI1_MI"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS13_OLAT2"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "TP_UTXD1_VLP"),
+ MTK_FUNCTION(2, "SPIM5_A_CSB"),
+ MTK_FUNCTION(3, "I2SOUT1_MCK"),
+ MTK_FUNCTION(4, "VADSP_UTXD0"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS14_OLAT3"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "TP_URXD1_VLP"),
+ MTK_FUNCTION(2, "SPIM5_A_CLK"),
+ MTK_FUNCTION(3, "I2SOUT1_BCK"),
+ MTK_FUNCTION(4, "VADSP_URXD0"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS15_OLAT4"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "TP_UCTS1_VLP"),
+ MTK_FUNCTION(2, "SPIM5_A_MO"),
+ MTK_FUNCTION(3, "I2SOUT1_LRCK"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(6, "CONN_BPI_BUS16_OLAT5"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "TP_URTS1_VLP"),
+ MTK_FUNCTION(2, "SPIM5_A_MI"),
+ MTK_FUNCTION(3, "I2SOUT1_DO"),
+ MTK_FUNCTION(4, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "PWM_vlp"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "TP_UTXD1_VCORE"),
+ MTK_FUNCTION(2, "UTXD3"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(4, "CMFLASH0"),
+ MTK_FUNCTION(6, "ANT_SEL0"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "TP_URXD1_VCORE"),
+ MTK_FUNCTION(2, "URXD3"),
+ MTK_FUNCTION(3, "CLKM1"),
+ MTK_FUNCTION(4, "CMFLASH1"),
+ MTK_FUNCTION(6, "ANT_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "TP_UCTS1_VCORE"),
+ MTK_FUNCTION(2, "UCTS3"),
+ MTK_FUNCTION(3, "CLKM2"),
+ MTK_FUNCTION(4, "CMFLASH2"),
+ MTK_FUNCTION(6, "ANT_SEL2"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "TP_URTS1_VCORE"),
+ MTK_FUNCTION(2, "URTS3"),
+ MTK_FUNCTION(3, "CLKM3"),
+ MTK_FUNCTION(4, "CMVREF0"),
+ MTK_FUNCTION(6, "ANT_SEL3"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "PWM_0"),
+ MTK_FUNCTION(2, "UCTS2"),
+ MTK_FUNCTION(3, "DP_TX_HPD"),
+ MTK_FUNCTION(4, "CMVREF1"),
+ MTK_FUNCTION(5, "MD32_0_GPIO0"),
+ MTK_FUNCTION(6, "ANT_SEL4"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "PWM_1"),
+ MTK_FUNCTION(2, "URTS2"),
+ MTK_FUNCTION(3, "EDP_TX_HPD"),
+ MTK_FUNCTION(4, "CMVREF2"),
+ MTK_FUNCTION(5, "MD32_1_GPIO0"),
+ MTK_FUNCTION(6, "PMSR_SMAP"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "CMFLASH0"),
+ MTK_FUNCTION(2, "CMVREF3"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(4, "DISP_PWM1"),
+ MTK_FUNCTION(5, "I2SIN1_MCK"),
+ MTK_FUNCTION(6, "mbistreaden_trigger"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "CMFLASH1"),
+ MTK_FUNCTION(2, "CMVREF2"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(4, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(5, "I2SIN1_BCK"),
+ MTK_FUNCTION(6, "mbistwriteen_trigger"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "CMFLASH2"),
+ MTK_FUNCTION(2, "CMVREF1"),
+ MTK_FUNCTION(3, "UCTS2"),
+ MTK_FUNCTION(4, "PERSTN"),
+ MTK_FUNCTION(5, "I2SIN1_LRCK"),
+ MTK_FUNCTION(6, "DMIC0_DAT1"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "CMFLASH3"),
+ MTK_FUNCTION(2, "CMVREF0"),
+ MTK_FUNCTION(3, "URTS2"),
+ MTK_FUNCTION(4, "CLKREQN"),
+ MTK_FUNCTION(5, "I2SIN1_DI"),
+ MTK_FUNCTION(6, "DMIC1_DAT1"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(2, "TP_GPIO4_AO")
+ ),
+
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "CMMCLK1"),
+ MTK_FUNCTION(2, "TP_GPIO5_AO"),
+ MTK_FUNCTION(3, "SSPM_UTXD_AO_VLP"),
+ MTK_FUNCTION(4, "PWM_vlp"),
+ MTK_FUNCTION(6, "SRCLKENAI0")
+ ),
+
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(2, "TP_GPIO6_AO"),
+ MTK_FUNCTION(3, "SSPM_URXD_AO_VLP"),
+ MTK_FUNCTION(4, "WAKEN"),
+ MTK_FUNCTION(5, "SPMI_P_TRIG_FLAG"),
+ MTK_FUNCTION(6, "SRCLKENAI1")
+ ),
+
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(2, "DP_TX_HPD"),
+ MTK_FUNCTION(3, "CMFLASH3"),
+ MTK_FUNCTION(4, "MD32_0_GPIO0"),
+ MTK_FUNCTION(5, "USB_DRVVBUS_2P")
+ ),
+
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "DSI_TE"),
+ MTK_FUNCTION(2, "EDP_TX_HPD"),
+ MTK_FUNCTION(3, "CMVREF3"),
+ MTK_FUNCTION(4, "MD32_1_GPIO0"),
+ MTK_FUNCTION(5, "USB_DRVVBUS_3P")
+ ),
+
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "DP_TX_HPD"),
+ MTK_FUNCTION(2, "mbistreaden_trigger"),
+ MTK_FUNCTION(3, "MD32_0_GPIO0"),
+ MTK_FUNCTION(4, "TP_UCTS1_VCORE"),
+ MTK_FUNCTION(5, "CMVREF4"),
+ MTK_FUNCTION(6, "EXTIF0_ACT"),
+ MTK_FUNCTION(7, "ANT_SEL0")
+ ),
+
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "EDP_TX_HPD"),
+ MTK_FUNCTION(2, "mbistwriteen_trigger"),
+ MTK_FUNCTION(3, "MD32_1_GPIO0"),
+ MTK_FUNCTION(4, "TP_URTS1_VCORE"),
+ MTK_FUNCTION(6, "EXTIF0_PRI"),
+ MTK_FUNCTION(7, "ANT_SEL1")
+ ),
+
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "DISP_PWM0"),
+ MTK_FUNCTION(2, "MD32_1_TXD"),
+ MTK_FUNCTION(3, "SSPM_UTXD_AO_VCORE"),
+ MTK_FUNCTION(5, "USB_DRVVBUS_4P")
+ ),
+
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "DISP_PWM1"),
+ MTK_FUNCTION(2, "MD32_1_RXD"),
+ MTK_FUNCTION(3, "SSPM_URXD_AO_VCORE"),
+ MTK_FUNCTION(5, "PMSR_SMAP"),
+ MTK_FUNCTION(6, "EXTIF0_GNT_B"),
+ MTK_FUNCTION(7, "ANT_SEL2")
+ ),
+
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "MD32_0_TXD")
+ ),
+
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "MD32_0_RXD")
+ ),
+
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "VADSP_UTXD0"),
+ MTK_FUNCTION(3, "TP_UTXD1_VLP"),
+ MTK_FUNCTION(4, "MD32_1_TXD"),
+ MTK_FUNCTION(5, "CONN_BGF_UART0_TXD"),
+ MTK_FUNCTION(6, "CONN_WIFI_TXD")
+ ),
+
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "VADSP_URXD0"),
+ MTK_FUNCTION(3, "TP_URXD1_VLP"),
+ MTK_FUNCTION(4, "MD32_1_RXD"),
+ MTK_FUNCTION(5, "CONN_BGF_UART0_RXD")
+ ),
+
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "UTXD2"),
+ MTK_FUNCTION(2, "UCTS1"),
+ MTK_FUNCTION(3, "TP_UCTS1_VLP"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO_VLP"),
+ MTK_FUNCTION(5, "VADSP_UTXD0"),
+ MTK_FUNCTION(6, "CONN_BT_TXD")
+ ),
+
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "URXD2"),
+ MTK_FUNCTION(2, "URTS1"),
+ MTK_FUNCTION(3, "TP_URTS1_VLP"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO_VLP"),
+ MTK_FUNCTION(5, "VADSP_URXD0")
+ ),
+
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "UTXD3"),
+ MTK_FUNCTION(2, "UCTS0"),
+ MTK_FUNCTION(3, "TP_UTXD1_VCORE"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO_VCORE"),
+ MTK_FUNCTION(6, "MD32_0_TXD"),
+ MTK_FUNCTION(7, "CONN_BGF_UART0_TXD")
+ ),
+
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "URXD3"),
+ MTK_FUNCTION(2, "URTS0"),
+ MTK_FUNCTION(3, "TP_URXD1_VCORE"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO_VCORE"),
+ MTK_FUNCTION(6, "MD32_0_RXD"),
+ MTK_FUNCTION(7, "CONN_BGF_UART0_RXD")
+ ),
+
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "JTMS_SEL1")
+ ),
+
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "JTCK_SEL1")
+ ),
+
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "JTDI_SEL1")
+ ),
+
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "JTDO_SEL1")
+ ),
+
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "JTRSTn_SEL1")
+ ),
+
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "KPCOL0")
+ ),
+
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "TP_GPIO0_AO"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(2, "TP_GPIO1_AO")
+ ),
+
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "TP_GPIO2_AO"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(7, "DBG_MON_A32")
+ ),
+
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "WAKEN"),
+ MTK_FUNCTION(2, "TP_GPIO3_AO"),
+ MTK_FUNCTION(3, "SPMI_P_TRIG_FLAG")
+ ),
+
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "PERSTN"),
+ MTK_FUNCTION(2, "MD32_0_GPIO0"),
+ MTK_FUNCTION(3, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(7, "ANT_SEL3")
+ ),
+
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "CLKREQN"),
+ MTK_FUNCTION(2, "MD32_1_GPIO0"),
+ MTK_FUNCTION(3, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(7, "ANT_SEL4")
+ ),
+
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "SCP_SCL0"),
+ MTK_FUNCTION(2, "SCL0")
+ ),
+
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "SCP_SDA0"),
+ MTK_FUNCTION(2, "SDA0")
+ ),
+
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "SCP_SCL1"),
+ MTK_FUNCTION(2, "SCL1")
+ ),
+
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "SCP_SDA1"),
+ MTK_FUNCTION(2, "SDA1")
+ ),
+
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "SCL2"),
+ MTK_FUNCTION(2, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(3, "SSUSB_U2SIF_SCL")
+ ),
+
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "SDA2"),
+ MTK_FUNCTION(2, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(3, "SSUSB_U2SIF_SDA")
+ ),
+
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(2, "PCIE_PHY_I2C_SCL"),
+ MTK_FUNCTION(3, "SSUSB_U2SIF_SCL_1P")
+ ),
+
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(2, "PCIE_PHY_I2C_SDA"),
+ MTK_FUNCTION(3, "SSUSB_U2SIF_SDA_1P")
+ ),
+
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "SCL4"),
+ MTK_FUNCTION(2, "SSUSB_U3PHY_I2C_SCL")
+ ),
+
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "SDA4"),
+ MTK_FUNCTION(2, "SSUSB_U3PHY_I2C_SDA")
+ ),
+
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "SCL5"),
+ MTK_FUNCTION(2, "SSPXTP_U3PHY_I2C_SCL")
+ ),
+
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "SDA5"),
+ MTK_FUNCTION(2, "SSPXTP_U3PHY_I2C_SDA")
+ ),
+
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "SCL6")
+ ),
+
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "SDA6")
+ ),
+
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "SCL7")
+ ),
+
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "SDA7")
+ ),
+
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "SCL8")
+ ),
+
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "SDA8")
+ ),
+
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "SPIM0_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI0_CS"),
+ MTK_FUNCTION(3, "SPM_JTAG_TMS_VCORE"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "SPM_JTAG_TMS"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TMS_VLP"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TMS_VLP")
+ ),
+
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "SPIM0_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI0_CK"),
+ MTK_FUNCTION(3, "SPM_JTAG_TCK_VCORE"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "SPM_JTAG_TCK"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TCK_VLP"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TCK_VLP")
+ ),
+
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "SPIM0_MO"),
+ MTK_FUNCTION(2, "SCP_SPI0_MO"),
+ MTK_FUNCTION(3, "SPM_JTAG_TDI_VCORE"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "SPM_JTAG_TDI"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDI_VLP"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TDI_VLP")
+ ),
+
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "SPIM0_MI"),
+ MTK_FUNCTION(2, "SCP_SPI0_MI"),
+ MTK_FUNCTION(3, "SPM_JTAG_TDO_VCORE"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TDO"),
+ MTK_FUNCTION(5, "SPM_JTAG_TDO"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDO_VLP"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TDO_VLP")
+ ),
+
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "SPIM1_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI1_CS"),
+ MTK_FUNCTION(3, "SPM_JTAG_TRSTN_VCORE"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(5, "SPM_JTAG_TRSTN"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TRSTN_VLP"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TRSTN_VLP")
+ ),
+
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "SPIM1_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI1_CK")
+ ),
+
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "SPIM1_MO"),
+ MTK_FUNCTION(2, "SCP_SPI1_MO")
+ ),
+
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "SPIM1_MI"),
+ MTK_FUNCTION(2, "SCP_SPI1_MI")
+ ),
+
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "SPIM2_CSB"),
+ MTK_FUNCTION(2, "PCM0_SYNC"),
+ MTK_FUNCTION(3, "SSUSB_U2SIF_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "SPIM2_CLK"),
+ MTK_FUNCTION(2, "PCM0_CLK"),
+ MTK_FUNCTION(3, "SSUSB_U2SIF_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "SPIM2_MO"),
+ MTK_FUNCTION(2, "PCM0_DO"),
+ MTK_FUNCTION(3, "SSUSB_U2SIF_SCL_1P"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "SPIM2_MI"),
+ MTK_FUNCTION(2, "PCM0_DI"),
+ MTK_FUNCTION(3, "SSUSB_U2SIF_SDA_1P"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(7, "DBG_MON_B32")
+ ),
+
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "USB_DRVVBUS")
+ ),
+
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "VBUSVALID")
+ ),
+
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_2P"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_3P"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_4P"),
+ MTK_FUNCTION(6, "CMVREF4"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+ ),
+
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+ ),
+
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO")
+ ),
+
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI")
+ ),
+
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "SRCLKENA1")
+ ),
+
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "SCP_VREQ_VAO")
+ ),
+
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI")
+ ),
+
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "AUD_SYNC_MOSI")
+ ),
+
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0")
+ ),
+
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1")
+ ),
+
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0")
+ ),
+
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1")
+ ),
+
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "I2SIN0_MCK"),
+ MTK_FUNCTION(2, "SPIM3_B_CSB"),
+ MTK_FUNCTION(3, "APU_JTAG_TMS"),
+ MTK_FUNCTION(4, "SCP_JTAG0_TMS_VCORE"),
+ MTK_FUNCTION(5, "CONN_WF_MCU_TMS"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TMS_VCORE"),
+ MTK_FUNCTION(7, "IPU_JTAG_TMS")
+ ),
+
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "I2SIN0_BCK"),
+ MTK_FUNCTION(2, "SPIM3_B_CLK"),
+ MTK_FUNCTION(3, "APU_JTAG_TCK"),
+ MTK_FUNCTION(4, "SCP_JTAG0_TCK_VCORE"),
+ MTK_FUNCTION(5, "CONN_WF_MCU_TCK"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TCK_VCORE"),
+ MTK_FUNCTION(7, "IPU_JTAG_TCK")
+ ),
+
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "I2SIN0_LRCK"),
+ MTK_FUNCTION(2, "SPIM3_B_MO"),
+ MTK_FUNCTION(3, "APU_JTAG_TDI"),
+ MTK_FUNCTION(4, "SCP_JTAG0_TDI_VCORE"),
+ MTK_FUNCTION(5, "CONN_WF_MCU_TDI"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDI_VCORE"),
+ MTK_FUNCTION(7, "IPU_JTAG_TDI")
+ ),
+
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "I2SIN0_DI"),
+ MTK_FUNCTION(2, "SPIM3_B_MI"),
+ MTK_FUNCTION(3, "APU_JTAG_TDO"),
+ MTK_FUNCTION(4, "SCP_JTAG0_TDO_VCORE"),
+ MTK_FUNCTION(5, "CONN_WF_MCU_TDO"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDO_VCORE"),
+ MTK_FUNCTION(7, "IPU_JTAG_TDO")
+ ),
+
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "I2SOUT0_MCK"),
+ MTK_FUNCTION(2, "SPIM4_B_CSB"),
+ MTK_FUNCTION(3, "APU_JTAG_TRST"),
+ MTK_FUNCTION(4, "SCP_JTAG0_TRSTN_VCORE"),
+ MTK_FUNCTION(5, "CONN_WF_MCU_TRST_B"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TRSTN_VCORE"),
+ MTK_FUNCTION(7, "IPU_JTAG_TRST")
+ ),
+
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "I2SOUT0_BCK"),
+ MTK_FUNCTION(2, "SPIM4_B_CLK"),
+ MTK_FUNCTION(3, "EXTIF0_ACT"),
+ MTK_FUNCTION(4, "SPM_JTAG_TMS_VCORE"),
+ MTK_FUNCTION(6, "CLKM2"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "I2SOUT0_LRCK"),
+ MTK_FUNCTION(2, "SPIM4_B_MO"),
+ MTK_FUNCTION(3, "EXTIF0_PRI"),
+ MTK_FUNCTION(4, "SPM_JTAG_TCK_VCORE"),
+ MTK_FUNCTION(6, "CLKM3"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "I2SOUT0_DO"),
+ MTK_FUNCTION(2, "SPIM4_B_MI"),
+ MTK_FUNCTION(3, "EXTIF0_GNT_B"),
+ MTK_FUNCTION(4, "SPM_JTAG_TDI_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "DMIC0_CLK"),
+ MTK_FUNCTION(2, "I2SIN1_MCK"),
+ MTK_FUNCTION(3, "I2SOUT1_MCK"),
+ MTK_FUNCTION(4, "SPM_JTAG_TDO_VCORE"),
+ MTK_FUNCTION(6, "CONN_MIPI0_SDATA"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "DMIC0_DAT0"),
+ MTK_FUNCTION(2, "I2SIN1_BCK"),
+ MTK_FUNCTION(3, "I2SOUT1_BCK"),
+ MTK_FUNCTION(4, "SPM_JTAG_TRSTN_VCORE"),
+ MTK_FUNCTION(6, "CONN_MIPI0_SCLK"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "DMIC1_CLK"),
+ MTK_FUNCTION(2, "I2SIN1_LRCK"),
+ MTK_FUNCTION(3, "I2SOUT1_LRCK"),
+ MTK_FUNCTION(4, "PMSR_SMAP"),
+ MTK_FUNCTION(6, "CONN_MIPI1_SDATA"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "DMIC1_DAT0"),
+ MTK_FUNCTION(2, "I2SIN1_DI"),
+ MTK_FUNCTION(3, "I2SOUT1_DO"),
+ MTK_FUNCTION(6, "CONN_MIPI1_SCLK"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "PCM0_CLK"),
+ MTK_FUNCTION(2, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(3, "PCIE_PHY_I2C_SCL"),
+ MTK_FUNCTION(4, "SSUSB_U3PHY_I2C_SCL"),
+ MTK_FUNCTION(6, "CMFLASH0"),
+ MTK_FUNCTION(7, "EXTIF0_ACT")
+ ),
+
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "PCM0_SYNC"),
+ MTK_FUNCTION(2, "USB_DRVVBUS_2P"),
+ MTK_FUNCTION(3, "PCIE_PHY_I2C_SDA"),
+ MTK_FUNCTION(4, "SSUSB_U3PHY_I2C_SDA"),
+ MTK_FUNCTION(6, "CMFLASH1"),
+ MTK_FUNCTION(7, "EXTIF0_PRI")
+ ),
+
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "PCM0_DI"),
+ MTK_FUNCTION(2, "USB_DRVVBUS_3P"),
+ MTK_FUNCTION(3, "DP_TX_HPD"),
+ MTK_FUNCTION(4, "SSPXTP_U3PHY_I2C_SCL"),
+ MTK_FUNCTION(6, "CMVREF0"),
+ MTK_FUNCTION(7, "EXTIF0_GNT_B")
+ ),
+
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "PCM0_DO"),
+ MTK_FUNCTION(2, "USB_DRVVBUS_4P"),
+ MTK_FUNCTION(3, "EDP_TX_HPD"),
+ MTK_FUNCTION(4, "SSPXTP_U3PHY_I2C_SDA"),
+ MTK_FUNCTION(6, "CMVREF1")
+ ),
+
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "GBE_TXD3"),
+ MTK_FUNCTION(2, "DMIC0_CLK"),
+ MTK_FUNCTION(3, "LVTS_FOUT"),
+ MTK_FUNCTION(4, "CONN_BGF_MCU_TMS"),
+ MTK_FUNCTION(5, "UDI_TMS"),
+ MTK_FUNCTION(6, "ANT_SEL5"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "GBE_TXD2"),
+ MTK_FUNCTION(2, "DMIC0_DAT0"),
+ MTK_FUNCTION(3, "LVTS_SDO"),
+ MTK_FUNCTION(4, "CONN_BGF_MCU_TCK"),
+ MTK_FUNCTION(5, "UDI_TCK"),
+ MTK_FUNCTION(6, "ANT_SEL6"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "GBE_TXD1"),
+ MTK_FUNCTION(2, "DMIC0_DAT1"),
+ MTK_FUNCTION(3, "LVTS_26M"),
+ MTK_FUNCTION(4, "CONN_BGF_MCU_TDI"),
+ MTK_FUNCTION(5, "UDI_TDI"),
+ MTK_FUNCTION(6, "ANT_SEL7"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "GBE_TXD0"),
+ MTK_FUNCTION(2, "DMIC1_CLK"),
+ MTK_FUNCTION(3, "LVTS_SCF"),
+ MTK_FUNCTION(4, "CONN_BGF_MCU_TDO"),
+ MTK_FUNCTION(5, "UDI_TDO"),
+ MTK_FUNCTION(6, "ANT_SEL8"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "GBE_RXD3"),
+ MTK_FUNCTION(2, "DMIC1_DAT0"),
+ MTK_FUNCTION(3, "LVTS_SCK"),
+ MTK_FUNCTION(4, "CONN_BGF_MCU_TRST_B"),
+ MTK_FUNCTION(5, "UDI_NTRST"),
+ MTK_FUNCTION(6, "ANT_SEL9"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "GBE_RXD2"),
+ MTK_FUNCTION(2, "DMIC1_DAT1"),
+ MTK_FUNCTION(3, "LVTS_SDI"),
+ MTK_FUNCTION(4, "CONN_WF_MCU_TMS"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TMS_VCORE"),
+ MTK_FUNCTION(6, "ANT_SEL10"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "GBE_RXD1"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(4, "CONN_WF_MCU_TCK"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TCK_VCORE"),
+ MTK_FUNCTION(6, "ANT_SEL11"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "GBE_RXD0"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(4, "CONN_WF_MCU_TDI"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDI_VCORE"),
+ MTK_FUNCTION(6, "ANT_SEL12"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "GBE_TXC"),
+ MTK_FUNCTION(2, "I2SIN1_MCK"),
+ MTK_FUNCTION(4, "CONN_WF_MCU_TDO"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDO_VCORE"),
+ MTK_FUNCTION(6, "ANT_SEL13"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "GBE_RXC"),
+ MTK_FUNCTION(2, "I2SIN1_BCK"),
+ MTK_FUNCTION(4, "CONN_WF_MCU_TRST_B"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TRSTN_VCORE"),
+ MTK_FUNCTION(6, "ANT_SEL14"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "GBE_RXDV"),
+ MTK_FUNCTION(2, "I2SIN1_LRCK"),
+ MTK_FUNCTION(4, "CONN_BGF_MCU_AICE_TMSC"),
+ MTK_FUNCTION(5, "IPU_JTAG_TMS"),
+ MTK_FUNCTION(6, "ANT_SEL15"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "GBE_TXEN"),
+ MTK_FUNCTION(2, "I2SIN1_DI"),
+ MTK_FUNCTION(4, "CONN_BGF_MCU_AICE_TCKC"),
+ MTK_FUNCTION(5, "IPU_JTAG_TCK"),
+ MTK_FUNCTION(6, "ANT_SEL16"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "GBE_MDC"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "mbistreaden_trigger"),
+ MTK_FUNCTION(4, "CONN_BGF_UART0_TXD"),
+ MTK_FUNCTION(5, "IPU_JTAG_TDI"),
+ MTK_FUNCTION(6, "ANT_SEL17"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "GBE_MDIO"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "mbistwriteen_trigger"),
+ MTK_FUNCTION(4, "CONN_BGF_UART0_RXD"),
+ MTK_FUNCTION(5, "IPU_JTAG_TDO"),
+ MTK_FUNCTION(6, "ANT_SEL18"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "GBE_TXER"),
+ MTK_FUNCTION(2, "GBE_AUX_PPS2"),
+ MTK_FUNCTION(4, "CONN_BT_TXD"),
+ MTK_FUNCTION(5, "IPU_JTAG_TRST"),
+ MTK_FUNCTION(6, "ANT_SEL19"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "GBE_RXER"),
+ MTK_FUNCTION(2, "GBE_AUX_PPS3"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "CONN_WF_MCU_AICE_TMSC"),
+ MTK_FUNCTION(5, "APU_JTAG_TMS"),
+ MTK_FUNCTION(6, "ANT_SEL20"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "GBE_COL"),
+ MTK_FUNCTION(2, "I2SOUT1_MCK"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(4, "CONN_WF_MCU_AICE_TCKC"),
+ MTK_FUNCTION(5, "APU_JTAG_TCK"),
+ MTK_FUNCTION(6, "ANT_SEL21"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "GBE_INTR"),
+ MTK_FUNCTION(2, "I2SOUT1_BCK"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "CONN_WIFI_TXD"),
+ MTK_FUNCTION(5, "APU_JTAG_TDI"),
+ MTK_FUNCTION(6, "PWM_0"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "GBE_AUX_PPS0"),
+ MTK_FUNCTION(2, "I2SOUT1_LRCK"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(4, "DP_TX_HPD"),
+ MTK_FUNCTION(5, "APU_JTAG_TDO"),
+ MTK_FUNCTION(6, "PWM_1"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "GBE_AUX_PPS1"),
+ MTK_FUNCTION(2, "I2SOUT1_DO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "EDP_TX_HPD"),
+ MTK_FUNCTION(5, "APU_JTAG_TRST"),
+ MTK_FUNCTION(6, "PWM_2"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "CONN_TOP_CLK")
+ ),
+
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "CONN_TOP_DATA")
+ ),
+
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "CONN_BT_CLK")
+ ),
+
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "CONN_BT_DATA")
+ ),
+
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "CONN_HRST_B")
+ ),
+
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "CONN_WB_PTA")
+ ),
+
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL0")
+ ),
+
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL1")
+ ),
+
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL2")
+ ),
+
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL3")
+ ),
+
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL4")
+ ),
+
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "SPINOR_CK"),
+ MTK_FUNCTION(2, "DMIC0_CLK"),
+ MTK_FUNCTION(3, "DP_TX_HPD"),
+ MTK_FUNCTION(4, "PWM_0"),
+ MTK_FUNCTION(5, "CONN_BPI_BUS17_ANT0"),
+ MTK_FUNCTION(6, "LVTS_FOUT"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "SPINOR_CS"),
+ MTK_FUNCTION(2, "DMIC0_DAT0"),
+ MTK_FUNCTION(3, "EDP_TX_HPD"),
+ MTK_FUNCTION(4, "PWM_1"),
+ MTK_FUNCTION(5, "CONN_BPI_BUS18_ANT1"),
+ MTK_FUNCTION(6, "LVTS_SDO"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "SPINOR_IO0"),
+ MTK_FUNCTION(2, "DMIC0_DAT1"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(4, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(5, "CONN_BPI_BUS19_ANT2"),
+ MTK_FUNCTION(6, "LVTS_26M"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "SPINOR_IO1"),
+ MTK_FUNCTION(2, "DMIC1_CLK"),
+ MTK_FUNCTION(3, "UCTS2"),
+ MTK_FUNCTION(4, "USB_DRVVBUS_2P"),
+ MTK_FUNCTION(5, "CONN_BPI_BUS20_ANT3"),
+ MTK_FUNCTION(6, "LVTS_SCF"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "SPINOR_IO2"),
+ MTK_FUNCTION(2, "DMIC1_DAT0"),
+ MTK_FUNCTION(3, "URTS2"),
+ MTK_FUNCTION(4, "USB_DRVVBUS_3P"),
+ MTK_FUNCTION(5, "CONN_BPI_BUS21_ANT4"),
+ MTK_FUNCTION(6, "LVTS_SCK"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "SPINOR_IO3"),
+ MTK_FUNCTION(2, "DMIC1_DAT1"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(4, "USB_DRVVBUS_4P"),
+ MTK_FUNCTION(5, "DISP_PWM1"),
+ MTK_FUNCTION(6, "LVTS_SDI"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "MSDC0_DAT7")
+ ),
+
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "MSDC0_DAT6")
+ ),
+
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "MSDC0_DAT5")
+ ),
+
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "MSDC0_DAT4")
+ ),
+
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "MSDC0_RSTB")
+ ),
+
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "MSDC0_CMD")
+ ),
+
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "MSDC0_CLK")
+ ),
+
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(1, "MSDC0_DAT3")
+ ),
+
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(1, "MSDC0_DAT2")
+ ),
+
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO165"),
+ MTK_FUNCTION(1, "MSDC0_DAT1")
+ ),
+
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO166"),
+ MTK_FUNCTION(1, "MSDC0_DAT0")
+ ),
+
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO167"),
+ MTK_FUNCTION(1, "MSDC0_DSL")
+ ),
+
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, 168),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO168"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "CONN_WF_MCU_AICE_TMSC"),
+ MTK_FUNCTION(3, "UCTS1"),
+ MTK_FUNCTION(4, "UDI_TMS"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TMS_VCORE"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "CONN_BGF_MCU_TMS")
+ ),
+
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, 169),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "CONN_WF_MCU_AICE_TCKC"),
+ MTK_FUNCTION(3, "URTS1"),
+ MTK_FUNCTION(4, "UDI_TCK"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TCK_VCORE"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "CONN_BGF_MCU_TCK")
+ ),
+
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "SPIM5_B_CSB"),
+ MTK_FUNCTION(3, "UCTS2"),
+ MTK_FUNCTION(4, "UDI_TDI"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDI_VCORE"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "CONN_BGF_MCU_TDI")
+ ),
+
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "SPIM5_B_CLK"),
+ MTK_FUNCTION(3, "URTS2"),
+ MTK_FUNCTION(4, "UDI_TDO"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDO_VCORE"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "CONN_BGF_MCU_TDO")
+ ),
+
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "SPIM5_B_MO"),
+ MTK_FUNCTION(3, "UCTS3"),
+ MTK_FUNCTION(4, "UDI_NTRST"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TRSTN_VCORE"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "CONN_BGF_MCU_TRST_B")
+ ),
+
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "SPIM5_B_MI"),
+ MTK_FUNCTION(3, "URTS3"),
+ MTK_FUNCTION(4, "CLKM0"),
+ MTK_FUNCTION(5, "PWM_2")
+ ),
+
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(2, "CONN_BGF_MCU_AICE_TMSC"),
+ MTK_FUNCTION(3, "UTXD1"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TMS_VLP"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TMS_VLP")
+ ),
+
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(2, "CONN_BGF_MCU_AICE_TCKC"),
+ MTK_FUNCTION(3, "URXD1"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TCK_VLP"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TCK_VLP")
+ ),
+
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(0, 176),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO176"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(2, "SRCLKENAI0"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDI_VLP"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TDI_VLP")
+ ),
+
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(0, 177),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO177"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(2, "SRCLKENAI1"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TDO"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDO_VLP"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TDO_VLP")
+ ),
+
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 178),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO178"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(3, "UTXD3"),
+ MTK_FUNCTION(4, "VADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TRSTN_VLP"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TRSTN_VLP")
+ ),
+
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 179),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO179"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(3, "URXD3"),
+ MTK_FUNCTION(4, "CLKM1"),
+ MTK_FUNCTION(5, "PWM_vlp"),
+ MTK_FUNCTION(7, "TP_GPIO7_AO")
+ ),
+
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 180),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO180"),
+ MTK_FUNCTION(1, "SPMI_P_SCL")
+ ),
+
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 181),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO181"),
+ MTK_FUNCTION(1, "SPMI_P_SDA")
+ ),
+
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO182"),
+ MTK_FUNCTION(1, "DDR_PAD_RRESETB")
+ ),
+
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 182),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 183),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 184),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 185),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 186),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(0, 187),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(0, 188),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 190, "GPIO190",
+ MTK_EINT_FUNCTION(0, 189),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 191, "GPIO191",
+ MTK_EINT_FUNCTION(0, 190),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 192, "GPIO192",
+ MTK_EINT_FUNCTION(0, 191),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 193, "GPIO193",
+ MTK_EINT_FUNCTION(0, 192),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 194, "GPIO194",
+ MTK_EINT_FUNCTION(0, 193),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 195, "GPIO195",
+ MTK_EINT_FUNCTION(0, 194),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 196, "GPIO196",
+ MTK_EINT_FUNCTION(0, 195),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 197, "GPIO197",
+ MTK_EINT_FUNCTION(0, 196),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 198, "GPIO198",
+ MTK_EINT_FUNCTION(0, 197),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 199, "GPIO199",
+ MTK_EINT_FUNCTION(0, 198),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 200, "GPIO200",
+ MTK_EINT_FUNCTION(0, 199),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 201, "GPIO201",
+ MTK_EINT_FUNCTION(0, 200),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 202, "GPIO202",
+ MTK_EINT_FUNCTION(0, 201),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 203, "GPIO203",
+ MTK_EINT_FUNCTION(0, 202),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 204, "GPIO204",
+ MTK_EINT_FUNCTION(0, 203),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 205, "GPIO205",
+ MTK_EINT_FUNCTION(0, 204),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 206, "GPIO206",
+ MTK_EINT_FUNCTION(0, 205),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 207, "GPIO207",
+ MTK_EINT_FUNCTION(0, 206),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 208, "GPIO208",
+ MTK_EINT_FUNCTION(0, 207),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 209, "GPIO209",
+ MTK_EINT_FUNCTION(0, 208),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+ MTK_PIN(
+ 210, "GPIO210",
+ MTK_EINT_FUNCTION(0, 209),
+ DRV_FIXED,
+ MTK_FUNCTION(0, NULL)
+ ),
+
+};
+
+static struct mtk_eint_pin eint_pins_mt8189[] = {
+ MTK_EINT_PIN(0, 0, 0, 1),
+ MTK_EINT_PIN(1, 0, 1, 1),
+ MTK_EINT_PIN(2, 0, 2, 1),
+ MTK_EINT_PIN(3, 0, 3, 1),
+ MTK_EINT_PIN(4, 0, 4, 1),
+ MTK_EINT_PIN(5, 0, 5, 1),
+ MTK_EINT_PIN(6, 0, 6, 1),
+ MTK_EINT_PIN(7, 0, 7, 1),
+ MTK_EINT_PIN(8, 0, 8, 1),
+ MTK_EINT_PIN(9, 0, 9, 1),
+ MTK_EINT_PIN(10, 0, 10, 1),
+ MTK_EINT_PIN(11, 0, 11, 1),
+ MTK_EINT_PIN(12, 1, 0, 1),
+ MTK_EINT_PIN(13, 1, 1, 1),
+ MTK_EINT_PIN(14, 1, 2, 1),
+ MTK_EINT_PIN(15, 1, 3, 1),
+ MTK_EINT_PIN(16, 1, 4, 1),
+ MTK_EINT_PIN(17, 1, 5, 1),
+ MTK_EINT_PIN(18, 0, 12, 1),
+ MTK_EINT_PIN(19, 0, 13, 1),
+ MTK_EINT_PIN(20, 0, 14, 1),
+ MTK_EINT_PIN(21, 0, 15, 1),
+ MTK_EINT_PIN(22, 0, 16, 1),
+ MTK_EINT_PIN(23, 0, 17, 1),
+ MTK_EINT_PIN(24, 0, 18, 1),
+ MTK_EINT_PIN(25, 2, 0, 1),
+ MTK_EINT_PIN(26, 2, 1, 1),
+ MTK_EINT_PIN(27, 1, 6, 1),
+ MTK_EINT_PIN(28, 1, 7, 1),
+ MTK_EINT_PIN(29, 2, 2, 1),
+ MTK_EINT_PIN(30, 1, 8, 1),
+ MTK_EINT_PIN(31, 1, 9, 1),
+ MTK_EINT_PIN(32, 1, 10, 1),
+ MTK_EINT_PIN(33, 1, 11, 1),
+ MTK_EINT_PIN(34, 1, 12, 1),
+ MTK_EINT_PIN(35, 1, 13, 1),
+ MTK_EINT_PIN(36, 1, 14, 1),
+ MTK_EINT_PIN(37, 1, 15, 1),
+ MTK_EINT_PIN(38, 1, 16, 1),
+ MTK_EINT_PIN(39, 1, 17, 1),
+ MTK_EINT_PIN(40, 1, 18, 1),
+ MTK_EINT_PIN(41, 1, 19, 1),
+ MTK_EINT_PIN(42, 1, 20, 1),
+ MTK_EINT_PIN(43, 1, 21, 1),
+ MTK_EINT_PIN(44, 0, 19, 1),
+ MTK_EINT_PIN(45, 0, 20, 1),
+ MTK_EINT_PIN(46, 0, 21, 1),
+ MTK_EINT_PIN(47, 0, 22, 1),
+ MTK_EINT_PIN(48, 2, 3, 1),
+ MTK_EINT_PIN(49, 2, 4, 1),
+ MTK_EINT_PIN(50, 2, 5, 1),
+ MTK_EINT_PIN(51, 0, 23, 1),
+ MTK_EINT_PIN(52, 0, 24, 1),
+ MTK_EINT_PIN(53, 0, 25, 1),
+ MTK_EINT_PIN(54, 0, 26, 1),
+ MTK_EINT_PIN(55, 2, 6, 1),
+ MTK_EINT_PIN(56, 2, 7, 1),
+ MTK_EINT_PIN(57, 1, 22, 1),
+ MTK_EINT_PIN(58, 1, 23, 1),
+ MTK_EINT_PIN(59, 1, 24, 1),
+ MTK_EINT_PIN(60, 1, 25, 1),
+ MTK_EINT_PIN(61, 1, 26, 1),
+ MTK_EINT_PIN(62, 1, 27, 1),
+ MTK_EINT_PIN(63, 1, 28, 1),
+ MTK_EINT_PIN(64, 1, 29, 1),
+ MTK_EINT_PIN(65, 0, 27, 1),
+ MTK_EINT_PIN(66, 0, 28, 1),
+ MTK_EINT_PIN(67, 0, 29, 1),
+ MTK_EINT_PIN(68, 0, 30, 1),
+ MTK_EINT_PIN(69, 1, 30, 1),
+ MTK_EINT_PIN(70, 1, 31, 1),
+ MTK_EINT_PIN(71, 1, 32, 1),
+ MTK_EINT_PIN(72, 1, 33, 1),
+ MTK_EINT_PIN(73, 1, 34, 1),
+ MTK_EINT_PIN(74, 1, 35, 1),
+ MTK_EINT_PIN(75, 1, 36, 1),
+ MTK_EINT_PIN(76, 1, 37, 1),
+ MTK_EINT_PIN(77, 0, 31, 1),
+ MTK_EINT_PIN(78, 0, 32, 1),
+ MTK_EINT_PIN(79, 0, 33, 1),
+ MTK_EINT_PIN(80, 0, 34, 1),
+ MTK_EINT_PIN(81, 1, 38, 1),
+ MTK_EINT_PIN(82, 1, 39, 1),
+ MTK_EINT_PIN(83, 1, 40, 1),
+ MTK_EINT_PIN(84, 0, 35, 1),
+ MTK_EINT_PIN(85, 0, 36, 1),
+ MTK_EINT_PIN(86, 0, 37, 1),
+ MTK_EINT_PIN(87, 0, 38, 1),
+ MTK_EINT_PIN(88, 2, 8, 1),
+ MTK_EINT_PIN(89, 2, 9, 1),
+ MTK_EINT_PIN(90, 2, 10, 1),
+ MTK_EINT_PIN(91, 2, 11, 1),
+ MTK_EINT_PIN(92, 2, 12, 1),
+ MTK_EINT_PIN(93, 2, 13, 1),
+ MTK_EINT_PIN(94, 2, 14, 1),
+ MTK_EINT_PIN(95, 2, 15, 1),
+ MTK_EINT_PIN(96, 2, 16, 1),
+ MTK_EINT_PIN(97, 2, 17, 1),
+ MTK_EINT_PIN(98, 2, 18, 1),
+ MTK_EINT_PIN(99, 2, 19, 1),
+ MTK_EINT_PIN(100, 2, 20, 1),
+ MTK_EINT_PIN(101, 2, 21, 1),
+ MTK_EINT_PIN(102, 2, 22, 1),
+ MTK_EINT_PIN(103, 0, 39, 1),
+ MTK_EINT_PIN(104, 0, 40, 1),
+ MTK_EINT_PIN(105, 0, 41, 1),
+ MTK_EINT_PIN(106, 0, 42, 1),
+ MTK_EINT_PIN(107, 0, 43, 1),
+ MTK_EINT_PIN(108, 0, 44, 1),
+ MTK_EINT_PIN(109, 0, 45, 1),
+ MTK_EINT_PIN(110, 0, 46, 1),
+ MTK_EINT_PIN(111, 0, 47, 1),
+ MTK_EINT_PIN(112, 0, 48, 0),
+ MTK_EINT_PIN(113, 0, 49, 1),
+ MTK_EINT_PIN(114, 0, 50, 0),
+ MTK_EINT_PIN(115, 1, 41, 1),
+ MTK_EINT_PIN(116, 1, 42, 1),
+ MTK_EINT_PIN(117, 1, 43, 1),
+ MTK_EINT_PIN(118, 1, 44, 1),
+ MTK_EINT_PIN(119, 1, 45, 1),
+ MTK_EINT_PIN(120, 1, 46, 1),
+ MTK_EINT_PIN(121, 1, 47, 1),
+ MTK_EINT_PIN(122, 1, 48, 1),
+ MTK_EINT_PIN(123, 1, 49, 1),
+ MTK_EINT_PIN(124, 1, 50, 1),
+ MTK_EINT_PIN(125, 1, 51, 1),
+ MTK_EINT_PIN(126, 1, 52, 1),
+ MTK_EINT_PIN(127, 1, 53, 1),
+ MTK_EINT_PIN(128, 1, 54, 1),
+ MTK_EINT_PIN(129, 1, 55, 1),
+ MTK_EINT_PIN(130, 1, 56, 1),
+ MTK_EINT_PIN(131, 1, 57, 1),
+ MTK_EINT_PIN(132, 1, 58, 1),
+ MTK_EINT_PIN(133, 1, 59, 1),
+ MTK_EINT_PIN(134, 1, 60, 1),
+ MTK_EINT_PIN(135, 1, 61, 1),
+ MTK_EINT_PIN(136, 1, 62, 1),
+ MTK_EINT_PIN(137, 1, 63, 1),
+ MTK_EINT_PIN(138, 1, 64, 1),
+ MTK_EINT_PIN(139, 1, 65, 1),
+ MTK_EINT_PIN(140, 1, 66, 1),
+ MTK_EINT_PIN(141, 1, 67, 1),
+ MTK_EINT_PIN(142, 1, 68, 1),
+ MTK_EINT_PIN(143, 1, 69, 1),
+ MTK_EINT_PIN(144, 1, 70, 1),
+ MTK_EINT_PIN(145, 1, 71, 1),
+ MTK_EINT_PIN(146, 1, 72, 1),
+ MTK_EINT_PIN(147, 1, 73, 1),
+ MTK_EINT_PIN(148, 1, 74, 1),
+ MTK_EINT_PIN(149, 1, 75, 1),
+ MTK_EINT_PIN(150, 1, 76, 1),
+ MTK_EINT_PIN(151, 1, 77, 1),
+ MTK_EINT_PIN(152, 1, 78, 1),
+ MTK_EINT_PIN(153, 1, 79, 1),
+ MTK_EINT_PIN(154, 1, 80, 1),
+ MTK_EINT_PIN(155, 1, 81, 1),
+ MTK_EINT_PIN(156, 2, 23, 1),
+ MTK_EINT_PIN(157, 2, 24, 1),
+ MTK_EINT_PIN(158, 2, 25, 1),
+ MTK_EINT_PIN(159, 4, 0, 1),
+ MTK_EINT_PIN(160, 2, 26, 1),
+ MTK_EINT_PIN(161, 2, 27, 1),
+ MTK_EINT_PIN(162, 2, 28, 1),
+ MTK_EINT_PIN(163, 4, 1, 1),
+ MTK_EINT_PIN(164, 2, 29, 1),
+ MTK_EINT_PIN(165, 2, 30, 1),
+ MTK_EINT_PIN(166, 4, 2, 1),
+ MTK_EINT_PIN(167, 2, 31, 0),
+ MTK_EINT_PIN(168, 1, 82, 1),
+ MTK_EINT_PIN(169, 1, 83, 1),
+ MTK_EINT_PIN(170, 1, 84, 1),
+ MTK_EINT_PIN(171, 1, 85, 0),
+ MTK_EINT_PIN(172, 1, 86, 1),
+ MTK_EINT_PIN(173, 1, 87, 0),
+ MTK_EINT_PIN(174, 4, 3, 1),
+ MTK_EINT_PIN(175, 4, 4, 1),
+ MTK_EINT_PIN(176, 4, 5, 1),
+ MTK_EINT_PIN(177, 4, 6, 1),
+ MTK_EINT_PIN(178, 4, 7, 1),
+ MTK_EINT_PIN(179, 4, 8, 1),
+ MTK_EINT_PIN(180, 2, 32, 1),
+ MTK_EINT_PIN(181, 2, 33, 0),
+ MTK_EINT_PIN(182, 3, 0, 1),
+ MTK_EINT_PIN(183, 3, 1, 1),
+ MTK_EINT_PIN(184, 3, 2, 1),
+ MTK_EINT_PIN(185, 3, 3, 1),
+ MTK_EINT_PIN(186, 3, 4, 1),
+ MTK_EINT_PIN(187, 3, 5, 1),
+ MTK_EINT_PIN(188, 3, 6, 1),
+ MTK_EINT_PIN(189, 3, 7, 1),
+ MTK_EINT_PIN(190, 3, 8, 1),
+ MTK_EINT_PIN(191, 3, 9, 1),
+ MTK_EINT_PIN(192, 3, 10, 1),
+ MTK_EINT_PIN(193, 3, 11, 1),
+ MTK_EINT_PIN(194, 3, 12, 1),
+ MTK_EINT_PIN(195, 3, 13, 1),
+ MTK_EINT_PIN(196, 3, 14, 1),
+ MTK_EINT_PIN(197, 3, 15, 1),
+ MTK_EINT_PIN(198, 3, 16, 1),
+ MTK_EINT_PIN(199, 3, 17, 1),
+ MTK_EINT_PIN(200, 3, 18, 1),
+ MTK_EINT_PIN(201, 3, 19, 1),
+ MTK_EINT_PIN(202, 3, 20, 1),
+ MTK_EINT_PIN(203, 3, 21, 1),
+ MTK_EINT_PIN(204, 3, 22, 1),
+ MTK_EINT_PIN(205, 3, 23, 1),
+ MTK_EINT_PIN(206, 3, 24, 1),
+ MTK_EINT_PIN(207, 3, 25, 1),
+ MTK_EINT_PIN(208, 3, 26, 1),
+ MTK_EINT_PIN(209, 3, 27, 1)
+};
+
+#endif /* __PINCTRL_MTK_MT8189_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 89ef4e530fcc..3e714554789d 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -949,7 +949,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->direction_input = mtk_gpio_direction_input;
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
- chip->set_rv = mtk_gpio_set;
+ chip->set = mtk_gpio_set;
chip->to_irq = mtk_gpio_to_irq;
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
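
Several hunks in this series replace the transitional .set_rv hook with .set and convert remaining void GPIO value setters to return an int, all converging on the same callback shape. A minimal sketch of that pattern, using hypothetical mydrv_* names and an assumed regmap-backed output register rather than any driver touched here:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define MYDRV_OUT_REG	0x04	/* hypothetical output-level register */

struct mydrv {
	struct regmap *map;
};

/* The value setter now returns a status instead of silently dropping it. */
static int mydrv_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct mydrv *priv = gpiochip_get_data(gc);

	return regmap_update_bits(priv->map, MYDRV_OUT_REG, BIT(offset),
				  value ? BIT(offset) : 0);
}

Probe code in such a driver then assigns gc->set = mydrv_gpio_set; where it previously used gc->set_rv, which is the one-line change repeated throughout the drivers in this series.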
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
index 385cc619df13..6132710aff68 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
@@ -50,15 +50,23 @@ struct aml_pio_control {
u32 bit_offset[AML_NUM_REG];
};
-struct aml_reg_bit {
- u32 bank_id;
- u32 reg_offs[AML_NUM_REG];
- u32 bit_offs[AML_NUM_REG];
+/*
+ * Some pins of a (subordinate) bank have their mux configured through the
+ * mux registers of another (main) bank.
+ * m_bank_id: main bank whose mux registers are used; its pin_id starts at 0,
+ *            but the corresponding register bits do not start at bit 0
+ * m_bit_offs: bit offset into the main bank mux register
+ * sid: start pin_id of the subordinate bank
+ * eid: end pin_id of the subordinate bank
+ */
+struct multi_mux {
+ unsigned int m_bank_id;
+ unsigned int m_bit_offs;
+ unsigned int sid;
+ unsigned int eid;
};
struct aml_pctl_data {
unsigned int number;
- struct aml_reg_bit rb_offs[];
+ const struct multi_mux *p_mux;
};
struct aml_pmx_func {
@@ -78,10 +86,12 @@ struct aml_gpio_bank {
struct gpio_chip gpio_chip;
struct aml_pio_control pc;
u32 bank_id;
+ u32 mux_bit_offs;
unsigned int pin_base;
struct regmap *reg_mux;
struct regmap *reg_gpio;
struct regmap *reg_ds;
+ const struct multi_mux *p_mux;
};
struct aml_pinctrl {
@@ -113,13 +123,46 @@ static const char *aml_bank_name[31] = {
"GPIOCC", "TEST_N", "ANALOG"
};
+static const struct multi_mux multi_mux_s7[] = {
+ {
+ .m_bank_id = AMLOGIC_GPIO_CC,
+ .m_bit_offs = 24,
+ .sid = (AMLOGIC_GPIO_X << 8) + 16,
+ .eid = (AMLOGIC_GPIO_X << 8) + 19,
+ },
+};
+
+static const struct aml_pctl_data s7_priv_data = {
+ .number = ARRAY_SIZE(multi_mux_s7),
+ .p_mux = multi_mux_s7,
+};
+
+static const struct multi_mux multi_mux_s6[] = {
+ {
+ .m_bank_id = AMLOGIC_GPIO_CC,
+ .m_bit_offs = 24,
+ .sid = (AMLOGIC_GPIO_X << 8) + 16,
+ .eid = (AMLOGIC_GPIO_X << 8) + 19,
+ }, {
+ .m_bank_id = AMLOGIC_GPIO_F,
+ .m_bit_offs = 4,
+ .sid = (AMLOGIC_GPIO_D << 8) + 6,
+ .eid = (AMLOGIC_GPIO_D << 8) + 6,
+ },
+};
+
+static const struct aml_pctl_data s6_priv_data = {
+ .number = ARRAY_SIZE(multi_mux_s6),
+ .p_mux = multi_mux_s6,
+};
+
static int aml_pmx_calc_reg_and_offset(struct pinctrl_gpio_range *range,
unsigned int pin, unsigned int *reg,
unsigned int *offset)
{
unsigned int shift;
- shift = (pin - range->pin_base) << 2;
+ shift = ((pin - range->pin_base) << 2) + *offset;
*reg = (shift / 32) * 4;
*offset = shift % 32;
@@ -131,9 +174,36 @@ static int aml_pctl_set_function(struct aml_pinctrl *info,
int pin_id, int func)
{
struct aml_gpio_bank *bank = gpio_chip_to_bank(range->gc);
+ unsigned int shift;
int reg;
- int offset;
+ int i;
+ unsigned int offset = bank->mux_bit_offs;
+ const struct multi_mux *p_mux;
+
+	/* special case: mux lives in the main bank's registers */
+ if (bank->p_mux) {
+ p_mux = bank->p_mux;
+ if (pin_id >= p_mux->sid && pin_id <= p_mux->eid) {
+ bank = NULL;
+ for (i = 0; i < info->nbanks; i++) {
+ if (info->banks[i].bank_id == p_mux->m_bank_id) {
+ bank = &info->banks[i];
+ break;
+ }
+ }
+ if (!bank || !bank->reg_mux)
+ return -EINVAL;
+
+ shift = (pin_id - p_mux->sid) << 2;
+ reg = (shift / 32) * 4;
+ offset = shift % 32;
+ return regmap_update_bits(bank->reg_mux, reg,
+ 0xf << offset, (func & 0xf) << offset);
+ }
+ }
+
+ /* normal mux reg set */
if (!bank->reg_mux)
return 0;
@@ -818,7 +888,7 @@ static const struct gpio_chip aml_gpio_template = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.set_config = gpiochip_generic_config,
- .set_rv = aml_gpio_set,
+ .set = aml_gpio_set,
.get = aml_gpio_get,
.direction_input = aml_gpio_direction_input,
.direction_output = aml_gpio_direction_output,
@@ -830,29 +900,27 @@ static void init_bank_register_bit(struct aml_pinctrl *info,
struct aml_gpio_bank *bank)
{
const struct aml_pctl_data *data = info->data;
- const struct aml_reg_bit *aml_rb;
- bool def_offs = true;
+ const struct multi_mux *p_mux;
int i;
+ for (i = 0; i < AML_NUM_REG; i++) {
+ bank->pc.reg_offset[i] = aml_def_regoffs[i];
+ bank->pc.bit_offset[i] = 0;
+ }
+
+ bank->mux_bit_offs = 0;
+
if (data) {
for (i = 0; i < data->number; i++) {
- aml_rb = &data->rb_offs[i];
- if (bank->bank_id == aml_rb->bank_id) {
- def_offs = false;
+ p_mux = &data->p_mux[i];
+ if (bank->bank_id == p_mux->m_bank_id) {
+ bank->mux_bit_offs = p_mux->m_bit_offs;
+ break;
+ }
+ if (p_mux->sid >> 8 == bank->bank_id) {
+ bank->p_mux = p_mux;
break;
}
- }
- }
-
- if (def_offs) {
- for (i = 0; i < AML_NUM_REG; i++) {
- bank->pc.reg_offset[i] = aml_def_regoffs[i];
- bank->pc.bit_offset[i] = 0;
- }
- } else {
- for (i = 0; i < AML_NUM_REG; i++) {
- bank->pc.reg_offset[i] = aml_rb->reg_offs[i];
- bank->pc.bit_offset[i] = aml_rb->bit_offs[i];
}
}
}
@@ -1021,9 +1089,11 @@ static int aml_pctl_probe(struct platform_device *pdev)
static const struct of_device_id aml_pctl_of_match[] = {
{ .compatible = "amlogic,pinctrl-a4", },
+ { .compatible = "amlogic,pinctrl-s7", .data = &s7_priv_data, },
+ { .compatible = "amlogic,pinctrl-s6", .data = &s6_priv_data, },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, aml_pctl_dt_match);
+MODULE_DEVICE_TABLE(of, aml_pctl_of_match);
static struct platform_driver aml_pctl_driver = {
.driver = {
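
The multi_mux handling added above packs one 4-bit function-select field per pin into 32-bit mux registers, so eight pins share a register and the register/bit position follows from simple shift arithmetic. A stand-alone sketch of that index math, written as a hypothetical userspace helper mirroring aml_pctl_set_function() rather than code from the patch:

#include <stdio.h>

static void mux_reg_and_offset(unsigned int pin_index,
			       unsigned int *reg, unsigned int *offset)
{
	unsigned int shift = pin_index << 2;	/* 4 mux bits per pin */

	*reg = (shift / 32) * 4;	/* byte offset of the 32-bit mux register */
	*offset = shift % 32;		/* bit position inside that register */
}

int main(void)
{
	unsigned int reg, offset;

	mux_reg_and_offset(10, &reg, &offset);
	/* the 11th pin lands in the second register, bits 11:8 */
	printf("reg=0x%x offset=%u\n", reg, offset);	/* reg=0x4 offset=8 */
	return 0;
}

For a subordinate-bank pin the driver uses (pin_id - sid) as the index and writes the main bank's reg_mux; for an ordinary pin it uses (pin - range->pin_base) and adds the bank's mux_bit_offs to the shift before splitting it into register and offset.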
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index e2788bfc5874..8b9130c6e170 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -270,15 +270,21 @@ static const unsigned int pwm_a_pins[] = { GPIOX_6 };
/* pwm_b */
static const unsigned int pwm_b_x7_pins[] = { GPIOX_7 };
static const unsigned int pwm_b_x19_pins[] = { GPIOX_19 };
+static const unsigned int pwm_b_z0_pins[] = { GPIOZ_0 };
+static const unsigned int pwm_b_z13_pins[] = { GPIOZ_13 };
+static const unsigned int pwm_b_h_pins[] = { GPIOH_7 };
/* pwm_c */
static const unsigned int pwm_c_c_pins[] = { GPIOC_4 };
static const unsigned int pwm_c_x5_pins[] = { GPIOX_5 };
static const unsigned int pwm_c_x8_pins[] = { GPIOX_8 };
+static const unsigned int pwm_c_z_pins[] = { GPIOZ_1 };
/* pwm_d */
static const unsigned int pwm_d_x3_pins[] = { GPIOX_3 };
static const unsigned int pwm_d_x6_pins[] = { GPIOX_6 };
+static const unsigned int pwm_d_z_pins[] = { GPIOZ_2 };
+static const unsigned int pwm_d_a_pins[] = { GPIOA_4 };
/* pwm_e */
static const unsigned int pwm_e_pins[] = { GPIOX_16 };
@@ -649,12 +655,22 @@ static const struct meson_pmx_group meson_g12a_periphs_groups[] = {
GROUP(pwm_a, 1),
GROUP(pwm_b_x7, 4),
GROUP(pwm_b_x19, 1),
+ GROUP(pwm_b_z0, 5),
+ GROUP(pwm_b_z13, 5),
+ GROUP(pwm_b_h, 5),
GROUP(pwm_c_x5, 4),
GROUP(pwm_c_x8, 5),
+ GROUP(pwm_c_c, 5),
+ GROUP(pwm_c_z, 5),
+ GROUP(pwm_d_z, 4),
+ GROUP(pwm_d_a, 3),
GROUP(pwm_d_x3, 4),
GROUP(pwm_d_x6, 4),
GROUP(pwm_e, 1),
+ GROUP(pwm_f_a, 3),
+ GROUP(pwm_f_h, 4),
GROUP(pwm_f_x, 1),
+ GROUP(pwm_f_z, 5),
GROUP(tsin_a_valid, 3),
GROUP(tsin_a_sop, 3),
GROUP(tsin_a_din0, 3),
@@ -1058,15 +1074,15 @@ static const char * const pwm_a_groups[] = {
};
static const char * const pwm_b_groups[] = {
- "pwm_b_x7", "pwm_b_x19",
+ "pwm_b_h", "pwm_b_x7", "pwm_b_x19", "pwm_b_z0", "pwm_b_z13"
};
static const char * const pwm_c_groups[] = {
- "pwm_c_c", "pwm_c_x5", "pwm_c_x8",
+ "pwm_c_c", "pwm_c_x5", "pwm_c_x8", "pwm_c_z",
};
static const char * const pwm_d_groups[] = {
- "pwm_d_x3", "pwm_d_x6",
+ "pwm_d_a", "pwm_d_x3", "pwm_d_x6", "pwm_d_z",
};
static const char * const pwm_e_groups[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index f5be61f2ede4..277e9c40490d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -616,7 +616,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.direction_input = meson_gpio_direction_input;
pc->chip.direction_output = meson_gpio_direction_output;
pc->chip.get = meson_gpio_get;
- pc->chip.set_rv = meson_gpio_set;
+ pc->chip.set = meson_gpio_set;
pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index a6b106984e12..881df5e08f61 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -518,7 +518,7 @@ static const struct pinmux_ops armada_37xx_pmx_ops = {
static const struct gpio_chip armada_37xx_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set_rv = armada_37xx_gpio_set,
+ .set = armada_37xx_gpio_set,
.get = armada_37xx_gpio_get,
.get_direction = armada_37xx_gpio_get_direction,
.direction_input = armada_37xx_gpio_direction_input,
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 2f55f83127cf..7b5f94d8cb23 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -536,7 +536,7 @@ static const struct gpio_chip abx500gpio_chip = {
.direction_input = abx500_gpio_direction_input,
.get = abx500_gpio_get,
.direction_output = abx500_gpio_direction_output,
- .set_rv = abx500_gpio_set,
+ .set = abx500_gpio_set,
.to_irq = abx500_gpio_to_irq,
.dbg_show = abx500_gpio_dbg_show,
};
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
index b51704bafd81..54652bfbe6ac 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
@@ -361,7 +361,7 @@ static int ma35_gpio_core_get(struct gpio_chip *gc, unsigned int gpio)
return !!(readl(reg_pin) & BIT(gpio));
}
-static void ma35_gpio_core_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int ma35_gpio_core_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct ma35_pin_bank *bank = gpiochip_get_data(gc);
void __iomem *reg_dout = bank->reg_base + MA35_GP_REG_DOUT;
@@ -373,6 +373,8 @@ static void ma35_gpio_core_set(struct gpio_chip *gc, unsigned int gpio, int val)
regval = readl(reg_dout) & ~BIT(gpio);
writel(regval, reg_dout);
+
+ return 0;
}
static int ma35_gpio_core_to_request(struct gpio_chip *gc, unsigned int gpio)
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index dfd32feb3428..b8872d8f5930 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -1817,7 +1817,7 @@ static const struct pinconf_ops npcm7xx_pinconf_ops = {
};
/* pinctrl_desc */
-static struct pinctrl_desc npcm7xx_pinctrl_desc = {
+static const struct pinctrl_desc npcm7xx_pinctrl_desc = {
.name = "npcm7xx-pinctrl",
.pins = npcm7xx_pins,
.npins = ARRAY_SIZE(npcm7xx_pins),
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
index be3db8ab406c..3c3b9d8d3681 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -2299,7 +2299,7 @@ static const struct pinconf_ops npcm8xx_pinconf_ops = {
};
/* pinctrl_desc */
-static struct pinctrl_desc npcm8xx_pinctrl_desc = {
+static const struct pinctrl_desc npcm8xx_pinctrl_desc = {
.name = "npcm8xx-pinctrl",
.pins = npcm8xx_pins,
.npins = ARRAY_SIZE(npcm8xx_pins),
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index 4264ca749175..8d8314ba0e4c 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -989,7 +989,7 @@ static const struct pinconf_ops wpcm450_pinconf_ops = {
.pin_config_set = wpcm450_config_set,
};
-static struct pinctrl_desc wpcm450_pinctrl_desc = {
+static const struct pinctrl_desc wpcm450_pinctrl_desc = {
.name = "wpcm450-pinctrl",
.pins = wpcm450_pins,
.npins = ARRAY_SIZE(wpcm450_pins),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index b3f0d02aeeb3..09a5425d54ba 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -872,7 +872,7 @@ static const struct pinconf_ops amd_pinconf_ops = {
static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
{
- struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ const struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
u32 pin_reg, mask;
int i;
@@ -901,7 +901,7 @@ static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
static void amd_gpio_check_pending(void)
{
struct amd_gpio *gpio_dev = pinctrl_dev;
- struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ const struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
int i;
if (!pm_debug_messages_on)
@@ -957,7 +957,7 @@ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
- struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ const struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
int i;
u32 wake_mask = is_suspend ? WAKE_SOURCE_SUSPEND : WAKE_SOURCE_HIBERNATE;
@@ -1012,7 +1012,7 @@ static int amd_gpio_hibernate(struct device *dev)
static int amd_gpio_resume(struct device *dev)
{
struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
- struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ const struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
int i;
@@ -1187,7 +1187,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->gc.direction_input = amd_gpio_direction_input;
gpio_dev->gc.direction_output = amd_gpio_direction_output;
gpio_dev->gc.get = amd_gpio_get_value;
- gpio_dev->gc.set_rv = amd_gpio_set_value;
+ gpio_dev->gc.set = amd_gpio_set_value;
gpio_dev->gc.set_config = amd_gpio_set_config;
gpio_dev->gc.dbg_show = amd_gpio_dbg_show;
diff --git a/drivers/pinctrl/pinctrl-amdisp.c b/drivers/pinctrl/pinctrl-amdisp.c
index 9256ed67bb20..efbf40c776ea 100644
--- a/drivers/pinctrl/pinctrl-amdisp.c
+++ b/drivers/pinctrl/pinctrl-amdisp.c
@@ -117,7 +117,7 @@ static int amdisp_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return !!(pin_reg & BIT(GPIO_CONTROL_PIN));
}
-static void amdisp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int amdisp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
unsigned long flags;
u32 pin_reg;
@@ -131,6 +131,8 @@ static void amdisp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
pin_reg &= ~BIT(GPIO_CONTROL_PIN);
writel(pin_reg, pctrl->gpiobase + gpio_offset[gpio]);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
}
static int amdisp_gpiochip_add(struct platform_device *pdev,
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index 0f551d67d482..a09daa72bfe4 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -217,11 +217,13 @@ static int apple_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(reg & REG_GPIOx_DATA);
}
-static void apple_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int apple_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
apple_gpio_set_reg(pctl, offset, REG_GPIOx_DATA, value ? REG_GPIOx_DATA : 0);
+
+ return 0;
}
static int apple_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index 717f9592b28b..af67057128ff 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -907,7 +907,7 @@ static const struct pinconf_ops artpec6_pconf_ops = {
.pin_config_group_set = artpec6_pconf_group_set,
};
-static struct pinctrl_desc artpec6_desc = {
+static const struct pinctrl_desc artpec6_desc = {
.name = "artpec6-pinctrl",
.owner = THIS_MODULE,
.pins = artpec6_pins,
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 0d8c75ce20ed..e713dea98aa8 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -422,6 +422,8 @@ static struct pinctrl_desc as3722_pinctrl_desc = {
.pmxops = &as3722_pinmux_ops,
.confops = &as3722_pinconf_ops,
.owner = THIS_MODULE,
+ .pins = as3722_pins_desc,
+ .npins = ARRAY_SIZE(as3722_pins_desc),
};
static int as3722_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -471,8 +473,8 @@ static int as3722_gpio_get(struct gpio_chip *chip, unsigned offset)
return (invert_enable) ? !val : val;
}
-static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int as3722_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct as3722_pctrl_info *as_pci = gpiochip_get_data(chip);
struct as3722 *as3722 = as_pci->as3722;
@@ -484,7 +486,7 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
if (ret < 0) {
dev_err(as_pci->dev,
"GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
- return;
+ return ret;
}
en_invert = !!(val & AS3722_GPIO_INV);
@@ -498,12 +500,19 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
if (ret < 0)
dev_err(as_pci->dev,
"GPIO_SIGNAL_OUT_REG update failed: %d\n", ret);
+
+ return ret;
}
static int as3722_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+ unsigned int offset, int value)
{
- as3722_gpio_set(chip, offset, value);
+ int ret;
+
+ ret = as3722_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
+
return pinctrl_gpio_direction_output(chip, offset);
}
@@ -550,8 +559,6 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
as_pci->pin_groups = as3722_pingroups;
as_pci->num_pin_groups = ARRAY_SIZE(as3722_pingroups);
as3722_pinctrl_desc.name = dev_name(&pdev->dev);
- as3722_pinctrl_desc.pins = as3722_pins_desc;
- as3722_pinctrl_desc.npins = ARRAY_SIZE(as3722_pins_desc);
as_pci->pctl = devm_pinctrl_register(&pdev->dev, &as3722_pinctrl_desc,
as_pci);
if (IS_ERR(as_pci->pctl)) {
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index ca8a54a43ff5..35ea3414cb96 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -442,8 +442,8 @@ static struct gpio_chip atmel_gpio_chip = {
.get = atmel_gpio_get,
.get_multiple = atmel_gpio_get_multiple,
.direction_output = atmel_gpio_direction_output,
- .set_rv = atmel_gpio_set,
- .set_multiple_rv = atmel_gpio_set_multiple,
+ .set = atmel_gpio_set,
+ .set_multiple = atmel_gpio_set_multiple,
.to_irq = atmel_gpio_to_irq,
.base = 0,
};
@@ -1212,9 +1212,9 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
dev_dbg(dev, "bank %i: irq=%d\n", i, ret);
}
- atmel_pioctrl->irq_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node),
- atmel_pioctrl->gpio_chip->ngpio,
- &irq_domain_simple_ops, NULL);
+ atmel_pioctrl->irq_domain = irq_domain_create_linear(dev_fwnode(dev),
+ atmel_pioctrl->gpio_chip->ngpio,
+ &irq_domain_simple_ops, NULL);
if (!atmel_pioctrl->irq_domain)
return dev_err_probe(dev, -ENODEV, "can't add the irq domain\n");
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 6c2727bd55bc..0a57ed51d4c9 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1801,8 +1801,8 @@ static const struct gpio_chip at91_gpio_template = {
.direction_input = at91_gpio_direction_input,
.get = at91_gpio_get,
.direction_output = at91_gpio_direction_output,
- .set_rv = at91_gpio_set,
- .set_multiple_rv = at91_gpio_set_multiple,
+ .set = at91_gpio_set,
+ .set_multiple = at91_gpio_set_multiple,
.dbg_show = at91_gpio_dbg_show,
.can_sleep = false,
.ngpio = MAX_NB_GPIO_PER_BANK,
diff --git a/drivers/pinctrl/pinctrl-aw9523.c b/drivers/pinctrl/pinctrl-aw9523.c
index 04afb344e9e5..890b83fddea3 100644
--- a/drivers/pinctrl/pinctrl-aw9523.c
+++ b/drivers/pinctrl/pinctrl-aw9523.c
@@ -625,14 +625,14 @@ out:
return ret;
}
-static void aw9523_gpio_set_multiple(struct gpio_chip *chip,
+static int aw9523_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *mask,
unsigned long *bits)
{
struct aw9523 *awi = gpiochip_get_data(chip);
u8 mask_lo, mask_hi, bits_lo, bits_hi;
unsigned int reg;
- int ret;
+ int ret = 0;
mask_lo = *mask;
mask_hi = *mask >> 8;
@@ -644,27 +644,33 @@ static void aw9523_gpio_set_multiple(struct gpio_chip *chip,
reg = AW9523_REG_OUT_STATE(AW9523_PINS_PER_PORT);
ret = regmap_write_bits(awi->regmap, reg, mask_hi, bits_hi);
if (ret)
- dev_warn(awi->dev, "Cannot write port1 out level\n");
+ goto out;
}
if (mask_lo) {
reg = AW9523_REG_OUT_STATE(0);
ret = regmap_write_bits(awi->regmap, reg, mask_lo, bits_lo);
if (ret)
- dev_warn(awi->dev, "Cannot write port0 out level\n");
+ goto out;
}
+
+out:
mutex_unlock(&awi->i2c_lock);
+ return ret;
}
-static void aw9523_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int aw9523_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct aw9523 *awi = gpiochip_get_data(chip);
u8 regbit = offset % AW9523_PINS_PER_PORT;
+ int ret;
mutex_lock(&awi->i2c_lock);
- regmap_update_bits(awi->regmap, AW9523_REG_OUT_STATE(offset),
- BIT(regbit), value ? BIT(regbit) : 0);
+ ret = regmap_update_bits(awi->regmap, AW9523_REG_OUT_STATE(offset),
+ BIT(regbit), value ? BIT(regbit) : 0);
mutex_unlock(&awi->i2c_lock);
+
+ return ret;
}
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index fff408b60c4a..2bd8487484a8 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -192,7 +192,7 @@ static int axp20x_gpio_get_direction(struct gpio_chip *chip,
static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
- return chip->set_rv(chip, offset, value);
+ return chip->set(chip, offset, value);
}
static int axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
@@ -463,7 +463,7 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
pctl->chip.owner = THIS_MODULE;
pctl->chip.get = axp20x_gpio_get;
pctl->chip.get_direction = axp20x_gpio_get_direction;
- pctl->chip.set_rv = axp20x_gpio_set;
+ pctl->chip.set = axp20x_gpio_set;
pctl->chip.direction_input = pinctrl_gpio_direction_input;
pctl->chip.direction_output = axp20x_gpio_output;
diff --git a/drivers/pinctrl/pinctrl-bm1880.c b/drivers/pinctrl/pinctrl-bm1880.c
index b0000fe5b31d..387798fb09be 100644
--- a/drivers/pinctrl/pinctrl-bm1880.c
+++ b/drivers/pinctrl/pinctrl-bm1880.c
@@ -1298,7 +1298,7 @@ static const struct pinmux_ops bm1880_pinmux_ops = {
.set_mux = bm1880_pinmux_set_mux,
};
-static struct pinctrl_desc bm1880_desc = {
+static const struct pinctrl_desc bm1880_desc = {
.name = "bm1880_pinctrl",
.pins = bm1880_pins,
.npins = ARRAY_SIZE(bm1880_pins),
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 8a2fd632bdd4..cf7f80497fde 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -939,10 +939,10 @@ static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip)
gc->direction_input = cy8c95x0_gpio_direction_input;
gc->direction_output = cy8c95x0_gpio_direction_output;
gc->get = cy8c95x0_gpio_get_value;
- gc->set_rv = cy8c95x0_gpio_set_value;
+ gc->set = cy8c95x0_gpio_set_value;
gc->get_direction = cy8c95x0_gpio_get_direction;
gc->get_multiple = cy8c95x0_gpio_get_multiple;
- gc->set_multiple_rv = cy8c95x0_gpio_set_multiple;
+ gc->set_multiple = cy8c95x0_gpio_set_multiple;
gc->set_config = gpiochip_generic_config;
gc->can_sleep = true;
gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c
index 6f44a13b90ce..53298cbcc5cf 100644
--- a/drivers/pinctrl/pinctrl-da9062.c
+++ b/drivers/pinctrl/pinctrl-da9062.c
@@ -102,14 +102,14 @@ static int da9062_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(val & BIT(offset));
}
-static void da9062_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int da9062_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct da9062_pctl *pctl = gpiochip_get_data(gc);
struct regmap *regmap = pctl->da9062->regmap;
- regmap_update_bits(regmap, DA9062AA_GPIO_MODE0_4, BIT(offset),
- value << offset);
+ return regmap_update_bits(regmap, DA9062AA_GPIO_MODE0_4, BIT(offset),
+ value << offset);
}
static int da9062_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -172,9 +172,7 @@ static int da9062_gpio_direction_output(struct gpio_chip *gc,
if (ret)
return ret;
- da9062_gpio_set(gc, offset, value);
-
- return 0;
+ return da9062_gpio_set(gc, offset, value);
}
static int da9062_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index a0423172bdd6..2e16f09aeb47 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -182,7 +182,7 @@ static int dc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
return 0;
}
-static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value);
+static int dc_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value);
static int dc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
@@ -216,7 +216,7 @@ static int dc_gpio_get(struct gpio_chip *chip, unsigned gpio)
return !!(input & BIT(bit_off));
}
-static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+static int dc_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct dc_pinmap *pmap = gpiochip_get_data(chip);
int reg_off = GP_OUTPUT0(gpio/PINS_PER_COLLECTION);
@@ -232,6 +232,8 @@ static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
output &= ~BIT(bit_off);
writeb_relaxed(output, pmap->regs + reg_off);
spin_unlock_irqrestore(&pmap->lock, flags);
+
+ return 0;
}
static int dc_gpiochip_add(struct dc_pinmap *pmap)
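
The new pinctrl-eic7700.c driver added below keeps the whole per-pin configuration in a single 32-bit register per pin and encodes drive strength in the 4-bit DS field in 3 mA steps, starting at 3 mA for RGMII/LPDDR reference-clock pads and at 6 mA for all other pads (see eic7700_pin_config_get() and eic7700_pin_config_set() further down). A small stand-alone check of that encoding, plain userspace C for illustration only:

#include <stdio.h>

static unsigned int ds_to_ua(unsigned int ds, int rgmii_or_lpddr)
{
	/* DS advances in 3000 uA steps; the base differs per pad group */
	return ds * 3000 + (rgmii_or_lpddr ? 3000 : 6000);
}

static unsigned int ua_to_ds(unsigned int ua, int rgmii_or_lpddr)
{
	return (ua - (rgmii_or_lpddr ? 3000 : 6000)) / 3000;
}

int main(void)
{
	/* 12000 uA encodes as DS=3 on an RGMII pad but DS=2 elsewhere */
	printf("DS=%u DS=%u\n", ua_to_ds(12000, 1), ua_to_ds(12000, 0));
	printf("%u uA\n", ds_to_ua(3, 1));	/* round-trips to 12000 uA */
	return 0;
}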
diff --git a/drivers/pinctrl/pinctrl-eic7700.c b/drivers/pinctrl/pinctrl-eic7700.c
new file mode 100644
index 000000000000..4874b5532343
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-eic7700.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ESWIN Pinctrl Controller Platform Device Driver
+ *
+ * Copyright 2024, Beijing ESWIN Computing Technology Co., Ltd. All rights reserved.
+ *
+ * Authors: Samuel Holland <samuel.holland@sifive.com>
+ * Yulin Lu <luyulin@eswincomputing.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "core.h"
+#include "pinmux.h"
+#include "pinconf.h"
+
+#define EIC7700_PIN_REG(i) (4 * (i))
+#define EIC7700_IE BIT(0)
+#define EIC7700_PU BIT(1)
+#define EIC7700_PD BIT(2)
+#define EIC7700_DS GENMASK(6, 3)
+#define EIC7700_ST BIT(7)
+#define EIC7700_FUNC_SEL GENMASK(18, 16)
+
+#define EIC7700_BIAS (EIC7700_PD | EIC7700_PU)
+#define EIC7700_PINCONF GENMASK(7, 0)
+
+#define EIC7700_RGMII0_SEL_MODE (0x310 - 0x80)
+#define EIC7700_RGMII1_SEL_MODE (0x314 - 0x80)
+#define EIC7700_MS GENMASK(1, 0)
+#define EIC7700_MS_3V3 0x0
+#define EIC7700_MS_1V8 0x3
+
+#define EIC7700_FUNCTIONS_PER_PIN 8
+
+struct eic7700_pin {
+ u8 functions[EIC7700_FUNCTIONS_PER_PIN];
+};
+
+struct eic7700_pinctrl {
+ void __iomem *base;
+ struct pinctrl_desc desc;
+ unsigned int functions_count;
+ struct pinfunction functions[] __counted_by(functions_count);
+};
+
+enum {
+ F_DISABLED,
+ F_BOOT_SEL,
+ F_CHIP_MODE,
+ F_EMMC,
+ F_FAN_TACH,
+ F_GPIO,
+ F_HDMI,
+ F_I2C,
+ F_I2S,
+ F_JTAG,
+ F_DDR_REF_CLK_SEL,
+ F_LPDDR_REF_CLK,
+ F_MIPI_CSI,
+ F_OSC,
+ F_PCIE,
+ F_PWM,
+ F_RGMII,
+ F_RESET,
+ F_SATA,
+ F_SDIO,
+ F_SPI,
+ F_S_MODE,
+ F_UART,
+ F_USB,
+ EIC7700_FUNCTIONS_COUNT
+};
+
+static const char *const eic7700_functions[EIC7700_FUNCTIONS_COUNT] = {
+ [F_DISABLED] = "disabled",
+ [F_BOOT_SEL] = "boot_sel",
+ [F_CHIP_MODE] = "chip_mode",
+ [F_EMMC] = "emmc",
+ [F_FAN_TACH] = "fan_tach",
+ [F_GPIO] = "gpio",
+ [F_HDMI] = "hdmi",
+ [F_I2C] = "i2c",
+ [F_I2S] = "i2s",
+ [F_JTAG] = "jtag",
+ [F_DDR_REF_CLK_SEL] = "ddr_ref_clk_sel",
+ [F_LPDDR_REF_CLK] = "lpddr_ref_clk",
+ [F_MIPI_CSI] = "mipi_csi",
+ [F_OSC] = "osc",
+ [F_PCIE] = "pcie",
+ [F_PWM] = "pwm",
+ [F_RGMII] = "rgmii",
+ [F_RESET] = "reset",
+ [F_SATA] = "sata",
+ [F_SDIO] = "sdio",
+ [F_SPI] = "spi",
+ [F_S_MODE] = "s_mode",
+ [F_UART] = "uart",
+ [F_USB] = "usb",
+};
+
+#define EIC7700_PIN(_number, _name, ...) \
+ { \
+ .number = _number, \
+ .name = _name, \
+ .drv_data = (void *)&(struct eic7700_pin) { { __VA_ARGS__ } } \
+ }
+
+static const struct pinctrl_pin_desc eic7700_pins[] = {
+ EIC7700_PIN(0, "chip_mode", [0] = F_CHIP_MODE),
+ EIC7700_PIN(1, "mode_set0", [0] = F_SDIO, [2] = F_GPIO),
+ EIC7700_PIN(2, "mode_set1", [0] = F_SDIO, [2] = F_GPIO),
+ EIC7700_PIN(3, "mode_set2", [0] = F_SDIO, [2] = F_GPIO),
+ EIC7700_PIN(4, "mode_set3", [0] = F_SDIO, [2] = F_GPIO),
+ EIC7700_PIN(5, "xin", [0] = F_OSC),
+ EIC7700_PIN(6, "rtc_xin", [0] = F_DISABLED),
+ EIC7700_PIN(7, "rst_out_n", [0] = F_RESET),
+ EIC7700_PIN(8, "key_reset_n", [0] = F_RESET),
+ EIC7700_PIN(9, "rst_in_n", [0] = F_DISABLED),
+ EIC7700_PIN(10, "por_in_n", [0] = F_DISABLED),
+ EIC7700_PIN(11, "por_out_n", [0] = F_DISABLED),
+ EIC7700_PIN(12, "gpio0", [0] = F_GPIO),
+ EIC7700_PIN(13, "por_sel", [0] = F_RESET),
+ EIC7700_PIN(14, "jtag0_tck", [0] = F_JTAG, [1] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(15, "jtag0_tms", [0] = F_JTAG, [1] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(16, "jtag0_tdi", [0] = F_JTAG, [1] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(17, "jtag0_tdo", [0] = F_JTAG, [1] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(18, "gpio5", [0] = F_GPIO, [1] = F_SPI),
+ EIC7700_PIN(19, "spi2_cs0_n", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(20, "jtag1_tck", [0] = F_JTAG, [2] = F_GPIO),
+ EIC7700_PIN(21, "jtag1_tms", [0] = F_JTAG, [2] = F_GPIO),
+ EIC7700_PIN(22, "jtag1_tdi", [0] = F_JTAG, [2] = F_GPIO),
+ EIC7700_PIN(23, "jtag1_tdo", [0] = F_JTAG, [2] = F_GPIO),
+ EIC7700_PIN(24, "gpio11", [0] = F_GPIO),
+ EIC7700_PIN(25, "spi2_cs1_n", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(26, "pcie_clkreq_n", [0] = F_PCIE),
+ EIC7700_PIN(27, "pcie_wake_n", [0] = F_PCIE),
+ EIC7700_PIN(28, "pcie_perst_n", [0] = F_PCIE),
+ EIC7700_PIN(29, "hdmi_scl", [0] = F_HDMI),
+ EIC7700_PIN(30, "hdmi_sda", [0] = F_HDMI),
+ EIC7700_PIN(31, "hdmi_cec", [0] = F_HDMI),
+ EIC7700_PIN(32, "jtag2_trst", [0] = F_JTAG, [2] = F_GPIO),
+ EIC7700_PIN(33, "rgmii0_clk_125", [0] = F_RGMII),
+ EIC7700_PIN(34, "rgmii0_txen", [0] = F_RGMII),
+ EIC7700_PIN(35, "rgmii0_txclk", [0] = F_RGMII),
+ EIC7700_PIN(36, "rgmii0_txd0", [0] = F_RGMII),
+ EIC7700_PIN(37, "rgmii0_txd1", [0] = F_RGMII),
+ EIC7700_PIN(38, "rgmii0_txd2", [0] = F_RGMII),
+ EIC7700_PIN(39, "rgmii0_txd3", [0] = F_RGMII),
+ EIC7700_PIN(40, "i2s0_bclk", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(41, "i2s0_wclk", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(42, "i2s0_sdi", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(43, "i2s0_sdo", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(44, "i2s_mclk", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(45, "rgmii0_rxclk", [0] = F_RGMII),
+ EIC7700_PIN(46, "rgmii0_rxdv", [0] = F_RGMII),
+ EIC7700_PIN(47, "rgmii0_rxd0", [0] = F_RGMII),
+ EIC7700_PIN(48, "rgmii0_rxd1", [0] = F_RGMII),
+ EIC7700_PIN(49, "rgmii0_rxd2", [0] = F_RGMII),
+ EIC7700_PIN(50, "rgmii0_rxd3", [0] = F_RGMII),
+ EIC7700_PIN(51, "i2s2_bclk", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(52, "i2s2_wclk", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(53, "i2s2_sdi", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(54, "i2s2_sdo", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(55, "gpio27", [0] = F_GPIO, [1] = F_SATA),
+ EIC7700_PIN(56, "gpio28", [0] = F_GPIO),
+ EIC7700_PIN(57, "gpio29", [0] = F_RESET, [1] = F_EMMC, [2] = F_GPIO),
+ EIC7700_PIN(58, "rgmii0_mdc", [0] = F_RGMII),
+ EIC7700_PIN(59, "rgmii0_mdio", [0] = F_RGMII),
+ EIC7700_PIN(60, "rgmii0_intb", [0] = F_RGMII),
+ EIC7700_PIN(61, "rgmii1_clk_125", [0] = F_RGMII),
+ EIC7700_PIN(62, "rgmii1_txen", [0] = F_RGMII),
+ EIC7700_PIN(63, "rgmii1_txclk", [0] = F_RGMII),
+ EIC7700_PIN(64, "rgmii1_txd0", [0] = F_RGMII),
+ EIC7700_PIN(65, "rgmii1_txd1", [0] = F_RGMII),
+ EIC7700_PIN(66, "rgmii1_txd2", [0] = F_RGMII),
+ EIC7700_PIN(67, "rgmii1_txd3", [0] = F_RGMII),
+ EIC7700_PIN(68, "i2s1_bclk", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(69, "i2s1_wclk", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(70, "i2s1_sdi", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(71, "i2s1_sdo", [0] = F_I2S, [2] = F_GPIO),
+ EIC7700_PIN(72, "gpio34", [0] = F_RESET, [1] = F_SDIO, [2] = F_GPIO),
+ EIC7700_PIN(73, "rgmii1_rxclk", [0] = F_RGMII),
+ EIC7700_PIN(74, "rgmii1_rxdv", [0] = F_RGMII),
+ EIC7700_PIN(75, "rgmii1_rxd0", [0] = F_RGMII),
+ EIC7700_PIN(76, "rgmii1_rxd1", [0] = F_RGMII),
+ EIC7700_PIN(77, "rgmii1_rxd2", [0] = F_RGMII),
+ EIC7700_PIN(78, "rgmii1_rxd3", [0] = F_RGMII),
+ EIC7700_PIN(79, "spi1_cs0_n", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(80, "spi1_clk", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(81, "spi1_d0", [0] = F_SPI, [1] = F_I2C, [2] = F_GPIO, [3] = F_UART),
+ EIC7700_PIN(82, "spi1_d1", [0] = F_SPI, [1] = F_I2C, [2] = F_GPIO, [3] = F_UART),
+ EIC7700_PIN(83, "spi1_d2", [0] = F_SPI, [1] = F_SDIO, [2] = F_GPIO),
+ EIC7700_PIN(84, "spi1_d3", [0] = F_SPI, [1] = F_PWM, [2] = F_GPIO),
+ EIC7700_PIN(85, "spi1_cs1_n", [0] = F_SPI, [1] = F_PWM, [2] = F_GPIO),
+ EIC7700_PIN(86, "rgmii1_mdc", [0] = F_RGMII),
+ EIC7700_PIN(87, "rgmii1_mdio", [0] = F_RGMII),
+ EIC7700_PIN(88, "rgmii1_intb", [0] = F_RGMII),
+ EIC7700_PIN(89, "usb0_pwren", [0] = F_USB, [2] = F_GPIO),
+ EIC7700_PIN(90, "usb1_pwren", [0] = F_USB, [2] = F_GPIO),
+ EIC7700_PIN(91, "i2c0_scl", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(92, "i2c0_sda", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(93, "i2c1_scl", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(94, "i2c1_sda", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(95, "i2c2_scl", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(96, "i2c2_sda", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(97, "i2c3_scl", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(98, "i2c3_sda", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(99, "i2c4_scl", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(100, "i2c4_sda", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(101, "i2c5_scl", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(102, "i2c5_sda", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(103, "uart0_tx", [0] = F_UART, [2] = F_GPIO),
+ EIC7700_PIN(104, "uart0_rx", [0] = F_UART, [2] = F_GPIO),
+ EIC7700_PIN(105, "uart1_tx", [0] = F_UART, [2] = F_GPIO),
+ EIC7700_PIN(106, "uart1_rx", [0] = F_UART, [2] = F_GPIO),
+ EIC7700_PIN(107, "uart1_cts", [0] = F_UART, [1] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(108, "uart1_rts", [0] = F_UART, [1] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(109, "uart2_tx", [0] = F_UART, [1] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(110, "uart2_rx", [0] = F_UART, [1] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(111, "jtag2_tck", [0] = F_JTAG, [2] = F_GPIO),
+ EIC7700_PIN(112, "jtag2_tms", [0] = F_JTAG, [2] = F_GPIO),
+ EIC7700_PIN(113, "jtag2_tdi", [0] = F_JTAG, [2] = F_GPIO),
+ EIC7700_PIN(114, "jtag2_tdo", [0] = F_JTAG, [2] = F_GPIO),
+ EIC7700_PIN(115, "fan_pwm", [0] = F_PWM, [2] = F_GPIO),
+ EIC7700_PIN(116, "fan_tach", [0] = F_FAN_TACH, [2] = F_GPIO),
+ EIC7700_PIN(117, "mipi_csi0_xvs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(118, "mipi_csi0_xhs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(119, "mipi_csi0_mclk", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(120, "mipi_csi1_xvs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(121, "mipi_csi1_xhs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(122, "mipi_csi1_mclk", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(123, "mipi_csi2_xvs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(124, "mipi_csi2_xhs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(125, "mipi_csi2_mclk", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(126, "mipi_csi3_xvs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(127, "mipi_csi3_xhs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(128, "mipi_csi3_mclk", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(129, "mipi_csi4_xvs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(130, "mipi_csi4_xhs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(131, "mipi_csi4_mclk", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(132, "mipi_csi5_xvs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(133, "mipi_csi5_xhs", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(134, "mipi_csi5_mclk", [0] = F_MIPI_CSI, [2] = F_GPIO),
+ EIC7700_PIN(135, "spi3_cs_n", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(136, "spi3_clk", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(137, "spi3_di", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(138, "spi3_do", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(139, "gpio92", [0] = F_I2C, [1] = F_MIPI_CSI, [2] = F_GPIO, [3] = F_UART),
+ EIC7700_PIN(140, "gpio93", [0] = F_I2C, [1] = F_MIPI_CSI, [2] = F_GPIO, [3] = F_UART),
+ EIC7700_PIN(141, "s_mode", [0] = F_S_MODE, [2] = F_GPIO),
+ EIC7700_PIN(142, "gpio95", [0] = F_DDR_REF_CLK_SEL, [2] = F_GPIO),
+ EIC7700_PIN(143, "spi0_cs_n", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(144, "spi0_clk", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(145, "spi0_d0", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(146, "spi0_d1", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(147, "spi0_d2", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(148, "spi0_d3", [0] = F_SPI, [2] = F_GPIO),
+ EIC7700_PIN(149, "i2c10_scl", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(150, "i2c10_sda", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(151, "i2c11_scl", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(152, "i2c11_sda", [0] = F_I2C, [2] = F_GPIO),
+ EIC7700_PIN(153, "gpio106", [0] = F_GPIO),
+ EIC7700_PIN(154, "boot_sel0", [0] = F_BOOT_SEL, [2] = F_GPIO),
+ EIC7700_PIN(155, "boot_sel1", [0] = F_BOOT_SEL, [2] = F_GPIO),
+ EIC7700_PIN(156, "boot_sel2", [0] = F_BOOT_SEL, [2] = F_GPIO),
+ EIC7700_PIN(157, "boot_sel3", [0] = F_BOOT_SEL, [2] = F_GPIO),
+ EIC7700_PIN(158, "gpio111", [0] = F_GPIO),
+ EIC7700_PIN(159, "reserved0", [0] = F_DISABLED),
+ EIC7700_PIN(160, "reserved1", [0] = F_DISABLED),
+ EIC7700_PIN(161, "reserved2", [0] = F_DISABLED),
+ EIC7700_PIN(162, "reserved3", [0] = F_DISABLED),
+ EIC7700_PIN(163, "lpddr_ref_clk", [0] = F_LPDDR_REF_CLK),
+};
+
+static int eic7700_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->desc.npins;
+}
+
+static const char *eic7700_get_group_name(struct pinctrl_dev *pctldev, unsigned int selector)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->desc.pins[selector].name;
+}
+
+static int eic7700_get_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins, unsigned int *npins)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pc->desc.pins[selector].number;
+ *npins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops eic7700_pinctrl_ops = {
+ .get_groups_count = eic7700_get_groups_count,
+ .get_group_name = eic7700_get_group_name,
+ .get_group_pins = eic7700_get_group_pins,
+#ifdef CONFIG_OF
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+#endif
+};
+
+static int eic7700_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct eic7700_pin *pin_data = pc->desc.pins[pin].drv_data;
+ u32 arg, value;
+ int param;
+
+ if (pin_data->functions[0] == F_OSC || pin_data->functions[0] == F_DISABLED)
+ return -EOPNOTSUPP;
+
+ value = readl_relaxed(pc->base + EIC7700_PIN_REG(pin));
+
+ param = pinconf_to_config_param(*config);
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = (value & EIC7700_BIAS) == 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = (value & EIC7700_BIAS) == EIC7700_PD;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = (value & EIC7700_BIAS) == EIC7700_PU;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ if (pin_data->functions[0] == F_RGMII ||
+ pin_data->functions[0] == F_LPDDR_REF_CLK)
+ arg = FIELD_GET(EIC7700_DS, value) * 3000 + 3000;
+ else
+ arg = FIELD_GET(EIC7700_DS, value) * 3000 + 6000;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ arg = value & EIC7700_IE;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ arg = value & EIC7700_ST;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return arg ? 0 : -EINVAL;
+}
+
+static int eic7700_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct eic7700_pin *pin_data = pc->desc.pins[pin].drv_data;
+ u32 value;
+
+ if (pin_data->functions[0] == F_OSC || pin_data->functions[0] == F_DISABLED)
+ return -EOPNOTSUPP;
+
+ value = readl_relaxed(pc->base + EIC7700_PIN_REG(pin));
+
+ for (unsigned int i = 0; i < num_configs; i++) {
+ int param = pinconf_to_config_param(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ value &= ~EIC7700_BIAS;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (arg == 0)
+ return -EOPNOTSUPP;
+ value &= ~EIC7700_BIAS;
+ value |= EIC7700_PD;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (arg == 0)
+ return -EOPNOTSUPP;
+ value &= ~EIC7700_BIAS;
+ value |= EIC7700_PU;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ value &= ~EIC7700_DS;
+ if (pin_data->functions[0] == F_RGMII ||
+ pin_data->functions[0] == F_LPDDR_REF_CLK) {
+ if (arg < 3000 || arg > 24000)
+ return -EOPNOTSUPP;
+ value |= FIELD_PREP(EIC7700_DS, (arg - 3000) / 3000);
+ } else {
+ if (arg < 6000 || arg > 27000)
+ return -EOPNOTSUPP;
+ value |= FIELD_PREP(EIC7700_DS, (arg - 6000) / 3000);
+ }
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (arg)
+ value |= EIC7700_IE;
+ else
+ value &= ~EIC7700_IE;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (arg)
+ value |= EIC7700_ST;
+ else
+ value &= ~EIC7700_ST;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ writel_relaxed(value, pc->base + EIC7700_PIN_REG(pin));
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void eic7700_pin_config_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int pin)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ u32 value = readl_relaxed(pc->base + EIC7700_PIN_REG(pin)) & EIC7700_PINCONF;
+
+ seq_printf(s, " [0x%02x]", value);
+}
+#else
+#define eic7700_pin_config_dbg_show NULL
+#endif
+
+static const struct pinconf_ops eic7700_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = eic7700_pin_config_get,
+ .pin_config_set = eic7700_pin_config_set,
+ .pin_config_group_get = eic7700_pin_config_get,
+ .pin_config_group_set = eic7700_pin_config_set,
+ .pin_config_dbg_show = eic7700_pin_config_dbg_show,
+ .pin_config_group_dbg_show = eic7700_pin_config_dbg_show,
+};
+
+static int eic7700_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->functions_count;
+}
+
+static const char *eic7700_get_function_name(struct pinctrl_dev *pctldev, unsigned int selector)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->functions[selector].name;
+}
+
+static int eic7700_get_function_groups(struct pinctrl_dev *pctldev, unsigned int selector,
+ const char *const **groups, unsigned int *num_groups)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pc->functions[selector].groups;
+ *num_groups = pc->functions[selector].ngroups;
+
+ return 0;
+}
+
+static int eic7700_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct eic7700_pin *pin_data = pc->desc.pins[group_selector].drv_data;
+ u32 fs, value;
+
+ if (pin_data->functions[0] == F_OSC || pin_data->functions[0] == F_DISABLED)
+ return -EOPNOTSUPP;
+
+ for (fs = 0; fs < EIC7700_FUNCTIONS_PER_PIN; fs++)
+ if (pin_data->functions[fs] == func_selector)
+ break;
+
+ if (fs == EIC7700_FUNCTIONS_PER_PIN) {
+ dev_err(pctldev->dev, "invalid mux %s for pin %s\n",
+ pc->functions[func_selector].name,
+ pc->desc.pins[group_selector].name);
+ return -EINVAL;
+ }
+
+ value = readl_relaxed(pc->base + EIC7700_PIN_REG(group_selector));
+ value &= ~EIC7700_FUNC_SEL;
+ value |= FIELD_PREP(EIC7700_FUNC_SEL, fs);
+ writel_relaxed(value, pc->base + EIC7700_PIN_REG(group_selector));
+
+ return 0;
+}
+
+static int eic7700_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned int offset)
+{
+ return eic7700_set_mux(pctldev, F_GPIO, offset);
+}
+
+static void eic7700_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned int offset)
+{
+ eic7700_set_mux(pctldev, F_DISABLED, offset);
+}
+
+static int eic7700_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned int offset,
+ bool input)
+{
+ struct eic7700_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ u32 value;
+
+ value = readl_relaxed(pc->base + EIC7700_PIN_REG(offset));
+ if (input)
+ value |= EIC7700_IE;
+ else
+ value &= ~EIC7700_IE;
+ writel_relaxed(value, pc->base + EIC7700_PIN_REG(offset));
+
+ return 0;
+}
+
+static const struct pinmux_ops eic7700_pinmux_ops = {
+ .get_functions_count = eic7700_get_functions_count,
+ .get_function_name = eic7700_get_function_name,
+ .get_function_groups = eic7700_get_function_groups,
+ .set_mux = eic7700_set_mux,
+ .gpio_request_enable = eic7700_gpio_request_enable,
+ .gpio_disable_free = eic7700_gpio_disable_free,
+ .gpio_set_direction = eic7700_gpio_set_direction,
+ .strict = true,
+};
+
+static int eic7700_pinctrl_init_function_groups(struct device *dev, struct eic7700_pinctrl *pc,
+ const char *const *function_names)
+{
+ unsigned int ngroups = 0;
+ const char **groups;
+
+ /* Count the number of groups for each function */
+ for (unsigned int pin = 0; pin < pc->desc.npins; pin++) {
+ const struct eic7700_pin *pin_data = pc->desc.pins[pin].drv_data;
+ bool found_disabled = false;
+
+ for (unsigned int fs = 0; fs < EIC7700_FUNCTIONS_PER_PIN; fs++) {
+ unsigned int selector = pin_data->functions[fs];
+ struct pinfunction *function = &pc->functions[selector];
+
+ /* Only count F_DISABLED once per pin */
+ if (selector == F_DISABLED) {
+ if (found_disabled)
+ continue;
+ found_disabled = true;
+ }
+
+ function->ngroups++;
+ ngroups++;
+ }
+ }
+
+ groups = devm_kcalloc(dev, ngroups, sizeof(*groups), GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (unsigned int selector = 0; selector < pc->functions_count; selector++) {
+ struct pinfunction *function = &pc->functions[selector];
+
+ function->name = function_names[selector];
+ function->groups = groups;
+ groups += function->ngroups;
+
+ /* Reset per-function ngroups for use as an index below */
+ function->ngroups = 0;
+ }
+
+ /* Fill in the group pointers for each function */
+ for (unsigned int pin = 0; pin < pc->desc.npins; pin++) {
+ const struct pinctrl_pin_desc *desc = &pc->desc.pins[pin];
+ const struct eic7700_pin *pin_data = desc->drv_data;
+ bool found_disabled = false;
+
+ for (unsigned int fs = 0; fs < EIC7700_FUNCTIONS_PER_PIN; fs++) {
+ unsigned int selector = pin_data->functions[fs];
+ struct pinfunction *function = &pc->functions[selector];
+
+ /* Only add F_DISABLED once per pin */
+ if (selector == F_DISABLED) {
+ if (found_disabled)
+ continue;
+ found_disabled = true;
+ }
+
+ ((const char **)function->groups)[function->ngroups++] = desc->name;
+ }
+ }
+
+ return 0;
+}
+
+static int eic7700_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pinctrl_dev *pctldev;
+ struct eic7700_pinctrl *pc;
+ struct regulator *regulator;
+ u32 rgmii0_mode, rgmii1_mode;
+ int ret, voltage;
+
+ pc = devm_kzalloc(dev, struct_size(pc, functions, EIC7700_FUNCTIONS_COUNT), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ pc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pc->base))
+ return PTR_ERR(pc->base);
+
+ regulator = devm_regulator_get(dev, "vrgmii");
+ if (IS_ERR(regulator)) {
+ return dev_err_probe(dev, PTR_ERR(regulator),
+ "failed to get vrgmii regulator\n");
+ }
+
+ voltage = regulator_get_voltage(regulator);
+ if (voltage < 0) {
+ return dev_err_probe(dev, voltage,
+ "Failed to get voltage from regulator\n");
+ }
+
+ rgmii0_mode = readl_relaxed(pc->base + EIC7700_RGMII0_SEL_MODE);
+ rgmii1_mode = readl_relaxed(pc->base + EIC7700_RGMII1_SEL_MODE);
+ rgmii0_mode &= ~EIC7700_MS;
+ rgmii1_mode &= ~EIC7700_MS;
+ if (voltage == 1800000) {
+ rgmii0_mode |= FIELD_PREP(EIC7700_MS, EIC7700_MS_1V8);
+ rgmii1_mode |= FIELD_PREP(EIC7700_MS, EIC7700_MS_1V8);
+ } else if (voltage == 3300000) {
+ rgmii0_mode |= FIELD_PREP(EIC7700_MS, EIC7700_MS_3V3);
+ rgmii1_mode |= FIELD_PREP(EIC7700_MS, EIC7700_MS_3V3);
+ } else {
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid voltage configuration, should be either 1.8V or 3.3V\n");
+ }
+
+ writel_relaxed(rgmii0_mode, pc->base + EIC7700_RGMII0_SEL_MODE);
+ writel_relaxed(rgmii1_mode, pc->base + EIC7700_RGMII1_SEL_MODE);
+
+ pc->desc.name = dev_name(dev);
+ pc->desc.pins = eic7700_pins;
+ pc->desc.npins = ARRAY_SIZE(eic7700_pins);
+ pc->desc.pctlops = &eic7700_pinctrl_ops;
+ pc->desc.pmxops = &eic7700_pinmux_ops;
+ pc->desc.confops = &eic7700_pinconf_ops;
+ pc->desc.owner = THIS_MODULE;
+
+ pc->functions_count = EIC7700_FUNCTIONS_COUNT;
+ ret = eic7700_pinctrl_init_function_groups(dev, pc, eic7700_functions);
+ if (ret)
+ return ret;
+
+ ret = devm_pinctrl_register_and_init(dev, &pc->desc, pc, &pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not register pinctrl driver\n");
+
+ return pinctrl_enable(pctldev);
+}
+
+static const struct of_device_id eic7700_pinctrl_of_match[] = {
+ { .compatible = "eswin,eic7700-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, eic7700_pinctrl_of_match);
+
+static struct platform_driver eic7700_pinctrl_driver = {
+ .probe = eic7700_pinctrl_probe,
+ .driver = {
+ .name = "pinctrl-eic7700",
+ .of_match_table = eic7700_pinctrl_of_match,
+ },
+};
+module_platform_driver(eic7700_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the ESWIN EIC7700 SoC");
+MODULE_AUTHOR("Samuel Holland <samuel.holland@sifive.com>");
+MODULE_AUTHOR("Yulin Lu <luyulin@eswincomputing.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
index 3a9a0f059090..fce804d42e7d 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.c
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -182,6 +182,8 @@ static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl)
gc = &gctrl->chip;
gc->label = gctrl->name;
gc->fwnode = gctrl->fwnode;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
if (!fwnode_property_read_bool(gctrl->fwnode, "interrupt-controller")) {
dev_dbg(dev, "gc %s: doesn't act as interrupt controller!\n",
@@ -685,11 +687,8 @@ static int eqbr_build_functions(struct eqbr_pinctrl_drv_data *drvdata)
if (funcs[i].name == NULL)
continue;
- ret = pinmux_generic_add_function(drvdata->pctl_dev,
- funcs[i].name,
- funcs[i].groups,
- funcs[i].ngroups,
- drvdata);
+ ret = pinmux_generic_add_pinfunction(drvdata->pctl_dev,
+ &funcs[i], drvdata);
if (ret < 0) {
dev_err(dev, "Failed to register function %s\n",
funcs[i].name);
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index 0bf9ffbcc79f..100eed175c0d 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -505,7 +505,7 @@ static struct platform_driver pinctrl_falcon_driver = {
},
};
-int __init pinctrl_falcon_init(void)
+static int __init pinctrl_falcon_init(void)
{
return platform_driver_register(&pinctrl_falcon_driver);
}
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 3c660471ec69..2900513467fa 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -4451,7 +4451,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.fwnode = fwnode;
jzgc->gc.owner = THIS_MODULE;
- jzgc->gc.set_rv = ingenic_gpio_set;
+ jzgc->gc.set = ingenic_gpio_set;
jzgc->gc.get = ingenic_gpio_get;
jzgc->gc.direction_input = pinctrl_gpio_direction_input;
jzgc->gc.direction_output = ingenic_gpio_direction_output;
@@ -4574,9 +4574,8 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
const struct function_desc *function = &chip_info->functions[i];
const struct pinfunction *func = &function->func;
- err = pinmux_generic_add_function(jzpc->pctl, func->name,
- func->groups, func->ngroups,
- function->data);
+ err = pinmux_generic_add_pinfunction(jzpc->pctl, func,
+ function->data);
if (err < 0) {
dev_err(dev, "Failed to register function %s\n", func->name);
return err;
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index eddb01796a83..66c04120c29d 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -879,7 +879,7 @@ static const struct pinctrl_ops k210_pinctrl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-static struct pinctrl_desc k210_pinctrl_desc = {
+static const struct pinctrl_desc k210_pinctrl_desc = {
.name = "k210-pinctrl",
.pins = k210_pins,
.npins = K210_NPINS,
diff --git a/drivers/pinctrl/pinctrl-k230.c b/drivers/pinctrl/pinctrl-k230.c
index a9b4627b46b0..d716f23d837f 100644
--- a/drivers/pinctrl/pinctrl-k230.c
+++ b/drivers/pinctrl/pinctrl-k230.c
@@ -477,6 +477,10 @@ static int k230_pinctrl_parse_groups(struct device_node *np,
grp->name = np->name;
list = of_get_property(np, "pinmux", &size);
+ if (!list) {
+ dev_err(dev, "failed to get pinmux property\n");
+ return -EINVAL;
+ }
size /= sizeof(*list);
grp->num_pins = size;
@@ -586,6 +590,7 @@ static int k230_pinctrl_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct k230_pinctrl *info;
struct pinctrl_desc *pctl;
+ int ret;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -611,19 +616,21 @@ static int k230_pinctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(info->regmap_base),
"failed to init regmap\n");
+ ret = k230_pinctrl_parse_dt(pdev, info);
+ if (ret)
+ return ret;
+
info->pctl_dev = devm_pinctrl_register(dev, pctl, info);
if (IS_ERR(info->pctl_dev))
return dev_err_probe(dev, PTR_ERR(info->pctl_dev),
"devm_pinctrl_register failed\n");
- k230_pinctrl_parse_dt(pdev, info);
-
return 0;
}
static const struct of_device_id k230_dt_ids[] = {
{ .compatible = "canaan,k230-pinctrl", },
- { /* sintenel */ }
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, k230_dt_ids);
diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c
index 0d7cc8280ea2..60cf017498b3 100644
--- a/drivers/pinctrl/pinctrl-keembay.c
+++ b/drivers/pinctrl/pinctrl-keembay.c
@@ -1188,7 +1188,7 @@ static int keembay_gpio_get(struct gpio_chip *gc, unsigned int pin)
return keembay_read_pin(kpc->base0 + offset, pin);
}
-static void keembay_gpio_set(struct gpio_chip *gc, unsigned int pin, int val)
+static int keembay_gpio_set(struct gpio_chip *gc, unsigned int pin, int val)
{
struct keembay_pinctrl *kpc = gpiochip_get_data(gc);
unsigned int reg_val;
@@ -1200,6 +1200,8 @@ static void keembay_gpio_set(struct gpio_chip *gc, unsigned int pin, int val)
else
keembay_write_gpio_reg(~reg_val | BIT(pin % KEEMBAY_GPIO_MAX_PER_REG),
kpc->base0 + KEEMBAY_GPIO_DATA_LOW, pin);
+
+ return 0;
}
static int keembay_gpio_get_direction(struct gpio_chip *gc, unsigned int pin)
@@ -1231,9 +1233,8 @@ static int keembay_gpio_set_direction_out(struct gpio_chip *gc,
val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
val &= ~KEEMBAY_GPIO_MODE_DIR;
keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
- keembay_gpio_set(gc, pin, value);
- return 0;
+ return keembay_gpio_set(gc, pin, value);
}
static void keembay_gpio_irq_handler(struct irq_desc *desc)
@@ -1585,13 +1586,9 @@ static int keembay_add_functions(struct keembay_pinctrl *kpc,
}
/* Add all functions */
- for (i = 0; i < kpc->nfuncs; i++) {
- pinmux_generic_add_function(kpc->pctrl,
- functions[i].func.name,
- functions[i].func.groups,
- functions[i].func.ngroups,
- functions[i].data);
- }
+ for (i = 0; i < kpc->nfuncs; i++)
+ pinmux_generic_add_pinfunction(kpc->pctrl, &functions[i].func,
+ functions[i].data);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 0f5a7bed2f81..5e0201768323 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -1257,7 +1257,7 @@ static const struct pinctrl_ops lpc18xx_pctl_ops = {
.dt_free_map = pinctrl_utils_free_map,
};
-static struct pinctrl_desc lpc18xx_scu_desc = {
+static const struct pinctrl_desc lpc18xx_scu_desc = {
.name = "lpc18xx/43xx-scu",
.pins = lpc18xx_pins,
.npins = ARRAY_SIZE(lpc18xx_pins),
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index d236daa7c13e..acb945a25743 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -543,6 +543,10 @@ static struct pinctrl_desc max77620_pinctrl_desc = {
.pctlops = &max77620_pinctrl_ops,
.pmxops = &max77620_pinmux_ops,
.confops = &max77620_pinconf_ops,
+ .pins = max77620_pins_desc,
+ .npins = ARRAY_SIZE(max77620_pins_desc),
+ .num_custom_params = ARRAY_SIZE(max77620_cfg_params),
+ .custom_params = max77620_cfg_params,
};
static int max77620_pinctrl_probe(struct platform_device *pdev)
@@ -569,11 +573,6 @@ static int max77620_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mpci);
max77620_pinctrl_desc.name = dev_name(&pdev->dev);
- max77620_pinctrl_desc.pins = max77620_pins_desc;
- max77620_pinctrl_desc.npins = ARRAY_SIZE(max77620_pins_desc);
- max77620_pinctrl_desc.num_custom_params =
- ARRAY_SIZE(max77620_cfg_params);
- max77620_pinctrl_desc.custom_params = max77620_cfg_params;
for (i = 0; i < MAX77620_PIN_NUM; ++i) {
mpci->fps_config[i].active_fps_src = -1;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index c2f4b16f42d2..a17fcaddf490 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -341,24 +341,30 @@ static int __mcp23s08_set(struct mcp23s08 *mcp, unsigned mask, bool value)
return mcp_update_bits(mcp, MCP_OLAT, mask, value ? mask : 0);
}
-static void mcp23s08_set(struct gpio_chip *chip, unsigned offset, int value)
+static int mcp23s08_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct mcp23s08 *mcp = gpiochip_get_data(chip);
unsigned mask = BIT(offset);
+ int ret;
mutex_lock(&mcp->lock);
- __mcp23s08_set(mcp, mask, !!value);
+ ret = __mcp23s08_set(mcp, mask, !!value);
mutex_unlock(&mcp->lock);
+
+ return ret;
}
-static void mcp23s08_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
+static int mcp23s08_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
struct mcp23s08 *mcp = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&mcp->lock);
- mcp_update_bits(mcp, MCP_OLAT, *mask, *bits);
+ ret = mcp_update_bits(mcp, MCP_OLAT, *mask, *bits);
mutex_unlock(&mcp->lock);
+
+ return ret;
}
static int
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 88c2f14cfc6b..6191e5c13815 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -858,7 +858,7 @@ static int microchip_sgpio_register_bank(struct device *dev,
gc->direction_input = microchip_sgpio_direction_input;
gc->direction_output = microchip_sgpio_direction_output;
gc->get = microchip_sgpio_get_value;
- gc->set_rv = microchip_sgpio_set_value;
+ gc->set = microchip_sgpio_set_value;
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->of_xlate = microchip_sgpio_of_xlate;
diff --git a/drivers/pinctrl/pinctrl-mlxbf3.c b/drivers/pinctrl/pinctrl-mlxbf3.c
index ffb5dda364dc..fcd9d46de89f 100644
--- a/drivers/pinctrl/pinctrl-mlxbf3.c
+++ b/drivers/pinctrl/pinctrl-mlxbf3.c
@@ -231,7 +231,7 @@ static const struct pinmux_ops mlxbf3_pmx_ops = {
.gpio_request_enable = mlxbf3_gpio_request_enable,
};
-static struct pinctrl_desc mlxbf3_pin_desc = {
+static const struct pinctrl_desc mlxbf3_pin_desc = {
.name = "pinctrl-mlxbf3",
.pins = mlxbf3_pins,
.npins = ARRAY_SIZE(mlxbf3_pins),
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index fbb3d43746bb..b82bf83fed25 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1997,7 +1997,7 @@ static int ocelot_gpio_direction_output(struct gpio_chip *chip,
static const struct gpio_chip ocelot_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set_rv = ocelot_gpio_set,
+ .set = ocelot_gpio_set,
.get = ocelot_gpio_get,
.get_direction = ocelot_gpio_get_direction,
.direction_input = pinctrl_gpio_direction_input,
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index 9e272f9deb4f..d69f114e4642 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -956,6 +956,8 @@ static struct pinctrl_desc palmas_pinctrl_desc = {
.pmxops = &palmas_pinmux_ops,
.confops = &palmas_pinconf_ops,
.owner = THIS_MODULE,
+ .pins = palmas_pins_desc,
+ .npins = ARRAY_SIZE(palmas_pins_desc),
};
struct palmas_pinctrl_data {
@@ -1023,8 +1025,6 @@ static int palmas_pinctrl_probe(struct platform_device *pdev)
}
palmas_pinctrl_desc.name = dev_name(&pdev->dev);
- palmas_pinctrl_desc.pins = palmas_pins_desc;
- palmas_pinctrl_desc.npins = ARRAY_SIZE(palmas_pins_desc);
pci->pctl = devm_pinctrl_register(&pdev->dev, &palmas_pinctrl_desc,
pci);
if (IS_ERR(pci->pctl)) {
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index bf827ab081a1..37c2bf752154 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -1828,8 +1828,8 @@ static int pic32_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(readl(bank->reg_base + PORT_REG) & BIT(offset));
}
-static void pic32_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int pic32_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
u32 mask = BIT(offset);
@@ -1838,6 +1838,8 @@ static void pic32_gpio_set(struct gpio_chip *chip, unsigned offset,
writel(mask, bank->reg_base + PIC32_SET(PORT_REG));
else
writel(mask, bank->reg_base + PIC32_CLR(PORT_REG));
+
+ return 0;
}
static int pic32_gpio_direction_output(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index e7bf60960961..0b33b01dbaad 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1156,11 +1156,14 @@ static const struct pinconf_ops pistachio_pinconf_ops = {
.is_generic = true,
};
-static struct pinctrl_desc pistachio_pinctrl_desc = {
+static const struct pinctrl_desc pistachio_pinctrl_desc = {
.name = "pistachio-pinctrl",
.pctlops = &pistachio_pinctrl_ops,
.pmxops = &pistachio_pinmux_ops,
.confops = &pistachio_pinconf_ops,
+ .pins = pistachio_pins,
+ .npins = ARRAY_SIZE(pistachio_pins),
};
static int pistachio_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -1328,7 +1331,7 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
.direction_input = pistachio_gpio_direction_input, \
.direction_output = pistachio_gpio_direction_output, \
.get = pistachio_gpio_get, \
- .set_rv = pistachio_gpio_set, \
+ .set = pistachio_gpio_set, \
.base = _pin_base, \
.ngpio = _npins, \
}, \
@@ -1474,9 +1477,6 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
pctl->gpio_banks = pistachio_gpio_banks;
pctl->nbanks = ARRAY_SIZE(pistachio_gpio_banks);
- pistachio_pinctrl_desc.pins = pctl->pins;
- pistachio_pinctrl_desc.npins = pctl->npins;
-
pctl->pctldev = devm_pinctrl_register(&pdev->dev, &pistachio_pinctrl_desc,
pctl);
if (IS_ERR(pctl->pctldev)) {
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index fc0e330b1d11..3acf770316c1 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -378,7 +378,7 @@ static const struct gpio_chip rk805_gpio_chip = {
.free = gpiochip_generic_free,
.get_direction = rk805_gpio_get_direction,
.get = rk805_gpio_get,
- .set_rv = rk805_gpio_set,
+ .set = rk805_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = rk805_gpio_direction_output,
.can_sleep = true,
diff --git a/drivers/pinctrl/pinctrl-rp1.c b/drivers/pinctrl/pinctrl-rp1.c
index 6080b57a5d87..dadafc935dbb 100644
--- a/drivers/pinctrl/pinctrl-rp1.c
+++ b/drivers/pinctrl/pinctrl-rp1.c
@@ -851,7 +851,7 @@ static const struct gpio_chip rp1_gpio_chip = {
.direction_output = rp1_gpio_direction_output,
.get_direction = rp1_gpio_get_direction,
.get = rp1_gpio_get,
- .set_rv = rp1_gpio_set,
+ .set = rp1_gpio_set,
.base = -1,
.set_config = rp1_gpio_set_config,
.ngpio = RP1_NUM_GPIOS,
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 8a2ef74862d3..d3cea3437d7f 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -706,10 +706,12 @@ static int st_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(readl(bank->base + REG_PIO_PIN) & BIT(offset));
}
-static void st_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int st_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct st_gpio_bank *bank = gpiochip_get_data(chip);
__st_gpio_set(bank, offset, value);
+
+ return 0;
}
static int st_gpio_direction_output(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index f4fdcaa043e6..c89b99003b71 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -697,7 +697,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->gpio_chip.direction_input = stmfx_gpio_direction_input;
pctl->gpio_chip.direction_output = stmfx_gpio_direction_output;
pctl->gpio_chip.get = stmfx_gpio_get;
- pctl->gpio_chip.set_rv = stmfx_gpio_set;
+ pctl->gpio_chip.set = stmfx_gpio_set;
pctl->gpio_chip.set_config = gpiochip_generic_config;
pctl->gpio_chip.base = -1;
pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index d3a12c1c0de2..53cf8168b274 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -1176,7 +1176,7 @@ static int sx150x_probe(struct i2c_client *client)
pctl->gpio.direction_input = sx150x_gpio_direction_input;
pctl->gpio.direction_output = sx150x_gpio_direction_output;
pctl->gpio.get = sx150x_gpio_get;
- pctl->gpio.set_rv = sx150x_gpio_set;
+ pctl->gpio.set = sx150x_gpio_set;
pctl->gpio.set_config = gpiochip_generic_config;
pctl->gpio.parent = dev;
pctl->gpio.can_sleep = true;
@@ -1191,7 +1191,7 @@ static int sx150x_probe(struct i2c_client *client)
* would require locking that is not in place at this time.
*/
if (pctl->data->model != SX150X_789)
- pctl->gpio.set_multiple_rv = sx150x_gpio_set_multiple;
+ pctl->gpio.set_multiple = sx150x_gpio_set_multiple;
/* Add Interrupt support if an irq is specified */
if (client->irq > 0) {
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 4edb20e61951..129fa51d13b1 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -735,7 +735,7 @@ static const struct pinmux_ops tb10x_pinmux_ops = {
.set_mux = tb10x_pctl_set_mux,
};
-static struct pinctrl_desc tb10x_pindesc = {
+static const struct pinctrl_desc tb10x_pindesc = {
.name = "TB10x",
.pins = tb10x_pins,
.npins = ARRAY_SIZE(tb10x_pins),
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 48f8aabf3bfa..3d4ad61d0da9 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1228,10 +1228,10 @@ static int xway_pinconf_set(struct pinctrl_dev *pctldev,
return 0;
}
-int xway_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned selector,
- unsigned long *configs,
- unsigned num_configs)
+static int xway_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
{
struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
int i, ret = 0;
@@ -1293,7 +1293,7 @@ static struct ltq_pinmux_info xway_info = {
};
/* --------- gpio_chip related code --------- */
-static void xway_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
+static int xway_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
{
struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent);
@@ -1301,6 +1301,8 @@ static void xway_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
gpio_setbit(info->membase[0], GPIO_OUT(pin), PORT_PIN(pin));
else
gpio_clearbit(info->membase[0], GPIO_OUT(pin), PORT_PIN(pin));
+
+ return 0;
}
static int xway_gpio_get(struct gpio_chip *chip, unsigned int pin)
@@ -1328,9 +1330,7 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
else
gpio_setbit(info->membase[0], GPIO_OD(pin), PORT_PIN(pin));
gpio_setbit(info->membase[0], GPIO_DIR(pin), PORT_PIN(pin));
- xway_gpio_set(chip, pin, val);
-
- return 0;
+ return xway_gpio_set(chip, pin, val);
}
/*
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index caa8a2ca3e68..dcde86fed10d 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -1143,7 +1143,7 @@ static const struct pinconf_ops zynq_pinconf_ops = {
.pin_config_group_set = zynq_pinconf_group_set,
};
-static struct pinctrl_desc zynq_desc = {
+static const struct pinctrl_desc zynq_desc = {
.name = "zynq_pinctrl",
.pins = zynq_pins,
.npins = ARRAY_SIZE(zynq_pins),
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 0743190da59e..79814758a084 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -236,18 +236,7 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
if (desc->mux_usecount)
return NULL;
}
- }
-
- /*
- * If there is no kind of request function for the pin we just assume
- * we got it by default and proceed.
- */
- if (gpio_range && ops->gpio_disable_free)
- ops->gpio_disable_free(pctldev, gpio_range, pin);
- else if (ops->free)
- ops->free(pctldev, pin);
- scoped_guard(mutex, &desc->mux_lock) {
if (gpio_range) {
owner = desc->gpio_owner;
desc->gpio_owner = NULL;
@@ -258,6 +247,15 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
}
}
+ /*
+ * If there is no kind of request function for the pin we just assume
+ * we got it by default and proceed.
+ */
+ if (gpio_range && ops->gpio_disable_free)
+ ops->gpio_disable_free(pctldev, gpio_range, pin);
+ else if (ops->free)
+ ops->free(pctldev, pin);
+
module_put(pctldev->owner);
return owner;
@@ -877,13 +875,25 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const unsigned int ngroups,
void *data)
{
+ struct pinfunction func = PINCTRL_PINFUNCTION(name, groups, ngroups);
+
+ return pinmux_generic_add_pinfunction(pctldev, &func, data);
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_add_function);
+
+/**
+ * pinmux_generic_add_pinfunction() - adds a function group
+ * @pctldev: pin controller device
+ * @func: pinfunction structure describing the function group
+ * @data: pin controller driver specific data
+ */
+int pinmux_generic_add_pinfunction(struct pinctrl_dev *pctldev,
+ const struct pinfunction *func, void *data)
+{
struct function_desc *function;
int selector, error;
- if (!name)
- return -EINVAL;
-
- selector = pinmux_func_name_to_selector(pctldev, name);
+ selector = pinmux_func_name_to_selector(pctldev, func->name);
if (selector >= 0)
return selector;
@@ -893,7 +903,8 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
if (!function)
return -ENOMEM;
- *function = PINCTRL_FUNCTION_DESC(name, groups, ngroups, data);
+ function->func = *func;
+ function->data = data;
error = radix_tree_insert(&pctldev->pin_function_tree, selector, function);
if (error)
@@ -903,7 +914,7 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
return selector;
}
-EXPORT_SYMBOL_GPL(pinmux_generic_add_function);
+EXPORT_SYMBOL_GPL(pinmux_generic_add_pinfunction);
/**
* pinmux_generic_remove_function() - removes a numbered function
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index 2965ec20b77f..bdb5be1a636e 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -141,13 +141,6 @@ struct function_desc {
void *data;
};
-/* Convenient macro to define a generic pin function descriptor */
-#define PINCTRL_FUNCTION_DESC(_name, _grps, _num_grps, _data) \
-(struct function_desc) { \
- .func = PINCTRL_PINFUNCTION(_name, _grps, _num_grps), \
- .data = _data, \
-}
-
int pinmux_generic_get_function_count(struct pinctrl_dev *pctldev);
const char *
@@ -168,6 +161,9 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
unsigned int const ngroups,
void *data);
+int pinmux_generic_add_pinfunction(struct pinctrl_dev *pctldev,
+ const struct pinfunction *func, void *data);
+
int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
unsigned int selector);
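
For illustration only (not part of this series): a minimal sketch of how a pin controller driver could register its function table through the pinmux_generic_add_pinfunction() helper declared above, mirroring the equilibrium/ingenic/keembay conversions earlier in this patch. The foo_* names and the funcs/nfuncs table are hypothetical; the sketch assumes the usual pinctrl headers plus the private drivers/pinctrl/pinmux.h.

	/*
	 * Hypothetical example: register an array of struct pinfunction.
	 * pinmux_generic_add_pinfunction() returns the function selector
	 * (>= 0) on success or a negative errno on failure.
	 */
	static int foo_add_functions(struct pinctrl_dev *pctldev,
				     const struct pinfunction *funcs,
				     unsigned int nfuncs, void *drvdata)
	{
		unsigned int i;
		int ret;

		for (i = 0; i < nfuncs; i++) {
			ret = pinmux_generic_add_pinfunction(pctldev, &funcs[i],
							     drvdata);
			if (ret < 0)
				return ret;
		}

		return 0;
	}

Compared with pinmux_generic_add_function(), the caller passes the struct pinfunction as a whole instead of unpacking its name, groups and ngroups fields.
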
diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm
index 0bb44c9a4c06..6dad942b00a3 100644
--- a/drivers/pinctrl/qcom/Kconfig.msm
+++ b/drivers/pinctrl/qcom/Kconfig.msm
@@ -371,6 +371,14 @@ config PINCTRL_SM7150
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM7150 platform.
+config PINCTRL_MILOS
+ tristate "Qualcomm Technologies Inc Milos pin controller driver"
+ depends on ARM64 || COMPILE_TEST
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc Milos platform.
+
config PINCTRL_SM8150
tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 954f5291cc37..2acff520a285 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_PINCTRL_QCS8300) += pinctrl-qcs8300.o
obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o
obj-$(CONFIG_PINCTRL_MDM9607) += pinctrl-mdm9607.o
obj-$(CONFIG_PINCTRL_MDM9615) += pinctrl-mdm9615.o
+obj-$(CONFIG_PINCTRL_MILOS) += pinctrl-milos.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 57fefeb603f0..54c77e0b96e9 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -398,7 +398,7 @@ static const struct gpio_chip lpi_gpio_template = {
.direction_input = lpi_gpio_direction_input,
.direction_output = lpi_gpio_direction_output,
.get = lpi_gpio_get,
- .set_rv = lpi_gpio_set,
+ .set = lpi_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = lpi_gpio_dbg_show,
diff --git a/drivers/pinctrl/qcom/pinctrl-milos.c b/drivers/pinctrl/qcom/pinctrl-milos.c
new file mode 100644
index 000000000000..d11a7bbcd733
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-milos.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+#define REG_SIZE 0x1000
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \
+ { \
+ .grp = PINCTRL_PINGROUP("gpio" #id, \
+ gpio##id##_pins, \
+ ARRAY_SIZE(gpio##id##_pins)), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9, \
+ msm_mux_##f10, \
+ msm_mux_##f11 /* egpio mode */ \
+ }, \
+ .nfuncs = 12, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .i2c_pull_bit = 13, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 8, \
+ .intr_wakeup_enable_bit = 7, \
+ .intr_wakeup_present_bit = 6, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, ctl, io) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = io, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc milos_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "UFS_RESET"),
+ PINCTRL_PIN(168, "SDC2_CLK"),
+ PINCTRL_PIN(169, "SDC2_CMD"),
+ PINCTRL_PIN(170, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+
+static const unsigned int ufs_reset_pins[] = { 167 };
+static const unsigned int sdc2_clk_pins[] = { 168 };
+static const unsigned int sdc2_cmd_pins[] = { 169 };
+static const unsigned int sdc2_data_pins[] = { 170 };
+
+enum milos_functions {
+ msm_mux_gpio,
+ msm_mux_aoss_cti,
+ msm_mux_atest_char,
+ msm_mux_atest_usb,
+ msm_mux_audio_ext_mclk0,
+ msm_mux_audio_ext_mclk1,
+ msm_mux_audio_ref_clk,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async_in0,
+ msm_mux_cci_i2c_scl,
+ msm_mux_cci_i2c_sda,
+ msm_mux_cci_timer,
+ msm_mux_coex_uart1_rx,
+ msm_mux_coex_uart1_tx,
+ msm_mux_dbg_out_clk,
+ msm_mux_ddr_bist_complete,
+ msm_mux_ddr_bist_fail,
+ msm_mux_ddr_bist_start,
+ msm_mux_ddr_bist_stop,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_dp0_hot,
+ msm_mux_egpio,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_host2wlan_sol,
+ msm_mux_i2s0_data0,
+ msm_mux_i2s0_data1,
+ msm_mux_i2s0_sck,
+ msm_mux_i2s0_ws,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0_out,
+ msm_mux_mdp_vsync1_out,
+ msm_mux_mdp_vsync2_out,
+ msm_mux_mdp_vsync3_out,
+ msm_mux_mdp_vsync_e,
+ msm_mux_nav_gpio0,
+ msm_mux_nav_gpio1,
+ msm_mux_nav_gpio2,
+ msm_mux_pcie0_clk_req_n,
+ msm_mux_pcie1_clk_req_n,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist_sync,
+ msm_mux_pll_clk_aux,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qspi0,
+ msm_mux_qup0_se0,
+ msm_mux_qup0_se1,
+ msm_mux_qup0_se2,
+ msm_mux_qup0_se3,
+ msm_mux_qup0_se4,
+ msm_mux_qup0_se5,
+ msm_mux_qup0_se6,
+ msm_mux_qup1_se0,
+ msm_mux_qup1_se1,
+ msm_mux_qup1_se2,
+ msm_mux_qup1_se3,
+ msm_mux_qup1_se4,
+ msm_mux_qup1_se5,
+ msm_mux_qup1_se6,
+ msm_mux_resout_gpio_n,
+ msm_mux_sd_write_protect,
+ msm_mux_sdc1_clk,
+ msm_mux_sdc1_cmd,
+ msm_mux_sdc1_data,
+ msm_mux_sdc1_rclk,
+ msm_mux_sdc2_clk,
+ msm_mux_sdc2_cmd,
+ msm_mux_sdc2_data,
+ msm_mux_sdc2_fb_clk,
+ msm_mux_tb_trig_sdc1,
+ msm_mux_tb_trig_sdc2,
+ msm_mux_tgu_ch0_trigout,
+ msm_mux_tgu_ch1_trigout,
+ msm_mux_tmess_prng0,
+ msm_mux_tmess_prng1,
+ msm_mux_tmess_prng2,
+ msm_mux_tmess_prng3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim0_clk,
+ msm_mux_uim0_data,
+ msm_mux_uim0_present,
+ msm_mux_uim0_reset,
+ msm_mux_uim1_clk_mira,
+ msm_mux_uim1_clk_mirb,
+ msm_mux_uim1_data_mira,
+ msm_mux_uim1_data_mirb,
+ msm_mux_uim1_present_mira,
+ msm_mux_uim1_present_mirb,
+ msm_mux_uim1_reset_mira,
+ msm_mux_uim1_reset_mirb,
+ msm_mux_usb0_hs,
+ msm_mux_usb0_phy_ps,
+ msm_mux_vfr_0,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger_mirnat,
+ msm_mux_wcn_sw,
+ msm_mux_wcn_sw_ctrl,
+ msm_mux__,
+};
+
+static const char *const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5",
+ "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15", "gpio16", "gpio17",
+ "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", "gpio23",
+ "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41",
+ "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47",
+ "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53",
+ "gpio54", "gpio55", "gpio56", "gpio57", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65",
+ "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", "gpio71",
+ "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
+ "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95",
+ "gpio96", "gpio97", "gpio98", "gpio99", "gpio100", "gpio101",
+ "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113",
+ "gpio114", "gpio115", "gpio116", "gpio117", "gpio118", "gpio119",
+ "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
+ "gpio126", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131",
+ "gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137",
+ "gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143",
+ "gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+ "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155",
+ "gpio156", "gpio157", "gpio158", "gpio159", "gpio160", "gpio161",
+ "gpio162", "gpio163", "gpio164", "gpio165", "gpio166",
+};
+static const char *const resout_gpio_n_groups[] = {
+ "gpio39",
+};
+static const char *const sdc1_clk_groups[] = {
+ "gpio77",
+};
+static const char *const sdc1_cmd_groups[] = {
+ "gpio78",
+};
+static const char *const sdc1_data_groups[] = {
+ "gpio73", "gpio74", "gpio75", "gpio76", "gpio79", "gpio80",
+ "gpio81", "gpio82",
+};
+static const char *const sdc1_rclk_groups[] = {
+ "gpio72",
+};
+static const char *const aoss_cti_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio4",
+ "gpio5",
+};
+static const char *const atest_char_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47", "gpio63",
+};
+static const char *const atest_usb_groups[] = {
+ "gpio23", "gpio24", "gpio60",
+};
+static const char *const audio_ext_mclk0_groups[] = {
+ "gpio23",
+};
+static const char *const audio_ext_mclk1_groups[] = {
+ "gpio24",
+};
+static const char *const audio_ref_clk_groups[] = {
+ "gpio24",
+};
+static const char *const cam_mclk_groups[] = {
+ "gpio83", "gpio84", "gpio85", "gpio86", "gpio87",
+};
+static const char *const cci_async_in0_groups[] = {
+ "gpio86",
+};
+static const char *const cci_i2c_scl_groups[] = {
+ "gpio89", "gpio91", "gpio93", "gpio95",
+};
+static const char *const cci_i2c_sda_groups[] = {
+ "gpio88", "gpio90", "gpio92", "gpio94",
+};
+static const char *const cci_timer_groups[] = {
+ "gpio77", "gpio83", "gpio84", "gpio85",
+};
+static const char *const coex_uart1_rx_groups[] = {
+ "gpio64",
+};
+static const char *const coex_uart1_tx_groups[] = {
+ "gpio63",
+};
+static const char *const dbg_out_clk_groups[] = {
+ "gpio24",
+};
+static const char *const ddr_bist_complete_groups[] = {
+ "gpio137",
+};
+static const char *const ddr_bist_fail_groups[] = {
+ "gpio56",
+};
+static const char *const ddr_bist_start_groups[] = {
+ "gpio133",
+};
+static const char *const ddr_bist_stop_groups[] = {
+ "gpio47",
+};
+static const char *const ddr_pxi0_groups[] = {
+ "gpio23",
+ "gpio24",
+};
+static const char *const ddr_pxi1_groups[] = {
+ "gpio50",
+ "gpio51",
+};
+static const char *const dp0_hot_groups[] = {
+ "gpio75",
+};
+static const char *const egpio_groups[] = {
+ "gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137",
+ "gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143",
+ "gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+ "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155",
+ "gpio156", "gpio157", "gpio158", "gpio159", "gpio160", "gpio161",
+ "gpio162", "gpio163", "gpio164", "gpio165", "gpio166",
+};
+static const char *const gcc_gp1_groups[] = {
+ "gpio29",
+ "gpio32",
+};
+static const char *const gcc_gp2_groups[] = {
+ "gpio28",
+ "gpio30",
+};
+static const char *const gcc_gp3_groups[] = {
+ "gpio31",
+ "gpio33",
+};
+static const char *const host2wlan_sol_groups[] = {
+ "gpio46",
+};
+static const char *const i2s0_data0_groups[] = {
+ "gpio16",
+};
+static const char *const i2s0_data1_groups[] = {
+ "gpio17",
+};
+static const char *const i2s0_sck_groups[] = {
+ "gpio15",
+};
+static const char *const i2s0_ws_groups[] = {
+ "gpio18",
+};
+static const char *const ibi_i3c_groups[] = {
+ "gpio0", "gpio1", "gpio4", "gpio5",
+ "gpio32", "gpio33", "gpio36", "gpio37",
+};
+static const char *const jitter_bist_groups[] = {
+ "gpio141",
+};
+static const char *const mdp_vsync_groups[] = {
+ "gpio19",
+ "gpio37",
+ "gpio72",
+ "gpio129",
+};
+static const char *const mdp_vsync0_out_groups[] = {
+ "gpio12",
+};
+static const char *const mdp_vsync1_out_groups[] = {
+ "gpio12",
+};
+static const char *const mdp_vsync2_out_groups[] = {
+ "gpio40",
+};
+static const char *const mdp_vsync3_out_groups[] = {
+ "gpio40",
+};
+static const char *const mdp_vsync_e_groups[] = {
+ "gpio45",
+};
+static const char *const nav_gpio0_groups[] = {
+ "gpio124",
+};
+static const char *const nav_gpio1_groups[] = {
+ "gpio125",
+};
+static const char *const nav_gpio2_groups[] = {
+ "gpio126",
+};
+static const char *const pcie0_clk_req_n_groups[] = {
+ "gpio67",
+};
+static const char *const pcie1_clk_req_n_groups[] = {
+ "gpio70",
+};
+static const char *const phase_flag_groups[] = {
+ "gpio8", "gpio9", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio18", "gpio26", "gpio38", "gpio39",
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio44", "gpio45",
+ "gpio46", "gpio47", "gpio48", "gpio49", "gpio63", "gpio64",
+ "gpio127", "gpio138", "gpio139", "gpio140", "gpio142", "gpio143",
+ "gpio144", "gpio147",
+};
+static const char *const pll_bist_sync_groups[] = {
+ "gpio26",
+};
+static const char *const pll_clk_aux_groups[] = {
+ "gpio36",
+};
+static const char *const prng_rosc0_groups[] = {
+ "gpio66",
+};
+static const char *const prng_rosc1_groups[] = {
+ "gpio67",
+};
+static const char *const prng_rosc2_groups[] = {
+ "gpio68",
+};
+static const char *const prng_rosc3_groups[] = {
+ "gpio69",
+};
+static const char *const qdss_cti_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio44", "gpio45", "gpio54", "gpio87",
+};
+static const char *const qdss_gpio_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio46", "gpio47",
+ "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53",
+ "gpio83", "gpio84", "gpio85", "gpio86", "gpio88", "gpio89",
+ "gpio138", "gpio139", "gpio140", "gpio141", "gpio149", "gpio150",
+ "gpio155", "gpio156", "gpio157", "gpio158", "gpio159", "gpio160",
+ "gpio161", "gpio162", "gpio163", "gpio164", "gpio165", "gpio166",
+};
+static const char *const qlink0_enable_groups[] = {
+ "gpio105",
+};
+static const char *const qlink0_request_groups[] = {
+ "gpio104",
+};
+static const char *const qlink0_wmss_groups[] = {
+ "gpio106",
+};
+static const char *const qlink1_enable_groups[] = {
+ "gpio108",
+};
+static const char *const qlink1_request_groups[] = {
+ "gpio107",
+};
+static const char *const qlink1_wmss_groups[] = {
+ "gpio109",
+};
+static const char *const qspi0_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+};
+static const char *const qup0_se0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char *const qup0_se1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char *const qup0_se2_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+};
+static const char *const qup0_se3_groups[] = {
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio23", "gpio24", "gpio26",
+};
+static const char *const qup0_se4_groups[] = {
+ "gpio19", "gpio20", "gpio21", "gpio22",
+};
+static const char *const qup0_se5_groups[] = {
+ "gpio23", "gpio24", "gpio25", "gpio26",
+};
+static const char *const qup0_se6_groups[] = {
+ "gpio27", "gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char *const qup1_se0_groups[] = {
+ "gpio32", "gpio33", "gpio94", "gpio95",
+};
+static const char *const qup1_se1_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char *const qup1_se2_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio40", "gpio41", "gpio42", "gpio43",
+};
+static const char *const qup1_se3_groups[] = {
+ "gpio92", "gpio93", "gpio94", "gpio95",
+};
+static const char *const qup1_se4_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53", "gpio54",
+};
+static const char *const qup1_se5_groups[] = {
+ "gpio55", "gpio56", "gpio59", "gpio60",
+};
+static const char *const qup1_se6_groups[] = {
+ "gpio55", "gpio56", "gpio59", "gpio60", "gpio90", "gpio91",
+};
+static const char *const sd_write_protect_groups[] = {
+ "gpio4",
+};
+static const char *const sdc2_data_groups[] = {
+ "gpio34",
+ "gpio35",
+ "gpio57",
+ "gpio58",
+};
+static const char *const sdc2_clk_groups[] = {
+ "gpio62",
+};
+static const char *const sdc2_cmd_groups[] = {
+ "gpio61",
+};
+static const char *const sdc2_fb_clk_groups[] = {
+ "gpio128",
+};
+static const char *const tb_trig_sdc1_groups[] = {
+ "gpio87",
+};
+static const char *const tb_trig_sdc2_groups[] = {
+ "gpio78",
+};
+static const char *const tgu_ch0_trigout_groups[] = {
+ "gpio87",
+};
+static const char *const tgu_ch1_trigout_groups[] = {
+ "gpio88",
+};
+static const char *const tmess_prng0_groups[] = {
+ "gpio86",
+};
+static const char *const tmess_prng1_groups[] = {
+ "gpio83",
+};
+static const char *const tmess_prng2_groups[] = {
+ "gpio84",
+};
+static const char *const tmess_prng3_groups[] = {
+ "gpio85",
+};
+static const char *const tsense_pwm1_groups[] = {
+ "gpio17",
+};
+static const char *const tsense_pwm2_groups[] = {
+ "gpio17",
+};
+static const char *const uim0_clk_groups[] = {
+ "gpio97",
+};
+static const char *const uim0_data_groups[] = {
+ "gpio96",
+};
+static const char *const uim0_present_groups[] = {
+ "gpio99",
+};
+static const char *const uim0_reset_groups[] = {
+ "gpio98",
+};
+static const char *const uim1_clk_mira_groups[] = {
+ "gpio111",
+};
+static const char *const uim1_clk_mirb_groups[] = {
+ "gpio101",
+};
+static const char *const uim1_data_mira_groups[] = {
+ "gpio110",
+};
+static const char *const uim1_data_mirb_groups[] = {
+ "gpio100",
+};
+static const char *const uim1_present_mira_groups[] = {
+ "gpio113",
+};
+static const char *const uim1_present_mirb_groups[] = {
+ "gpio103",
+};
+static const char *const uim1_reset_mira_groups[] = {
+ "gpio112",
+};
+static const char *const uim1_reset_mirb_groups[] = {
+ "gpio102",
+};
+static const char *const usb0_hs_groups[] = {
+ "gpio125",
+};
+static const char *const usb0_phy_ps_groups[] = {
+ "gpio131",
+};
+static const char *const vfr_0_groups[] = {
+ "gpio56",
+};
+static const char *const vfr_1_groups[] = {
+ "gpio126",
+};
+static const char *const vsense_trigger_mirnat_groups[] = {
+ "gpio94",
+};
+static const char *const wcn_sw_groups[] = {
+ "gpio52",
+};
+static const char *const wcn_sw_ctrl_groups[] = {
+ "gpio45",
+};
+
+static const struct pinfunction milos_functions[] = {
+ MSM_PIN_FUNCTION(gpio),
+ MSM_PIN_FUNCTION(aoss_cti),
+ MSM_PIN_FUNCTION(atest_char),
+ MSM_PIN_FUNCTION(atest_usb),
+ MSM_PIN_FUNCTION(audio_ext_mclk0),
+ MSM_PIN_FUNCTION(audio_ext_mclk1),
+ MSM_PIN_FUNCTION(audio_ref_clk),
+ MSM_PIN_FUNCTION(cam_mclk),
+ MSM_PIN_FUNCTION(cci_async_in0),
+ MSM_PIN_FUNCTION(cci_i2c_scl),
+ MSM_PIN_FUNCTION(cci_i2c_sda),
+ MSM_PIN_FUNCTION(cci_timer),
+ MSM_PIN_FUNCTION(coex_uart1_rx),
+ MSM_PIN_FUNCTION(coex_uart1_tx),
+ MSM_PIN_FUNCTION(dbg_out_clk),
+ MSM_PIN_FUNCTION(ddr_bist_complete),
+ MSM_PIN_FUNCTION(ddr_bist_fail),
+ MSM_PIN_FUNCTION(ddr_bist_start),
+ MSM_PIN_FUNCTION(ddr_bist_stop),
+ MSM_PIN_FUNCTION(ddr_pxi0),
+ MSM_PIN_FUNCTION(ddr_pxi1),
+ MSM_PIN_FUNCTION(dp0_hot),
+ MSM_PIN_FUNCTION(egpio),
+ MSM_PIN_FUNCTION(gcc_gp1),
+ MSM_PIN_FUNCTION(gcc_gp2),
+ MSM_PIN_FUNCTION(gcc_gp3),
+ MSM_PIN_FUNCTION(host2wlan_sol),
+ MSM_PIN_FUNCTION(i2s0_data0),
+ MSM_PIN_FUNCTION(i2s0_data1),
+ MSM_PIN_FUNCTION(i2s0_sck),
+ MSM_PIN_FUNCTION(i2s0_ws),
+ MSM_PIN_FUNCTION(ibi_i3c),
+ MSM_PIN_FUNCTION(jitter_bist),
+ MSM_PIN_FUNCTION(mdp_vsync),
+ MSM_PIN_FUNCTION(mdp_vsync0_out),
+ MSM_PIN_FUNCTION(mdp_vsync1_out),
+ MSM_PIN_FUNCTION(mdp_vsync2_out),
+ MSM_PIN_FUNCTION(mdp_vsync3_out),
+ MSM_PIN_FUNCTION(mdp_vsync_e),
+ MSM_PIN_FUNCTION(nav_gpio0),
+ MSM_PIN_FUNCTION(nav_gpio1),
+ MSM_PIN_FUNCTION(nav_gpio2),
+ MSM_PIN_FUNCTION(pcie0_clk_req_n),
+ MSM_PIN_FUNCTION(pcie1_clk_req_n),
+ MSM_PIN_FUNCTION(phase_flag),
+ MSM_PIN_FUNCTION(pll_bist_sync),
+ MSM_PIN_FUNCTION(pll_clk_aux),
+ MSM_PIN_FUNCTION(prng_rosc0),
+ MSM_PIN_FUNCTION(prng_rosc1),
+ MSM_PIN_FUNCTION(prng_rosc2),
+ MSM_PIN_FUNCTION(prng_rosc3),
+ MSM_PIN_FUNCTION(qdss_cti),
+ MSM_PIN_FUNCTION(qdss_gpio),
+ MSM_PIN_FUNCTION(qlink0_enable),
+ MSM_PIN_FUNCTION(qlink0_request),
+ MSM_PIN_FUNCTION(qlink0_wmss),
+ MSM_PIN_FUNCTION(qlink1_enable),
+ MSM_PIN_FUNCTION(qlink1_request),
+ MSM_PIN_FUNCTION(qlink1_wmss),
+ MSM_PIN_FUNCTION(qspi0),
+ MSM_PIN_FUNCTION(qup0_se0),
+ MSM_PIN_FUNCTION(qup0_se1),
+ MSM_PIN_FUNCTION(qup0_se2),
+ MSM_PIN_FUNCTION(qup0_se3),
+ MSM_PIN_FUNCTION(qup0_se4),
+ MSM_PIN_FUNCTION(qup0_se5),
+ MSM_PIN_FUNCTION(qup0_se6),
+ MSM_PIN_FUNCTION(qup1_se0),
+ MSM_PIN_FUNCTION(qup1_se1),
+ MSM_PIN_FUNCTION(qup1_se2),
+ MSM_PIN_FUNCTION(qup1_se3),
+ MSM_PIN_FUNCTION(qup1_se4),
+ MSM_PIN_FUNCTION(qup1_se5),
+ MSM_PIN_FUNCTION(qup1_se6),
+ MSM_PIN_FUNCTION(resout_gpio_n),
+ MSM_PIN_FUNCTION(sd_write_protect),
+ MSM_PIN_FUNCTION(sdc1_clk),
+ MSM_PIN_FUNCTION(sdc1_cmd),
+ MSM_PIN_FUNCTION(sdc1_data),
+ MSM_PIN_FUNCTION(sdc1_rclk),
+ MSM_PIN_FUNCTION(sdc2_clk),
+ MSM_PIN_FUNCTION(sdc2_cmd),
+ MSM_PIN_FUNCTION(sdc2_data),
+ MSM_PIN_FUNCTION(sdc2_fb_clk),
+ MSM_PIN_FUNCTION(tb_trig_sdc1),
+ MSM_PIN_FUNCTION(tb_trig_sdc2),
+ MSM_PIN_FUNCTION(tgu_ch0_trigout),
+ MSM_PIN_FUNCTION(tgu_ch1_trigout),
+ MSM_PIN_FUNCTION(tmess_prng0),
+ MSM_PIN_FUNCTION(tmess_prng1),
+ MSM_PIN_FUNCTION(tmess_prng2),
+ MSM_PIN_FUNCTION(tmess_prng3),
+ MSM_PIN_FUNCTION(tsense_pwm1),
+ MSM_PIN_FUNCTION(tsense_pwm2),
+ MSM_PIN_FUNCTION(uim0_clk),
+ MSM_PIN_FUNCTION(uim0_data),
+ MSM_PIN_FUNCTION(uim0_present),
+ MSM_PIN_FUNCTION(uim0_reset),
+ MSM_PIN_FUNCTION(uim1_clk_mira),
+ MSM_PIN_FUNCTION(uim1_clk_mirb),
+ MSM_PIN_FUNCTION(uim1_data_mira),
+ MSM_PIN_FUNCTION(uim1_data_mirb),
+ MSM_PIN_FUNCTION(uim1_present_mira),
+ MSM_PIN_FUNCTION(uim1_present_mirb),
+ MSM_PIN_FUNCTION(uim1_reset_mira),
+ MSM_PIN_FUNCTION(uim1_reset_mirb),
+ MSM_PIN_FUNCTION(usb0_hs),
+ MSM_PIN_FUNCTION(usb0_phy_ps),
+ MSM_PIN_FUNCTION(vfr_0),
+ MSM_PIN_FUNCTION(vfr_1),
+ MSM_PIN_FUNCTION(vsense_trigger_mirnat),
+ MSM_PIN_FUNCTION(wcn_sw),
+ MSM_PIN_FUNCTION(wcn_sw_ctrl),
+};
+
+/*
+ * Every pin is maintained as a single group, and missing or non-existing
+ * pins are maintained as dummy groups to keep the pin group index in sync
+ * with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup milos_groups[] = {
+ [0] = PINGROUP(0, qup0_se0, ibi_i3c, aoss_cti, _, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, qup0_se0, ibi_i3c, aoss_cti, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup0_se0, _, _, _, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, qup0_se0, _, _, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, qup0_se1, ibi_i3c, aoss_cti, sd_write_protect, qdss_cti, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup0_se1, ibi_i3c, aoss_cti, qdss_cti, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup0_se1, qdss_cti, _, _, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qup0_se1, qdss_cti, _, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, qup0_se2, qspi0, _, phase_flag, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qup0_se2, qspi0, _, phase_flag, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, qup0_se2, qspi0, _, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup0_se2, qspi0, _, phase_flag, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup0_se2, qspi0, mdp_vsync0_out, mdp_vsync1_out, _, phase_flag, _, _, _, _, _),
+ [13] = PINGROUP(13, qup0_se2, qspi0, _, phase_flag, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup0_se2, qspi0, _, phase_flag, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup0_se3, i2s0_sck, _, phase_flag, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup0_se3, i2s0_data0, _, phase_flag, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, qup0_se3, i2s0_data1, tsense_pwm1, tsense_pwm2, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, qup0_se3, i2s0_ws, _, phase_flag, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup0_se4, mdp_vsync, _, _, _, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup0_se4, _, _, _, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup0_se4, _, _, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, qup0_se4, _, _, _, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, qup0_se5, qup0_se3, audio_ext_mclk0, _, atest_usb, ddr_pxi0, _, _, _, _, _),
+ [24] = PINGROUP(24, qup0_se5, qup0_se3, audio_ext_mclk1, audio_ref_clk, dbg_out_clk, _, atest_usb, ddr_pxi0, _, _, _),
+ [25] = PINGROUP(25, qup0_se5, _, _, _, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup0_se5, qup0_se3, pll_bist_sync, _, phase_flag, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup0_se6, _, _, _, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qup0_se6, gcc_gp2, _, _, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, qup0_se6, gcc_gp1, _, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, qup0_se6, gcc_gp2, _, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, qup0_se6, gcc_gp3, _, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup1_se0, ibi_i3c, gcc_gp1, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup1_se0, ibi_i3c, gcc_gp3, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, sdc2_data, _, _, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, sdc2_data, _, _, _, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup1_se1, qup1_se2, ibi_i3c, pll_clk_aux, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup1_se1, qup1_se2, ibi_i3c, mdp_vsync, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup1_se1, qup1_se2, _, phase_flag, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, qup1_se1, resout_gpio_n, _, phase_flag, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup1_se2, mdp_vsync2_out, mdp_vsync3_out, _, phase_flag, qdss_gpio, _, _, _, _, _),
+ [41] = PINGROUP(41, qup1_se2, _, phase_flag, qdss_gpio, _, _, _, _, _, _, _),
+ [42] = PINGROUP(42, qup1_se2, _, phase_flag, qdss_gpio, _, _, _, _, _, _, _),
+ [43] = PINGROUP(43, qup1_se2, _, _, phase_flag, qdss_gpio, _, _, _, _, _, _),
+ [44] = PINGROUP(44, _, _, phase_flag, qdss_cti, atest_char, _, _, _, _, _, _),
+ [45] = PINGROUP(45, wcn_sw_ctrl, mdp_vsync_e, _, _, phase_flag, qdss_cti, atest_char, _, _, _, _),
+ [46] = PINGROUP(46, host2wlan_sol, _, phase_flag, qdss_gpio, atest_char, _, _, _, _, _, _),
+ [47] = PINGROUP(47, ddr_bist_stop, _, phase_flag, qdss_gpio, atest_char, _, _, _, _, _, _),
+ [48] = PINGROUP(48, qup1_se4, _, phase_flag, qdss_gpio, _, _, _, _, _, _, _),
+ [49] = PINGROUP(49, qup1_se4, _, phase_flag, qdss_gpio, _, _, _, _, _, _, _),
+ [50] = PINGROUP(50, qup1_se4, qdss_gpio, ddr_pxi1, _, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, qup1_se4, qdss_gpio, ddr_pxi1, _, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, qup1_se4, wcn_sw, qdss_gpio, _, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, qup1_se4, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qup1_se4, qdss_cti, _, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qup1_se5, qup1_se6, _, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup1_se5, qup1_se6, vfr_0, ddr_bist_fail, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, sdc2_data, _, _, _, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, sdc2_data, _, _, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qup1_se6, _, qup1_se5, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, qup1_se6, _, qup1_se5, atest_usb, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, sdc2_cmd, _, _, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, sdc2_clk, _, _, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, coex_uart1_tx, _, phase_flag, atest_char, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, coex_uart1_rx, _, phase_flag, _, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, _, _, _, _, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, prng_rosc0, _, _, _, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, pcie0_clk_req_n, prng_rosc1, _, _, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, prng_rosc2, _, _, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, prng_rosc3, _, _, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, pcie1_clk_req_n, _, _, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, _, _, _, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, sdc1_rclk, mdp_vsync, _, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, sdc1_data, _, _, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, sdc1_data, _, _, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, sdc1_data, dp0_hot, _, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, sdc1_data, _, _, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, sdc1_clk, cci_timer, _, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, sdc1_cmd, tb_trig_sdc2, _, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, sdc1_data, _, _, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, sdc1_data, _, _, _, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, sdc1_data, _, _, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, sdc1_data, _, _, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, cam_mclk, cci_timer, tmess_prng1, qdss_gpio, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, cam_mclk, cci_timer, tmess_prng2, qdss_gpio, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, cam_mclk, cci_timer, tmess_prng3, qdss_gpio, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, cam_mclk, cci_async_in0, tmess_prng0, qdss_gpio, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, cam_mclk, tb_trig_sdc1, tgu_ch0_trigout, qdss_cti, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, cci_i2c_sda, tgu_ch1_trigout, _, qdss_gpio, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, cci_i2c_scl, _, qdss_gpio, _, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, cci_i2c_sda, qup1_se6, _, _, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, cci_i2c_scl, qup1_se6, _, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, cci_i2c_sda, qup1_se3, _, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, cci_i2c_scl, qup1_se3, _, _, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, cci_i2c_sda, qup1_se3, qup1_se0, _, vsense_trigger_mirnat, _, _, _, _, _, _),
+ [95] = PINGROUP(95, cci_i2c_scl, qup1_se3, qup1_se0, _, _, _, _, _, _, _, _),
+ [96] = PINGROUP(96, uim0_data, _, _, _, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, uim0_clk, _, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, uim0_reset, _, _, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, uim0_present, _, _, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, uim1_data_mirb, _, _, _, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, uim1_clk_mirb, _, _, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, uim1_reset_mirb, _, _, _, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, uim1_present_mirb, _, _, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, qlink0_request, _, _, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, qlink0_enable, _, _, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, qlink0_wmss, _, _, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, qlink1_request, _, _, _, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, qlink1_enable, _, _, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, qlink1_wmss, _, _, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, uim1_data_mira, _, _, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, uim1_clk_mira, _, _, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, uim1_reset_mira, _, _, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, uim1_present_mira, _, _, _, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, _, _, _, _, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, _, _, _, _, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, nav_gpio0, _, _, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, nav_gpio1, usb0_hs, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, _, nav_gpio2, vfr_1, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, _, _, phase_flag, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, sdc2_fb_clk, _, _, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, mdp_vsync, _, _, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, usb0_phy_ps, _, _, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _, _, egpio),
+ [133] = PINGROUP(133, ddr_bist_start, _, _, _, _, _, _, _, _, _, egpio),
+ [134] = PINGROUP(134, _, _, _, _, _, _, _, _, _, _, egpio),
+ [135] = PINGROUP(135, _, _, _, _, _, _, _, _, _, _, egpio),
+ [136] = PINGROUP(136, _, _, _, _, _, _, _, _, _, _, egpio),
+ [137] = PINGROUP(137, ddr_bist_complete, _, _, _, _, _, _, _, _, _, egpio),
+ [138] = PINGROUP(138, _, phase_flag, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [139] = PINGROUP(139, _, phase_flag, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [140] = PINGROUP(140, _, phase_flag, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [141] = PINGROUP(141, jitter_bist, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [142] = PINGROUP(142, _, phase_flag, _, _, _, _, _, _, _, _, egpio),
+ [143] = PINGROUP(143, _, phase_flag, _, _, _, _, _, _, _, _, egpio),
+ [144] = PINGROUP(144, _, phase_flag, _, _, _, _, _, _, _, _, egpio),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _, _, egpio),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _, _, egpio),
+ [147] = PINGROUP(147, _, phase_flag, _, _, _, _, _, _, _, _, egpio),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _, _, egpio),
+ [149] = PINGROUP(149, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [150] = PINGROUP(150, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [151] = PINGROUP(151, _, _, _, _, _, _, _, _, _, _, egpio),
+ [152] = PINGROUP(152, _, _, _, _, _, _, _, _, _, _, egpio),
+ [153] = PINGROUP(153, _, _, _, _, _, _, _, _, _, _, egpio),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _, _, egpio),
+ [155] = PINGROUP(155, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [156] = PINGROUP(156, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [157] = PINGROUP(157, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [158] = PINGROUP(158, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [159] = PINGROUP(159, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [160] = PINGROUP(160, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [161] = PINGROUP(161, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [162] = PINGROUP(162, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [163] = PINGROUP(163, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [164] = PINGROUP(164, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [165] = PINGROUP(165, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [166] = PINGROUP(166, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [167] = UFS_RESET(ufs_reset, 0xb4004, 0xb5000),
+ [168] = SDC_QDSD_PINGROUP(sdc2_clk, 0xab000, 0, 6),
+ [169] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xab000, 12, 3),
+ [170] = SDC_QDSD_PINGROUP(sdc2_data, 0xab000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map milos_pdc_map[] = {
+ { 0, 122 }, { 3, 95 }, { 4, 100 }, { 6, 52 }, { 7, 119 },
+ { 8, 92 }, { 11, 54 }, { 12, 56 }, { 13, 64 }, { 14, 75 },
+ { 15, 82 }, { 18, 89 }, { 19, 90 }, { 22, 93 }, { 23, 94 },
+ { 26, 91 }, { 27, 57 }, { 30, 138 }, { 31, 96 }, { 32, 67 },
+ { 34, 128 }, { 35, 98 }, { 36, 99 }, { 38, 101 }, { 39, 102 },
+ { 40, 69 }, { 43, 103 }, { 44, 104 }, { 45, 126 }, { 47, 59 },
+ { 48, 106 }, { 51, 107 }, { 52, 108 }, { 54, 110 }, { 55, 140 },
+ { 56, 58 }, { 57, 129 }, { 58, 111 }, { 59, 112 }, { 60, 115 },
+ { 61, 113 }, { 62, 114 }, { 64, 105 }, { 65, 55 }, { 67, 116 },
+ { 68, 117 }, { 70, 120 }, { 71, 121 }, { 72, 97 }, { 73, 109 },
+ { 74, 118 }, { 75, 132 }, { 76, 144 }, { 77, 127 }, { 78, 133 },
+ { 79, 134 }, { 80, 135 }, { 81, 124 }, { 82, 136 }, { 87, 60 },
+ { 91, 123 }, { 92, 125 }, { 95, 139 }, { 99, 53 }, { 103, 61 },
+ { 104, 71 }, { 107, 137 }, { 113, 51 }, { 124, 72 }, { 125, 62 },
+ { 126, 73 }, { 128, 63 }, { 129, 130 }, { 130, 65 }, { 131, 66 },
+ { 133, 68 }, { 136, 70 }, { 143, 78 }, { 144, 79 }, { 145, 142 },
+ { 148, 81 }, { 149, 76 }, { 150, 83 }, { 151, 84 }, { 153, 74 },
+ { 155, 131 }, { 158, 85 }, { 159, 77 }, { 161, 80 }, { 162, 143 },
+ { 163, 86 }, { 164, 87 }, { 166, 88 },
+};
+
+static const struct msm_pinctrl_soc_data milos_tlmm = {
+ .pins = milos_pins,
+ .npins = ARRAY_SIZE(milos_pins),
+ .functions = milos_functions,
+ .nfunctions = ARRAY_SIZE(milos_functions),
+ .groups = milos_groups,
+ .ngroups = ARRAY_SIZE(milos_groups),
+ .ngpios = 168,
+ .wakeirq_map = milos_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(milos_pdc_map),
+ .egpio_func = 11,
+};
+
+static int milos_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &milos_tlmm);
+}
+
+static const struct of_device_id milos_tlmm_of_match[] = {
+ { .compatible = "qcom,milos-tlmm" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver milos_tlmm_driver = {
+ .driver = {
+ .name = "milos-tlmm",
+ .of_match_table = milos_tlmm_of_match,
+ },
+ .probe = milos_tlmm_probe,
+};
+
+static int __init milos_tlmm_init(void)
+{
+ return platform_driver_register(&milos_tlmm_driver);
+}
+arch_initcall(milos_tlmm_init);
+
+static void __exit milos_tlmm_exit(void)
+{
+ platform_driver_unregister(&milos_tlmm_driver);
+}
+module_exit(milos_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI Milos TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, milos_tlmm_of_match);
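As a reading aid for the Milos tables above — a sketch, not part of the patch, and it assumes the usual Qualcomm TLMM conventions for the wakeirq map field order and the PINGROUP function columns:

/*
 * Assumed conventions, illustrative only:
 *  - each milos_pdc_map entry is { gpio, wakeirq }, so { 0, 122 } routes
 *    wake events for gpio0 through PDC line 122;
 *  - .egpio_func = 11 selects the last of the eleven function columns in
 *    each PINGROUP entry, which is where "egpio" appears for gpio132-166.
 */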
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index f713c80d7f3e..83eb075b6bfa 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -792,7 +792,7 @@ static const struct gpio_chip msm_gpio_template = {
.direction_output = msm_gpio_direction_output,
.get_direction = msm_gpio_get_direction,
.get = msm_gpio_get,
- .set_rv = msm_gpio_set,
+ .set = msm_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = msm_gpio_dbg_show,
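The .set_rv to .set rename here (and in the SPMI/SSBI hunks that follow) tracks gpiolib's switch to int-returning setters; the Renesas, SPEAr and StarFive conversions later in this series show the other half of that move, turning void setters into ones that return an errno. A minimal sketch of the resulting callback shape, with every identifier below hypothetical:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

struct example_priv {
	void __iomem *dout_set;	/* write-1-to-set register (hypothetical) */
	void __iomem *dout_clr;	/* write-1-to-clear register (hypothetical) */
};

/* Returning 0 or a negative errno lets validation and hardware failures
 * propagate to gpiolib instead of being silently dropped. */
static int example_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct example_priv *priv = gpiochip_get_data(gc);

	if (offset >= gc->ngpio)
		return -EINVAL;

	writel(BIT(offset), value ? priv->dout_set : priv->dout_clr);
	return 0;
}

static const struct gpio_chip example_template = {
	.set = example_gpio_set,
};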
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index bc082bfb52ef..b7b15874e488 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -802,7 +802,7 @@ static const struct gpio_chip pmic_gpio_gpio_template = {
.direction_input = pmic_gpio_direction_input,
.direction_output = pmic_gpio_direction_output,
.get = pmic_gpio_get,
- .set_rv = pmic_gpio_set,
+ .set = pmic_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.of_xlate = pmic_gpio_of_xlate,
@@ -1206,6 +1206,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm6450-gpio", .data = (void *) 9 },
{ .compatible = "qcom,pm7250b-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm7550-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pm7550ba-gpio", .data = (void *) 8},
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8019-gpio", .data = (void *) 6 },
@@ -1244,6 +1245,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 },
{ .compatible = "qcom,pmih0108-gpio", .data = (void *) 18 },
+ { .compatible = "qcom,pmiv0104-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmk8550-gpio", .data = (void *) 6 },
{ .compatible = "qcom,pmm8155au-gpio", .data = (void *) 10 },
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index ba9084978f90..22d76b1013a3 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -638,7 +638,7 @@ static const struct gpio_chip pmic_mpp_gpio_template = {
.direction_input = pmic_mpp_direction_input,
.direction_output = pmic_mpp_direction_output,
.get = pmic_mpp_get,
- .set_rv = pmic_mpp_set,
+ .set = pmic_mpp_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.of_xlate = pmic_mpp_of_xlate,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 3a8014ebf064..fb37b1c1acb4 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -597,7 +597,7 @@ static const struct gpio_chip pm8xxx_gpio_template = {
.direction_input = pm8xxx_gpio_direction_input,
.direction_output = pm8xxx_gpio_direction_output,
.get = pm8xxx_gpio_get,
- .set_rv = pm8xxx_gpio_set,
+ .set = pm8xxx_gpio_set,
.of_xlate = pm8xxx_gpio_of_xlate,
.dbg_show = pm8xxx_gpio_dbg_show,
.owner = THIS_MODULE,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 087c37d304fc..6103849af042 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -634,7 +634,7 @@ static const struct gpio_chip pm8xxx_mpp_template = {
.direction_input = pm8xxx_mpp_direction_input,
.direction_output = pm8xxx_mpp_direction_output,
.get = pm8xxx_mpp_get,
- .set_rv = pm8xxx_mpp_set,
+ .set = pm8xxx_mpp_set,
.of_xlate = pm8xxx_mpp_of_xlate,
.dbg_show = pm8xxx_mpp_dbg_show,
.owner = THIS_MODULE,
diff --git a/drivers/pinctrl/qcom/tlmm-test.c b/drivers/pinctrl/qcom/tlmm-test.c
index 7b99e89e0f67..7d7fff538755 100644
--- a/drivers/pinctrl/qcom/tlmm-test.c
+++ b/drivers/pinctrl/qcom/tlmm-test.c
@@ -16,6 +16,7 @@
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
/*
* This TLMM test module serves the purpose of validating that the TLMM driver
@@ -38,7 +39,10 @@
#define TLMM_REG_SIZE 0x1000
static int tlmm_test_gpio = -1;
+static char *tlmm_reg_name = "default_region";
+
module_param_named(gpio, tlmm_test_gpio, int, 0600);
+module_param_named(name, tlmm_reg_name, charp, 0600);
static struct {
void __iomem *base;
@@ -570,6 +574,47 @@ static const struct of_device_id tlmm_of_match[] = {
{}
};
+static int tlmm_reg_base(struct device_node *tlmm, struct resource *res)
+{
+ const char **reg_names;
+ int count;
+ int ret;
+ int i;
+
+ count = of_property_count_strings(tlmm, "reg-names");
+ if (count <= 0) {
+ pr_err("failed to find tlmm reg name\n");
+ return count;
+ }
+
+ reg_names = kcalloc(count, sizeof(char *), GFP_KERNEL);
+ if (!reg_names)
+ return -ENOMEM;
+
+ ret = of_property_read_string_array(tlmm, "reg-names", reg_names, count);
+ if (ret != count) {
+ kfree(reg_names);
+ return -EINVAL;
+ }
+
+ if (!strcmp(tlmm_reg_name, "default_region")) {
+ ret = of_address_to_resource(tlmm, 0, res);
+ } else {
+ for (i = 0; i < count; i++) {
+ if (!strcmp(reg_names[i], tlmm_reg_name)) {
+ ret = of_address_to_resource(tlmm, i, res);
+ break;
+ }
+ }
+ if (i == count)
+ ret = -EINVAL;
+ }
+
+ kfree(reg_names);
+
+ return ret;
+}
+
static int tlmm_test_init_suite(struct kunit_suite *suite)
{
struct of_phandle_args args = {};
@@ -588,7 +633,7 @@ static int tlmm_test_init_suite(struct kunit_suite *suite)
return -EINVAL;
}
- ret = of_address_to_resource(tlmm, 0, &res);
+ ret = tlmm_reg_base(tlmm, &res);
if (ret < 0)
return ret;
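The tlmm-test helper above walks "reg-names" by hand; an equivalent, more compact lookup could lean on of_property_match_string(), which returns the matching index or a negative errno. A sketch for comparison only — it reuses the tlmm_reg_name parameter introduced in the hunk above and is not part of the patch:

static int tlmm_reg_base_alt(struct device_node *tlmm, struct resource *res)
{
	int index = 0;

	/* "default_region" keeps the default behaviour of mapping region 0. */
	if (strcmp(tlmm_reg_name, "default_region")) {
		index = of_property_match_string(tlmm, "reg-names", tlmm_reg_name);
		if (index < 0)
			return index;
	}

	return of_address_to_resource(tlmm, index, res);
}

With the new parameter in place, the test suite can be pointed at a named register region at module load time, using the gpio= and name= parameters defined in the hunk above.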
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index e16034fc1bbf..99ae34a56871 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -86,89 +86,178 @@ config PINCTRL_PFC_EMEV2
bool "pin control support for Emma Mobile EV2" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77995
- bool "pin control support for R-Car D3" if COMPILE_TEST
+config PINCTRL_PFC_R8A73A4
+ bool "pin control support for R8A73A4 (R-Mobile APE6)" if COMPILE_TEST
+ select PINCTRL_SH_PFC_GPIO
+
+config PINCTRL_PFC_R8A7740
+ bool "pin control support for R8A7740 (R-Mobile A1)" if COMPILE_TEST
+ select PINCTRL_SH_PFC_GPIO
+
+config PINCTRL_PFC_R8A7742
+ bool "pin control support for R8A7742 (RZ/G1H)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7794
- bool "pin control support for R-Car E2" if COMPILE_TEST
+config PINCTRL_PFC_R8A7743
+ bool "pin control support for R8A7743 (RZ/G1M)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77990
- bool "pin control support for R-Car E3" if COMPILE_TEST
+config PINCTRL_PFC_R8A7744
+ bool "pin control support for R8A7744 (RZ/G1N)" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A7745
+ bool "pin control support for R8A7745 (RZ/G1E)" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A77470
+ bool "pin control support for R8A77470 (RZ/G1C)" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A774A1
+ bool "pin control support for R8A774A1 (RZ/G2M)" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A774B1
+ bool "pin control support for R8A774B1 (RZ/G2N)" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A774C0
+ bool "pin control support for R8A774C0 (RZ/G2E)" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A774E1
+ bool "pin control support for R8A774E1 (RZ/G2H)" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A7778
+ bool "pin control support for R8A7778 (R-Car M1A)" if COMPILE_TEST
select PINCTRL_SH_PFC
config PINCTRL_PFC_R8A7779
- bool "pin control support for R-Car H1" if COMPILE_TEST
+ bool "pin control support for R8A7779 (R-Car H1)" if COMPILE_TEST
select PINCTRL_SH_PFC
config PINCTRL_PFC_R8A7790
- bool "pin control support for R-Car H2" if COMPILE_TEST
+ bool "pin control support for R8A7790 (R-Car H2)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77951
- bool "pin control support for R-Car H3 ES2.0+" if COMPILE_TEST
+config PINCTRL_PFC_R8A7791
+ bool "pin control support for R8A7791 (R-Car M2-W)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7778
- bool "pin control support for R-Car M1A" if COMPILE_TEST
+config PINCTRL_PFC_R8A7792
+ bool "pin control support for R8A7792 (R-Car V2H)" if COMPILE_TEST
select PINCTRL_SH_PFC
config PINCTRL_PFC_R8A7793
- bool "pin control support for R-Car M2-N" if COMPILE_TEST
+ bool "pin control support for R8A7793 (R-Car M2-N)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7791
- bool "pin control support for R-Car M2-W" if COMPILE_TEST
+config PINCTRL_PFC_R8A7794
+ bool "pin control support for R8A7794 (R-Car E2)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77965
- bool "pin control support for R-Car M3-N" if COMPILE_TEST
+config PINCTRL_PFC_R8A77951
+ bool "pin control support for R8A77951 (R-Car H3 ES2.0+)" if COMPILE_TEST
select PINCTRL_SH_PFC
config PINCTRL_PFC_R8A77960
- bool "pin control support for R-Car M3-W" if COMPILE_TEST
+ bool "pin control support for R8A77960 (R-Car M3-W)" if COMPILE_TEST
select PINCTRL_SH_PFC
config PINCTRL_PFC_R8A77961
- bool "pin control support for R-Car M3-W+" if COMPILE_TEST
+ bool "pin control support for R8A77961 (R-Car M3-W+)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A779F0
- bool "pin control support for R-Car S4-8" if COMPILE_TEST
+config PINCTRL_PFC_R8A77965
+ bool "pin control support for R8A77965 (R-Car M3-N)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7792
- bool "pin control support for R-Car V2H" if COMPILE_TEST
+config PINCTRL_PFC_R8A77970
+ bool "pin control support for R8A77970 (R-Car V3M)" if COMPILE_TEST
select PINCTRL_SH_PFC
config PINCTRL_PFC_R8A77980
- bool "pin control support for R-Car V3H" if COMPILE_TEST
+ bool "pin control support for R8A77980 (R-Car V3H)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77970
- bool "pin control support for R-Car V3M" if COMPILE_TEST
+config PINCTRL_PFC_R8A77990
+ bool "pin control support for R8A77990 (R-Car E3)" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A77995
+ bool "pin control support for R8A77995 (R-Car D3)" if COMPILE_TEST
select PINCTRL_SH_PFC
config PINCTRL_PFC_R8A779A0
- bool "pin control support for R-Car V3U" if COMPILE_TEST
+ bool "pin control support for R8A779A0 (R-Car V3U)" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A779F0
+ bool "pin control support for R8A779F0 (R-Car S4-8)" if COMPILE_TEST
select PINCTRL_SH_PFC
config PINCTRL_PFC_R8A779G0
- bool "pin control support for R-Car V4H" if COMPILE_TEST
+ bool "pin control support for R8A779G0 (R-Car V4H)" if COMPILE_TEST
select PINCTRL_SH_PFC
config PINCTRL_PFC_R8A779H0
- bool "pin control support for R-Car V4M" if COMPILE_TEST
+ bool "pin control support for R8A779H0 (R-Car V4M)" if COMPILE_TEST
select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7740
- bool "pin control support for R-Mobile A1" if COMPILE_TEST
- select PINCTRL_SH_PFC_GPIO
+config PINCTRL_PFC_SH7203
+ bool "pin control support for SH7203" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
-config PINCTRL_PFC_R8A73A4
- bool "pin control support for R-Mobile APE6" if COMPILE_TEST
+config PINCTRL_PFC_SH7264
+ bool "pin control support for SH7264" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH7269
+ bool "pin control support for SH7269" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH73A0
+ bool "pin control support for SH73A0 (SH-Mobile AG5)" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
+ select REGULATOR
+
+config PINCTRL_PFC_SH7720
+ bool "pin control support for SH7720" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH7722
+ bool "pin control support for SH7722" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH7723
+ bool "pin control support for SH7723 (SH-Mobile R2)" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH7724
+ bool "pin control support for SH7724 (SH-Mobile R2R)" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH7734
+ bool "pin control support for SH7734" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH7757
+ bool "pin control support for SH7757" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH7785
+ bool "pin control support for SH7785" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH7786
+ bool "pin control support for SH7786" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SHX3
+ bool "pin control support for SH-X3" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
config PINCTRL_RZA1
bool "pin control support for RZ/A1"
@@ -204,42 +293,6 @@ config PINCTRL_RZG2L
This selects GPIO and pinctrl driver for Renesas RZ/{G2L,G2UL,V2L}
platforms.
-config PINCTRL_PFC_R8A77470
- bool "pin control support for RZ/G1C" if COMPILE_TEST
- select PINCTRL_SH_PFC
-
-config PINCTRL_PFC_R8A7745
- bool "pin control support for RZ/G1E" if COMPILE_TEST
- select PINCTRL_SH_PFC
-
-config PINCTRL_PFC_R8A7742
- bool "pin control support for RZ/G1H" if COMPILE_TEST
- select PINCTRL_SH_PFC
-
-config PINCTRL_PFC_R8A7743
- bool "pin control support for RZ/G1M" if COMPILE_TEST
- select PINCTRL_SH_PFC
-
-config PINCTRL_PFC_R8A7744
- bool "pin control support for RZ/G1N" if COMPILE_TEST
- select PINCTRL_SH_PFC
-
-config PINCTRL_PFC_R8A774C0
- bool "pin control support for RZ/G2E" if COMPILE_TEST
- select PINCTRL_SH_PFC
-
-config PINCTRL_PFC_R8A774E1
- bool "pin control support for RZ/G2H" if COMPILE_TEST
- select PINCTRL_SH_PFC
-
-config PINCTRL_PFC_R8A774A1
- bool "pin control support for RZ/G2M" if COMPILE_TEST
- select PINCTRL_SH_PFC
-
-config PINCTRL_PFC_R8A774B1
- bool "pin control support for RZ/G2N" if COMPILE_TEST
- select PINCTRL_SH_PFC
-
config PINCTRL_RZN1
bool "pin control support for RZ/N1"
depends on OF
@@ -250,9 +303,8 @@ config PINCTRL_RZN1
This selects pinctrl driver for Renesas RZ/N1 devices.
config PINCTRL_RZV2M
- bool "pin control support for RZ/V2M"
+ bool "pin control support for RZ/V2M" if COMPILE_TEST
depends on OF
- depends on ARCH_R9A09G011 || COMPILE_TEST
select GPIOLIB
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
@@ -261,57 +313,4 @@ config PINCTRL_RZV2M
This selects GPIO and pinctrl driver for Renesas RZ/V2M
platforms.
-config PINCTRL_PFC_SH7203
- bool "pin control support for SH7203" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7264
- bool "pin control support for SH7264" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7269
- bool "pin control support for SH7269" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7720
- bool "pin control support for SH7720" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7722
- bool "pin control support for SH7722" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7734
- bool "pin control support for SH7734" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7757
- bool "pin control support for SH7757" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7785
- bool "pin control support for SH7785" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7786
- bool "pin control support for SH7786" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH73A0
- bool "pin control support for SH-Mobile AG5" if COMPILE_TEST
- select PINCTRL_SH_PFC_GPIO
- select REGULATOR
-
-config PINCTRL_PFC_SH7723
- bool "pin control support for SH-Mobile R2" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7724
- bool "pin control support for SH-Mobile R2R" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SHX3
- bool "pin control support for SH-X3" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
endmenu
diff --git a/drivers/pinctrl/renesas/gpio.c b/drivers/pinctrl/renesas/gpio.c
index a5136dacaaf2..2293af642849 100644
--- a/drivers/pinctrl/renesas/gpio.c
+++ b/drivers/pinctrl/renesas/gpio.c
@@ -189,9 +189,11 @@ static int gpio_pin_get(struct gpio_chip *gc, unsigned offset)
return (gpio_read_data_reg(chip, reg->info) >> pos) & 1;
}
-static void gpio_pin_set(struct gpio_chip *gc, unsigned offset, int value)
+static int gpio_pin_set(struct gpio_chip *gc, unsigned int offset, int value)
{
gpio_pin_set_value(gpiochip_get_data(gc), offset, value);
+
+ return 0;
}
static int gpio_pin_to_irq(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index b1058504e0bb..23812116ef42 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -830,12 +830,13 @@ static int rza1_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return rza1_pin_get(port, gpio);
}
-static void rza1_gpio_set(struct gpio_chip *chip, unsigned int gpio,
- int value)
+static int rza1_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct rza1_port *port = gpiochip_get_data(chip);
rza1_pin_set(port, gpio, value);
+
+ return 0;
}
static const struct gpio_chip rza1_gpiochip_template = {
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index 3b5812963850..b78b5b4ec5af 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -172,8 +172,7 @@ static int rza2_chip_get(struct gpio_chip *chip, unsigned int offset)
return !!(readb(priv->base + RZA2_PIDR(port)) & BIT(pin));
}
-static void rza2_chip_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int rza2_chip_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct rza2_pinctrl_priv *priv = gpiochip_get_data(chip);
u8 port = RZA2_PIN_ID_TO_PORT(offset);
@@ -188,6 +187,8 @@ static void rza2_chip_set(struct gpio_chip *chip, unsigned int offset,
new_value &= ~BIT(pin);
writeb(new_value, priv->base + RZA2_PODR(port));
+
+ return 0;
}
static int rza2_chip_direction_output(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 78fa08ff0faa..c52263c2a7b0 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -493,6 +493,23 @@ static void rzv2h_pmc_writeb(struct rzg2l_pinctrl *pctrl, u8 val, u16 offset)
writeb(pwpr & ~PWPR_REGWE_A, pctrl->base + regs->pwpr);
}
+static int rzg2l_validate_pin(struct rzg2l_pinctrl *pctrl,
+ u64 cfg, u32 port, u8 bit)
+{
+ u8 pinmap = FIELD_GET(PIN_CFG_PIN_MAP_MASK, cfg);
+ u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
+ u64 data;
+
+ if (!(pinmap & BIT(bit)) || port >= pctrl->data->n_port_pins)
+ return -EINVAL;
+
+ data = pctrl->data->port_pin_configs[port];
+ if (off != RZG2L_PIN_CFG_TO_PORT_OFFSET(data))
+ return -EINVAL;
+
+ return 0;
+}
+
static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
u8 pin, u8 off, u8 func)
{
@@ -536,6 +553,7 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned int i, *psel_val;
struct group_desc *group;
const unsigned int *pins;
+ int ret;
func = pinmux_generic_get_function(pctldev, func_selector);
if (!func)
@@ -552,6 +570,10 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u32 pin = RZG2L_PIN_ID_TO_PIN(pins[i]);
+ ret = rzg2l_validate_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(pins[i]), pin);
+ if (ret)
+ return ret;
+
dev_dbg(pctrl->dev, "port:%u pin: %u off:%x PSEL:%u\n",
RZG2L_PIN_ID_TO_PORT(pins[i]), pin, off, psel_val[i] - hwcfg->func_base);
@@ -806,23 +828,6 @@ done:
return ret;
}
-static int rzg2l_validate_gpio_pin(struct rzg2l_pinctrl *pctrl,
- u64 cfg, u32 port, u8 bit)
-{
- u8 pinmap = FIELD_GET(PIN_CFG_PIN_MAP_MASK, cfg);
- u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(cfg);
- u64 data;
-
- if (!(pinmap & BIT(bit)) || port >= pctrl->data->n_port_pins)
- return -EINVAL;
-
- data = pctrl->data->port_pin_configs[port];
- if (off != RZG2L_PIN_CFG_TO_PORT_OFFSET(data))
- return -EINVAL;
-
- return 0;
-}
-
static u32 rzg2l_read_pin_config(struct rzg2l_pinctrl *pctrl, u32 offset,
u8 bit, u32 mask)
{
@@ -1287,7 +1292,7 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
} else {
bit = RZG2L_PIN_ID_TO_PIN(_pin);
- if (rzg2l_validate_gpio_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
+ if (rzg2l_validate_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
return -EINVAL;
}
@@ -1447,7 +1452,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
} else {
bit = RZG2L_PIN_ID_TO_PIN(_pin);
- if (rzg2l_validate_gpio_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
+ if (rzg2l_validate_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
return -EINVAL;
}
@@ -1687,7 +1692,7 @@ static int rzg2l_gpio_request(struct gpio_chip *chip, unsigned int offset)
u8 reg8;
int ret;
- ret = rzg2l_validate_gpio_pin(pctrl, *pin_data, port, bit);
+ ret = rzg2l_validate_pin(pctrl, *pin_data, port, bit);
if (ret)
return ret;
@@ -1758,8 +1763,8 @@ static int rzg2l_gpio_direction_input(struct gpio_chip *chip,
return 0;
}
-static void rzg2l_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int rzg2l_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct rzg2l_pinctrl *pctrl = gpiochip_get_data(chip);
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[offset];
@@ -1779,6 +1784,8 @@ static void rzg2l_gpio_set(struct gpio_chip *chip, unsigned int offset,
writeb(reg8 & ~BIT(bit), pctrl->base + P(off));
spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
}
static int rzg2l_gpio_direction_output(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c
index d442d4f9981c..fb874867dbfb 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzn1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c
@@ -680,6 +680,8 @@ static struct pinctrl_desc rzn1_pinctrl_desc = {
.pmxops = &rzn1_pmx_ops,
.confops = &rzn1_pinconf_ops,
.owner = THIS_MODULE,
+ .pins = rzn1_pins,
+ .npins = ARRAY_SIZE(rzn1_pins),
};
static int rzn1_pinctrl_parse_groups(struct device_node *np,
@@ -878,8 +880,6 @@ static int rzn1_pinctrl_probe(struct platform_device *pdev)
ipctl->dev = &pdev->dev;
rzn1_pinctrl_desc.name = dev_name(&pdev->dev);
- rzn1_pinctrl_desc.pins = rzn1_pins;
- rzn1_pinctrl_desc.npins = ARRAY_SIZE(rzn1_pins);
ret = rzn1_pinctrl_probe_dt(pdev, ipctl);
if (ret) {
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index 8c7169db4fcc..daaa986d994d 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -790,14 +790,16 @@ static int rzv2m_gpio_direction_input(struct gpio_chip *chip,
return 0;
}
-static void rzv2m_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int rzv2m_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct rzv2m_pinctrl *pctrl = gpiochip_get_data(chip);
u32 port = RZV2M_PIN_ID_TO_PORT(offset);
u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
rzv2m_writel_we(pctrl->base + DO(port), bit, !!value);
+
+ return 0;
}
static int rzv2m_gpio_direction_output(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index 9fd894729a7b..5fe7c4b9f7bd 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -1405,7 +1405,7 @@ static const struct samsung_pin_bank_data exynosautov920_pin_banks7[] = {
EXYNOSV920_PIN_BANK_EINTG(8, 0x8000, "gpg1", 0x18, 0x24, 0x28),
};
-static const struct samsung_retention_data exynosautov920_retention_data __initconst = {
+static const struct samsung_retention_data no_retention_data __initconst = {
.regs = NULL,
.nr_regs = 0,
.value = 0,
@@ -1421,7 +1421,7 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynosautov920_pinctrl_suspend,
.resume = exynosautov920_pinctrl_resume,
- .retention_data = &exynosautov920_retention_data,
+ .retention_data = &no_retention_data,
}, {
/* pin-controller instance 1 AUD data */
.pin_banks = exynosautov920_pin_banks1,
@@ -1764,6 +1764,7 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = gs101_pinctrl_suspend,
.resume = gs101_pinctrl_resume,
+ .retention_data = &no_retention_data,
}, {
/* pin banks of gs101 pin-controller (FAR_ALIVE) */
.pin_banks = gs101_pin_far_alive,
@@ -1771,6 +1772,7 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = gs101_pinctrl_suspend,
.resume = gs101_pinctrl_resume,
+ .retention_data = &no_retention_data,
}, {
/* pin banks of gs101 pin-controller (GSACORE) */
.pin_banks = gs101_pin_gsacore,
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index f3e1c11abe55..81fe0b08a9af 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -32,18 +32,24 @@
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
+#define MAX_WAKEUP_REG 3
+
struct exynos_irq_chip {
struct irq_chip chip;
u32 eint_con;
u32 eint_mask;
u32 eint_pend;
- u32 *eint_wake_mask_value;
+ u32 eint_num_wakeup_reg;
u32 eint_wake_mask_reg;
void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata,
struct exynos_irq_chip *irq_chip);
};
+static u32 eint_wake_mask_values[MAX_WAKEUP_REG] = { EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+ EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+ EXYNOS_EINT_WAKEUP_MASK_DISABLED};
+
static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
{
return container_of(chip, struct exynos_irq_chip, chip);
@@ -307,7 +313,7 @@ static const struct exynos_irq_chip exynos_gpio_irq_chip __initconst = {
.eint_con = EXYNOS_GPIO_ECON_OFFSET,
.eint_mask = EXYNOS_GPIO_EMASK_OFFSET,
.eint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- /* eint_wake_mask_value not used */
+ /* eint_wake_mask_values not used */
};
static int exynos_eint_irq_map(struct irq_domain *h, unsigned int virq,
@@ -467,10 +473,55 @@ err_domains:
return ret;
}
+#define BITS_PER_U32 32
+static int gs101_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
+{
+ struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+ struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ u32 bit, wakeup_reg, shift;
+
+ bit = bank->eint_num + irqd->hwirq;
+ wakeup_reg = bit / BITS_PER_U32;
+ shift = bit - (wakeup_reg * BITS_PER_U32);
+
+ if (!on)
+ eint_wake_mask_values[wakeup_reg] |= BIT_U32(shift);
+ else
+ eint_wake_mask_values[wakeup_reg] &= ~BIT_U32(shift);
+
+ dev_info(d->dev, "wake %s for irq %d\n", str_enabled_disabled(on),
+ irqd->irq);
+
+ return 0;
+}
+
+static void
+gs101_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip)
+{
+ struct regmap *pmu_regs;
+
+ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
+ dev_warn(drvdata->dev,
+ "No PMU syscon available. Wake-up mask will not be set.\n");
+ return;
+ }
+
+ pmu_regs = drvdata->retention_ctrl->priv;
+
+ dev_dbg(drvdata->dev, "Setting external wakeup interrupt mask:\n");
+
+ for (int i = 0; i < irq_chip->eint_num_wakeup_reg; i++) {
+ dev_dbg(drvdata->dev, "\tWAKEUP_MASK%d[0x%X] value[0x%X]\n",
+ i, irq_chip->eint_wake_mask_reg + i * 4,
+ eint_wake_mask_values[i]);
+ regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg + i * 4,
+ eint_wake_mask_values[i]);
+ }
+}
+
static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
{
- struct irq_chip *chip = irq_data_get_irq_chip(irqd);
- struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq);
@@ -478,9 +529,9 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
irqd->irq, bank->name, irqd->hwirq);
if (!on)
- *our_chip->eint_wake_mask_value |= bit;
+ eint_wake_mask_values[0] |= bit;
else
- *our_chip->eint_wake_mask_value &= ~bit;
+ eint_wake_mask_values[0] &= ~bit;
return 0;
}
@@ -500,10 +551,10 @@ exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
pmu_regs = drvdata->retention_ctrl->priv;
dev_info(drvdata->dev,
"Setting external wakeup interrupt mask: 0x%x\n",
- *irq_chip->eint_wake_mask_value);
+ eint_wake_mask_values[0]);
regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
- *irq_chip->eint_wake_mask_value);
+ eint_wake_mask_values[0]);
}
static void
@@ -522,11 +573,10 @@ s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
clk_base = (void __iomem *) drvdata->retention_ctrl->priv;
- __raw_writel(*irq_chip->eint_wake_mask_value,
+ __raw_writel(eint_wake_mask_values[0],
clk_base + irq_chip->eint_wake_mask_reg);
}
-static u32 eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED;
/*
* irq_chip for wakeup interrupts
*/
@@ -544,7 +594,7 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = {
.eint_con = EXYNOS_WKUP_ECON_OFFSET,
.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
- .eint_wake_mask_value = &eint_wake_mask_value,
+ .eint_num_wakeup_reg = 1,
/* Only differences with exynos4210_wkup_irq_chip: */
.eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK,
.set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask,
@@ -564,7 +614,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
.eint_con = EXYNOS_WKUP_ECON_OFFSET,
.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
- .eint_wake_mask_value = &eint_wake_mask_value,
+ .eint_num_wakeup_reg = 1,
.eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK,
.set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
@@ -583,7 +633,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
.eint_con = EXYNOS7_WKUP_ECON_OFFSET,
.eint_mask = EXYNOS7_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
- .eint_wake_mask_value = &eint_wake_mask_value,
+ .eint_num_wakeup_reg = 1,
.eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
.set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
@@ -599,13 +649,34 @@ static const struct exynos_irq_chip exynosautov920_wkup_irq_chip __initconst = {
.irq_request_resources = exynos_irq_request_resources,
.irq_release_resources = exynos_irq_release_resources,
},
- .eint_wake_mask_value = &eint_wake_mask_value,
+ .eint_num_wakeup_reg = 1,
.eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
.set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
+static const struct exynos_irq_chip gs101_wkup_irq_chip __initconst = {
+ .chip = {
+ .name = "gs101_wkup_irq_chip",
+ .irq_unmask = exynos_irq_unmask,
+ .irq_mask = exynos_irq_mask,
+ .irq_ack = exynos_irq_ack,
+ .irq_set_type = exynos_irq_set_type,
+ .irq_set_wake = gs101_wkup_irq_set_wake,
+ .irq_request_resources = exynos_irq_request_resources,
+ .irq_release_resources = exynos_irq_release_resources,
+ },
+ .eint_con = EXYNOS7_WKUP_ECON_OFFSET,
+ .eint_mask = EXYNOS7_WKUP_EMASK_OFFSET,
+ .eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
+ .eint_num_wakeup_reg = 3,
+ .eint_wake_mask_reg = GS101_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = gs101_pinctrl_set_eint_wakeup_mask,
+};
+
/* list of external wakeup controllers supported */
static const struct of_device_id exynos_wkup_irq_ids[] = {
+ { .compatible = "google,gs101-wakeup-eint",
+ .data = &gs101_wkup_irq_chip },
{ .compatible = "samsung,s5pv210-wakeup-eint",
.data = &s5pv210_wkup_irq_chip },
{ .compatible = "samsung,exynos4210-wakeup-eint",
@@ -688,6 +759,7 @@ out:
chained_irq_exit(chip, desc);
}
+static int eint_num;
/*
* exynos_eint_wkup_init() - setup handling of external wakeup interrupts.
* @d: driver data of samsung pinctrl driver.
@@ -736,6 +808,9 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
return -ENXIO;
}
+ bank->eint_num = eint_num;
+ eint_num = eint_num + bank->nr_pins;
+
if (!fwnode_property_present(bank->fwnode, "interrupts")) {
bank->eint_type = EINT_TYPE_WKUP_MUX;
++muxed_banks;
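To make the gs101 wake-mask indexing above concrete, a worked example with hypothetical numbers (the per-bank base comes from the eint_num accounting added at the end of this hunk):

/*
 * Hypothetical values: a bank whose eint_num base is 40, pin at hwirq 5.
 *
 *	bit        = 40 + 5        = 45
 *	wakeup_reg = 45 / 32       = 1
 *	shift      = 45 - (1 * 32) = 13
 *
 * gs101_wkup_irq_set_wake() therefore flips bit 13 of
 * eint_wake_mask_values[1], and gs101_pinctrl_set_eint_wakeup_mask()
 * later writes that word to GS101_EINT_WAKEUP_MASK + 1 * 4.
 */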
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index fe1ac82b9d79..24745e1d78ce 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1067,7 +1067,7 @@ static int samsung_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
static const struct gpio_chip samsung_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set_rv = samsung_gpio_set,
+ .set = samsung_gpio_set,
.get = samsung_gpio_get,
.direction_input = samsung_gpio_direction_input,
.direction_output = samsung_gpio_direction_output,
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index fcc57c244d16..1cabcbe1401a 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -141,6 +141,7 @@ struct samsung_pin_bank_type {
* @eint_type: type of the external interrupt supported by the bank.
* @eint_mask: bit mask of pins which support EINT function.
* @eint_offset: SoC-specific EINT register or interrupt offset of bank.
+ * @eint_num: first EINT number assigned to this bank (running count of pins in preceding wakeup banks).
* @eint_con_offset: ExynosAuto SoC-specific EINT control register offset of bank.
* @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
* @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
@@ -156,6 +157,7 @@ struct samsung_pin_bank_data {
enum eint_type eint_type;
u32 eint_mask;
u32 eint_offset;
+ u32 eint_num;
u32 eint_con_offset;
u32 eint_mask_offset;
u32 eint_pend_offset;
@@ -174,6 +176,7 @@ struct samsung_pin_bank_data {
* @eint_type: type of the external interrupt supported by the bank.
* @eint_mask: bit mask of pins which support EINT function.
* @eint_offset: SoC-specific EINT register or interrupt offset of bank.
+ * @eint_num: global index of the bank's first EINT pin.
* @eint_con_offset: ExynosAuto SoC-specific EINT register or interrupt offset of bank.
* @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
* @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
@@ -201,6 +204,7 @@ struct samsung_pin_bank {
enum eint_type eint_type;
u32 eint_mask;
u32 eint_offset;
+ u32 eint_num;
u32 eint_con_offset;
u32 eint_mask_offset;
u32 eint_pend_offset;
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index a05570c7d833..1ec22010a3f9 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -181,24 +181,27 @@ static int plgpio_get_value(struct gpio_chip *chip, unsigned offset)
return is_plgpio_set(plgpio->regmap, offset, plgpio->regs.rdata);
}
-static void plgpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
+static int plgpio_set_value(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct plgpio *plgpio = gpiochip_get_data(chip);
if (offset >= chip->ngpio)
- return;
+ return -EINVAL;
/* get correct offset for "offset" pin */
if (plgpio->p2o && (plgpio->p2o_regs & PTO_WDATA_REG)) {
offset = plgpio->p2o(offset);
if (offset == -1)
- return;
+ return -EINVAL;
}
if (value)
plgpio_reg_set(plgpio->regmap, offset, plgpio->regs.wdata);
else
plgpio_reg_reset(plgpio->regmap, offset, plgpio->regs.wdata);
+
+ return 0;
}
static int plgpio_request(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
index 27f99183d994..7fa13f282b85 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
@@ -898,7 +898,7 @@ static const struct pinconf_ops starfive_pinconf_ops = {
.is_generic = true,
};
-static struct pinctrl_desc starfive_desc = {
+static const struct pinctrl_desc starfive_desc = {
.name = DRIVER_NAME,
.pins = starfive_pins,
.npins = ARRAY_SIZE(starfive_pins),
@@ -969,8 +969,8 @@ static int starfive_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return !!(readl_relaxed(din) & BIT(gpio % 32));
}
-static void starfive_gpio_set(struct gpio_chip *gc, unsigned int gpio,
- int value)
+static int starfive_gpio_set(struct gpio_chip *gc, unsigned int gpio,
+ int value)
{
struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
void __iomem *dout = sfp->base + GPON_DOUT_CFG + 8 * gpio;
@@ -979,6 +979,8 @@ static void starfive_gpio_set(struct gpio_chip *gc, unsigned int gpio,
raw_spin_lock_irqsave(&sfp->lock, flags);
writel_relaxed(value, dout);
raw_spin_unlock_irqrestore(&sfp->lock, flags);
+
+ return 0;
}
static int starfive_gpio_set_config(struct gpio_chip *gc, unsigned int gpio,
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
index 1d0d6c224c10..05e3af75b09f 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
@@ -608,8 +608,7 @@ static int jh7110_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return !!(readl_relaxed(reg) & BIT(gpio % 32));
}
-static void jh7110_gpio_set(struct gpio_chip *gc,
- unsigned int gpio, int value)
+static int jh7110_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct jh7110_pinctrl *sfp = container_of(gc,
struct jh7110_pinctrl, gc);
@@ -625,6 +624,8 @@ static void jh7110_gpio_set(struct gpio_chip *gc,
dout |= readl_relaxed(reg_dout) & ~mask;
writel_relaxed(dout, reg_dout);
raw_spin_unlock_irqrestore(&sfp->lock, flags);
+
+ return 0;
}
static int jh7110_gpio_set_config(struct gpio_chip *gc,
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig
index 2656d3d3ae40..5f67e1ee66dd 100644
--- a/drivers/pinctrl/stm32/Kconfig
+++ b/drivers/pinctrl/stm32/Kconfig
@@ -2,7 +2,7 @@
if ARCH_STM32 || COMPILE_TEST
config PINCTRL_STM32
- bool
+ tristate
depends on OF
select PINMUX
select GENERIC_PINCONF
@@ -53,8 +53,22 @@ config PINCTRL_STM32MP157
select PINCTRL_STM32
config PINCTRL_STM32MP257
- bool "STMicroelectronics STM32MP257 pin control" if COMPILE_TEST && !MACH_STM32MP25
+ tristate "STMicroelectronics STM32MP257 pin control"
depends on OF && HAS_IOMEM
- default MACH_STM32MP25
+ default MACH_STM32MP25 || (ARCH_STM32 && ARM64)
select PINCTRL_STM32
+
+config PINCTRL_STM32_HDP
+ tristate "STMicroelectronics STM32 Hardware Debug Port (HDP) pin control"
+ depends on OF && HAS_IOMEM
+ default ARCH_STM32 && !ARM_SINGLE_ARMV7M
+ select PINMUX
+ select GENERIC_PINCONF
+ select GPIOLIB
+ help
+ The Hardware Debug Port allows the observation of internal signals.
+ It uses a configurable multiplexer to route signals to a dedicated observation register.
+ This driver also allows those signals to be observed on external SoC pins,
+ with up to 16 selectable signals per HDP line.
+
endif
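The help text above boils down to one 4-bit selector per HDP pin, so each of the 8 lines can observe one of 16 functions. A minimal standalone sketch of that register layout, mirroring the HDP_MUX_SHIFT()/HDP_MUX_MASK() macros defined by the new pinctrl-stm32-hdp.c below; select_hdp_function() is an illustrative name, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define HDP_MUX_SHIFT(n)	((n) * 4)
#define HDP_MUX_MASK(n)		(0xFu << HDP_MUX_SHIFT(n))

/* Return a new HDP_MUX value with function @func (0..15) routed to @pin (0..7). */
static uint32_t select_hdp_function(uint32_t mux, unsigned int pin, unsigned int func)
{
	mux &= ~HDP_MUX_MASK(pin);			/* clear the pin's 4-bit field */
	mux |= (func & 0xFu) << HDP_MUX_SHIFT(pin);	/* install the new selector */
	return mux;
}

int main(void)
{
	/* Route function 15 (the gpovalN software-controlled output) to HDP2. */
	uint32_t mux = select_hdp_function(0, 2, 15);

	printf("HDP_MUX = 0x%08x\n", mux);	/* prints HDP_MUX = 0x00000f00 */
	return 0;
}

This is the same read-modify-write that stm32_hdp_pinmux_set_mux() performs on the real HDP_MUX register.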
diff --git a/drivers/pinctrl/stm32/Makefile b/drivers/pinctrl/stm32/Makefile
index 7b17464d8de1..98a1bbc7e16c 100644
--- a/drivers/pinctrl/stm32/Makefile
+++ b/drivers/pinctrl/stm32/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_PINCTRL_STM32H743) += pinctrl-stm32h743.o
obj-$(CONFIG_PINCTRL_STM32MP135) += pinctrl-stm32mp135.o
obj-$(CONFIG_PINCTRL_STM32MP157) += pinctrl-stm32mp157.o
obj-$(CONFIG_PINCTRL_STM32MP257) += pinctrl-stm32mp257.o
+obj-$(CONFIG_PINCTRL_STM32_HDP) += pinctrl-stm32-hdp.o
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c b/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
new file mode 100644
index 000000000000..e91442eb566b
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Clément Le Goffic <clement.legoffic@foss.st.com> for STMicroelectronics.
+ */
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include "../core.h"
+
+#define DRIVER_NAME "stm32_hdp"
+#define HDP_CTRL_ENABLE 1
+#define HDP_CTRL_DISABLE 0
+
+#define HDP_CTRL 0x000
+#define HDP_MUX 0x004
+#define HDP_VAL 0x010
+#define HDP_GPOSET 0x014
+#define HDP_GPOCLR 0x018
+#define HDP_GPOVAL 0x01c
+#define HDP_VERR 0x3f4
+#define HDP_IPIDR 0x3f8
+#define HDP_SIDR 0x3fc
+
+#define HDP_MUX_SHIFT(n) ((n) * 4)
+#define HDP_MUX_MASK(n) (GENMASK(3, 0) << HDP_MUX_SHIFT(n))
+#define HDP_MUX_GPOVAL(n) (0xf << HDP_MUX_SHIFT(n))
+
+#define HDP_PIN 8
+#define HDP_FUNC 16
+#define HDP_FUNC_TOTAL (HDP_PIN * HDP_FUNC)
+
+struct stm32_hdp {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct pinctrl_dev *pctl_dev;
+ struct gpio_chip gpio_chip;
+ u32 mux_conf;
+ u32 gposet_conf;
+ const char * const *func_name;
+};
+
+static const struct pinctrl_pin_desc stm32_hdp_pins[] = {
+ PINCTRL_PIN(0, "HDP0"),
+ PINCTRL_PIN(1, "HDP1"),
+ PINCTRL_PIN(2, "HDP2"),
+ PINCTRL_PIN(3, "HDP3"),
+ PINCTRL_PIN(4, "HDP4"),
+ PINCTRL_PIN(5, "HDP5"),
+ PINCTRL_PIN(6, "HDP6"),
+ PINCTRL_PIN(7, "HDP7"),
+};
+
+static const char * const func_name_mp13[] = {
+ //HDP0 functions:
+ "pwr_pwrwake_sys",
+ "pwr_stop_forbidden",
+ "pwr_stdby_wakeup",
+ "pwr_encomp_vddcore",
+ "bsec_out_sec_niden",
+ "aiec_sys_wakeup",
+ "none",
+ "none",
+ "ddrctrl_lp_req",
+ "pwr_ddr_ret_enable_n",
+ "dts_clk_ptat",
+ "none",
+ "sram3ctrl_tamp_erase_act",
+ "none",
+ "none",
+ "gpoval0",
+ //HDP1 functions:
+ "pwr_sel_vth_vddcpu",
+ "pwr_mpu_ram_lowspeed",
+ "ca7_naxierrirq",
+ "pwr_okin_mr",
+ "bsec_out_sec_dbgen",
+ "aiec_c1_wakeup",
+ "rcc_pwrds_mpu",
+ "none",
+ "ddrctrl_dfi_ctrlupd_req",
+ "ddrctrl_cactive_ddrc_asr",
+ "none",
+ "none",
+ "sram3ctrl_hw_erase_act",
+ "nic400_s0_bready",
+ "none",
+ "gpoval1",
+ //HDP2 functions:
+ "pwr_pwrwake_mpu",
+ "pwr_mpu_clock_disable_ack",
+ "ca7_ndbgreset_i",
+ "none",
+ "bsec_in_rstcore_n",
+ "bsec_out_sec_bsc_dis",
+ "none",
+ "none",
+ "ddrctrl_dfi_init_complete",
+ "ddrctrl_perf_op_is_refresh",
+ "ddrctrl_gskp_dfi_lp_req",
+ "none",
+ "sram3ctrl_sw_erase_act",
+ "nic400_s0_bvalid",
+ "none",
+ "gpoval2",
+ //HDP3 functions:
+ "pwr_sel_vth_vddcore",
+ "pwr_mpu_clock_disable_req",
+ "ca7_npmuirq0",
+ "ca7_nfiqout0",
+ "bsec_out_sec_dftlock",
+ "bsec_out_sec_jtag_dis",
+ "rcc_pwrds_sys",
+ "sram3ctrl_tamp_erase_req",
+ "ddrctrl_stat_ddrc_reg_selfref_type0",
+ "none",
+ "dts_valobus1_0",
+ "dts_valobus2_0",
+ "tamp_potential_tamp_erfcfg",
+ "nic400_s0_wready",
+ "nic400_s0_rready",
+ "gpoval3",
+ //HDP4 functions:
+ "none",
+ "pwr_stop2_active",
+ "ca7_nl2reset_i",
+ "ca7_npreset_varm_i",
+ "bsec_out_sec_dften",
+ "bsec_out_sec_dbgswenable",
+ "eth1_out_pmt_intr_o",
+ "eth2_out_pmt_intr_o",
+ "ddrctrl_stat_ddrc_reg_selfref_type1",
+ "ddrctrl_cactive_0",
+ "dts_valobus1_1",
+ "dts_valobus2_1",
+ "tamp_nreset_sram_ercfg",
+ "nic400_s0_wlast",
+ "nic400_s0_rlast",
+ "gpoval4",
+ //HDP5 functions:
+ "ca7_standbywfil2",
+ "pwr_vth_vddcore_ack",
+ "ca7_ncorereset_i",
+ "ca7_nirqout0",
+ "bsec_in_pwrok",
+ "bsec_out_sec_deviceen",
+ "eth1_out_lpi_intr_o",
+ "eth2_out_lpi_intr_o",
+ "ddrctrl_cactive_ddrc",
+ "ddrctrl_wr_credit_cnt",
+ "dts_valobus1_2",
+ "dts_valobus2_2",
+ "pka_pka_itamp_out",
+ "nic400_s0_wvalid",
+ "nic400_s0_rvalid",
+ "gpoval5",
+ //HDP6 functions:
+ "ca7_standbywfe0",
+ "pwr_vth_vddcpu_ack",
+ "ca7_evento",
+ "none",
+ "bsec_in_tamper_det",
+ "bsec_out_sec_spniden",
+ "eth1_out_mac_speed_o1",
+ "eth2_out_mac_speed_o1",
+ "ddrctrl_csysack_ddrc",
+ "ddrctrl_lpr_credit_cnt",
+ "dts_valobus1_3",
+ "dts_valobus2_3",
+ "saes_tamper_out",
+ "nic400_s0_awready",
+ "nic400_s0_arready",
+ "gpoval6",
+ //HDP7 functions:
+ "ca7_standbywfi0",
+ "pwr_rcc_vcpu_rdy",
+ "ca7_eventi",
+ "ca7_dbgack0",
+ "bsec_out_fuse_ok",
+ "bsec_out_sec_spiden",
+ "eth1_out_mac_speed_o0",
+ "eth2_out_mac_speed_o0",
+ "ddrctrl_csysreq_ddrc",
+ "ddrctrl_hpr_credit_cnt",
+ "dts_valobus1_4",
+ "dts_valobus2_4",
+ "rng_tamper_out",
+ "nic400_s0_awavalid",
+ "nic400_s0_aravalid",
+ "gpoval7",
+};
+
+static const char * const func_name_mp15[] = {
+ //HDP0 functions:
+ "pwr_pwrwake_sys",
+ "cm4_sleepdeep",
+ "pwr_stdby_wkup",
+ "pwr_encomp_vddcore",
+ "bsec_out_sec_niden",
+ "none",
+ "rcc_cm4_sleepdeep",
+ "gpu_dbg7",
+ "ddrctrl_lp_req",
+ "pwr_ddr_ret_enable_n",
+ "dts_clk_ptat",
+ "none",
+ "none",
+ "none",
+ "none",
+ "gpoval0",
+ //HDP1 functions:
+ "pwr_pwrwake_mcu",
+ "cm4_halted",
+ "ca7_naxierrirq",
+ "pwr_okin_mr",
+ "bsec_out_sec_dbgen",
+ "exti_sys_wakeup",
+ "rcc_pwrds_mpu",
+ "gpu_dbg6",
+ "ddrctrl_dfi_ctrlupd_req",
+ "ddrctrl_cactive_ddrc_asr",
+ "none",
+ "none",
+ "none",
+ "none",
+ "none",
+ "gpoval1",
+ //HDP2 functions:
+ "pwr_pwrwake_mpu",
+ "cm4_rxev",
+ "ca7_npmuirq1",
+ "ca7_nfiqout1",
+ "bsec_in_rstcore_n",
+ "exti_c2_wakeup",
+ "rcc_pwrds_mcu",
+ "gpu_dbg5",
+ "ddrctrl_dfi_init_complete",
+ "ddrctrl_perf_op_is_refresh",
+ "ddrctrl_gskp_dfi_lp_req",
+ "none",
+ "none",
+ "none",
+ "none",
+ "gpoval2",
+ //HDP3 functions:
+ "pwr_sel_vth_vddcore",
+ "cm4_txev",
+ "ca7_npmuirq0",
+ "ca7_nfiqout0",
+ "bsec_out_sec_dftlock",
+ "exti_c1_wakeup",
+ "rcc_pwrds_sys",
+ "gpu_dbg4",
+ "ddrctrl_stat_ddrc_reg_selfref_type0",
+ "ddrctrl_cactive_1",
+ "dts_valobus1_0",
+ "dts_valobus2_0",
+ "none",
+ "none",
+ "none",
+ "gpoval3",
+ //HDP4 functions:
+ "pwr_mpu_pdds_not_cstbydis",
+ "cm4_sleeping",
+ "ca7_nreset1",
+ "ca7_nirqout1",
+ "bsec_out_sec_dften",
+ "bsec_out_sec_dbgswenable",
+ "eth_out_pmt_intr_o",
+ "gpu_dbg3",
+ "ddrctrl_stat_ddrc_reg_selfref_type1",
+ "ddrctrl_cactive_0",
+ "dts_valobus1_1",
+ "dts_valobus2_1",
+ "none",
+ "none",
+ "none",
+ "gpoval4",
+ //HDP5 functions:
+ "ca7_standbywfil2",
+ "pwr_vth_vddcore_ack",
+ "ca7_nreset0",
+ "ca7_nirqout0",
+ "bsec_in_pwrok",
+ "bsec_out_sec_deviceen",
+ "eth_out_lpi_intr_o",
+ "gpu_dbg2",
+ "ddrctrl_cactive_ddrc",
+ "ddrctrl_wr_credit_cnt",
+ "dts_valobus1_2",
+ "dts_valobus2_2",
+ "none",
+ "none",
+ "none",
+ "gpoval5",
+ //HDP6 functions:
+ "ca7_standbywfi1",
+ "ca7_standbywfe1",
+ "ca7_evento",
+ "ca7_dbgack1",
+ "none",
+ "bsec_out_sec_spniden",
+ "eth_out_mac_speed_o1",
+ "gpu_dbg1",
+ "ddrctrl_csysack_ddrc",
+ "ddrctrl_lpr_credit_cnt",
+ "dts_valobus1_3",
+ "dts_valobus2_3",
+ "none",
+ "none",
+ "none",
+ "gpoval6",
+ //HDP7 functions:
+ "ca7_standbywfi0",
+ "ca7_standbywfe0",
+ "none",
+ "ca7_dbgack0",
+ "bsec_out_fuse_ok",
+ "bsec_out_sec_spiden",
+ "eth_out_mac_speed_o0",
+ "gpu_dbg0",
+ "ddrctrl_csysreq_ddrc",
+ "ddrctrl_hpr_credit_cnt",
+ "dts_valobus1_4",
+ "dts_valobus2_4",
+ "none",
+ "none",
+ "none",
+ "gpoval7"
+};
+
+static const char * const func_name_mp25[] = {
+ //HDP0 functions:
+ "pwr_pwrwake_sys",
+ "cpu2_sleep_deep",
+ "bsec_out_tst_sdr_unlock_or_disable_scan",
+ "bsec_out_nidenm",
+ "bsec_out_nidena",
+ "cpu2_state_0",
+ "rcc_pwrds_sys",
+ "gpu_dbg7",
+ "ddrss_csysreq_ddrc",
+ "ddrss_dfi_phyupd_req",
+ "cpu3_sleep_deep",
+ "d2_gbl_per_clk_bus_req",
+ "pcie_usb_cxpl_debug_info_ei_0",
+ "pcie_usb_cxpl_debug_info_ei_8",
+ "d3_state_0",
+ "gpoval0",
+ //HDP1 functions:
+ "pwr_pwrwake_cpu2",
+ "cpu2_halted",
+ "cpu2_state_1",
+ "bsec_out_dbgenm",
+ "bsec_out_dbgena",
+ "exti1_sys_wakeup",
+ "rcc_pwrds_cpu2",
+ "gpu_dbg6",
+ "ddrss_csysack_ddrc",
+ "ddrss_dfi_phymstr_req",
+ "cpu3_halted",
+ "d2_gbl_per_dma_req",
+ "pcie_usb_cxpl_debug_info_ei_1",
+ "pcie_usb_cxpl_debug_info_ei_9",
+ "d3_state_1",
+ "gpoval1",
+ //HDP2 functions:
+ "pwr_pwrwake_cpu1",
+ "cpu2_rxev",
+ "cpu1_npumirq1",
+ "cpu1_nfiqout1",
+ "bsec_out_shdbgen",
+ "exti1_cpu2_wakeup",
+ "rcc_pwrds_cpu1",
+ "gpu_dbg5",
+ "ddrss_cactive_ddrc",
+ "ddrss_dfi_lp_req",
+ "cpu3_rxev",
+ "hpdma1_clk_bus_req",
+ "pcie_usb_cxpl_debug_info_ei_2",
+ "pcie_usb_cxpl_debug_info_ei_10",
+ "d3_state_2",
+ "gpoval2",
+ //HDP3 functions:
+ "pwr_sel_vth_vddcpu",
+ "cpu2_txev",
+ "cpu1_npumirq0",
+ "cpu1_nfiqout0",
+ "bsec_out_ddbgen",
+ "exti1_cpu1_wakeup",
+ "cpu3_state_0",
+ "gpu_dbg4",
+ "ddrss_mcdcg_en",
+ "ddrss_dfi_freq_0",
+ "cpu3_txev",
+ "hpdma2_clk_bus_req",
+ "pcie_usb_cxpl_debug_info_ei_3",
+ "pcie_usb_cxpl_debug_info_ei_11",
+ "d1_state_0",
+ "gpoval3",
+ //HDP4 functions:
+ "pwr_sel_vth_vddcore",
+ "cpu2_sleeping",
+ "cpu1_evento",
+ "cpu1_nirqout1",
+ "bsec_out_spnidena",
+ "exti2_d3_wakeup",
+ "eth1_out_pmt_intr_o",
+ "gpu_dbg3",
+ "ddrss_dphycg_en",
+ "ddrss_obsp0",
+ "cpu3_sleeping",
+ "hpdma3_clk_bus_req",
+ "pcie_usb_cxpl_debug_info_ei_4",
+ "pcie_usb_cxpl_debug_info_ei_12",
+ "d1_state_1",
+ "gpoval4",
+ //HDP5 functions:
+ "cpu1_standby_wfil2",
+ "none",
+ "none",
+ "cpu1_nirqout0",
+ "bsec_out_spidena",
+ "exti2_cpu3_wakeup",
+ "eth1_out_lpi_intr_o",
+ "gpu_dbg2",
+ "ddrctrl_dfi_init_start",
+ "ddrss_obsp1",
+ "cpu3_state_1",
+ "d3_gbl_per_clk_bus_req",
+ "pcie_usb_cxpl_debug_info_ei_5",
+ "pcie_usb_cxpl_debug_info_ei_13",
+ "d1_state_2",
+ "gpoval5",
+ //HDP6 functions:
+ "cpu1_standby_wfi1",
+ "cpu1_standby_wfe1",
+ "cpu1_halted1",
+ "cpu1_naxierrirq",
+ "bsec_out_spnidenm",
+ "exti2_cpu2_wakeup",
+ "eth2_out_pmt_intr_o",
+ "gpu_dbg1",
+ "ddrss_dfi_init_complete",
+ "ddrss_obsp2",
+ "d2_state_0",
+ "d3_gbl_per_dma_req",
+ "pcie_usb_cxpl_debug_info_ei_6",
+ "pcie_usb_cxpl_debug_info_ei_14",
+ "cpu1_state_0",
+ "gpoval6",
+ //HDP7 functions:
+ "cpu1_standby_wfi0",
+ "cpu1_standby_wfe0",
+ "cpu1_halted0",
+ "none",
+ "bsec_out_spidenm",
+ "exti2_cpu1__wakeup",
+ "eth2_out_lpi_intr_o",
+ "gpu_dbg0",
+ "ddrss_dfi_ctrlupd_req",
+ "ddrss_obsp3",
+ "d2_state_1",
+ "lpdma1_clk_bus_req",
+ "pcie_usb_cxpl_debug_info_ei_7",
+ "pcie_usb_cxpl_debug_info_ei_15",
+ "cpu1_state_1",
+ "gpoval7",
+};
+
+static const char * const stm32_hdp_pins_group[] = {
+ "HDP0",
+ "HDP1",
+ "HDP2",
+ "HDP3",
+ "HDP4",
+ "HDP5",
+ "HDP6",
+ "HDP7"
+};
+
+static int stm32_hdp_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int stm32_hdp_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(stm32_hdp_pins);
+}
+
+static const char *stm32_hdp_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return stm32_hdp_pins[selector].name;
+}
+
+static int stm32_hdp_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins, unsigned int *num_pins)
+{
+ *pins = &stm32_hdp_pins[selector].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops stm32_hdp_pinctrl_ops = {
+ .get_groups_count = stm32_hdp_pinctrl_get_groups_count,
+ .get_group_name = stm32_hdp_pinctrl_get_group_name,
+ .get_group_pins = stm32_hdp_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int stm32_hdp_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return HDP_FUNC_TOTAL;
+}
+
+static const char *stm32_hdp_pinmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct stm32_hdp *hdp = pinctrl_dev_get_drvdata(pctldev);
+
+ return hdp->func_name[selector];
+}
+
+static int stm32_hdp_pinmux_get_function_groups(struct pinctrl_dev *pctldev, unsigned int selector,
+ const char *const **groups,
+ unsigned int *num_groups)
+{
+ u32 index = selector / HDP_FUNC;
+
+ *groups = &stm32_hdp_pins[index].name;
+ *num_groups = 1;
+
+ return 0;
+}
+
+static int stm32_hdp_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct stm32_hdp *hdp = pinctrl_dev_get_drvdata(pctldev);
+
+ unsigned int pin = stm32_hdp_pins[group_selector].number;
+ u32 mux;
+
+ func_selector %= HDP_FUNC;
+ mux = readl_relaxed(hdp->base + HDP_MUX);
+ mux &= ~HDP_MUX_MASK(pin);
+ mux |= func_selector << HDP_MUX_SHIFT(pin);
+
+ writel_relaxed(mux, hdp->base + HDP_MUX);
+ hdp->mux_conf = mux;
+
+ return 0;
+}
+
+static const struct pinmux_ops stm32_hdp_pinmux_ops = {
+ .get_functions_count = stm32_hdp_pinmux_get_functions_count,
+ .get_function_name = stm32_hdp_pinmux_get_function_name,
+ .get_function_groups = stm32_hdp_pinmux_get_function_groups,
+ .set_mux = stm32_hdp_pinmux_set_mux,
+ .gpio_set_direction = NULL,
+};
+
+static struct pinctrl_desc stm32_hdp_pdesc = {
+ .name = DRIVER_NAME,
+ .pins = stm32_hdp_pins,
+ .npins = ARRAY_SIZE(stm32_hdp_pins),
+ .pctlops = &stm32_hdp_pinctrl_ops,
+ .pmxops = &stm32_hdp_pinmux_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id stm32_hdp_of_match[] = {
+ {
+ .compatible = "st,stm32mp131-hdp",
+ .data = &func_name_mp13,
+ },
+ {
+ .compatible = "st,stm32mp151-hdp",
+ .data = &func_name_mp15,
+ },
+ {
+ .compatible = "st,stm32mp251-hdp",
+ .data = &func_name_mp25,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_hdp_of_match);
+
+static int stm32_hdp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_hdp *hdp;
+ u8 version;
+ int err;
+
+ hdp = devm_kzalloc(dev, sizeof(*hdp), GFP_KERNEL);
+ if (!hdp)
+ return -ENOMEM;
+ hdp->dev = dev;
+
+ platform_set_drvdata(pdev, hdp);
+
+ hdp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hdp->base))
+ return PTR_ERR(hdp->base);
+
+ hdp->func_name = of_device_get_match_data(dev);
+ if (!hdp->func_name)
+ return dev_err_probe(dev, -ENODEV, "No function name provided\n");
+
+ hdp->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(hdp->clk))
+ return dev_err_probe(dev, PTR_ERR(hdp->clk), "No HDP clock provided\n");
+
+ err = devm_pinctrl_register_and_init(dev, &stm32_hdp_pdesc, hdp, &hdp->pctl_dev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register pinctrl\n");
+
+ err = pinctrl_enable(hdp->pctl_dev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable pinctrl\n");
+
+ hdp->gpio_chip.get_direction = stm32_hdp_gpio_get_direction;
+ hdp->gpio_chip.ngpio = ARRAY_SIZE(stm32_hdp_pins);
+ hdp->gpio_chip.can_sleep = true;
+ hdp->gpio_chip.names = stm32_hdp_pins_group;
+
+ err = bgpio_init(&hdp->gpio_chip, dev, 4,
+ hdp->base + HDP_GPOVAL,
+ hdp->base + HDP_GPOSET,
+ hdp->base + HDP_GPOCLR,
+ NULL, NULL, BGPIOF_NO_INPUT);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to init bgpio\n");
+
+ err = devm_gpiochip_add_data(dev, &hdp->gpio_chip, hdp);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to add gpiochip\n");
+
+ writel_relaxed(HDP_CTRL_ENABLE, hdp->base + HDP_CTRL);
+
+ version = readl_relaxed(hdp->base + HDP_VERR);
+ dev_dbg(dev, "STM32 HDP version %u.%u initialized\n", version >> 4, version & 0x0f);
+
+ return 0;
+}
+
+static void stm32_hdp_remove(struct platform_device *pdev)
+{
+ struct stm32_hdp *hdp = platform_get_drvdata(pdev);
+
+ writel_relaxed(HDP_CTRL_DISABLE, hdp->base + HDP_CTRL);
+}
+
+static int stm32_hdp_suspend(struct device *dev)
+{
+ struct stm32_hdp *hdp = dev_get_drvdata(dev);
+
+ hdp->gposet_conf = readl_relaxed(hdp->base + HDP_GPOSET);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ clk_disable_unprepare(hdp->clk);
+
+ return 0;
+}
+
+static int stm32_hdp_resume(struct device *dev)
+{
+ struct stm32_hdp *hdp = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(hdp->clk);
+ if (err) {
+ dev_err(dev, "Failed to prepare_enable clk (%d)\n", err);
+ return err;
+ }
+
+ writel_relaxed(HDP_CTRL_ENABLE, hdp->base + HDP_CTRL);
+ writel_relaxed(hdp->gposet_conf, hdp->base + HDP_GPOSET);
+ writel_relaxed(hdp->mux_conf, hdp->base + HDP_MUX);
+
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(stm32_hdp_pm_ops, stm32_hdp_suspend, stm32_hdp_resume);
+
+static struct platform_driver stm32_hdp_driver = {
+ .probe = stm32_hdp_probe,
+ .remove = stm32_hdp_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = pm_sleep_ptr(&stm32_hdp_pm_ops),
+ .of_match_table = stm32_hdp_of_match,
+ }
+};
+
+module_platform_driver(stm32_hdp_driver);
+
+MODULE_AUTHOR("Clément Le Goffic");
+MODULE_DESCRIPTION("STMicroelectronics STM32 Hardware Debug Port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index ba49d48c3a1d..823c8fe758e2 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -6,7 +6,9 @@
*
* Heavily based on Mediatek's pinctrl driver
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/gpio/driver.h>
#include <linux/hwspinlock.h>
#include <linux/io.h>
@@ -36,6 +38,8 @@
#include "../pinctrl-utils.h"
#include "pinctrl-stm32.h"
+#define STM32_GPIO_CID1 1
+
#define STM32_GPIO_MODER 0x00
#define STM32_GPIO_TYPER 0x04
#define STM32_GPIO_SPEEDR 0x08
@@ -47,6 +51,8 @@
#define STM32_GPIO_AFRL 0x20
#define STM32_GPIO_AFRH 0x24
#define STM32_GPIO_SECCFGR 0x30
+#define STM32_GPIO_CIDCFGR(x) (0x50 + (0x8 * (x)))
+#define STM32_GPIO_SEMCR(x) (0x54 + (0x8 * (x)))
/* custom bitfield to backup pin status */
#define STM32_GPIO_BKP_MODE_SHIFT 0
@@ -60,6 +66,14 @@
#define STM32_GPIO_BKP_TYPE 10
#define STM32_GPIO_BKP_VAL 11
+#define STM32_GPIO_CIDCFGR_CFEN BIT(0)
+#define STM32_GPIO_CIDCFGR_SEMEN BIT(1)
+#define STM32_GPIO_CIDCFGR_SCID_MASK GENMASK(5, 4)
+#define STM32_GPIO_CIDCFGR_SEMWL_CID1 BIT(16 + STM32_GPIO_CID1)
+
+#define STM32_GPIO_SEMCR_SEM_MUTEX BIT(0)
+#define STM32_GPIO_SEMCR_SEMCID_MASK GENMASK(5, 4)
+
#define STM32_GPIO_PINS_PER_BANK 16
#define STM32_GPIO_IRQ_LINE 16
@@ -77,6 +91,7 @@ static const char * const stm32_gpio_functions[] = {
"af8", "af9", "af10",
"af11", "af12", "af13",
"af14", "af15", "analog",
+ "reserved",
};
struct stm32_pinctrl_group {
@@ -98,6 +113,7 @@ struct stm32_gpio_bank {
u32 pin_backup[STM32_GPIO_PINS_PER_BANK];
u8 irq_type[STM32_GPIO_PINS_PER_BANK];
bool secure_control;
+ bool rif_control;
};
struct stm32_pinctrl {
@@ -122,6 +138,8 @@ struct stm32_pinctrl {
spinlock_t irqmux_lock;
};
+static void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode, u32 *alt);
+
static inline int stm32_gpio_pin(int gpio)
{
return gpio % STM32_GPIO_PINS_PER_BANK;
@@ -192,6 +210,80 @@ static void stm32_gpio_backup_bias(struct stm32_gpio_bank *bank, u32 offset,
bank->pin_backup[offset] |= bias << STM32_GPIO_BKP_PUPD_SHIFT;
}
+/* RIF functions */
+
+static bool stm32_gpio_rif_valid(struct stm32_gpio_bank *bank, unsigned int gpio_nr)
+{
+ u32 cid;
+
+ cid = readl_relaxed(bank->base + STM32_GPIO_CIDCFGR(gpio_nr));
+
+ if (!(cid & STM32_GPIO_CIDCFGR_CFEN))
+ return true;
+
+ if (!(cid & STM32_GPIO_CIDCFGR_SEMEN)) {
+ if (FIELD_GET(STM32_GPIO_CIDCFGR_SCID_MASK, cid) == STM32_GPIO_CID1)
+ return true;
+
+ return false;
+ }
+
+ if (cid & STM32_GPIO_CIDCFGR_SEMWL_CID1)
+ return true;
+
+ return false;
+}
+
+static bool stm32_gpio_rif_acquire_semaphore(struct stm32_gpio_bank *bank, unsigned int gpio_nr)
+{
+ u32 cid, sem;
+
+ cid = readl_relaxed(bank->base + STM32_GPIO_CIDCFGR(gpio_nr));
+
+ if (!(cid & STM32_GPIO_CIDCFGR_CFEN))
+ return true;
+
+ if (!(cid & STM32_GPIO_CIDCFGR_SEMEN)) {
+ if (FIELD_GET(STM32_GPIO_CIDCFGR_SCID_MASK, cid) == STM32_GPIO_CID1)
+ return true;
+
+ return false;
+ }
+
+ if (!(cid & STM32_GPIO_CIDCFGR_SEMWL_CID1))
+ return false;
+
+ sem = readl_relaxed(bank->base + STM32_GPIO_SEMCR(gpio_nr));
+ if (sem & STM32_GPIO_SEMCR_SEM_MUTEX) {
+ if (FIELD_GET(STM32_GPIO_SEMCR_SEMCID_MASK, sem) == STM32_GPIO_CID1)
+ return true;
+
+ return false;
+ }
+
+ writel_relaxed(STM32_GPIO_SEMCR_SEM_MUTEX, bank->base + STM32_GPIO_SEMCR(gpio_nr));
+
+ sem = readl_relaxed(bank->base + STM32_GPIO_SEMCR(gpio_nr));
+ if (sem & STM32_GPIO_SEMCR_SEM_MUTEX &&
+ FIELD_GET(STM32_GPIO_SEMCR_SEMCID_MASK, sem) == STM32_GPIO_CID1)
+ return true;
+
+ return false;
+}
+
+static void stm32_gpio_rif_release_semaphore(struct stm32_gpio_bank *bank, unsigned int gpio_nr)
+{
+ u32 cid;
+
+ cid = readl_relaxed(bank->base + STM32_GPIO_CIDCFGR(gpio_nr));
+
+ if (!(cid & STM32_GPIO_CIDCFGR_CFEN))
+ return;
+
+ if (cid & STM32_GPIO_CIDCFGR_SEMEN)
+ writel_relaxed(0, bank->base + STM32_GPIO_SEMCR(gpio_nr));
+}
+
/* GPIO functions */
static inline void __stm32_gpio_set(struct stm32_gpio_bank *bank,
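To make the access rule in stm32_gpio_rif_valid() above easier to follow, here is a standalone worked example using the same bit layout (CFEN in bit 0, SEMEN in bit 1, SCID in bits 5:4, CID1 semaphore whitelist at bit 17); check_cid1_access() is an illustrative wrapper, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CIDCFGR_CFEN		(1u << 0)
#define CIDCFGR_SEMEN		(1u << 1)
#define CIDCFGR_SCID_SHIFT	4
#define CIDCFGR_SCID_MASK	(0x3u << CIDCFGR_SCID_SHIFT)
#define CIDCFGR_SEMWL_CID1	(1u << (16 + 1))	/* whitelist bit for CID1 */
#define CID1			1u

/* Same decision tree as stm32_gpio_rif_valid(), applied to a raw CIDCFGR value. */
static bool check_cid1_access(uint32_t cid)
{
	if (!(cid & CIDCFGR_CFEN))
		return true;		/* CID filtering disabled: always accessible */
	if (!(cid & CIDCFGR_SEMEN))	/* static assignment: only the configured CID */
		return ((cid & CIDCFGR_SCID_MASK) >> CIDCFGR_SCID_SHIFT) == CID1;
	return cid & CIDCFGR_SEMWL_CID1;	/* semaphore mode: CID1 must be whitelisted */
}

int main(void)
{
	/* Statically assigned to CID1: CFEN set, SEMEN clear, SCID = 1 -> accessible. */
	printf("%d\n", check_cid1_access(CIDCFGR_CFEN | (CID1 << CIDCFGR_SCID_SHIFT)));
	/* Semaphore mode without CID1 in the whitelist -> not accessible. */
	printf("%d\n", check_cid1_access(CIDCFGR_CFEN | CIDCFGR_SEMEN));
	return 0;
}

stm32_gpio_rif_acquire_semaphore() adds one more step on top of this: when the semaphore path is taken it also writes SEMCR and reads it back to confirm CID1 actually won the mutex.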
@@ -218,9 +310,26 @@ static int stm32_gpio_request(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
+ if (bank->rif_control) {
+ if (!stm32_gpio_rif_acquire_semaphore(bank, offset)) {
+ dev_err(pctl->dev, "pin %d not available.\n", pin);
+ return -EINVAL;
+ }
+ }
+
return pinctrl_gpio_request(chip, offset);
}
+static void stm32_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+ pinctrl_gpio_free(chip, offset);
+
+ if (bank->rif_control)
+ stm32_gpio_rif_release_semaphore(bank, offset);
+}
+
static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
@@ -304,14 +413,27 @@ static int stm32_gpio_init_valid_mask(struct gpio_chip *chip,
}
}
+ if (bank->rif_control) {
+ for (i = 0; i < ngpios; i++) {
+ if (!test_bit(i, valid_mask))
+ continue;
+
+ if (stm32_gpio_rif_valid(bank, i))
+ continue;
+
+ dev_dbg(pctl->dev, "RIF semaphore ownership conflict, GPIO %u\n", i);
+ clear_bit(i, valid_mask);
+ }
+ }
+
return 0;
}
static const struct gpio_chip stm32_gpio_template = {
.request = stm32_gpio_request,
- .free = pinctrl_gpio_free,
+ .free = stm32_gpio_free,
.get = stm32_gpio_get,
- .set_rv = stm32_gpio_set,
+ .set = stm32_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = stm32_gpio_direction_output,
.to_irq = stm32_gpio_to_irq,
@@ -411,6 +533,7 @@ static struct irq_chip stm32_gpio_irq_chip = {
.irq_set_wake = irq_chip_set_wake_parent,
.irq_request_resources = stm32_gpio_irq_request_resources,
.irq_release_resources = stm32_gpio_irq_release_resources,
+ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL,
};
static int stm32_gpio_domain_translate(struct irq_domain *d,
@@ -541,6 +664,9 @@ static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl,
if (pin->pin.number != pin_num)
continue;
+ if (fnum == STM32_PIN_RSVD)
+ return true;
+
for (k = 0; k < STM32_CONFIG_NUM; k++) {
if (func->num == fnum)
return true;
@@ -798,8 +924,7 @@ unlock:
return err;
}
-void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode,
- u32 *alt)
+static void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode, u32 *alt)
{
u32 val;
int alt_shift = (pin % 8) * 4;
@@ -841,6 +966,11 @@ static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
}
+ if (function == STM32_PIN_RSVD) {
+ dev_dbg(pctl->dev, "Reserved pins, skipping HW update.\n");
+ return 0;
+ }
+
bank = gpiochip_get_data(range->gc);
pin = stm32_gpio_pin(g->pin);
@@ -1348,6 +1478,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
bank->bank_nr = bank_nr;
bank->bank_ioport_nr = bank_ioport_nr;
bank->secure_control = pctl->match_data->secure_control;
+ bank->rif_control = pctl->match_data->rif_control;
spin_lock_init(&bank->lock);
if (pctl->domain) {
@@ -1664,6 +1795,7 @@ err_register:
clk_bulk_disable_unprepare(banks, pctl->clks);
return ret;
}
+EXPORT_SYMBOL(stm32_pctl_probe);
static int __maybe_unused stm32_pinctrl_restore_gpio_regs(
struct stm32_pinctrl *pctl, u32 pin)
@@ -1736,6 +1868,7 @@ int __maybe_unused stm32_pinctrl_suspend(struct device *dev)
return 0;
}
+EXPORT_SYMBOL(stm32_pinctrl_suspend);
int __maybe_unused stm32_pinctrl_resume(struct device *dev)
{
@@ -1752,3 +1885,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL(stm32_pinctrl_resume);
+
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@foss.st.com>");
+MODULE_DESCRIPTION("STM32 core pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
index 5e5de92ddd58..b98a4141bf2c 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.h
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -17,7 +17,8 @@
#define STM32_PIN_GPIO 0
#define STM32_PIN_AF(x) ((x) + 1)
#define STM32_PIN_ANALOG (STM32_PIN_AF(15) + 1)
-#define STM32_CONFIG_NUM (STM32_PIN_ANALOG + 1)
+#define STM32_PIN_RSVD (STM32_PIN_ANALOG + 1)
+#define STM32_CONFIG_NUM (STM32_PIN_RSVD + 1)
/* package information */
#define STM32MP_PKG_AA BIT(0)
@@ -63,14 +64,25 @@ struct stm32_pinctrl_match_data {
const struct stm32_desc_pin *pins;
const unsigned int npins;
bool secure_control;
+ bool rif_control;
};
-struct stm32_gpio_bank;
-
+/**
+ * stm32_pctl_probe() - Common probe for stm32 pinctrl drivers.
+ * @pdev: Pinctrl platform device.
+ */
int stm32_pctl_probe(struct platform_device *pdev);
-void stm32_pmx_get_mode(struct stm32_gpio_bank *bank,
- int pin, u32 *mode, u32 *alt);
+
+/**
+ * stm32_pinctrl_suspend() - Common suspend for stm32 pinctrl drivers.
+ * @dev: Pinctrl device.
+ */
int stm32_pinctrl_suspend(struct device *dev);
+
+/**
+ * stm32_pinctrl_resume() - Common resume for stm32 pinctrl drivers.
+ * @dev: Pinctrl device.
+ */
int stm32_pinctrl_resume(struct device *dev);
#endif /* __PINCTRL_STM32_H */
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
index 23aebd4695e9..d226de524bfc 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
@@ -4,6 +4,7 @@
* Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -2542,11 +2543,15 @@ static const struct stm32_desc_pin stm32mp257_z_pins[] = {
static struct stm32_pinctrl_match_data stm32mp257_match_data = {
.pins = stm32mp257_pins,
.npins = ARRAY_SIZE(stm32mp257_pins),
+ .secure_control = true,
+ .rif_control = true,
};
static struct stm32_pinctrl_match_data stm32mp257_z_match_data = {
.pins = stm32mp257_z_pins,
.npins = ARRAY_SIZE(stm32mp257_z_pins),
+ .secure_control = true,
+ .rif_control = true,
};
static const struct of_device_id stm32mp257_pctrl_match[] = {
@@ -2560,6 +2565,7 @@ static const struct of_device_id stm32mp257_pctrl_match[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, stm32mp257_pctrl_match);
static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume)
@@ -2573,9 +2579,8 @@ static struct platform_driver stm32mp257_pinctrl_driver = {
.pm = &stm32_pinctrl_dev_pm_ops,
},
};
+module_platform_driver(stm32mp257_pinctrl_driver);
-static int __init stm32mp257_pinctrl_init(void)
-{
- return platform_driver_register(&stm32mp257_pinctrl_driver);
-}
-arch_initcall(stm32mp257_pinctrl_init);
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP257 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
index ae156f779a16..3e924aa86cc2 100644
--- a/drivers/pinctrl/sunplus/sppctl.c
+++ b/drivers/pinctrl/sunplus/sppctl.c
@@ -461,13 +461,15 @@ static int sppctl_gpio_get(struct gpio_chip *chip, unsigned int offset)
return (reg & BIT(bit_off)) ? 1 : 0;
}
-static void sppctl_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
+static int sppctl_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct sppctl_gpio_chip *spp_gchip = gpiochip_get_data(chip);
u32 reg_off, reg;
reg = sppctl_prep_moon_reg_and_offset(offset, &reg_off, val);
sppctl_gpio_out_writel(spp_gchip, reg, reg_off);
+
+ return 0;
}
static int sppctl_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
index 696d7dd8d87b..2e3bd36a4410 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c
@@ -45,7 +45,7 @@ static const struct sunxi_desc_pin sun8i_v3s_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2"), /* D1 */
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PB_EINT3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index bf8612d72daa..0fb057a07dcc 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -408,6 +408,7 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
const char *function, *pin_prop;
const char *group;
int ret, npins, nmaps, configlen = 0, i = 0;
+ struct pinctrl_map *new_map;
*map = NULL;
*num_maps = 0;
@@ -482,9 +483,13 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
* We know have the number of maps we need, we can resize our
* map array
*/
- *map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL);
- if (!*map)
- return -ENOMEM;
+ new_map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL);
+ if (!new_map) {
+ ret = -ENOMEM;
+ goto err_free_map;
+ }
+
+ *map = new_map;
return 0;
@@ -955,8 +960,8 @@ static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
return val;
}
-static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
- unsigned offset, int value)
+static int sunxi_pinctrl_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
u32 reg, shift, mask, val;
@@ -976,6 +981,8 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
writel(val, pctl->membase + reg);
raw_spin_unlock_irqrestore(&pctl->lock, flags);
+
+ return 0;
}
static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
@@ -1646,7 +1653,7 @@ int sunxi_pinctrl_init_with_flags(struct platform_device *pdev,
}
}
- pctl->domain = irq_domain_create_linear(of_fwnode_handle(node),
+ pctl->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev),
pctl->desc->irq_banks * IRQ_PER_BANK,
&sunxi_pinctrl_irq_domain_ops, pctl);
if (!pctl->domain) {
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index fce92111a32e..7213a8d4bf09 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -507,8 +507,8 @@ static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
return !!(readl_relaxed(data->base + reg_data_in) & BIT(bit));
}
-static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
- int val)
+static int wmt_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct wmt_pinctrl_data *data = gpiochip_get_data(chip);
u32 bank = WMT_BANK_FROM_PIN(offset);
@@ -517,19 +517,26 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
if (reg_data_out == NO_REG) {
dev_err(data->dev, "no data out register defined\n");
- return;
+ return -EINVAL;
}
if (val)
wmt_setbits(data, reg_data_out, BIT(bit));
else
wmt_clearbits(data, reg_data_out, BIT(bit));
+
+ return 0;
}
static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
- wmt_gpio_set_value(chip, offset, value);
+ int ret;
+
+ ret = wmt_gpio_set_value(chip, offset, value);
+ if (ret)
+ return ret;
+
return pinctrl_gpio_direction_output(chip, offset);
}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
index 77184c8b42ea..7f0ada4fa606 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-gpio.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
@@ -1024,8 +1024,8 @@ int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
mcu->gc.direction_output = omnia_gpio_direction_output;
mcu->gc.get = omnia_gpio_get;
mcu->gc.get_multiple = omnia_gpio_get_multiple;
- mcu->gc.set_rv = omnia_gpio_set;
- mcu->gc.set_multiple_rv = omnia_gpio_set_multiple;
+ mcu->gc.set = omnia_gpio_set;
+ mcu->gc.set_multiple = omnia_gpio_set_multiple;
mcu->gc.init_valid_mask = omnia_gpio_init_valid_mask;
mcu->gc.can_sleep = true;
mcu->gc.names = omnia_mcu_gpio_names;
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index 54986a752f7d..a94009203e01 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -504,7 +504,7 @@ static int init_acpi(struct device *dev)
dev_set_drvdata(dev, &hsmp_pdev->sock[sock_ind]);
- return ret;
+ return 0;
}
static const struct bin_attribute hsmp_metric_tbl_attr = {
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 885e2f8136fd..19f82c1d3090 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -356,6 +356,11 @@ ssize_t hsmp_metric_tbl_read(struct hsmp_socket *sock, char *buf, size_t size)
if (!sock || !buf)
return -EINVAL;
+ if (!sock->metric_tbl_addr) {
+ dev_err(sock->dev, "Metrics table address not available\n");
+ return -ENOMEM;
+ }
+
/* Do not support lseek(), also don't allow more than the size of metric table */
if (size != sizeof(struct hsmp_metric_table)) {
dev_err(sock->dev, "Wrong buffer size\n");
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index ded4c84f5ed1..7ffc659b2794 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -28,10 +28,15 @@ static struct quirk_entry quirk_spurious_8042 = {
.spurious_8042 = true,
};
+static struct quirk_entry quirk_s2idle_spurious_8042 = {
+ .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH,
+ .spurious_8042 = true,
+};
+
static const struct dmi_system_id fwbug_list[] = {
{
.ident = "L14 Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
@@ -39,7 +44,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14s Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
@@ -47,7 +52,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "X13 Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
@@ -55,7 +60,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14 Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
@@ -63,7 +68,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14 Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
@@ -71,7 +76,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14 Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
@@ -79,7 +84,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14s Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
@@ -87,7 +92,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14s Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
@@ -95,7 +100,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "P14s Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
@@ -103,7 +108,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "P14s Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
@@ -111,7 +116,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "P14s Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
@@ -152,7 +157,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad 1 14AMN7",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
@@ -160,7 +165,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad 1 15AMN7",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
@@ -168,7 +173,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad 1 15AMN7",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
@@ -176,7 +181,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad Slim 3 14AMN8",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
@@ -184,7 +189,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad Slim 3 15AMN8",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
@@ -193,7 +198,7 @@ static const struct dmi_system_id fwbug_list[] = {
/* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */
{
.ident = "Lenovo Yoga 6 13ALC6",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82ND"),
@@ -202,7 +207,7 @@ static const struct dmi_system_id fwbug_list[] = {
/* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
{
.ident = "HP Laptop 15s-eq2xxx",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"),
@@ -285,6 +290,16 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
{
const struct dmi_system_id *dmi_id;
+ /*
+ * IRQ1 may cause an interrupt during resume even without a keyboard
+ * press.
+ *
+ * Affects Renoir, Cezanne and Barcelo SoCs
+ *
+ * A solution is available in PMFW 64.66.0, but it must be activated by
+ * SBIOS. If the SBIOS is known to have the fix, a quirk can be added for
+ * a given system to avoid the workaround.
+ */
if (dev->cpu_id == AMD_CPU_ID_CZN)
dev->disable_8042_wakeup = true;
@@ -295,6 +310,5 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
if (dev->quirks->s2idle_bug_mmio)
pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
dmi_id->ident);
- if (dev->quirks->spurious_8042)
- dev->disable_8042_wakeup = true;
+ dev->disable_8042_wakeup = dev->quirks->spurious_8042;
}
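With the rework above, the Cezanne-class default and the DMI quirk are combined in one place: disable_8042_wakeup starts out true for AMD_CPU_ID_CZN and is then overwritten by quirks->spurious_8042 whenever a fwbug_list entry matches. A minimal standalone sketch of that decision, assuming (as the surrounding code suggests) that the final assignment only runs when a DMI entry matched; want_irq1_workaround() and the sample quirks are illustrative, not part of the patch:

#include <stdbool.h>
#include <stdio.h>

struct quirk_entry { bool spurious_8042; };	/* other fields omitted */

/* A matching DMI quirk overrides the SoC default in either direction. */
static bool want_irq1_workaround(bool cezanne_class, const struct quirk_entry *dmi_quirk)
{
	bool disable_8042_wakeup = cezanne_class;	/* default for Renoir/Cezanne/Barcelo */

	if (dmi_quirk)
		disable_8042_wakeup = dmi_quirk->spurious_8042;
	return disable_8042_wakeup;
}

int main(void)
{
	const struct quirk_entry fixed_sbios = { .spurious_8042 = false };
	const struct quirk_entry known_bad   = { .spurious_8042 = true };

	printf("%d %d %d\n",
	       want_irq1_workaround(true, NULL),	 /* 1: Cezanne, no quirk listed */
	       want_irq1_workaround(true, &fixed_sbios), /* 0: quirk opts the machine out */
	       want_irq1_workaround(false, &known_bad)); /* 1: quirk forces the workaround */
	return 0;
}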
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 0b9b23eb7c2c..bd318fd02ccf 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -530,19 +530,6 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev)
{
struct device *d;
- int rc;
-
- /* cezanne platform firmware has a fix in 64.66.0 */
- if (pdev->cpu_id == AMD_CPU_ID_CZN) {
- if (!pdev->major) {
- rc = amd_pmc_get_smu_version(pdev);
- if (rc)
- return rc;
- }
-
- if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
- return 0;
- }
d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
if (!d)
diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c
index bb3393bbfb89..28012eebdb10 100644
--- a/drivers/platform/x86/barco-p50-gpio.c
+++ b/drivers/platform/x86/barco-p50-gpio.c
@@ -316,7 +316,7 @@ static int p50_gpio_probe(struct platform_device *pdev)
p50->gc.base = -1;
p50->gc.get_direction = p50_gpio_get_direction;
p50->gc.get = p50_gpio_get;
- p50->gc.set_rv = p50_gpio_set;
+ p50->gc.set = p50_gpio_set;
/* reset mbox */
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index 01c72b91a50d..444786102f02 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -39,6 +39,7 @@ struct token_sysfs_data {
struct smbios_device {
struct list_head list;
struct device *device;
+ int priority;
int (*call_fn)(struct calling_interface_buffer *arg);
};
@@ -145,7 +146,7 @@ int dell_smbios_error(int value)
}
EXPORT_SYMBOL_GPL(dell_smbios_error);
-int dell_smbios_register_device(struct device *d, void *call_fn)
+int dell_smbios_register_device(struct device *d, int priority, void *call_fn)
{
struct smbios_device *priv;
@@ -154,6 +155,7 @@ int dell_smbios_register_device(struct device *d, void *call_fn)
return -ENOMEM;
get_device(d);
priv->device = d;
+ priv->priority = priority;
priv->call_fn = call_fn;
mutex_lock(&smbios_mutex);
list_add_tail(&priv->list, &smbios_device_list);
@@ -292,28 +294,25 @@ EXPORT_SYMBOL_GPL(dell_smbios_call_filter);
int dell_smbios_call(struct calling_interface_buffer *buffer)
{
- int (*call_fn)(struct calling_interface_buffer *) = NULL;
- struct device *selected_dev = NULL;
+ struct smbios_device *selected = NULL;
struct smbios_device *priv;
int ret;
mutex_lock(&smbios_mutex);
list_for_each_entry(priv, &smbios_device_list, list) {
- if (!selected_dev || priv->device->id >= selected_dev->id) {
- dev_dbg(priv->device, "Trying device ID: %d\n",
- priv->device->id);
- call_fn = priv->call_fn;
- selected_dev = priv->device;
+ if (!selected || priv->priority >= selected->priority) {
+ dev_dbg(priv->device, "Trying device ID: %d\n", priv->priority);
+ selected = priv;
}
}
- if (!selected_dev) {
+ if (!selected) {
ret = -ENODEV;
pr_err("No dell-smbios drivers are loaded\n");
goto out_smbios_call;
}
- ret = call_fn(buffer);
+ ret = selected->call_fn(buffer);
out_smbios_call:
mutex_unlock(&smbios_mutex);
diff --git a/drivers/platform/x86/dell/dell-smbios-smm.c b/drivers/platform/x86/dell/dell-smbios-smm.c
index 4d375985c85f..7055e2c40f34 100644
--- a/drivers/platform/x86/dell/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell/dell-smbios-smm.c
@@ -125,8 +125,7 @@ int init_dell_smbios_smm(void)
if (ret)
goto fail_platform_device_add;
- ret = dell_smbios_register_device(&platform_device->dev,
- &dell_smbios_smm_call);
+ ret = dell_smbios_register_device(&platform_device->dev, 0, &dell_smbios_smm_call);
if (ret)
goto fail_register;
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index ae9012549560..a7dca8c59d60 100644
--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -264,9 +264,7 @@ static int dell_smbios_wmi_probe(struct wmi_device *wdev, const void *context)
if (ret)
return ret;
- /* ID is used by dell-smbios to set priority of drivers */
- wdev->dev.id = 1;
- ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call);
+ ret = dell_smbios_register_device(&wdev->dev, 1, &dell_smbios_wmi_call);
if (ret)
return ret;
diff --git a/drivers/platform/x86/dell/dell-smbios.h b/drivers/platform/x86/dell/dell-smbios.h
index 77baa15eb523..f421b8533a9e 100644
--- a/drivers/platform/x86/dell/dell-smbios.h
+++ b/drivers/platform/x86/dell/dell-smbios.h
@@ -64,7 +64,7 @@ struct calling_interface_structure {
struct calling_interface_token tokens[];
} __packed;
-int dell_smbios_register_device(struct device *d, void *call_fn);
+int dell_smbios_register_device(struct device *d, int priority, void *call_fn);
void dell_smbios_unregister_device(struct device *d);
int dell_smbios_error(int value);
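The register/call changes above replace the old device-ID trick with an explicit priority: dell_smbios_call() now walks the registered backends and keeps the one with the highest priority, and because the comparison uses ">=", a later registration wins a tie. A standalone sketch of that selection rule over a plain array; pick_backend() and the sample data are illustrative, not kernel code:

#include <stdio.h>

struct backend {
	const char *name;
	int priority;
};

/* Highest priority wins; on a tie, the later entry wins (">=" as in dell_smbios_call()). */
static const struct backend *pick_backend(const struct backend *list, int n)
{
	const struct backend *selected = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (!selected || list[i].priority >= selected->priority)
			selected = &list[i];
	}
	return selected;
}

int main(void)
{
	const struct backend backends[] = {
		{ "dell-smbios-smm", 0 },	/* registers with priority 0 */
		{ "dell-smbios-wmi", 1 },	/* registers with priority 1 */
	};
	const struct backend *b = pick_backend(backends, 2);

	printf("calling via %s\n", b ? b->name : "none");	/* dell-smbios-wmi */
	return 0;
}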
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index db5fdee2109c..60c8ac8d902c 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -92,9 +92,9 @@ static const char * const victus_thermal_profile_boards[] = {
"8A25"
};
-/* DMI Board names of Victus 16-s1000 laptops */
+/* DMI Board names of Victus 16-r1000 and Victus 16-s1000 laptops */
static const char * const victus_s_thermal_profile_boards[] = {
- "8C9C"
+ "8C99", "8C9C"
};
enum hp_wmi_radio {
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 9bc24ed19c64..6f5629dc3f8d 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -193,7 +193,7 @@ static int int0002_probe(struct platform_device *pdev)
chip->parent = dev;
chip->owner = THIS_MODULE;
chip->get = int0002_gpio_get;
- chip->set_rv = int0002_gpio_set;
+ chip->set = int0002_gpio_set;
chip->direction_input = int0002_gpio_get;
chip->direction_output = int0002_gpio_direction_output;
chip->base = -1;
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 4c0aed6e626f..bdfb8a800c54 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -193,6 +193,10 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3
*con_id = "privacy-led";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
+ case INT3472_GPIO_TYPE_HOTPLUG_DETECT:
+ *con_id = "hpd";
+ *gpio_flags = GPIO_ACTIVE_HIGH;
+ break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
*con_id = "avdd";
*gpio_flags = GPIO_ACTIVE_HIGH;
@@ -223,6 +227,7 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3
* 0x0b Power enable
* 0x0c Clock enable
* 0x0d Privacy LED
+ * 0x13 Hotplug detect
*
* There are some known platform specific quirks where that does not quite
* hold up; for example where a pin with type 0x01 (Power down) is mapped to
@@ -292,6 +297,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
switch (type) {
case INT3472_GPIO_TYPE_RESET:
case INT3472_GPIO_TYPE_POWERDOWN:
+ case INT3472_GPIO_TYPE_HOTPLUG_DETECT:
ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, con_id, gpio_flags);
if (ret)
err_msg = "Failed to map GPIO pin to sensor\n";
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index 6df55c8e16b7..bfcf92aa4d69 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -192,9 +192,14 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu
static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index)
{
struct tpmi_uncore_cluster_info *cluster_info;
+ struct tpmi_uncore_struct *uncore_root;
u64 control;
cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+ uncore_root = cluster_info->uncore_root;
+
+ if (uncore_root->write_blocked)
+ return -EPERM;
if (cluster_info->root_domain)
return -ENODATA;
diff --git a/drivers/platform/x86/portwell-ec.c b/drivers/platform/x86/portwell-ec.c
index 3e019c51913e..322f296e9315 100644
--- a/drivers/platform/x86/portwell-ec.c
+++ b/drivers/platform/x86/portwell-ec.c
@@ -86,7 +86,7 @@ static int pwec_gpio_get(struct gpio_chip *chip, unsigned int offset)
return pwec_read(PORTWELL_GPIO_VAL_REG) & BIT(offset) ? 1 : 0;
}
-static int pwec_gpio_set_rv(struct gpio_chip *chip, unsigned int offset, int val)
+static int pwec_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
u8 tmp = pwec_read(PORTWELL_GPIO_VAL_REG);
@@ -130,7 +130,7 @@ static struct gpio_chip pwec_gpio_chip = {
.direction_input = pwec_gpio_direction_input,
.direction_output = pwec_gpio_direction_output,
.get = pwec_gpio_get,
- .set_rv = pwec_gpio_set_rv,
+ .set = pwec_gpio_set,
.base = -1,
.ngpio = PORTWELL_GPIO_PINS,
};
diff --git a/drivers/platform/x86/silicom-platform.c b/drivers/platform/x86/silicom-platform.c
index 63b5da410ed5..266f7bc5e416 100644
--- a/drivers/platform/x86/silicom-platform.c
+++ b/drivers/platform/x86/silicom-platform.c
@@ -466,7 +466,7 @@ static struct gpio_chip silicom_gpio_chip = {
.direction_input = silicom_gpio_direction_input,
.direction_output = silicom_gpio_direction_output,
.get = silicom_gpio_get,
- .set_rv = silicom_gpio_set,
+ .set = silicom_gpio_set,
.base = -1,
.ngpio = ARRAY_SIZE(plat_0222_gpio_channels),
.names = plat_0222_gpio_names,
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 733d81262159..77ea3129c708 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -227,6 +227,7 @@ config POWER_RESET_ST
config POWER_RESET_TORADEX_EC
tristate "Toradex Embedded Controller power-off and reset driver"
+ depends on ARCH_MXC || COMPILE_TEST
depends on I2C
select REGMAP_I2C
help
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index e9fe08ee3812..ecf15694f925 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -129,7 +129,7 @@ static void at91_wakeup_status(struct platform_device *pdev)
else if (SHDW_RTTWK(reg, &rcfg->shdwc))
reason = "RTT";
- pr_info("AT91: Wake-Up source: %s\n", reason);
+ dev_info(&pdev->dev, "Wake-Up source: %s\n", reason);
}
static void at91_poweroff(void)
diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c
index 1344b361a475..7e108982a582 100644
--- a/drivers/power/reset/qcom-pon.c
+++ b/drivers/power/reset/qcom-pon.c
@@ -19,7 +19,7 @@
#define NO_REASON_SHIFT 0
-struct pm8916_pon {
+struct qcom_pon {
struct device *dev;
struct regmap *regmap;
u32 baseaddr;
@@ -27,11 +27,11 @@ struct pm8916_pon {
long reason_shift;
};
-static int pm8916_reboot_mode_write(struct reboot_mode_driver *reboot,
+static int qcom_pon_reboot_mode_write(struct reboot_mode_driver *reboot,
unsigned int magic)
{
- struct pm8916_pon *pon = container_of
- (reboot, struct pm8916_pon, reboot_mode);
+ struct qcom_pon *pon = container_of
+ (reboot, struct qcom_pon, reboot_mode);
int ret;
ret = regmap_update_bits(pon->regmap,
@@ -44,9 +44,9 @@ static int pm8916_reboot_mode_write(struct reboot_mode_driver *reboot,
return ret;
}
-static int pm8916_pon_probe(struct platform_device *pdev)
+static int qcom_pon_probe(struct platform_device *pdev)
{
- struct pm8916_pon *pon;
+ struct qcom_pon *pon;
long reason_shift;
int error;
@@ -72,7 +72,7 @@ static int pm8916_pon_probe(struct platform_device *pdev)
if (reason_shift != NO_REASON_SHIFT) {
pon->reboot_mode.dev = &pdev->dev;
pon->reason_shift = reason_shift;
- pon->reboot_mode.write = pm8916_reboot_mode_write;
+ pon->reboot_mode.write = qcom_pon_reboot_mode_write;
error = devm_reboot_mode_register(&pdev->dev, &pon->reboot_mode);
if (error) {
dev_err(&pdev->dev, "can't register reboot mode\n");
@@ -85,7 +85,7 @@ static int pm8916_pon_probe(struct platform_device *pdev)
return devm_of_platform_populate(&pdev->dev);
}
-static const struct of_device_id pm8916_pon_id_table[] = {
+static const struct of_device_id qcom_pon_id_table[] = {
{ .compatible = "qcom,pm8916-pon", .data = (void *)GEN1_REASON_SHIFT },
{ .compatible = "qcom,pm8941-pon", .data = (void *)NO_REASON_SHIFT },
{ .compatible = "qcom,pms405-pon", .data = (void *)GEN1_REASON_SHIFT },
@@ -93,16 +93,16 @@ static const struct of_device_id pm8916_pon_id_table[] = {
{ .compatible = "qcom,pmk8350-pon", .data = (void *)GEN2_REASON_SHIFT },
{ }
};
-MODULE_DEVICE_TABLE(of, pm8916_pon_id_table);
+MODULE_DEVICE_TABLE(of, qcom_pon_id_table);
-static struct platform_driver pm8916_pon_driver = {
- .probe = pm8916_pon_probe,
+static struct platform_driver qcom_pon_driver = {
+ .probe = qcom_pon_probe,
.driver = {
- .name = "pm8916-pon",
- .of_match_table = pm8916_pon_id_table,
+ .name = "qcom-pon",
+ .of_match_table = qcom_pon_id_table,
},
};
-module_platform_driver(pm8916_pon_driver);
+module_platform_driver(qcom_pon_driver);
-MODULE_DESCRIPTION("pm8916 Power On driver");
+MODULE_DESCRIPTION("Qualcomm Power On driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 4f5f8e3507f8..f943c9150b32 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -120,5 +120,5 @@ obj-$(CONFIG_BATTERY_ACER_A500) += acer_a500_battery.o
obj-$(CONFIG_BATTERY_SURFACE) += surface_battery.o
obj-$(CONFIG_CHARGER_SURFACE) += surface_charger.o
obj-$(CONFIG_BATTERY_UG3105) += ug3105_battery.o
-obj-$(CONFIG_CHARGER_QCOM_SMB2) += qcom_pmi8998_charger.o
+obj-$(CONFIG_CHARGER_QCOM_SMB2) += qcom_smbx.o
obj-$(CONFIG_FUEL_GAUGE_MM8013) += mm8013.o
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 9e3b9181ee76..917c26ee56bc 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1674,7 +1674,7 @@ static int bq2415x_probe(struct i2c_client *client)
/* Query for initial reported_mode and set it */
if (bq->nb.notifier_call) {
if (np) {
- notify_psy = power_supply_get_by_phandle(np,
+ notify_psy = power_supply_get_by_reference(of_fwnode_handle(np),
"ti,usb-charger-detection");
if (IS_ERR(notify_psy))
notify_psy = NULL;
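The OF-only power_supply_get_by_phandle() call becomes the fwnode-based power_supply_get_by_reference() introduced by the core change later in this patch. A hypothetical caller, following the kernel-doc's NULL-or-ERR_PTR return convention:

	struct power_supply *psy;

	psy = power_supply_get_by_reference(dev_fwnode(dev),
					    "ti,usb-charger-detection");
	if (IS_ERR_OR_NULL(psy))
		return;			/* no supply described, or lookup error */
	/* ... use psy ... */
	power_supply_put(psy);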
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 1867beadd7af..e1510c7fdab3 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -504,7 +504,6 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
else
count = sysfs_emit(buf, "%hhx\n", v);
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
return count;
@@ -535,7 +534,6 @@ static ssize_t bq24190_sysfs_store(struct device *dev,
if (ret)
count = ret;
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
return count;
@@ -562,7 +560,6 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
else
ret = bq24190_charger_set_charge_type(bdi, &val);
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
return ret;
@@ -605,7 +602,6 @@ static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
}
out:
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
return ret;
@@ -638,7 +634,6 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev)
BQ24190_REG_POC_CHG_CONFIG_MASK,
BQ24190_REG_POC_CHG_CONFIG_SHIFT, &val);
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
if (ret)
@@ -675,7 +670,6 @@ static int bq24296_vbus_is_enabled(struct regulator_dev *dev)
BQ24296_REG_POC_OTG_CONFIG_MASK,
BQ24296_REG_POC_OTG_CONFIG_SHIFT, &val);
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
if (ret)
@@ -1376,7 +1370,6 @@ static int bq24190_charger_get_property(struct power_supply *psy,
ret = -ENODATA;
}
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
return ret;
@@ -1419,7 +1412,6 @@ static int bq24190_charger_set_property(struct power_supply *psy,
ret = -EINVAL;
}
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
return ret;
@@ -1682,7 +1674,6 @@ static int bq24190_battery_get_property(struct power_supply *psy,
ret = -ENODATA;
}
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
return ret;
@@ -1713,7 +1704,6 @@ static int bq24190_battery_set_property(struct power_supply *psy,
ret = -EINVAL;
}
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
return ret;
@@ -1861,7 +1851,6 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
return IRQ_NONE;
}
bq24190_check_status(bdi);
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
bdi->irq_event = false;
@@ -1983,6 +1972,8 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi)
v = info->constant_charge_voltage_max_uv;
if (v >= bq24190_cvc_vreg_values[0] && v <= bdi->vreg_max)
bdi->vreg = bdi->vreg_max = v;
+
+ power_supply_put_battery_info(bdi->charger, info);
}
return 0;
@@ -2186,7 +2177,6 @@ static int bq24190_probe(struct i2c_client *client)
enable_irq_wake(client->irq);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -2273,7 +2263,6 @@ static __maybe_unused int bq24190_pm_suspend(struct device *dev)
bq24190_register_reset(bdi);
if (error >= 0) {
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
}
@@ -2298,7 +2287,6 @@ static __maybe_unused int bq24190_pm_resume(struct device *dev)
bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
if (error >= 0) {
- pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
}
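Every pm_runtime_mark_last_busy() removed in this file sat directly in front of a pm_runtime_put_autosuspend(); the runtime-PM core now updates the last-busy timestamp inside the *_put_autosuspend() helpers, so the explicit call is redundant. The remaining idiom, sketched for a hypothetical device:

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
	/* ... touch the hardware ... */
	pm_runtime_put_autosuspend(dev);	/* also marks the device busy */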
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index 9f9b6019f8e1..ae14162f017a 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -387,7 +387,7 @@ static void bq256xx_usb_work(struct work_struct *data)
}
}
-static struct reg_default bq2560x_reg_defs[] = {
+static const struct reg_default bq2560x_reg_defs[] = {
{BQ256XX_INPUT_CURRENT_LIMIT, 0x17},
{BQ256XX_CHARGER_CONTROL_0, 0x1a},
{BQ256XX_CHARGE_CURRENT_LIMIT, 0xa2},
@@ -398,7 +398,7 @@ static struct reg_default bq2560x_reg_defs[] = {
{BQ256XX_CHARGER_CONTROL_3, 0x4c},
};
-static struct reg_default bq25611d_reg_defs[] = {
+static const struct reg_default bq25611d_reg_defs[] = {
{BQ256XX_INPUT_CURRENT_LIMIT, 0x17},
{BQ256XX_CHARGER_CONTROL_0, 0x1a},
{BQ256XX_CHARGE_CURRENT_LIMIT, 0x91},
@@ -411,7 +411,7 @@ static struct reg_default bq25611d_reg_defs[] = {
{BQ256XX_CHARGER_CONTROL_4, 0x75},
};
-static struct reg_default bq25618_619_reg_defs[] = {
+static const struct reg_default bq25618_619_reg_defs[] = {
{BQ256XX_INPUT_CURRENT_LIMIT, 0x17},
{BQ256XX_CHARGER_CONTROL_0, 0x1a},
{BQ256XX_CHARGE_CURRENT_LIMIT, 0x91},
diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
index 4ff76e3dddf6..723858d62d14 100644
--- a/drivers/power/supply/bq25980_charger.c
+++ b/drivers/power/supply/bq25980_charger.c
@@ -104,7 +104,7 @@ struct bq25980_device {
int watchdog_timer;
};
-static struct reg_default bq25980_reg_defs[] = {
+static const struct reg_default bq25980_reg_defs[] = {
{BQ25980_BATOVP, 0x5A},
{BQ25980_BATOVP_ALM, 0x46},
{BQ25980_BATOCP, 0x51},
@@ -159,7 +159,7 @@ static struct reg_default bq25980_reg_defs[] = {
{BQ25980_CHRGR_CTRL_6, 0x0},
};
-static struct reg_default bq25975_reg_defs[] = {
+static const struct reg_default bq25975_reg_defs[] = {
{BQ25980_BATOVP, 0x5A},
{BQ25980_BATOVP_ALM, 0x46},
{BQ25980_BATOCP, 0x51},
@@ -214,7 +214,7 @@ static struct reg_default bq25975_reg_defs[] = {
{BQ25980_CHRGR_CTRL_6, 0x0},
};
-static struct reg_default bq25960_reg_defs[] = {
+static const struct reg_default bq25960_reg_defs[] = {
{BQ25980_BATOVP, 0x5A},
{BQ25980_BATOVP_ALM, 0x46},
{BQ25980_BATOCP, 0x51},
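Marking the reg_default tables const here and in bq256xx is a straightforward hardening change: regmap only reads them, and struct regmap_config already declares .reg_defaults as a const pointer. A minimal sketch with hypothetical names:

	static const struct reg_default foo_reg_defaults[] = {
		{ FOO_CTRL0, 0x1a },
		{ FOO_CTRL1, 0x4c },
	};

	static const struct regmap_config foo_regmap_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.cache_type	= REGCACHE_RBTREE,
		.reg_defaults	= foo_reg_defaults,
		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
	};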
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 13300dc60baf..d0c3008db534 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -689,9 +689,8 @@ static void cpcap_usb_detect(struct work_struct *work)
struct power_supply *battery;
battery = power_supply_get_by_name("battery");
- if (IS_ERR_OR_NULL(battery)) {
- dev_err(ddata->dev, "battery power_supply not available %li\n",
- PTR_ERR(battery));
+ if (!battery) {
+ dev_err(ddata->dev, "battery power_supply not available\n");
return;
}
diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
index 1cef2f860b5f..63077d38ea30 100644
--- a/drivers/power/supply/max14577_charger.c
+++ b/drivers/power/supply/max14577_charger.c
@@ -501,7 +501,7 @@ static struct max14577_charger_platform_data *max14577_charger_dt_init(
static struct max14577_charger_platform_data *max14577_charger_dt_init(
struct platform_device *pdev)
{
- return NULL;
+ return ERR_PTR(-ENODATA);
}
#endif /* CONFIG_OF */
@@ -572,7 +572,7 @@ static int max14577_charger_probe(struct platform_device *pdev)
chg->max14577 = max14577;
chg->pdata = max14577_charger_dt_init(pdev);
- if (IS_ERR_OR_NULL(chg->pdata))
+ if (IS_ERR(chg->pdata))
return PTR_ERR(chg->pdata);
ret = max14577_charger_reg_init(chg);
diff --git a/drivers/power/supply/max1720x_battery.c b/drivers/power/supply/max1720x_battery.c
index ea3912fd1de8..e2bd54ee3970 100644
--- a/drivers/power/supply/max1720x_battery.c
+++ b/drivers/power/supply/max1720x_battery.c
@@ -288,9 +288,10 @@ static int max172xx_voltage_to_ps(unsigned int reg)
return reg * 1250; /* in uV */
}
-static int max172xx_capacity_to_ps(unsigned int reg)
+static int max172xx_capacity_to_ps(unsigned int reg,
+ struct max1720x_device_info *info)
{
- return reg * 500; /* in uAh */
+ return reg * (500000 / info->rsense); /* in uAh */
}
/*
@@ -394,11 +395,11 @@ static int max1720x_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
ret = regmap_read(info->regmap, MAX172XX_DESIGN_CAP, &reg_val);
- val->intval = max172xx_capacity_to_ps(reg_val);
+ val->intval = max172xx_capacity_to_ps(reg_val, info);
break;
case POWER_SUPPLY_PROP_CHARGE_AVG:
ret = regmap_read(info->regmap, MAX172XX_REPCAP, &reg_val);
- val->intval = max172xx_capacity_to_ps(reg_val);
+ val->intval = max172xx_capacity_to_ps(reg_val, info);
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
ret = regmap_read(info->regmap, MAX172XX_TTE, &reg_val);
@@ -422,10 +423,12 @@ static int max1720x_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
ret = regmap_read(info->regmap, MAX172XX_FULL_CAP, &reg_val);
- val->intval = max172xx_capacity_to_ps(reg_val);
+ val->intval = max172xx_capacity_to_ps(reg_val, info);
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
ret = regmap_read(info->regmap, MAX172XX_DEV_NAME, &reg_val);
+ if (ret)
+ return ret;
reg_val = FIELD_GET(MAX172XX_DEV_NAME_TYPE_MASK, reg_val);
if (reg_val == MAX172XX_DEV_NAME_TYPE_MAX17201)
val->strval = max17201_model;
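The capacity conversion now scales with the configured sense resistor instead of hard-coding the 10 mOhm case. Assuming rsense holds the nRSense value in 10 uOhm units (so the 10 mOhm default reads as 1000), the 5.0 uVh/Rsense register LSB works out to 500000 / 1000 = 500 uAh per count, matching the old reg * 500, while a 5 mOhm shunt (rsense = 500) yields 1000 uAh per count.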
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index cfb0e3e0d4aa..9a28381e2607 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -18,7 +18,6 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/err.h>
-#include <linux/of.h>
#include <linux/power_supply.h>
#include <linux/property.h>
#include <linux/thermal.h>
@@ -196,24 +195,24 @@ static int __power_supply_populate_supplied_from(struct power_supply *epsy,
void *data)
{
struct power_supply *psy = data;
- struct device_node *np;
+ struct fwnode_handle *np;
int i = 0;
do {
- np = of_parse_phandle(psy->dev.of_node, "power-supplies", i++);
- if (!np)
+ np = fwnode_find_reference(psy->dev.fwnode, "power-supplies", i++);
+ if (IS_ERR(np))
break;
- if (np == epsy->dev.of_node) {
+ if (np == epsy->dev.fwnode) {
dev_dbg(&psy->dev, "%s: Found supply : %s\n",
psy->desc->name, epsy->desc->name);
psy->supplied_from[i-1] = (char *)epsy->desc->name;
psy->num_supplies++;
- of_node_put(np);
+ fwnode_handle_put(np);
break;
}
- of_node_put(np);
- } while (np);
+ fwnode_handle_put(np);
+ } while (true);
return 0;
}
@@ -232,16 +231,16 @@ static int power_supply_populate_supplied_from(struct power_supply *psy)
static int __power_supply_find_supply_from_node(struct power_supply *epsy,
void *data)
{
- struct device_node *np = data;
+ struct fwnode_handle *fwnode = data;
/* returning non-zero breaks out of power_supply_for_each_psy loop */
- if (epsy->dev.of_node == np)
+ if (epsy->dev.fwnode == fwnode)
return 1;
return 0;
}
-static int power_supply_find_supply_from_node(struct device_node *supply_node)
+static int power_supply_find_supply_from_fwnode(struct fwnode_handle *supply_node)
{
int error;
@@ -249,7 +248,7 @@ static int power_supply_find_supply_from_node(struct device_node *supply_node)
* power_supply_for_each_psy() either returns its own errors or values
* returned by __power_supply_find_supply_from_node().
*
- * __power_supply_find_supply_from_node() will return 0 (no match)
+ * __power_supply_find_supply_from_fwnode() will return 0 (no match)
* or 1 (match).
*
* We return 0 if power_supply_for_each_psy() returned 1, -EPROBE_DEFER if
@@ -262,7 +261,7 @@ static int power_supply_find_supply_from_node(struct device_node *supply_node)
static int power_supply_check_supplies(struct power_supply *psy)
{
- struct device_node *np;
+ struct fwnode_handle *np;
int cnt = 0;
/* If there is already a list honor it */
@@ -270,24 +269,24 @@ static int power_supply_check_supplies(struct power_supply *psy)
return 0;
/* No device node found, nothing to do */
- if (!psy->dev.of_node)
+ if (!psy->dev.fwnode)
return 0;
do {
int ret;
- np = of_parse_phandle(psy->dev.of_node, "power-supplies", cnt++);
- if (!np)
+ np = fwnode_find_reference(psy->dev.fwnode, "power-supplies", cnt++);
+ if (IS_ERR(np))
break;
- ret = power_supply_find_supply_from_node(np);
- of_node_put(np);
+ ret = power_supply_find_supply_from_fwnode(np);
+ fwnode_handle_put(np);
if (ret) {
dev_dbg(&psy->dev, "Failed to find supply!\n");
return ret;
}
- } while (np);
+ } while (!IS_ERR(np));
/* Missing valid "power-supplies" entries */
if (cnt == 1)
@@ -497,15 +496,14 @@ void power_supply_put(struct power_supply *psy)
}
EXPORT_SYMBOL_GPL(power_supply_put);
-#ifdef CONFIG_OF
-static int power_supply_match_device_node(struct device *dev, const void *data)
+static int power_supply_match_device_fwnode(struct device *dev, const void *data)
{
- return dev->parent && dev->parent->of_node == data;
+ return dev->parent && dev_fwnode(dev->parent) == data;
}
/**
- * power_supply_get_by_phandle() - Search for a power supply and returns its ref
- * @np: Pointer to device node holding phandle property
+ * power_supply_get_by_reference() - Search for a power supply and returns its ref
+ * @fwnode: Pointer to fwnode holding phandle property
* @property: Name of property holding a power supply name
*
* If power supply was found, it increases reference count for the
@@ -515,21 +513,21 @@ static int power_supply_match_device_node(struct device *dev, const void *data)
* Return: On success returns a reference to a power supply with
* matching name equals to value under @property, NULL or ERR_PTR otherwise.
*/
-struct power_supply *power_supply_get_by_phandle(struct device_node *np,
- const char *property)
+struct power_supply *power_supply_get_by_reference(struct fwnode_handle *fwnode,
+ const char *property)
{
- struct device_node *power_supply_np;
+ struct fwnode_handle *power_supply_fwnode;
struct power_supply *psy = NULL;
struct device *dev;
- power_supply_np = of_parse_phandle(np, property, 0);
- if (!power_supply_np)
- return ERR_PTR(-ENODEV);
+ power_supply_fwnode = fwnode_find_reference(fwnode, property, 0);
+ if (IS_ERR(power_supply_fwnode))
+ return ERR_CAST(power_supply_fwnode);
- dev = class_find_device(&power_supply_class, NULL, power_supply_np,
- power_supply_match_device_node);
+ dev = class_find_device(&power_supply_class, NULL, power_supply_fwnode,
+ power_supply_match_device_fwnode);
- of_node_put(power_supply_np);
+ fwnode_handle_put(power_supply_fwnode);
if (dev) {
psy = dev_to_psy(dev);
@@ -538,7 +536,7 @@ struct power_supply *power_supply_get_by_phandle(struct device_node *np,
return psy;
}
-EXPORT_SYMBOL_GPL(power_supply_get_by_phandle);
+EXPORT_SYMBOL_GPL(power_supply_get_by_reference);
static void devm_power_supply_put(struct device *dev, void *res)
{
@@ -548,27 +546,27 @@ static void devm_power_supply_put(struct device *dev, void *res)
}
/**
- * devm_power_supply_get_by_phandle() - Resource managed version of
- * power_supply_get_by_phandle()
+ * devm_power_supply_get_by_reference() - Resource managed version of
+ * power_supply_get_by_reference()
* @dev: Pointer to device holding phandle property
* @property: Name of property holding a power supply phandle
*
* Return: On success returns a reference to a power supply with
* matching name equals to value under @property, NULL or ERR_PTR otherwise.
*/
-struct power_supply *devm_power_supply_get_by_phandle(struct device *dev,
- const char *property)
+struct power_supply *devm_power_supply_get_by_reference(struct device *dev,
+ const char *property)
{
struct power_supply **ptr, *psy;
- if (!dev->of_node)
+ if (!dev_fwnode(dev))
return ERR_PTR(-ENODEV);
ptr = devres_alloc(devm_power_supply_put, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
- psy = power_supply_get_by_phandle(dev->of_node, property);
+ psy = power_supply_get_by_reference(dev_fwnode(dev), property);
if (IS_ERR_OR_NULL(psy)) {
devres_free(ptr);
} else {
@@ -577,40 +575,26 @@ struct power_supply *devm_power_supply_get_by_phandle(struct device *dev,
}
return psy;
}
-EXPORT_SYMBOL_GPL(devm_power_supply_get_by_phandle);
-#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(devm_power_supply_get_by_reference);
int power_supply_get_battery_info(struct power_supply *psy,
struct power_supply_battery_info **info_out)
{
struct power_supply_resistance_temp_table *resist_table;
struct power_supply_battery_info *info;
- struct device_node *battery_np = NULL;
- struct fwnode_reference_args args;
- struct fwnode_handle *fwnode = NULL;
+ struct fwnode_handle *srcnode, *fwnode;
const char *value;
- int err, len, index;
- const __be32 *list;
+ int err, len, index, proplen;
+ u32 *propdata __free(kfree) = NULL;
u32 min_max[2];
- if (psy->dev.of_node) {
- battery_np = of_parse_phandle(psy->dev.of_node, "monitored-battery", 0);
- if (!battery_np)
- return -ENODEV;
+ srcnode = dev_fwnode(&psy->dev);
+ if (!srcnode && psy->dev.parent)
+ srcnode = dev_fwnode(psy->dev.parent);
- fwnode = fwnode_handle_get(of_fwnode_handle(battery_np));
- } else if (psy->dev.parent) {
- err = fwnode_property_get_reference_args(
- dev_fwnode(psy->dev.parent),
- "monitored-battery", NULL, 0, 0, &args);
- if (err)
- return err;
-
- fwnode = args.fwnode;
- }
-
- if (!fwnode)
- return -ENOENT;
+ fwnode = fwnode_find_reference(srcnode, "monitored-battery", 0);
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
err = fwnode_property_read_string(fwnode, "compatible", &value);
if (err)
@@ -740,15 +724,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
info->temp_max = min_max[1];
}
- /*
- * The below code uses raw of-data parsing to parse
- * /schemas/types.yaml#/definitions/uint32-matrix
- * data, so for now this is only support with of.
- */
- if (!battery_np)
- goto out_ret_pointer;
-
- len = of_property_count_u32_elems(battery_np, "ocv-capacity-celsius");
+ len = fwnode_property_count_u32(fwnode, "ocv-capacity-celsius");
if (len < 0 && len != -EINVAL) {
err = len;
goto out_put_node;
@@ -757,13 +733,13 @@ int power_supply_get_battery_info(struct power_supply *psy,
err = -EINVAL;
goto out_put_node;
} else if (len > 0) {
- of_property_read_u32_array(battery_np, "ocv-capacity-celsius",
+ fwnode_property_read_u32_array(fwnode, "ocv-capacity-celsius",
info->ocv_temp, len);
}
for (index = 0; index < len; index++) {
struct power_supply_battery_ocv_table *table;
- int i, tab_len, size;
+ int i, tab_len;
char *propname __free(kfree) = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d",
index);
@@ -772,15 +748,28 @@ int power_supply_get_battery_info(struct power_supply *psy,
err = -ENOMEM;
goto out_put_node;
}
- list = of_get_property(battery_np, propname, &size);
- if (!list || !size) {
+ proplen = fwnode_property_count_u32(fwnode, propname);
+ if (proplen < 0 || proplen % 2 != 0) {
dev_err(&psy->dev, "failed to get %s\n", propname);
power_supply_put_battery_info(psy, info);
err = -EINVAL;
goto out_put_node;
}
- tab_len = size / (2 * sizeof(__be32));
+ u32 *propdata __free(kfree) = kcalloc(proplen, sizeof(*propdata), GFP_KERNEL);
+ if (!propdata) {
+ power_supply_put_battery_info(psy, info);
+ err = -ENOMEM;
+ goto out_put_node;
+ }
+ err = fwnode_property_read_u32_array(fwnode, propname, propdata, proplen);
+ if (err < 0) {
+ dev_err(&psy->dev, "failed to get %s\n", propname);
+ power_supply_put_battery_info(psy, info);
+ goto out_put_node;
+ }
+
+ tab_len = proplen / 2;
info->ocv_table_size[index] = tab_len;
info->ocv_table[index] = table =
@@ -792,18 +781,36 @@ int power_supply_get_battery_info(struct power_supply *psy,
}
for (i = 0; i < tab_len; i++) {
- table[i].ocv = be32_to_cpu(*list);
- list++;
- table[i].capacity = be32_to_cpu(*list);
- list++;
+ table[i].ocv = propdata[i*2];
+ table[i].capacity = propdata[i*2+1];
}
}
- list = of_get_property(battery_np, "resistance-temp-table", &len);
- if (!list || !len)
+ proplen = fwnode_property_count_u32(fwnode, "resistance-temp-table");
+ if (proplen == 0 || proplen == -EINVAL) {
+ err = 0;
goto out_ret_pointer;
+ } else if (proplen < 0 || proplen % 2 != 0) {
+ power_supply_put_battery_info(psy, info);
+ err = (proplen < 0) ? proplen : -EINVAL;
+ goto out_put_node;
+ }
+
+ propdata = kcalloc(proplen, sizeof(*propdata), GFP_KERNEL);
+ if (!propdata) {
+ power_supply_put_battery_info(psy, info);
+ err = -ENOMEM;
+ goto out_put_node;
+ }
+
+ err = fwnode_property_read_u32_array(fwnode, "resistance-temp-table",
+ propdata, proplen);
+ if (err < 0) {
+ power_supply_put_battery_info(psy, info);
+ goto out_put_node;
+ }
- info->resist_table_size = len / (2 * sizeof(__be32));
+ info->resist_table_size = proplen / 2;
info->resist_table = resist_table = devm_kcalloc(&psy->dev,
info->resist_table_size,
sizeof(*resist_table),
@@ -815,8 +822,8 @@ int power_supply_get_battery_info(struct power_supply *psy,
}
for (index = 0; index < info->resist_table_size; index++) {
- resist_table[index].temp = be32_to_cpu(*list++);
- resist_table[index].resistance = be32_to_cpu(*list++);
+ resist_table[index].temp = propdata[index*2];
+ resist_table[index].resistance = propdata[index*2+1];
}
out_ret_pointer:
@@ -825,7 +832,6 @@ out_ret_pointer:
out_put_node:
fwnode_handle_put(fwnode);
- of_node_put(battery_np);
return err;
}
EXPORT_SYMBOL_GPL(power_supply_get_battery_info);
@@ -1587,10 +1593,9 @@ __power_supply_register(struct device *parent,
dev_set_drvdata(dev, psy);
psy->desc = desc;
if (cfg) {
+ device_set_node(dev, cfg->fwnode);
dev->groups = cfg->attr_grp;
psy->drv_data = cfg->drv_data;
- dev->of_node =
- cfg->fwnode ? to_of_node(cfg->fwnode) : cfg->of_node;
psy->supplied_to = cfg->supplied_to;
psy->num_supplicants = cfg->num_supplicants;
}
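The supplier walk now goes through the fwnode API, so the same path can serve non-OF firmware descriptions as well. The loop used above, reduced to a standalone hypothetical sketch:

	struct fwnode_handle *ref;
	int i = 0;

	do {
		ref = fwnode_find_reference(dev_fwnode(dev), "power-supplies", i++);
		if (IS_ERR(ref))
			break;		/* no more references */
		/* ... match or record the supplier here ... */
		fwnode_handle_put(ref);
	} while (true);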
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index fe27676fbc7c..99808ea9851f 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -577,6 +577,8 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy,
val->intval = battmgr->status.capacity;
break;
case POWER_SUPPLY_PROP_CAPACITY:
+ if (battmgr->status.percent == (unsigned int)-1)
+ return -ENODATA;
val->intval = battmgr->status.percent;
break;
case POWER_SUPPLY_PROP_TEMP:
@@ -617,6 +619,7 @@ static const enum power_supply_property sc8280xp_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -981,6 +984,8 @@ static unsigned int qcom_battmgr_sc8280xp_parse_technology(const char *chemistry
{
if (!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN))
return POWER_SUPPLY_TECHNOLOGY_LION;
+ if (!strncmp(chemistry, "LIP", BATTMGR_CHEMISTRY_LEN))
+ return POWER_SUPPLY_TECHNOLOGY_LIPO;
pr_err("Unknown battery technology '%s'\n", chemistry);
return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
@@ -1063,6 +1068,26 @@ static void qcom_battmgr_sc8280xp_callback(struct qcom_battmgr *battmgr,
battmgr->ac.online = source == BATTMGR_CHARGING_SOURCE_AC;
battmgr->usb.online = source == BATTMGR_CHARGING_SOURCE_USB;
battmgr->wireless.online = source == BATTMGR_CHARGING_SOURCE_WIRELESS;
+ if (battmgr->info.last_full_capacity != 0) {
+ /*
+ * 100 * battmgr->status.capacity can overflow a 32bit
+ * unsigned integer. FW readings are in m{W/A}h, which
+ * are multiplied by 1000 converting them to u{W/A}h,
+ * the format the power_supply API expects.
+ * To avoid overflow use the original value for dividend
+ * and convert the divider back to m{W/A}h, which can be
+ * done without any loss of precision.
+ */
+ battmgr->status.percent =
+ (100 * le32_to_cpu(resp->status.capacity)) /
+ (battmgr->info.last_full_capacity / 1000);
+ } else {
+ /*
+ * Let the sysfs handler know no data is available at
+ * this time.
+ */
+ battmgr->status.percent = (unsigned int)-1;
+ }
break;
case BATTMGR_BAT_DISCHARGE_TIME:
battmgr->status.discharge_time = le32_to_cpu(resp->time);
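A quick number check of the overflow comment above: last_full_capacity is stored in uWh, so a 50 Wh pack is 50,000,000, and multiplying a reading of 45,000,000 uWh by 100 gives 4.5e9, which no longer fits in a 32-bit unsigned value. Using the raw firmware value in mWh instead (45,000 * 100 = 4,500,000) and dividing last_full_capacity back down by 1000 (50,000) produces the same 90% without overflowing.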
diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_smbx.c
index c2f8f2e24398..b1cb925581ec 100644
--- a/drivers/power/supply/qcom_pmi8998_charger.c
+++ b/drivers/power/supply/qcom_smbx.c
@@ -362,17 +362,17 @@ enum charger_status {
DISABLE_CHARGE,
};
-struct smb2_register {
+struct smb_init_register {
u16 addr;
u8 mask;
u8 val;
};
/**
- * struct smb2_chip - smb2 chip structure
+ * struct smb_chip - smb chip structure
* @dev: Device reference for power_supply
* @name: The platform device name
- * @base: Base address for smb2 registers
+ * @base: Base address for smb registers
* @regmap: Register map
* @batt_info: Battery data from DT
* @status_change_work: Worker to handle plug/unplug events
@@ -382,7 +382,7 @@ struct smb2_register {
* @usb_in_v_chan: USB_IN voltage measurement channel
* @chg_psy: Charger power supply instance
*/
-struct smb2_chip {
+struct smb_chip {
struct device *dev;
const char *name;
unsigned int base;
@@ -399,7 +399,7 @@ struct smb2_chip {
struct power_supply *chg_psy;
};
-static enum power_supply_property smb2_properties[] = {
+static enum power_supply_property smb_properties[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_CURRENT_MAX,
@@ -411,7 +411,7 @@ static enum power_supply_property smb2_properties[] = {
POWER_SUPPLY_PROP_USB_TYPE,
};
-static int smb2_get_prop_usb_online(struct smb2_chip *chip, int *val)
+static int smb_get_prop_usb_online(struct smb_chip *chip, int *val)
{
unsigned int stat;
int rc;
@@ -431,13 +431,13 @@ static int smb2_get_prop_usb_online(struct smb2_chip *chip, int *val)
* Qualcomm "automatic power source detection" aka APSD
* tells us what type of charger we're connected to.
*/
-static int smb2_apsd_get_charger_type(struct smb2_chip *chip, int *val)
+static int smb_apsd_get_charger_type(struct smb_chip *chip, int *val)
{
unsigned int apsd_stat, stat;
int usb_online = 0;
int rc;
- rc = smb2_get_prop_usb_online(chip, &usb_online);
+ rc = smb_get_prop_usb_online(chip, &usb_online);
if (!usb_online) {
*val = POWER_SUPPLY_USB_TYPE_UNKNOWN;
return rc;
@@ -471,13 +471,13 @@ static int smb2_apsd_get_charger_type(struct smb2_chip *chip, int *val)
return 0;
}
-static int smb2_get_prop_status(struct smb2_chip *chip, int *val)
+static int smb_get_prop_status(struct smb_chip *chip, int *val)
{
unsigned char stat[2];
int usb_online = 0;
int rc;
- rc = smb2_get_prop_usb_online(chip, &usb_online);
+ rc = smb_get_prop_usb_online(chip, &usb_online);
if (!usb_online) {
*val = POWER_SUPPLY_STATUS_DISCHARGING;
return rc;
@@ -519,7 +519,7 @@ static int smb2_get_prop_status(struct smb2_chip *chip, int *val)
}
}
-static inline int smb2_get_current_limit(struct smb2_chip *chip,
+static inline int smb_get_current_limit(struct smb_chip *chip,
unsigned int *val)
{
int rc = regmap_read(chip->regmap, chip->base + ICL_STATUS, val);
@@ -529,7 +529,7 @@ static inline int smb2_get_current_limit(struct smb2_chip *chip,
return rc;
}
-static int smb2_set_current_limit(struct smb2_chip *chip, unsigned int val)
+static int smb_set_current_limit(struct smb_chip *chip, unsigned int val)
{
unsigned char val_raw;
@@ -544,22 +544,22 @@ static int smb2_set_current_limit(struct smb2_chip *chip, unsigned int val)
val_raw);
}
-static void smb2_status_change_work(struct work_struct *work)
+static void smb_status_change_work(struct work_struct *work)
{
unsigned int charger_type, current_ua;
int usb_online = 0;
int count, rc;
- struct smb2_chip *chip;
+ struct smb_chip *chip;
- chip = container_of(work, struct smb2_chip, status_change_work.work);
+ chip = container_of(work, struct smb_chip, status_change_work.work);
- smb2_get_prop_usb_online(chip, &usb_online);
+ smb_get_prop_usb_online(chip, &usb_online);
if (!usb_online)
return;
for (count = 0; count < 3; count++) {
dev_dbg(chip->dev, "get charger type retry %d\n", count);
- rc = smb2_apsd_get_charger_type(chip, &charger_type);
+ rc = smb_apsd_get_charger_type(chip, &charger_type);
if (rc != -EAGAIN)
break;
msleep(100);
@@ -592,11 +592,11 @@ static void smb2_status_change_work(struct work_struct *work)
break;
}
- smb2_set_current_limit(chip, current_ua);
+ smb_set_current_limit(chip, current_ua);
power_supply_changed(chip->chg_psy);
}
-static int smb2_get_iio_chan(struct smb2_chip *chip, struct iio_channel *chan,
+static int smb_get_iio_chan(struct smb_chip *chip, struct iio_channel *chan,
int *val)
{
int rc;
@@ -617,7 +617,7 @@ static int smb2_get_iio_chan(struct smb2_chip *chip, struct iio_channel *chan,
return iio_read_channel_processed(chan, val);
}
-static int smb2_get_prop_health(struct smb2_chip *chip, int *val)
+static int smb_get_prop_health(struct smb_chip *chip, int *val)
{
int rc;
unsigned int stat;
@@ -651,11 +651,11 @@ static int smb2_get_prop_health(struct smb2_chip *chip, int *val)
}
}
-static int smb2_get_property(struct power_supply *psy,
+static int smb_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct smb2_chip *chip = power_supply_get_drvdata(psy);
+ struct smb_chip *chip = power_supply_get_drvdata(psy);
switch (psp) {
case POWER_SUPPLY_PROP_MANUFACTURER:
@@ -665,43 +665,43 @@ static int smb2_get_property(struct power_supply *psy,
val->strval = chip->name;
return 0;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- return smb2_get_current_limit(chip, &val->intval);
+ return smb_get_current_limit(chip, &val->intval);
case POWER_SUPPLY_PROP_CURRENT_NOW:
- return smb2_get_iio_chan(chip, chip->usb_in_i_chan,
+ return smb_get_iio_chan(chip, chip->usb_in_i_chan,
&val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- return smb2_get_iio_chan(chip, chip->usb_in_v_chan,
+ return smb_get_iio_chan(chip, chip->usb_in_v_chan,
&val->intval);
case POWER_SUPPLY_PROP_ONLINE:
- return smb2_get_prop_usb_online(chip, &val->intval);
+ return smb_get_prop_usb_online(chip, &val->intval);
case POWER_SUPPLY_PROP_STATUS:
- return smb2_get_prop_status(chip, &val->intval);
+ return smb_get_prop_status(chip, &val->intval);
case POWER_SUPPLY_PROP_HEALTH:
- return smb2_get_prop_health(chip, &val->intval);
+ return smb_get_prop_health(chip, &val->intval);
case POWER_SUPPLY_PROP_USB_TYPE:
- return smb2_apsd_get_charger_type(chip, &val->intval);
+ return smb_apsd_get_charger_type(chip, &val->intval);
default:
dev_err(chip->dev, "invalid property: %d\n", psp);
return -EINVAL;
}
}
-static int smb2_set_property(struct power_supply *psy,
+static int smb_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
{
- struct smb2_chip *chip = power_supply_get_drvdata(psy);
+ struct smb_chip *chip = power_supply_get_drvdata(psy);
switch (psp) {
case POWER_SUPPLY_PROP_CURRENT_MAX:
- return smb2_set_current_limit(chip, val->intval);
+ return smb_set_current_limit(chip, val->intval);
default:
dev_err(chip->dev, "No setter for property: %d\n", psp);
return -EINVAL;
}
}
-static int smb2_property_is_writable(struct power_supply *psy,
+static int smb_property_is_writable(struct power_supply *psy,
enum power_supply_property psp)
{
switch (psp) {
@@ -712,9 +712,9 @@ static int smb2_property_is_writable(struct power_supply *psy,
}
}
-static irqreturn_t smb2_handle_batt_overvoltage(int irq, void *data)
+static irqreturn_t smb_handle_batt_overvoltage(int irq, void *data)
{
- struct smb2_chip *chip = data;
+ struct smb_chip *chip = data;
unsigned int status;
regmap_read(chip->regmap, chip->base + BATTERY_CHARGER_STATUS_2,
@@ -729,9 +729,9 @@ static irqreturn_t smb2_handle_batt_overvoltage(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t smb2_handle_usb_plugin(int irq, void *data)
+static irqreturn_t smb_handle_usb_plugin(int irq, void *data)
{
- struct smb2_chip *chip = data;
+ struct smb_chip *chip = data;
power_supply_changed(chip->chg_psy);
@@ -741,18 +741,18 @@ static irqreturn_t smb2_handle_usb_plugin(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t smb2_handle_usb_icl_change(int irq, void *data)
+static irqreturn_t smb_handle_usb_icl_change(int irq, void *data)
{
- struct smb2_chip *chip = data;
+ struct smb_chip *chip = data;
power_supply_changed(chip->chg_psy);
return IRQ_HANDLED;
}
-static irqreturn_t smb2_handle_wdog_bark(int irq, void *data)
+static irqreturn_t smb_handle_wdog_bark(int irq, void *data)
{
- struct smb2_chip *chip = data;
+ struct smb_chip *chip = data;
int rc;
power_supply_changed(chip->chg_psy);
@@ -765,22 +765,22 @@ static irqreturn_t smb2_handle_wdog_bark(int irq, void *data)
return IRQ_HANDLED;
}
-static const struct power_supply_desc smb2_psy_desc = {
+static const struct power_supply_desc smb_psy_desc = {
.name = "pmi8998_charger",
.type = POWER_SUPPLY_TYPE_USB,
.usb_types = BIT(POWER_SUPPLY_USB_TYPE_SDP) |
BIT(POWER_SUPPLY_USB_TYPE_CDP) |
BIT(POWER_SUPPLY_USB_TYPE_DCP) |
BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
- .properties = smb2_properties,
- .num_properties = ARRAY_SIZE(smb2_properties),
- .get_property = smb2_get_property,
- .set_property = smb2_set_property,
- .property_is_writeable = smb2_property_is_writable,
+ .properties = smb_properties,
+ .num_properties = ARRAY_SIZE(smb_properties),
+ .get_property = smb_get_property,
+ .set_property = smb_set_property,
+ .property_is_writeable = smb_property_is_writable,
};
/* Init sequence derived from vendor downstream driver */
-static const struct smb2_register smb2_init_seq[] = {
+static const struct smb_init_register smb_init_seq[] = {
{ .addr = AICL_RERUN_TIME_CFG, .mask = AICL_RERUN_TIME_MASK, .val = 0 },
/*
* By default configure us as an upstream facing port
@@ -882,17 +882,17 @@ static const struct smb2_register smb2_init_seq[] = {
.val = 1000000 / CURRENT_SCALE_FACTOR },
};
-static int smb2_init_hw(struct smb2_chip *chip)
+static int smb_init_hw(struct smb_chip *chip)
{
int rc, i;
- for (i = 0; i < ARRAY_SIZE(smb2_init_seq); i++) {
+ for (i = 0; i < ARRAY_SIZE(smb_init_seq); i++) {
dev_dbg(chip->dev, "%d: Writing 0x%02x to 0x%02x\n", i,
- smb2_init_seq[i].val, smb2_init_seq[i].addr);
+ smb_init_seq[i].val, smb_init_seq[i].addr);
rc = regmap_update_bits(chip->regmap,
- chip->base + smb2_init_seq[i].addr,
- smb2_init_seq[i].mask,
- smb2_init_seq[i].val);
+ chip->base + smb_init_seq[i].addr,
+ smb_init_seq[i].mask,
+ smb_init_seq[i].val);
if (rc < 0)
return dev_err_probe(chip->dev, rc,
"%s: init command %d failed\n",
@@ -902,7 +902,7 @@ static int smb2_init_hw(struct smb2_chip *chip)
return 0;
}
-static int smb2_init_irq(struct smb2_chip *chip, int *irq, const char *name,
+static int smb_init_irq(struct smb_chip *chip, int *irq, const char *name,
irqreturn_t (*handler)(int irq, void *data))
{
int irqnum;
@@ -924,11 +924,11 @@ static int smb2_init_irq(struct smb2_chip *chip, int *irq, const char *name,
return 0;
}
-static int smb2_probe(struct platform_device *pdev)
+static int smb_probe(struct platform_device *pdev)
{
struct power_supply_config supply_config = {};
struct power_supply_desc *desc;
- struct smb2_chip *chip;
+ struct smb_chip *chip;
int rc, irq;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
@@ -959,17 +959,17 @@ static int smb2_probe(struct platform_device *pdev)
"Couldn't get usbin_i IIO channel\n");
}
- rc = smb2_init_hw(chip);
+ rc = smb_init_hw(chip);
if (rc < 0)
return rc;
supply_config.drv_data = chip;
supply_config.fwnode = dev_fwnode(&pdev->dev);
- desc = devm_kzalloc(chip->dev, sizeof(smb2_psy_desc), GFP_KERNEL);
+ desc = devm_kzalloc(chip->dev, sizeof(smb_psy_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
- memcpy(desc, &smb2_psy_desc, sizeof(smb2_psy_desc));
+ memcpy(desc, &smb_psy_desc, sizeof(smb_psy_desc));
desc->name =
devm_kasprintf(chip->dev, GFP_KERNEL, "%s-charger",
(const char *)device_get_match_data(chip->dev));
@@ -988,7 +988,7 @@ static int smb2_probe(struct platform_device *pdev)
"Failed to get battery info\n");
rc = devm_delayed_work_autocancel(chip->dev, &chip->status_change_work,
- smb2_status_change_work);
+ smb_status_change_work);
if (rc)
return dev_err_probe(chip->dev, rc,
"Failed to init status change work\n");
@@ -999,24 +999,26 @@ static int smb2_probe(struct platform_device *pdev)
if (rc < 0)
return dev_err_probe(chip->dev, rc, "Couldn't set vbat max\n");
- rc = smb2_init_irq(chip, &irq, "bat-ov", smb2_handle_batt_overvoltage);
+ rc = smb_init_irq(chip, &irq, "bat-ov", smb_handle_batt_overvoltage);
if (rc < 0)
return rc;
- rc = smb2_init_irq(chip, &chip->cable_irq, "usb-plugin",
- smb2_handle_usb_plugin);
+ rc = smb_init_irq(chip, &chip->cable_irq, "usb-plugin",
+ smb_handle_usb_plugin);
if (rc < 0)
return rc;
- rc = smb2_init_irq(chip, &irq, "usbin-icl-change",
- smb2_handle_usb_icl_change);
+ rc = smb_init_irq(chip, &irq, "usbin-icl-change",
+ smb_handle_usb_icl_change);
if (rc < 0)
return rc;
- rc = smb2_init_irq(chip, &irq, "wdog-bark", smb2_handle_wdog_bark);
+ rc = smb_init_irq(chip, &irq, "wdog-bark", smb_handle_wdog_bark);
if (rc < 0)
return rc;
- rc = dev_pm_set_wake_irq(chip->dev, chip->cable_irq);
+ devm_device_init_wakeup(chip->dev);
+
+ rc = devm_pm_set_wake_irq(chip->dev, chip->cable_irq);
if (rc < 0)
return dev_err_probe(chip->dev, rc, "Couldn't set wake irq\n");
@@ -1028,22 +1030,22 @@ static int smb2_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id smb2_match_id_table[] = {
+static const struct of_device_id smb_match_id_table[] = {
{ .compatible = "qcom,pmi8998-charger", .data = "pmi8998" },
{ .compatible = "qcom,pm660-charger", .data = "pm660" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, smb2_match_id_table);
+MODULE_DEVICE_TABLE(of, smb_match_id_table);
-static struct platform_driver qcom_spmi_smb2 = {
- .probe = smb2_probe,
+static struct platform_driver qcom_spmi_smb = {
+ .probe = smb_probe,
.driver = {
- .name = "qcom-pmi8998/pm660-charger",
- .of_match_table = smb2_match_id_table,
+ .name = "qcom-smbx-charger",
+ .of_match_table = smb_match_id_table,
},
};
-module_platform_driver(qcom_spmi_smb2);
+module_platform_driver(qcom_spmi_smb);
MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_DESCRIPTION("Qualcomm SMB2 Charger Driver");
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index 9dcb5457bef4..04216b2bfb6c 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -512,7 +512,6 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
ret |= twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x2a,
TWL4030_BCIMDKEY);
if (bci->usb_enabled) {
- pm_runtime_mark_last_busy(bci->transceiver->dev);
pm_runtime_put_autosuspend(bci->transceiver->dev);
bci->usb_enabled = 0;
}
diff --git a/drivers/power/supply/ug3105_battery.c b/drivers/power/supply/ug3105_battery.c
index 38e23bdd4603..e8a1de7cade0 100644
--- a/drivers/power/supply/ug3105_battery.c
+++ b/drivers/power/supply/ug3105_battery.c
@@ -66,10 +66,11 @@
#define UG3105_LOW_BAT_UV 3700000
#define UG3105_FULL_BAT_HYST_UV 38000
+#define AMBIENT_TEMP_CELCIUS 25
+
struct ug3105_chip {
struct i2c_client *client;
struct power_supply *psy;
- struct power_supply_battery_info *info;
struct delayed_work work;
struct mutex lock;
int ocv[UG3105_MOV_AVG_WINDOW]; /* micro-volt */
@@ -103,7 +104,8 @@ static int ug3105_read_word(struct i2c_client *client, u8 reg)
static int ug3105_get_status(struct ug3105_chip *chip)
{
- int full = chip->info->constant_charge_voltage_max_uv - UG3105_FULL_BAT_HYST_UV;
+ int full = chip->psy->battery_info->constant_charge_voltage_max_uv -
+ UG3105_FULL_BAT_HYST_UV;
if (chip->curr > UG3105_CURR_HYST_UA)
return POWER_SUPPLY_STATUS_CHARGING;
@@ -117,62 +119,6 @@ static int ug3105_get_status(struct ug3105_chip *chip)
return POWER_SUPPLY_STATUS_NOT_CHARGING;
}
-static int ug3105_get_capacity(struct ug3105_chip *chip)
-{
- /*
- * OCV voltages in uV for 0-110% in 5% increments, the 100-110% is
- * for LiPo HV (High-Voltage) bateries which can go up to 4.35V
- * instead of the usual 4.2V.
- */
- static const int ocv_capacity_tbl[23] = {
- 3350000,
- 3610000,
- 3690000,
- 3710000,
- 3730000,
- 3750000,
- 3770000,
- 3786667,
- 3803333,
- 3820000,
- 3836667,
- 3853333,
- 3870000,
- 3907500,
- 3945000,
- 3982500,
- 4020000,
- 4075000,
- 4110000,
- 4150000,
- 4200000,
- 4250000,
- 4300000,
- };
- int i, ocv_diff, ocv_step;
-
- if (chip->ocv_avg < ocv_capacity_tbl[0])
- return 0;
-
- if (chip->status == POWER_SUPPLY_STATUS_FULL)
- return 100;
-
- for (i = 1; i < ARRAY_SIZE(ocv_capacity_tbl); i++) {
- if (chip->ocv_avg > ocv_capacity_tbl[i])
- continue;
-
- ocv_diff = ocv_capacity_tbl[i] - chip->ocv_avg;
- ocv_step = ocv_capacity_tbl[i] - ocv_capacity_tbl[i - 1];
- /* scale 0-110% down to 0-100% for LiPo HV */
- if (chip->info->constant_charge_voltage_max_uv >= 4300000)
- return (i * 500 - ocv_diff * 500 / ocv_step) / 110;
- else
- return i * 5 - ocv_diff * 5 / ocv_step;
- }
-
- return 100;
-}
-
static void ug3105_work(struct work_struct *work)
{
struct ug3105_chip *chip = container_of(work, struct ug3105_chip,
@@ -231,7 +177,12 @@ static void ug3105_work(struct work_struct *work)
chip->supplied = power_supply_am_i_supplied(psy);
chip->status = ug3105_get_status(chip);
- chip->capacity = ug3105_get_capacity(chip);
+ if (chip->status == POWER_SUPPLY_STATUS_FULL)
+ chip->capacity = 100;
+ else
+ chip->capacity = power_supply_batinfo_ocv2cap(chip->psy->battery_info,
+ chip->ocv_avg,
+ AMBIENT_TEMP_CELCIUS);
/*
* Skip internal resistance calc on charger [un]plug and
@@ -401,12 +352,10 @@ static int ug3105_probe(struct i2c_client *client)
if (IS_ERR(psy))
return PTR_ERR(psy);
- ret = power_supply_get_battery_info(psy, &chip->info);
- if (ret)
- return ret;
-
- if (chip->info->factory_internal_resistance_uohm == -EINVAL ||
- chip->info->constant_charge_voltage_max_uv == -EINVAL) {
+ if (!psy->battery_info ||
+ psy->battery_info->factory_internal_resistance_uohm == -EINVAL ||
+ psy->battery_info->constant_charge_voltage_max_uv == -EINVAL ||
+ !psy->battery_info->ocv_table[0]) {
dev_err(dev, "error required properties are missing\n");
return -ENODEV;
}
@@ -422,7 +371,7 @@ static int ug3105_probe(struct i2c_client *client)
chip->ua_per_unit = 8100000 / curr_sense_res_uohm;
/* Use provided internal resistance as start point (in milli-ohm) */
- chip->intern_res_avg = chip->info->factory_internal_resistance_uohm / 1000;
+ chip->intern_res_avg = psy->battery_info->factory_internal_resistance_uohm / 1000;
/* Also add it to the internal resistance moving average window */
chip->intern_res[0] = chip->intern_res_avg;
chip->intern_res_avg_index = 1;
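Dropping the driver's private OCV table in favour of the DT-described battery info lets the core helper interpolate capacity from the "ocv-capacity-table-*" properties, so the only remaining driver-side call is the one shown above, with the fixed 25 degC ambient value defined at the top of the file:

	chip->capacity = power_supply_batinfo_ocv2cap(chip->psy->battery_info,
						      chip->ocv_avg,
						      AMBIENT_TEMP_CELCIUS);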
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index a6aad743c282..b352df4cd3f9 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -24,6 +24,11 @@
#define PTP_DEFAULT_MAX_VCLOCKS 20
#define PTP_MAX_CHANNELS 2048
+enum {
+ PTP_LOCK_PHYSICAL = 0,
+ PTP_LOCK_VIRTUAL,
+};
+
struct timestamp_event_queue {
struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
int head;
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
index 2fdeedd60e21..64c950456517 100644
--- a/drivers/ptp/ptp_vclock.c
+++ b/drivers/ptp/ptp_vclock.c
@@ -154,6 +154,11 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
return PTP_VCLOCK_REFRESH_INTERVAL;
}
+static void ptp_vclock_set_subclass(struct ptp_clock *ptp)
+{
+ lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL);
+}
+
static const struct ptp_clock_info ptp_vclock_info = {
.owner = THIS_MODULE,
.name = "ptp virtual clock",
@@ -213,6 +218,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
return NULL;
}
+ ptp_vclock_set_subclass(vclock->clock);
+
timecounter_init(&vclock->tc, &vclock->cc, 0);
ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
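The PTP_LOCK_PHYSICAL/PTP_LOCK_VIRTUAL constants added to ptp_private.h exist only for lockdep: a virtual clock's posix-clock rwsem shares a lock class with its parent physical clock, so taking one while the other is held looks like recursive locking unless the vclock's rwsem is moved into its own subclass, which is exactly what the call above does:

	lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL);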
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index eb03ccd5b688..9ce75704a15f 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -323,7 +323,7 @@ static int pca9685_pwm_gpio_probe(struct pwm_chip *chip)
pca->gpio.direction_input = pca9685_pwm_gpio_direction_input;
pca->gpio.direction_output = pca9685_pwm_gpio_direction_output;
pca->gpio.get = pca9685_pwm_gpio_get;
- pca->gpio.set_rv = pca9685_pwm_gpio_set;
+ pca->gpio.set = pca9685_pwm_gpio_set;
pca->gpio.base = -1;
pca->gpio.ngpio = PCA9685_MAXCHAN;
pca->gpio.can_sleep = true;
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 0457af23c55a..b2a6ddc6f56d 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -643,7 +643,7 @@ static int act8600_charger_probe(struct device *dev, struct regmap *regmap)
struct power_supply *charger;
struct power_supply_config cfg = {
.drv_data = regmap,
- .of_node = dev->of_node,
+ .fwnode = dev_fwnode(dev),
};
charger = devm_power_supply_register(dev, &act8600_charger_desc, &cfg);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 8ed9b96518cf..554d83c4af0c 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3884,7 +3884,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
new_delta = ret;
/* check that voltage is converging quickly enough */
- if (new_delta - delta > rdev->constraints->max_uV_step) {
+ if (delta - new_delta < rdev->constraints->max_uV_step) {
ret = -EWOULDBLOCK;
goto out;
}
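The corrected condition reads: give up with -EWOULDBLOCK when the distance to the target voltage shrank by less than max_uV_step in one iteration (delta being the previous gap, new_delta the current one). For example, with max_uV_step = 100000 uV, going from delta = 250000 to new_delta = 200000 closes only 50000 uV and aborts, whereas reaching new_delta = 120000 (a 130000 uV improvement) lets the loop continue, which now matches the "converging quickly enough" comment above the check.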
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index feadb21a8f30..4be270f4d6c3 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -40,7 +40,6 @@ struct pca9450 {
struct device *dev;
struct regmap *regmap;
struct gpio_desc *sd_vsel_gpio;
- struct notifier_block restart_nb;
enum pca9450_chip_type type;
unsigned int rcnt;
int irq;
@@ -1100,10 +1099,9 @@ static irqreturn_t pca9450_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int pca9450_i2c_restart_handler(struct notifier_block *nb,
- unsigned long action, void *data)
+static int pca9450_i2c_restart_handler(struct sys_off_data *data)
{
- struct pca9450 *pca9450 = container_of(nb, struct pca9450, restart_nb);
+ struct pca9450 *pca9450 = data->cb_data;
struct i2c_client *i2c = container_of(pca9450->dev, struct i2c_client, dev);
dev_dbg(&i2c->dev, "Restarting device..\n");
@@ -1261,10 +1259,9 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450->sd_vsel_fixed_low =
of_property_read_bool(ldo5->dev.of_node, "nxp,sd-vsel-fixed-low");
- pca9450->restart_nb.notifier_call = pca9450_i2c_restart_handler;
- pca9450->restart_nb.priority = PCA9450_RESTART_HANDLER_PRIORITY;
-
- if (register_restart_handler(&pca9450->restart_nb))
+ if (devm_register_sys_off_handler(&i2c->dev, SYS_OFF_MODE_RESTART,
+ PCA9450_RESTART_HANDLER_PRIORITY,
+ pca9450_i2c_restart_handler, pca9450))
dev_warn(&i2c->dev, "Failed to register restart handler\n");
dev_info(&i2c->dev, "%s probed.\n",
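Replacing register_restart_handler() with devm_register_sys_off_handler() drops the notifier_block field and any need to unregister on remove. A hypothetical minimal version of the pattern:

	static int foo_restart_handler(struct sys_off_data *data)
	{
		struct foo_pmic *pmic = data->cb_data;

		foo_pmic_issue_reset(pmic);	/* e.g. an I2C register write */
		return NOTIFY_DONE;
	}

	/* in probe(): */
	if (devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART,
					  SYS_OFF_PRIO_DEFAULT,
					  foo_restart_handler, pmic))
		dev_warn(dev, "Failed to register restart handler\n");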
diff --git a/drivers/regulator/qcom-pm8008-regulator.c b/drivers/regulator/qcom-pm8008-regulator.c
index da017c1969d0..90c78ee1c37b 100644
--- a/drivers/regulator/qcom-pm8008-regulator.c
+++ b/drivers/regulator/qcom-pm8008-regulator.c
@@ -96,7 +96,7 @@ static int pm8008_regulator_get_voltage_sel(struct regulator_dev *rdev)
uV = le16_to_cpu(val) * 1000;
- return (uV - preg->desc.min_uV) / preg->desc.uV_step;
+ return regulator_map_voltage_linear_range(rdev, uV, INT_MAX);
}
static const struct regulator_ops pm8008_regulator_ops = {
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index 58dbf8bffa5d..3020839b9ef1 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -351,7 +351,7 @@ static int attiny_i2c_probe(struct i2c_client *i2c)
state->gc.base = -1;
state->gc.ngpio = NUM_GPIO;
- state->gc.set_rv = attiny_gpio_set;
+ state->gc.set = attiny_gpio_set;
state->gc.get_direction = attiny_gpio_get_direction;
state->gc.can_sleep = true;
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
index 5e67fdc88f49..d77ca486879f 100644
--- a/drivers/regulator/tps65219-regulator.c
+++ b/drivers/regulator/tps65219-regulator.c
@@ -454,9 +454,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
irq_type->irq_name,
irq_data);
if (error)
- return dev_err_probe(tps->dev, PTR_ERR(rdev),
- "Failed to request %s IRQ %d: %d\n",
- irq_type->irq_name, irq, error);
+ return dev_err_probe(tps->dev, error,
+ "Failed to request %s IRQ %d\n",
+ irq_type->irq_name, irq);
}
for (i = 0; i < pmic->dev_irq_size; ++i) {
@@ -477,9 +477,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
irq_type->irq_name,
irq_data);
if (error)
- return dev_err_probe(tps->dev, PTR_ERR(rdev),
- "Failed to request %s IRQ %d: %d\n",
- irq_type->irq_name, irq, error);
+ return dev_err_probe(tps->dev, error,
+ "Failed to request %s IRQ %d\n",
+ irq_type->irq_name, irq);
}
return 0;
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 83962a114dc9..48a0d3a69ed0 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -214,7 +214,7 @@ config QCOM_Q6V5_MSS
handled by QCOM_Q6V5_PAS driver.
config QCOM_Q6V5_PAS
- tristate "Qualcomm Hexagon v5 Peripheral Authentication Service support"
+ tristate "Qualcomm Peripheral Authentication Service support"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
@@ -229,11 +229,10 @@ config QCOM_Q6V5_PAS
select QCOM_RPROC_COMMON
select QCOM_SCM
help
- Say y here to support the TrustZone based Peripheral Image Loader
- for the Qualcomm Hexagon v5 based remote processors. This is commonly
- used to control subsystems such as ADSP (Audio DSP),
- CDSP (Compute DSP), MPSS (Modem Peripheral SubSystem), and
- SLPI (Sensor Low Power Island).
+ Say y here to support the TrustZone based Peripheral Image Loader for
+ the Qualcomm remote processors. This is commonly used to control
+ subsystems such as ADSP (Audio DSP), CDSP (Compute DSP), MPSS (Modem
+ Peripheral SubSystem), and SLPI (Sensor Low Power Island).
config QCOM_Q6V5_WCSS
tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader"
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 9c7182b3b038..9c9e9c3cf378 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -1211,7 +1211,7 @@ static int omap_rproc_of_get_internal_memories(struct platform_device *pdev,
oproc->mem[i].dev_addr = data->mems[i].dev_addr;
oproc->mem[i].size = resource_size(res);
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %pK da 0x%x\n",
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %p da 0x%x\n",
data->mems[i].name, &oproc->mem[i].bus_addr,
oproc->mem[i].size, oproc->mem[i].cpu_addr,
oproc->mem[i].dev_addr);
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 4a4eb9c0b133..842e4b6cc5f9 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -1055,7 +1055,7 @@ static int pru_rproc_probe(struct platform_device *pdev)
pru->mem_regions[i].pa = res->start;
pru->mem_regions[i].size = resource_size(res);
- dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %p\n",
mem_names[i], &pru->mem_regions[i].pa,
pru->mem_regions[i].size, pru->mem_regions[i].va);
}
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index b306f223127c..02e29171cbbe 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Qualcomm ADSP/SLPI Peripheral Image Loader for MSM8974 and MSM8996
+ * Qualcomm Peripheral Authentication Service remoteproc driver
*
* Copyright (C) 2016 Linaro Ltd
* Copyright (C) 2014 Sony Mobile Communications AB
@@ -31,11 +31,11 @@
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
-#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100
+#define QCOM_PAS_DECRYPT_SHUTDOWN_DELAY_MS 100
#define MAX_ASSIGN_COUNT 3
-struct adsp_data {
+struct qcom_pas_data {
int crash_reason_smem;
const char *firmware_name;
const char *dtb_firmware_name;
@@ -60,7 +60,7 @@ struct adsp_data {
int region_assign_vmid;
};
-struct qcom_adsp {
+struct qcom_pas {
struct device *dev;
struct rproc *rproc;
@@ -119,36 +119,37 @@ struct qcom_adsp {
struct qcom_scm_pas_metadata dtb_pas_metadata;
};
-static void adsp_segment_dump(struct rproc *rproc, struct rproc_dump_segment *segment,
- void *dest, size_t offset, size_t size)
+static void qcom_pas_segment_dump(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest, size_t offset, size_t size)
{
- struct qcom_adsp *adsp = rproc->priv;
+ struct qcom_pas *pas = rproc->priv;
int total_offset;
- total_offset = segment->da + segment->offset + offset - adsp->mem_phys;
- if (total_offset < 0 || total_offset + size > adsp->mem_size) {
- dev_err(adsp->dev,
+ total_offset = segment->da + segment->offset + offset - pas->mem_phys;
+ if (total_offset < 0 || total_offset + size > pas->mem_size) {
+ dev_err(pas->dev,
"invalid copy request for segment %pad with offset %zu and size %zu)\n",
&segment->da, offset, size);
memset(dest, 0xff, size);
return;
}
- memcpy_fromio(dest, adsp->mem_region + total_offset, size);
+ memcpy_fromio(dest, pas->mem_region + total_offset, size);
}
-static void adsp_minidump(struct rproc *rproc)
+static void qcom_pas_minidump(struct rproc *rproc)
{
- struct qcom_adsp *adsp = rproc->priv;
+ struct qcom_pas *pas = rproc->priv;
if (rproc->dump_conf == RPROC_COREDUMP_DISABLED)
return;
- qcom_minidump(rproc, adsp->minidump_id, adsp_segment_dump);
+ qcom_minidump(rproc, pas->minidump_id, qcom_pas_segment_dump);
}
-static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
- size_t pd_count)
+static int qcom_pas_pds_enable(struct qcom_pas *pas, struct device **pds,
+ size_t pd_count)
{
int ret;
int i;
@@ -174,8 +175,8 @@ unroll_pd_votes:
return ret;
};
-static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
- size_t pd_count)
+static void qcom_pas_pds_disable(struct qcom_pas *pas, struct device **pds,
+ size_t pd_count)
{
int i;
@@ -185,65 +186,65 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
}
}
-static int adsp_shutdown_poll_decrypt(struct qcom_adsp *adsp)
+static int qcom_pas_shutdown_poll_decrypt(struct qcom_pas *pas)
{
unsigned int retry_num = 50;
int ret;
do {
- msleep(ADSP_DECRYPT_SHUTDOWN_DELAY_MS);
- ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ msleep(QCOM_PAS_DECRYPT_SHUTDOWN_DELAY_MS);
+ ret = qcom_scm_pas_shutdown(pas->pas_id);
} while (ret == -EINVAL && --retry_num);
return ret;
}
-static int adsp_unprepare(struct rproc *rproc)
+static int qcom_pas_unprepare(struct rproc *rproc)
{
- struct qcom_adsp *adsp = rproc->priv;
+ struct qcom_pas *pas = rproc->priv;
/*
- * adsp_load() did pass pas_metadata to the SCM driver for storing
+ * qcom_pas_load() did pass pas_metadata to the SCM driver for storing
* metadata context. It might have been released already if
* auth_and_reset() was successful, but in other cases clean it up
* here.
*/
- qcom_scm_pas_metadata_release(&adsp->pas_metadata);
- if (adsp->dtb_pas_id)
- qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata);
+ qcom_scm_pas_metadata_release(&pas->pas_metadata);
+ if (pas->dtb_pas_id)
+ qcom_scm_pas_metadata_release(&pas->dtb_pas_metadata);
return 0;
}
-static int adsp_load(struct rproc *rproc, const struct firmware *fw)
+static int qcom_pas_load(struct rproc *rproc, const struct firmware *fw)
{
- struct qcom_adsp *adsp = rproc->priv;
+ struct qcom_pas *pas = rproc->priv;
int ret;
- /* Store firmware handle to be used in adsp_start() */
- adsp->firmware = fw;
+ /* Store firmware handle to be used in qcom_pas_start() */
+ pas->firmware = fw;
- if (adsp->lite_pas_id)
- ret = qcom_scm_pas_shutdown(adsp->lite_pas_id);
+ if (pas->lite_pas_id)
+ ret = qcom_scm_pas_shutdown(pas->lite_pas_id);
- if (adsp->dtb_pas_id) {
- ret = request_firmware(&adsp->dtb_firmware, adsp->dtb_firmware_name, adsp->dev);
+ if (pas->dtb_pas_id) {
+ ret = request_firmware(&pas->dtb_firmware, pas->dtb_firmware_name, pas->dev);
if (ret) {
- dev_err(adsp->dev, "request_firmware failed for %s: %d\n",
- adsp->dtb_firmware_name, ret);
+ dev_err(pas->dev, "request_firmware failed for %s: %d\n",
+ pas->dtb_firmware_name, ret);
return ret;
}
- ret = qcom_mdt_pas_init(adsp->dev, adsp->dtb_firmware, adsp->dtb_firmware_name,
- adsp->dtb_pas_id, adsp->dtb_mem_phys,
- &adsp->dtb_pas_metadata);
+ ret = qcom_mdt_pas_init(pas->dev, pas->dtb_firmware, pas->dtb_firmware_name,
+ pas->dtb_pas_id, pas->dtb_mem_phys,
+ &pas->dtb_pas_metadata);
if (ret)
goto release_dtb_firmware;
- ret = qcom_mdt_load_no_init(adsp->dev, adsp->dtb_firmware, adsp->dtb_firmware_name,
- adsp->dtb_pas_id, adsp->dtb_mem_region,
- adsp->dtb_mem_phys, adsp->dtb_mem_size,
- &adsp->dtb_mem_reloc);
+ ret = qcom_mdt_load_no_init(pas->dev, pas->dtb_firmware, pas->dtb_firmware_name,
+ pas->dtb_pas_id, pas->dtb_mem_region,
+ pas->dtb_mem_phys, pas->dtb_mem_size,
+ &pas->dtb_mem_reloc);
if (ret)
goto release_dtb_metadata;
}
@@ -251,248 +252,246 @@ static int adsp_load(struct rproc *rproc, const struct firmware *fw)
return 0;
release_dtb_metadata:
- qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata);
+ qcom_scm_pas_metadata_release(&pas->dtb_pas_metadata);
release_dtb_firmware:
- release_firmware(adsp->dtb_firmware);
+ release_firmware(pas->dtb_firmware);
return ret;
}
-static int adsp_start(struct rproc *rproc)
+static int qcom_pas_start(struct rproc *rproc)
{
- struct qcom_adsp *adsp = rproc->priv;
+ struct qcom_pas *pas = rproc->priv;
int ret;
- ret = qcom_q6v5_prepare(&adsp->q6v5);
+ ret = qcom_q6v5_prepare(&pas->q6v5);
if (ret)
return ret;
- ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ ret = qcom_pas_pds_enable(pas, pas->proxy_pds, pas->proxy_pd_count);
if (ret < 0)
goto disable_irqs;
- ret = clk_prepare_enable(adsp->xo);
+ ret = clk_prepare_enable(pas->xo);
if (ret)
goto disable_proxy_pds;
- ret = clk_prepare_enable(adsp->aggre2_clk);
+ ret = clk_prepare_enable(pas->aggre2_clk);
if (ret)
goto disable_xo_clk;
- if (adsp->cx_supply) {
- ret = regulator_enable(adsp->cx_supply);
+ if (pas->cx_supply) {
+ ret = regulator_enable(pas->cx_supply);
if (ret)
goto disable_aggre2_clk;
}
- if (adsp->px_supply) {
- ret = regulator_enable(adsp->px_supply);
+ if (pas->px_supply) {
+ ret = regulator_enable(pas->px_supply);
if (ret)
goto disable_cx_supply;
}
- if (adsp->dtb_pas_id) {
- ret = qcom_scm_pas_auth_and_reset(adsp->dtb_pas_id);
+ if (pas->dtb_pas_id) {
+ ret = qcom_scm_pas_auth_and_reset(pas->dtb_pas_id);
if (ret) {
- dev_err(adsp->dev,
+ dev_err(pas->dev,
"failed to authenticate dtb image and release reset\n");
goto disable_px_supply;
}
}
- ret = qcom_mdt_pas_init(adsp->dev, adsp->firmware, rproc->firmware, adsp->pas_id,
- adsp->mem_phys, &adsp->pas_metadata);
+ ret = qcom_mdt_pas_init(pas->dev, pas->firmware, rproc->firmware, pas->pas_id,
+ pas->mem_phys, &pas->pas_metadata);
if (ret)
goto disable_px_supply;
- ret = qcom_mdt_load_no_init(adsp->dev, adsp->firmware, rproc->firmware, adsp->pas_id,
- adsp->mem_region, adsp->mem_phys, adsp->mem_size,
- &adsp->mem_reloc);
+ ret = qcom_mdt_load_no_init(pas->dev, pas->firmware, rproc->firmware, pas->pas_id,
+ pas->mem_region, pas->mem_phys, pas->mem_size,
+ &pas->mem_reloc);
if (ret)
goto release_pas_metadata;
- qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
+ qcom_pil_info_store(pas->info_name, pas->mem_phys, pas->mem_size);
- ret = qcom_scm_pas_auth_and_reset(adsp->pas_id);
+ ret = qcom_scm_pas_auth_and_reset(pas->pas_id);
if (ret) {
- dev_err(adsp->dev,
+ dev_err(pas->dev,
"failed to authenticate image and release reset\n");
goto release_pas_metadata;
}
- ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5000));
+ ret = qcom_q6v5_wait_for_start(&pas->q6v5, msecs_to_jiffies(5000));
if (ret == -ETIMEDOUT) {
- dev_err(adsp->dev, "start timed out\n");
- qcom_scm_pas_shutdown(adsp->pas_id);
+ dev_err(pas->dev, "start timed out\n");
+ qcom_scm_pas_shutdown(pas->pas_id);
goto release_pas_metadata;
}
- qcom_scm_pas_metadata_release(&adsp->pas_metadata);
- if (adsp->dtb_pas_id)
- qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata);
+ qcom_scm_pas_metadata_release(&pas->pas_metadata);
+ if (pas->dtb_pas_id)
+ qcom_scm_pas_metadata_release(&pas->dtb_pas_metadata);
- /* Remove pointer to the loaded firmware, only valid in adsp_load() & adsp_start() */
- adsp->firmware = NULL;
+	/* firmware was only needed to pass a reference into qcom_pas_start(); drop it now */
+ pas->firmware = NULL;
return 0;
release_pas_metadata:
- qcom_scm_pas_metadata_release(&adsp->pas_metadata);
- if (adsp->dtb_pas_id)
- qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata);
+ qcom_scm_pas_metadata_release(&pas->pas_metadata);
+ if (pas->dtb_pas_id)
+ qcom_scm_pas_metadata_release(&pas->dtb_pas_metadata);
disable_px_supply:
- if (adsp->px_supply)
- regulator_disable(adsp->px_supply);
+ if (pas->px_supply)
+ regulator_disable(pas->px_supply);
disable_cx_supply:
- if (adsp->cx_supply)
- regulator_disable(adsp->cx_supply);
+ if (pas->cx_supply)
+ regulator_disable(pas->cx_supply);
disable_aggre2_clk:
- clk_disable_unprepare(adsp->aggre2_clk);
+ clk_disable_unprepare(pas->aggre2_clk);
disable_xo_clk:
- clk_disable_unprepare(adsp->xo);
+ clk_disable_unprepare(pas->xo);
disable_proxy_pds:
- adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ qcom_pas_pds_disable(pas, pas->proxy_pds, pas->proxy_pd_count);
disable_irqs:
- qcom_q6v5_unprepare(&adsp->q6v5);
+ qcom_q6v5_unprepare(&pas->q6v5);
- /* Remove pointer to the loaded firmware, only valid in adsp_load() & adsp_start() */
- adsp->firmware = NULL;
+	/* firmware was only needed to pass a reference into qcom_pas_start(); drop it now */
+ pas->firmware = NULL;
return ret;
}
static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
{
- struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
-
- if (adsp->px_supply)
- regulator_disable(adsp->px_supply);
- if (adsp->cx_supply)
- regulator_disable(adsp->cx_supply);
- clk_disable_unprepare(adsp->aggre2_clk);
- clk_disable_unprepare(adsp->xo);
- adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ struct qcom_pas *pas = container_of(q6v5, struct qcom_pas, q6v5);
+
+ if (pas->px_supply)
+ regulator_disable(pas->px_supply);
+ if (pas->cx_supply)
+ regulator_disable(pas->cx_supply);
+ clk_disable_unprepare(pas->aggre2_clk);
+ clk_disable_unprepare(pas->xo);
+ qcom_pas_pds_disable(pas, pas->proxy_pds, pas->proxy_pd_count);
}
-static int adsp_stop(struct rproc *rproc)
+static int qcom_pas_stop(struct rproc *rproc)
{
- struct qcom_adsp *adsp = rproc->priv;
+ struct qcom_pas *pas = rproc->priv;
int handover;
int ret;
- ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
+ ret = qcom_q6v5_request_stop(&pas->q6v5, pas->sysmon);
if (ret == -ETIMEDOUT)
- dev_err(adsp->dev, "timed out on wait\n");
+ dev_err(pas->dev, "timed out on wait\n");
- ret = qcom_scm_pas_shutdown(adsp->pas_id);
- if (ret && adsp->decrypt_shutdown)
- ret = adsp_shutdown_poll_decrypt(adsp);
+ ret = qcom_scm_pas_shutdown(pas->pas_id);
+ if (ret && pas->decrypt_shutdown)
+ ret = qcom_pas_shutdown_poll_decrypt(pas);
if (ret)
- dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
+ dev_err(pas->dev, "failed to shutdown: %d\n", ret);
- if (adsp->dtb_pas_id) {
- ret = qcom_scm_pas_shutdown(adsp->dtb_pas_id);
+ if (pas->dtb_pas_id) {
+ ret = qcom_scm_pas_shutdown(pas->dtb_pas_id);
if (ret)
- dev_err(adsp->dev, "failed to shutdown dtb: %d\n", ret);
+ dev_err(pas->dev, "failed to shutdown dtb: %d\n", ret);
}
- handover = qcom_q6v5_unprepare(&adsp->q6v5);
+ handover = qcom_q6v5_unprepare(&pas->q6v5);
if (handover)
- qcom_pas_handover(&adsp->q6v5);
+ qcom_pas_handover(&pas->q6v5);
- if (adsp->smem_host_id)
- ret = qcom_smem_bust_hwspin_lock_by_host(adsp->smem_host_id);
+ if (pas->smem_host_id)
+ ret = qcom_smem_bust_hwspin_lock_by_host(pas->smem_host_id);
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+static void *qcom_pas_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
- struct qcom_adsp *adsp = rproc->priv;
+ struct qcom_pas *pas = rproc->priv;
int offset;
- offset = da - adsp->mem_reloc;
- if (offset < 0 || offset + len > adsp->mem_size)
+ offset = da - pas->mem_reloc;
+ if (offset < 0 || offset + len > pas->mem_size)
return NULL;
if (is_iomem)
*is_iomem = true;
- return adsp->mem_region + offset;
+ return pas->mem_region + offset;
}
-static unsigned long adsp_panic(struct rproc *rproc)
+static unsigned long qcom_pas_panic(struct rproc *rproc)
{
- struct qcom_adsp *adsp = rproc->priv;
+ struct qcom_pas *pas = rproc->priv;
- return qcom_q6v5_panic(&adsp->q6v5);
+ return qcom_q6v5_panic(&pas->q6v5);
}
-static const struct rproc_ops adsp_ops = {
- .unprepare = adsp_unprepare,
- .start = adsp_start,
- .stop = adsp_stop,
- .da_to_va = adsp_da_to_va,
+static const struct rproc_ops qcom_pas_ops = {
+ .unprepare = qcom_pas_unprepare,
+ .start = qcom_pas_start,
+ .stop = qcom_pas_stop,
+ .da_to_va = qcom_pas_da_to_va,
.parse_fw = qcom_register_dump_segments,
- .load = adsp_load,
- .panic = adsp_panic,
+ .load = qcom_pas_load,
+ .panic = qcom_pas_panic,
};
-static const struct rproc_ops adsp_minidump_ops = {
- .unprepare = adsp_unprepare,
- .start = adsp_start,
- .stop = adsp_stop,
- .da_to_va = adsp_da_to_va,
+static const struct rproc_ops qcom_pas_minidump_ops = {
+ .unprepare = qcom_pas_unprepare,
+ .start = qcom_pas_start,
+ .stop = qcom_pas_stop,
+ .da_to_va = qcom_pas_da_to_va,
.parse_fw = qcom_register_dump_segments,
- .load = adsp_load,
- .panic = adsp_panic,
- .coredump = adsp_minidump,
+ .load = qcom_pas_load,
+ .panic = qcom_pas_panic,
+ .coredump = qcom_pas_minidump,
};
-static int adsp_init_clock(struct qcom_adsp *adsp)
+static int qcom_pas_init_clock(struct qcom_pas *pas)
{
- adsp->xo = devm_clk_get(adsp->dev, "xo");
- if (IS_ERR(adsp->xo))
- return dev_err_probe(adsp->dev, PTR_ERR(adsp->xo),
+ pas->xo = devm_clk_get(pas->dev, "xo");
+ if (IS_ERR(pas->xo))
+ return dev_err_probe(pas->dev, PTR_ERR(pas->xo),
"failed to get xo clock");
-
- adsp->aggre2_clk = devm_clk_get_optional(adsp->dev, "aggre2");
- if (IS_ERR(adsp->aggre2_clk))
- return dev_err_probe(adsp->dev, PTR_ERR(adsp->aggre2_clk),
+ pas->aggre2_clk = devm_clk_get_optional(pas->dev, "aggre2");
+ if (IS_ERR(pas->aggre2_clk))
+ return dev_err_probe(pas->dev, PTR_ERR(pas->aggre2_clk),
"failed to get aggre2 clock");
return 0;
}
-static int adsp_init_regulator(struct qcom_adsp *adsp)
+static int qcom_pas_init_regulator(struct qcom_pas *pas)
{
- adsp->cx_supply = devm_regulator_get_optional(adsp->dev, "cx");
- if (IS_ERR(adsp->cx_supply)) {
- if (PTR_ERR(adsp->cx_supply) == -ENODEV)
- adsp->cx_supply = NULL;
+ pas->cx_supply = devm_regulator_get_optional(pas->dev, "cx");
+ if (IS_ERR(pas->cx_supply)) {
+ if (PTR_ERR(pas->cx_supply) == -ENODEV)
+ pas->cx_supply = NULL;
else
- return PTR_ERR(adsp->cx_supply);
+ return PTR_ERR(pas->cx_supply);
}
- if (adsp->cx_supply)
- regulator_set_load(adsp->cx_supply, 100000);
+ if (pas->cx_supply)
+ regulator_set_load(pas->cx_supply, 100000);
- adsp->px_supply = devm_regulator_get_optional(adsp->dev, "px");
- if (IS_ERR(adsp->px_supply)) {
- if (PTR_ERR(adsp->px_supply) == -ENODEV)
- adsp->px_supply = NULL;
+ pas->px_supply = devm_regulator_get_optional(pas->dev, "px");
+ if (IS_ERR(pas->px_supply)) {
+ if (PTR_ERR(pas->px_supply) == -ENODEV)
+ pas->px_supply = NULL;
else
- return PTR_ERR(adsp->px_supply);
+ return PTR_ERR(pas->px_supply);
}
return 0;
}
-static int adsp_pds_attach(struct device *dev, struct device **devs,
- char **pd_names)
+static int qcom_pas_pds_attach(struct device *dev, struct device **devs, char **pd_names)
{
size_t num_pds = 0;
int ret;
@@ -528,10 +527,9 @@ unroll_attach:
return ret;
};
-static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
- size_t pd_count)
+static void qcom_pas_pds_detach(struct qcom_pas *pas, struct device **pds, size_t pd_count)
{
- struct device *dev = adsp->dev;
+ struct device *dev = pas->dev;
int i;
/* Handle single power domain */
@@ -544,62 +542,62 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
dev_pm_domain_detach(pds[i], false);
}
-static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
+static int qcom_pas_alloc_memory_region(struct qcom_pas *pas)
{
struct reserved_mem *rmem;
struct device_node *node;
- node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
+ node = of_parse_phandle(pas->dev->of_node, "memory-region", 0);
if (!node) {
- dev_err(adsp->dev, "no memory-region specified\n");
+ dev_err(pas->dev, "no memory-region specified\n");
return -EINVAL;
}
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
- dev_err(adsp->dev, "unable to resolve memory-region\n");
+ dev_err(pas->dev, "unable to resolve memory-region\n");
return -EINVAL;
}
- adsp->mem_phys = adsp->mem_reloc = rmem->base;
- adsp->mem_size = rmem->size;
- adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size);
- if (!adsp->mem_region) {
- dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
- &rmem->base, adsp->mem_size);
+ pas->mem_phys = pas->mem_reloc = rmem->base;
+ pas->mem_size = rmem->size;
+ pas->mem_region = devm_ioremap_wc(pas->dev, pas->mem_phys, pas->mem_size);
+ if (!pas->mem_region) {
+ dev_err(pas->dev, "unable to map memory region: %pa+%zx\n",
+ &rmem->base, pas->mem_size);
return -EBUSY;
}
- if (!adsp->dtb_pas_id)
+ if (!pas->dtb_pas_id)
return 0;
- node = of_parse_phandle(adsp->dev->of_node, "memory-region", 1);
+ node = of_parse_phandle(pas->dev->of_node, "memory-region", 1);
if (!node) {
- dev_err(adsp->dev, "no dtb memory-region specified\n");
+ dev_err(pas->dev, "no dtb memory-region specified\n");
return -EINVAL;
}
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
- dev_err(adsp->dev, "unable to resolve dtb memory-region\n");
+ dev_err(pas->dev, "unable to resolve dtb memory-region\n");
return -EINVAL;
}
- adsp->dtb_mem_phys = adsp->dtb_mem_reloc = rmem->base;
- adsp->dtb_mem_size = rmem->size;
- adsp->dtb_mem_region = devm_ioremap_wc(adsp->dev, adsp->dtb_mem_phys, adsp->dtb_mem_size);
- if (!adsp->dtb_mem_region) {
- dev_err(adsp->dev, "unable to map dtb memory region: %pa+%zx\n",
- &rmem->base, adsp->dtb_mem_size);
+ pas->dtb_mem_phys = pas->dtb_mem_reloc = rmem->base;
+ pas->dtb_mem_size = rmem->size;
+ pas->dtb_mem_region = devm_ioremap_wc(pas->dev, pas->dtb_mem_phys, pas->dtb_mem_size);
+ if (!pas->dtb_mem_region) {
+ dev_err(pas->dev, "unable to map dtb memory region: %pa+%zx\n",
+ &rmem->base, pas->dtb_mem_size);
return -EBUSY;
}
return 0;
}
-static int adsp_assign_memory_region(struct qcom_adsp *adsp)
+static int qcom_pas_assign_memory_region(struct qcom_pas *pas)
{
struct qcom_scm_vmperm perm[MAX_ASSIGN_COUNT];
struct device_node *node;
@@ -607,45 +605,45 @@ static int adsp_assign_memory_region(struct qcom_adsp *adsp)
int offset;
int ret;
- if (!adsp->region_assign_idx)
+ if (!pas->region_assign_idx)
return 0;
- for (offset = 0; offset < adsp->region_assign_count; ++offset) {
+ for (offset = 0; offset < pas->region_assign_count; ++offset) {
struct reserved_mem *rmem = NULL;
- node = of_parse_phandle(adsp->dev->of_node, "memory-region",
- adsp->region_assign_idx + offset);
+ node = of_parse_phandle(pas->dev->of_node, "memory-region",
+ pas->region_assign_idx + offset);
if (node)
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
- dev_err(adsp->dev, "unable to resolve shareable memory-region index %d\n",
+ dev_err(pas->dev, "unable to resolve shareable memory-region index %d\n",
offset);
return -EINVAL;
}
- if (adsp->region_assign_shared) {
+ if (pas->region_assign_shared) {
perm[0].vmid = QCOM_SCM_VMID_HLOS;
perm[0].perm = QCOM_SCM_PERM_RW;
- perm[1].vmid = adsp->region_assign_vmid;
+ perm[1].vmid = pas->region_assign_vmid;
perm[1].perm = QCOM_SCM_PERM_RW;
perm_size = 2;
} else {
- perm[0].vmid = adsp->region_assign_vmid;
+ perm[0].vmid = pas->region_assign_vmid;
perm[0].perm = QCOM_SCM_PERM_RW;
perm_size = 1;
}
- adsp->region_assign_phys[offset] = rmem->base;
- adsp->region_assign_size[offset] = rmem->size;
- adsp->region_assign_owners[offset] = BIT(QCOM_SCM_VMID_HLOS);
+ pas->region_assign_phys[offset] = rmem->base;
+ pas->region_assign_size[offset] = rmem->size;
+ pas->region_assign_owners[offset] = BIT(QCOM_SCM_VMID_HLOS);
- ret = qcom_scm_assign_mem(adsp->region_assign_phys[offset],
- adsp->region_assign_size[offset],
- &adsp->region_assign_owners[offset],
+ ret = qcom_scm_assign_mem(pas->region_assign_phys[offset],
+ pas->region_assign_size[offset],
+ &pas->region_assign_owners[offset],
perm, perm_size);
if (ret < 0) {
- dev_err(adsp->dev, "assign memory %d failed\n", offset);
+ dev_err(pas->dev, "assign memory %d failed\n", offset);
return ret;
}
}
@@ -653,35 +651,35 @@ static int adsp_assign_memory_region(struct qcom_adsp *adsp)
return 0;
}
-static void adsp_unassign_memory_region(struct qcom_adsp *adsp)
+static void qcom_pas_unassign_memory_region(struct qcom_pas *pas)
{
struct qcom_scm_vmperm perm;
int offset;
int ret;
- if (!adsp->region_assign_idx || adsp->region_assign_shared)
+ if (!pas->region_assign_idx || pas->region_assign_shared)
return;
- for (offset = 0; offset < adsp->region_assign_count; ++offset) {
+ for (offset = 0; offset < pas->region_assign_count; ++offset) {
perm.vmid = QCOM_SCM_VMID_HLOS;
perm.perm = QCOM_SCM_PERM_RW;
- ret = qcom_scm_assign_mem(adsp->region_assign_phys[offset],
- adsp->region_assign_size[offset],
- &adsp->region_assign_owners[offset],
+ ret = qcom_scm_assign_mem(pas->region_assign_phys[offset],
+ pas->region_assign_size[offset],
+ &pas->region_assign_owners[offset],
&perm, 1);
if (ret < 0)
- dev_err(adsp->dev, "unassign memory %d failed\n", offset);
+ dev_err(pas->dev, "unassign memory %d failed\n", offset);
}
}
-static int adsp_probe(struct platform_device *pdev)
+static int qcom_pas_probe(struct platform_device *pdev)
{
- const struct adsp_data *desc;
- struct qcom_adsp *adsp;
+ const struct qcom_pas_data *desc;
+ struct qcom_pas *pas;
struct rproc *rproc;
const char *fw_name, *dtb_fw_name = NULL;
- const struct rproc_ops *ops = &adsp_ops;
+ const struct rproc_ops *ops = &qcom_pas_ops;
int ret;
desc = of_device_get_match_data(&pdev->dev);
@@ -706,9 +704,9 @@ static int adsp_probe(struct platform_device *pdev)
}
if (desc->minidump_id)
- ops = &adsp_minidump_ops;
+ ops = &qcom_pas_minidump_ops;
- rproc = devm_rproc_alloc(&pdev->dev, desc->sysmon_name, ops, fw_name, sizeof(*adsp));
+ rproc = devm_rproc_alloc(&pdev->dev, desc->sysmon_name, ops, fw_name, sizeof(*pas));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
@@ -718,68 +716,65 @@ static int adsp_probe(struct platform_device *pdev)
rproc->auto_boot = desc->auto_boot;
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
- adsp = rproc->priv;
- adsp->dev = &pdev->dev;
- adsp->rproc = rproc;
- adsp->minidump_id = desc->minidump_id;
- adsp->pas_id = desc->pas_id;
- adsp->lite_pas_id = desc->lite_pas_id;
- adsp->info_name = desc->sysmon_name;
- adsp->smem_host_id = desc->smem_host_id;
- adsp->decrypt_shutdown = desc->decrypt_shutdown;
- adsp->region_assign_idx = desc->region_assign_idx;
- adsp->region_assign_count = min_t(int, MAX_ASSIGN_COUNT, desc->region_assign_count);
- adsp->region_assign_vmid = desc->region_assign_vmid;
- adsp->region_assign_shared = desc->region_assign_shared;
+ pas = rproc->priv;
+ pas->dev = &pdev->dev;
+ pas->rproc = rproc;
+ pas->minidump_id = desc->minidump_id;
+ pas->pas_id = desc->pas_id;
+ pas->lite_pas_id = desc->lite_pas_id;
+ pas->info_name = desc->sysmon_name;
+ pas->smem_host_id = desc->smem_host_id;
+ pas->decrypt_shutdown = desc->decrypt_shutdown;
+ pas->region_assign_idx = desc->region_assign_idx;
+ pas->region_assign_count = min_t(int, MAX_ASSIGN_COUNT, desc->region_assign_count);
+ pas->region_assign_vmid = desc->region_assign_vmid;
+ pas->region_assign_shared = desc->region_assign_shared;
if (dtb_fw_name) {
- adsp->dtb_firmware_name = dtb_fw_name;
- adsp->dtb_pas_id = desc->dtb_pas_id;
+ pas->dtb_firmware_name = dtb_fw_name;
+ pas->dtb_pas_id = desc->dtb_pas_id;
}
- platform_set_drvdata(pdev, adsp);
+ platform_set_drvdata(pdev, pas);
- ret = device_init_wakeup(adsp->dev, true);
+ ret = device_init_wakeup(pas->dev, true);
if (ret)
goto free_rproc;
- ret = adsp_alloc_memory_region(adsp);
+ ret = qcom_pas_alloc_memory_region(pas);
if (ret)
goto free_rproc;
- ret = adsp_assign_memory_region(adsp);
+ ret = qcom_pas_assign_memory_region(pas);
if (ret)
goto free_rproc;
- ret = adsp_init_clock(adsp);
+ ret = qcom_pas_init_clock(pas);
if (ret)
goto unassign_mem;
- ret = adsp_init_regulator(adsp);
+ ret = qcom_pas_init_regulator(pas);
if (ret)
goto unassign_mem;
- ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
- desc->proxy_pd_names);
+ ret = qcom_pas_pds_attach(&pdev->dev, pas->proxy_pds, desc->proxy_pd_names);
if (ret < 0)
goto unassign_mem;
- adsp->proxy_pd_count = ret;
+ pas->proxy_pd_count = ret;
- ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, desc->load_state,
- qcom_pas_handover);
+ ret = qcom_q6v5_init(&pas->q6v5, pdev, rproc, desc->crash_reason_smem,
+ desc->load_state, qcom_pas_handover);
if (ret)
goto detach_proxy_pds;
- qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
- qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
- qcom_add_pdm_subdev(rproc, &adsp->pdm_subdev);
- adsp->sysmon = qcom_add_sysmon_subdev(rproc,
- desc->sysmon_name,
- desc->ssctl_id);
- if (IS_ERR(adsp->sysmon)) {
- ret = PTR_ERR(adsp->sysmon);
+ qcom_add_glink_subdev(rproc, &pas->glink_subdev, desc->ssr_name);
+ qcom_add_smd_subdev(rproc, &pas->smd_subdev);
+ qcom_add_pdm_subdev(rproc, &pas->pdm_subdev);
+ pas->sysmon = qcom_add_sysmon_subdev(rproc, desc->sysmon_name, desc->ssctl_id);
+ if (IS_ERR(pas->sysmon)) {
+ ret = PTR_ERR(pas->sysmon);
goto deinit_remove_pdm_smd_glink;
}
- qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
+ qcom_add_ssr_subdev(rproc, &pas->ssr_subdev, desc->ssr_name);
ret = rproc_add(rproc);
if (ret)
goto remove_ssr_sysmon;
@@ -787,41 +782,41 @@ static int adsp_probe(struct platform_device *pdev)
return 0;
remove_ssr_sysmon:
- qcom_remove_ssr_subdev(rproc, &adsp->ssr_subdev);
- qcom_remove_sysmon_subdev(adsp->sysmon);
+ qcom_remove_ssr_subdev(rproc, &pas->ssr_subdev);
+ qcom_remove_sysmon_subdev(pas->sysmon);
deinit_remove_pdm_smd_glink:
- qcom_remove_pdm_subdev(rproc, &adsp->pdm_subdev);
- qcom_remove_smd_subdev(rproc, &adsp->smd_subdev);
- qcom_remove_glink_subdev(rproc, &adsp->glink_subdev);
- qcom_q6v5_deinit(&adsp->q6v5);
+ qcom_remove_pdm_subdev(rproc, &pas->pdm_subdev);
+ qcom_remove_smd_subdev(rproc, &pas->smd_subdev);
+ qcom_remove_glink_subdev(rproc, &pas->glink_subdev);
+ qcom_q6v5_deinit(&pas->q6v5);
detach_proxy_pds:
- adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ qcom_pas_pds_detach(pas, pas->proxy_pds, pas->proxy_pd_count);
unassign_mem:
- adsp_unassign_memory_region(adsp);
+ qcom_pas_unassign_memory_region(pas);
free_rproc:
- device_init_wakeup(adsp->dev, false);
+ device_init_wakeup(pas->dev, false);
return ret;
}
-static void adsp_remove(struct platform_device *pdev)
+static void qcom_pas_remove(struct platform_device *pdev)
{
- struct qcom_adsp *adsp = platform_get_drvdata(pdev);
-
- rproc_del(adsp->rproc);
-
- qcom_q6v5_deinit(&adsp->q6v5);
- adsp_unassign_memory_region(adsp);
- qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
- qcom_remove_sysmon_subdev(adsp->sysmon);
- qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev);
- qcom_remove_pdm_subdev(adsp->rproc, &adsp->pdm_subdev);
- qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
- adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
- device_init_wakeup(adsp->dev, false);
+ struct qcom_pas *pas = platform_get_drvdata(pdev);
+
+ rproc_del(pas->rproc);
+
+ qcom_q6v5_deinit(&pas->q6v5);
+ qcom_pas_unassign_memory_region(pas);
+ qcom_remove_glink_subdev(pas->rproc, &pas->glink_subdev);
+ qcom_remove_sysmon_subdev(pas->sysmon);
+ qcom_remove_smd_subdev(pas->rproc, &pas->smd_subdev);
+ qcom_remove_pdm_subdev(pas->rproc, &pas->pdm_subdev);
+ qcom_remove_ssr_subdev(pas->rproc, &pas->ssr_subdev);
+ qcom_pas_pds_detach(pas, pas->proxy_pds, pas->proxy_pd_count);
+ device_init_wakeup(pas->dev, false);
}
-static const struct adsp_data adsp_resource_init = {
+static const struct qcom_pas_data adsp_resource_init = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
@@ -831,7 +826,7 @@ static const struct adsp_data adsp_resource_init = {
.ssctl_id = 0x14,
};
-static const struct adsp_data sa8775p_adsp_resource = {
+static const struct qcom_pas_data sa8775p_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mbn",
.pas_id = 1,
@@ -848,7 +843,7 @@ static const struct adsp_data sa8775p_adsp_resource = {
.ssctl_id = 0x14,
};
-static const struct adsp_data sdm845_adsp_resource_init = {
+static const struct qcom_pas_data sdm845_adsp_resource_init = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
@@ -859,7 +854,7 @@ static const struct adsp_data sdm845_adsp_resource_init = {
.ssctl_id = 0x14,
};
-static const struct adsp_data sm6350_adsp_resource = {
+static const struct qcom_pas_data sm6350_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
@@ -875,7 +870,7 @@ static const struct adsp_data sm6350_adsp_resource = {
.ssctl_id = 0x14,
};
-static const struct adsp_data sm6375_mpss_resource = {
+static const struct qcom_pas_data sm6375_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
@@ -890,7 +885,7 @@ static const struct adsp_data sm6375_mpss_resource = {
.ssctl_id = 0x12,
};
-static const struct adsp_data sm8150_adsp_resource = {
+static const struct qcom_pas_data sm8150_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
@@ -905,7 +900,7 @@ static const struct adsp_data sm8150_adsp_resource = {
.ssctl_id = 0x14,
};
-static const struct adsp_data sm8250_adsp_resource = {
+static const struct qcom_pas_data sm8250_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
@@ -922,7 +917,7 @@ static const struct adsp_data sm8250_adsp_resource = {
.ssctl_id = 0x14,
};
-static const struct adsp_data sm8350_adsp_resource = {
+static const struct qcom_pas_data sm8350_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
@@ -938,7 +933,7 @@ static const struct adsp_data sm8350_adsp_resource = {
.ssctl_id = 0x14,
};
-static const struct adsp_data msm8996_adsp_resource = {
+static const struct qcom_pas_data msm8996_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
@@ -952,7 +947,7 @@ static const struct adsp_data msm8996_adsp_resource = {
.ssctl_id = 0x14,
};
-static const struct adsp_data cdsp_resource_init = {
+static const struct qcom_pas_data cdsp_resource_init = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
@@ -962,7 +957,7 @@ static const struct adsp_data cdsp_resource_init = {
.ssctl_id = 0x17,
};
-static const struct adsp_data sa8775p_cdsp0_resource = {
+static const struct qcom_pas_data sa8775p_cdsp0_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp0.mbn",
.pas_id = 18,
@@ -980,7 +975,7 @@ static const struct adsp_data sa8775p_cdsp0_resource = {
.ssctl_id = 0x17,
};
-static const struct adsp_data sa8775p_cdsp1_resource = {
+static const struct qcom_pas_data sa8775p_cdsp1_resource = {
.crash_reason_smem = 633,
.firmware_name = "cdsp1.mbn",
.pas_id = 30,
@@ -998,7 +993,7 @@ static const struct adsp_data sa8775p_cdsp1_resource = {
.ssctl_id = 0x20,
};
-static const struct adsp_data sdm845_cdsp_resource_init = {
+static const struct qcom_pas_data sdm845_cdsp_resource_init = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
@@ -1009,7 +1004,7 @@ static const struct adsp_data sdm845_cdsp_resource_init = {
.ssctl_id = 0x17,
};
-static const struct adsp_data sm6350_cdsp_resource = {
+static const struct qcom_pas_data sm6350_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
@@ -1025,7 +1020,7 @@ static const struct adsp_data sm6350_cdsp_resource = {
.ssctl_id = 0x17,
};
-static const struct adsp_data sm8150_cdsp_resource = {
+static const struct qcom_pas_data sm8150_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
@@ -1040,7 +1035,7 @@ static const struct adsp_data sm8150_cdsp_resource = {
.ssctl_id = 0x17,
};
-static const struct adsp_data sm8250_cdsp_resource = {
+static const struct qcom_pas_data sm8250_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
@@ -1055,7 +1050,7 @@ static const struct adsp_data sm8250_cdsp_resource = {
.ssctl_id = 0x17,
};
-static const struct adsp_data sc8280xp_nsp0_resource = {
+static const struct qcom_pas_data sc8280xp_nsp0_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
@@ -1069,7 +1064,7 @@ static const struct adsp_data sc8280xp_nsp0_resource = {
.ssctl_id = 0x17,
};
-static const struct adsp_data sc8280xp_nsp1_resource = {
+static const struct qcom_pas_data sc8280xp_nsp1_resource = {
.crash_reason_smem = 633,
.firmware_name = "cdsp.mdt",
.pas_id = 30,
@@ -1083,7 +1078,7 @@ static const struct adsp_data sc8280xp_nsp1_resource = {
.ssctl_id = 0x20,
};
-static const struct adsp_data x1e80100_adsp_resource = {
+static const struct qcom_pas_data x1e80100_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.dtb_firmware_name = "adsp_dtb.mdt",
@@ -1103,7 +1098,7 @@ static const struct adsp_data x1e80100_adsp_resource = {
.ssctl_id = 0x14,
};
-static const struct adsp_data x1e80100_cdsp_resource = {
+static const struct qcom_pas_data x1e80100_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.dtb_firmware_name = "cdsp_dtb.mdt",
@@ -1123,7 +1118,7 @@ static const struct adsp_data x1e80100_cdsp_resource = {
.ssctl_id = 0x17,
};
-static const struct adsp_data sm8350_cdsp_resource = {
+static const struct qcom_pas_data sm8350_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
@@ -1140,7 +1135,7 @@ static const struct adsp_data sm8350_cdsp_resource = {
.ssctl_id = 0x17,
};
-static const struct adsp_data sa8775p_gpdsp0_resource = {
+static const struct qcom_pas_data sa8775p_gpdsp0_resource = {
.crash_reason_smem = 640,
.firmware_name = "gpdsp0.mbn",
.pas_id = 39,
@@ -1157,7 +1152,7 @@ static const struct adsp_data sa8775p_gpdsp0_resource = {
.ssctl_id = 0x21,
};
-static const struct adsp_data sa8775p_gpdsp1_resource = {
+static const struct qcom_pas_data sa8775p_gpdsp1_resource = {
.crash_reason_smem = 641,
.firmware_name = "gpdsp1.mbn",
.pas_id = 40,
@@ -1174,7 +1169,7 @@ static const struct adsp_data sa8775p_gpdsp1_resource = {
.ssctl_id = 0x22,
};
-static const struct adsp_data mpss_resource_init = {
+static const struct qcom_pas_data mpss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
@@ -1191,7 +1186,7 @@ static const struct adsp_data mpss_resource_init = {
.ssctl_id = 0x12,
};
-static const struct adsp_data sc8180x_mpss_resource = {
+static const struct qcom_pas_data sc8180x_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
@@ -1206,7 +1201,7 @@ static const struct adsp_data sc8180x_mpss_resource = {
.ssctl_id = 0x12,
};
-static const struct adsp_data msm8996_slpi_resource_init = {
+static const struct qcom_pas_data msm8996_slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
@@ -1220,7 +1215,7 @@ static const struct adsp_data msm8996_slpi_resource_init = {
.ssctl_id = 0x16,
};
-static const struct adsp_data sdm845_slpi_resource_init = {
+static const struct qcom_pas_data sdm845_slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
@@ -1236,7 +1231,7 @@ static const struct adsp_data sdm845_slpi_resource_init = {
.ssctl_id = 0x16,
};
-static const struct adsp_data wcss_resource_init = {
+static const struct qcom_pas_data wcss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "wcnss.mdt",
.pas_id = 6,
@@ -1246,7 +1241,7 @@ static const struct adsp_data wcss_resource_init = {
.ssctl_id = 0x12,
};
-static const struct adsp_data sdx55_mpss_resource = {
+static const struct qcom_pas_data sdx55_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
@@ -1261,7 +1256,7 @@ static const struct adsp_data sdx55_mpss_resource = {
.ssctl_id = 0x22,
};
-static const struct adsp_data sm8450_mpss_resource = {
+static const struct qcom_pas_data sm8450_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.pas_id = 4,
@@ -1279,7 +1274,7 @@ static const struct adsp_data sm8450_mpss_resource = {
.ssctl_id = 0x12,
};
-static const struct adsp_data sm8550_adsp_resource = {
+static const struct qcom_pas_data sm8550_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.dtb_firmware_name = "adsp_dtb.mdt",
@@ -1299,7 +1294,7 @@ static const struct adsp_data sm8550_adsp_resource = {
.smem_host_id = 2,
};
-static const struct adsp_data sm8550_cdsp_resource = {
+static const struct qcom_pas_data sm8550_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.dtb_firmware_name = "cdsp_dtb.mdt",
@@ -1320,7 +1315,7 @@ static const struct adsp_data sm8550_cdsp_resource = {
.smem_host_id = 5,
};
-static const struct adsp_data sm8550_mpss_resource = {
+static const struct qcom_pas_data sm8550_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.dtb_firmware_name = "modem_dtb.mdt",
@@ -1344,7 +1339,7 @@ static const struct adsp_data sm8550_mpss_resource = {
.region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
};
-static const struct adsp_data sc7280_wpss_resource = {
+static const struct qcom_pas_data sc7280_wpss_resource = {
.crash_reason_smem = 626,
.firmware_name = "wpss.mdt",
.pas_id = 6,
@@ -1361,7 +1356,7 @@ static const struct adsp_data sc7280_wpss_resource = {
.ssctl_id = 0x19,
};
-static const struct adsp_data sm8650_cdsp_resource = {
+static const struct qcom_pas_data sm8650_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.dtb_firmware_name = "cdsp_dtb.mdt",
@@ -1386,7 +1381,7 @@ static const struct adsp_data sm8650_cdsp_resource = {
.region_assign_vmid = QCOM_SCM_VMID_CDSP,
};
-static const struct adsp_data sm8650_mpss_resource = {
+static const struct qcom_pas_data sm8650_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.dtb_firmware_name = "modem_dtb.mdt",
@@ -1410,7 +1405,7 @@ static const struct adsp_data sm8650_mpss_resource = {
.region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
};
-static const struct adsp_data sm8750_mpss_resource = {
+static const struct qcom_pas_data sm8750_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
.dtb_firmware_name = "modem_dtb.mdt",
@@ -1434,7 +1429,7 @@ static const struct adsp_data sm8750_mpss_resource = {
.region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
};
-static const struct of_device_id adsp_of_match[] = {
+static const struct of_device_id qcom_pas_of_match[] = {
{ .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
@@ -1504,17 +1499,17 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,x1e80100-cdsp-pas", .data = &x1e80100_cdsp_resource},
{ },
};
-MODULE_DEVICE_TABLE(of, adsp_of_match);
+MODULE_DEVICE_TABLE(of, qcom_pas_of_match);
-static struct platform_driver adsp_driver = {
- .probe = adsp_probe,
- .remove = adsp_remove,
+static struct platform_driver qcom_pas_driver = {
+ .probe = qcom_pas_probe,
+ .remove = qcom_pas_remove,
.driver = {
.name = "qcom_q6v5_pas",
- .of_match_table = adsp_of_match,
+ .of_match_table = qcom_pas_of_match,
},
};
-module_platform_driver(adsp_driver);
-MODULE_DESCRIPTION("Qualcomm Hexagon v5 Peripheral Authentication Service driver");
+module_platform_driver(qcom_pas_driver);
+MODULE_DESCRIPTION("Qualcomm Peripheral Authentication Service remoteproc driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 81b2ccf988e8..825672100528 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -699,7 +699,7 @@ static int rproc_alloc_carveout(struct rproc *rproc,
return -ENOMEM;
}
- dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
+ dev_dbg(dev, "carveout va %p, dma %pad, len 0x%zx\n",
va, &dma, mem->len);
if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 25a655f33ec0..c5d46a878149 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -136,7 +136,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
size = vring_size(num, rvring->align);
memset(addr, 0, size);
- dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
+ dev_dbg(dev, "vring%d: va %p qsz %d notifyid %d\n",
id, addr, num, rvring->notifyid);
/*
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 5412beb0a692..d083ecf02f5c 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -190,7 +190,7 @@ static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *
}
}
- dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%pK\n",
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n",
da, len, va);
return va;
diff --git a/drivers/remoteproc/ti_k3_common.c b/drivers/remoteproc/ti_k3_common.c
index d5dccc81d460..d4f20900f33b 100644
--- a/drivers/remoteproc/ti_k3_common.c
+++ b/drivers/remoteproc/ti_k3_common.c
@@ -450,7 +450,7 @@ int k3_rproc_of_get_memories(struct platform_device *pdev,
kproc->mem[i].dev_addr = data->mems[i].dev_addr;
kproc->mem[i].size = resource_size(res);
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
data->mems[i].name, &kproc->mem[i].bus_addr,
kproc->mem[i].size, kproc->mem[i].cpu_addr,
kproc->mem[i].dev_addr);
@@ -528,7 +528,7 @@ int k3_reserved_mem_init(struct k3_rproc *kproc)
return -ENOMEM;
}
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %p da 0x%x\n",
i + 1, &kproc->rmem[i].bus_addr,
kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
kproc->rmem[i].dev_addr);
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index e34c04c135fc..ca5ff280d2dc 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1007,7 +1007,7 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
return -ENOMEM;
}
- dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %p da 0x%x\n",
i, &core->sram[i].bus_addr,
core->sram[i].size, core->sram[i].cpu_addr,
core->sram[i].dev_addr);
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 1af89782e116..0b7b173d0d26 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -68,7 +68,7 @@ struct zynqmp_sram_bank {
};
/**
- * struct mbox_info
+ * struct mbox_info - mailbox channel data
*
* @rx_mc_buf: to copy data from mailbox rx channel
* @tx_mc_buf: to copy data to mailbox tx channel
@@ -89,7 +89,7 @@ struct mbox_info {
};
/**
- * struct rsc_tbl_data
+ * struct rsc_tbl_data - resource table metadata
*
* Platform specific data structure used to sync resource table address.
* It's important to maintain order and size of each field on remote side.
@@ -128,7 +128,7 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
};
/**
- * struct zynqmp_r5_core
+ * struct zynqmp_r5_core - remoteproc core's internal data
*
* @rsc_tbl_va: resource table virtual address
* @sram: Array of sram memories assigned to this core
@@ -157,7 +157,7 @@ struct zynqmp_r5_core {
};
/**
- * struct zynqmp_r5_cluster
+ * struct zynqmp_r5_cluster - remoteproc cluster's internal data
*
* @dev: r5f subsystem cluster device node
* @mode: cluster mode of type zynqmp_r5_cluster_mode
@@ -732,7 +732,7 @@ static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
}
/**
- * zynqmp_r5_rproc_prepare()
+ * zynqmp_r5_rproc_prepare() - prepare core to boot/attach
* adds carveouts for TCM bank and reserved memory regions
*
* @rproc: Device node of each rproc
@@ -765,7 +765,7 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
}
/**
- * zynqmp_r5_rproc_unprepare()
+ * zynqmp_r5_rproc_unprepare() - programming sequence after stop/detach.
* Turns off TCM banks using power-domain id
*
* @rproc: Device node of each rproc
@@ -908,7 +908,7 @@ static const struct rproc_ops zynqmp_r5_rproc_ops = {
};
/**
- * zynqmp_r5_add_rproc_core()
+ * zynqmp_r5_add_rproc_core() - Add core data to framework.
* Allocate and add struct rproc object for each r5f core
* This is called for each individual r5f core
*
@@ -938,6 +938,8 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
rproc_coredump_set_elf_info(r5_rproc, ELFCLASS32, EM_ARM);
+ r5_rproc->recovery_disabled = true;
+ r5_rproc->has_iommu = false;
r5_rproc->auto_boot = false;
r5_core = r5_rproc->priv;
r5_core->dev = cdev;
@@ -1142,7 +1144,7 @@ static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
}
/**
- * zynqmp_r5_get_tcm_node()
+ * zynqmp_r5_get_tcm_node() - Get TCM info
* Ideally this function should parse tcm node and store information
* in r5_core instance. For now, Hardcoded TCM information is used.
* This approach is used as TCM bindings for system-dt is being developed
@@ -1329,19 +1331,23 @@ static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
/*
* Number of cores is decided by number of child nodes of
- * r5f subsystem node in dts. If Split mode is used in dts
- * 2 child nodes are expected.
+ * r5f subsystem node in dts.
+ * In split mode maximum two child nodes are expected.
+ * However, only single core can be enabled too.
+ * Driver can handle following configuration in split mode:
+ * 1) core0 enabled, core1 disabled
+ * 2) core0 disabled, core1 enabled
+ * 3) core0 and core1 both are enabled.
+ * For now, no more than two cores are expected per cluster
+ * in split mode.
* In lockstep mode if two child nodes are available,
* only use first child node and consider it as core0
* and ignore core1 dt node.
*/
core_count = of_get_available_child_count(dev_node);
- if (core_count == 0) {
+ if (core_count == 0 || core_count > 2) {
dev_err(dev, "Invalid number of r5 cores %d", core_count);
return -EINVAL;
- } else if (cluster_mode == SPLIT_MODE && core_count != 2) {
- dev_err(dev, "Invalid number of r5 cores for split mode\n");
- return -EINVAL;
} else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) {
dev_warn(dev, "Only r5 core0 will be used\n");
core_count = 1;
@@ -1464,6 +1470,45 @@ static void zynqmp_r5_cluster_exit(void *data)
}
/*
+ * zynqmp_r5_remoteproc_shutdown()
+ * Follow shutdown sequence in case of kexec call.
+ *
+ * @pdev: domain platform device for cluster
+ *
+ * Return: None.
+ */
+static void zynqmp_r5_remoteproc_shutdown(struct platform_device *pdev)
+{
+ const char *rproc_state_str = NULL;
+ struct zynqmp_r5_cluster *cluster;
+ struct zynqmp_r5_core *r5_core;
+ struct rproc *rproc;
+ int i, ret = 0;
+
+ cluster = platform_get_drvdata(pdev);
+
+ for (i = 0; i < cluster->core_count; i++) {
+ r5_core = cluster->r5_cores[i];
+ rproc = r5_core->rproc;
+
+ if (rproc->state == RPROC_RUNNING) {
+ ret = rproc_shutdown(rproc);
+ rproc_state_str = "shutdown";
+ } else if (rproc->state == RPROC_ATTACHED) {
+ ret = rproc_detach(rproc);
+ rproc_state_str = "detach";
+ } else {
+ ret = 0;
+ }
+
+ if (ret) {
+ dev_err(cluster->dev, "failed to %s rproc %d\n",
+ rproc_state_str, rproc->index);
+ }
+ }
+}
+
+/*
* zynqmp_r5_remoteproc_probe()
* parse device-tree, initialize hardware and allocate required resources
* and remoteproc ops
@@ -1524,6 +1569,7 @@ static struct platform_driver zynqmp_r5_remoteproc_driver = {
.name = "zynqmp_r5_remoteproc",
.of_match_table = zynqmp_r5_remoteproc_match,
},
+ .shutdown = zynqmp_r5_remoteproc_shutdown,
};
module_platform_driver(zynqmp_r5_remoteproc_driver);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 4730b1c8b322..484890b4a6a7 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -901,7 +901,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
goto vqs_del;
}
- dev_dbg(&vdev->dev, "buffers: va %pK, dma %pad\n",
+ dev_dbg(&vdev->dev, "buffers: va %p, dma %pad\n",
bufs_va, &vrp->bufs_dma);
/* half of the buffers is dedicated for RX */
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 9aec922613ce..64f6e9756aff 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -483,15 +483,6 @@ config RTC_DRV_PCF8523
This driver can also be built as a module. If so, the module
will be called rtc-pcf8523.
-config RTC_DRV_PCF85063
- tristate "NXP PCF85063"
- select REGMAP_I2C
- help
- If you say yes here you get support for the PCF85063 RTC chip
-
- This driver can also be built as a module. If so, the module
- will be called rtc-pcf85063.
-
config RTC_DRV_PCF85363
tristate "NXP PCF85363"
select REGMAP_I2C
@@ -971,6 +962,18 @@ config RTC_DRV_PCF2127
This driver can also be built as a module. If so, the module
will be called rtc-pcf2127.
+config RTC_DRV_PCF85063
+ tristate "NXP PCF85063"
+ depends on RTC_I2C_AND_SPI
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
+ help
+ If you say yes here you get support for the PCF85063 and RV8063
+ RTC chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf85063.
+
config RTC_DRV_RV3029C2
tristate "Micro Crystal RV3029/3049"
depends on RTC_I2C_AND_SPI
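
The Kconfig move above turns PCF85063 into an I2C-and-SPI driver (covering the RV8063 variant), following the usual pattern of one regmap-based core with thin bus front ends. A hedged sketch of that pattern, using hypothetical example_* names rather than the real pcf85063 symbols:

	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/spi/spi.h>
	#include <linux/regmap.h>

	static const struct regmap_config example_regmap_cfg = {
		.reg_bits = 8,
		.val_bits = 8,
	};

	/* bus-agnostic core: everything past this point only sees a regmap */
	static int example_rtc_core_probe(struct device *dev, struct regmap *map)
	{
		return regmap_write(map, 0x00, 0x00);	/* e.g. clear a control register */
	}

	static int example_i2c_probe(struct i2c_client *client)
	{
		struct regmap *map = devm_regmap_init_i2c(client, &example_regmap_cfg);

		return IS_ERR(map) ? PTR_ERR(map) : example_rtc_core_probe(&client->dev, map);
	}

	static int example_spi_probe(struct spi_device *spi)
	{
		struct regmap *map = devm_regmap_init_spi(spi, &example_regmap_cfg);

		return IS_ERR(map) ? PTR_ERR(map) : example_rtc_core_probe(&spi->dev, map);
	}

The "select REGMAP_I2C if I2C / select REGMAP_SPI if SPI_MASTER" lines ensure only the regmap backend for the enabled bus gets built in.
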
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 4619aa2ac469..789bddfea99d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -15,7 +15,7 @@ rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o
rtc-core-$(CONFIG_RTC_INTF_SYSFS) += sysfs.o
-obj-$(CONFIG_RTC_LIB_KUNIT_TEST) += lib_test.o
+obj-$(CONFIG_RTC_LIB_KUNIT_TEST) += test_rtc_lib.o
# Keep the list ordered.
diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c
index 13b5b1f20465..f7051592a6e3 100644
--- a/drivers/rtc/lib.c
+++ b/drivers/rtc/lib.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(rtc_year_days);
*/
void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
{
- int days, secs;
+ int secs;
u64 u64tmp;
u32 u32tmp, udays, century, day_of_century, year_of_century, year,
@@ -59,28 +59,26 @@ void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
bool is_Jan_or_Feb, is_leap_year;
/*
- * Get days and seconds while preserving the sign to
- * handle negative time values (dates before 1970-01-01)
+ * The time represented by `time` is given in seconds since 1970-01-01
+ * (UTC). As the division done below might misbehave for negative
+ * values, we convert it to seconds since 0000-03-01 and then assume it
+ * will be non-negative.
+ * Below we do 4 * udays + 3 which should fit into a 32 bit unsigned
+ * variable. So the latest date this algorithm works for is 1073741823
+ * days after 0000-03-01 which is in the year 2939805.
*/
- days = div_s64_rem(time, 86400, &secs);
+ time += (u64)719468 * 86400;
+
+ udays = div_s64_rem(time, 86400, &secs);
/*
- * We need 0 <= secs < 86400 which isn't given for negative
- * values of time. Fixup accordingly.
+ * day of the week, 0000-03-01 was a Wednesday (in the proleptic
+ * Gregorian calendar)
*/
- if (secs < 0) {
- days -= 1;
- secs += 86400;
- }
-
- /* day of the week, 1970-01-01 was a Thursday */
- tm->tm_wday = (days + 4) % 7;
- /* Ensure tm_wday is always positive */
- if (tm->tm_wday < 0)
- tm->tm_wday += 7;
+ tm->tm_wday = (udays + 3) % 7;
/*
- * The following algorithm is, basically, Proposition 6.3 of Neri
+ * The following algorithm is, basically, Figure 12 of Neri
* and Schneider [1]. In a few words: it works on the computational
* (fictitious) calendar where the year starts in March, month = 2
* (*), and finishes in February, month = 13. This calendar is
@@ -100,15 +98,15 @@ void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
* (using just arithmetics) it's easy to convert it to the
* corresponding date in the Gregorian calendar.
*
- * [1] "Euclidean Affine Functions and Applications to Calendar
- * Algorithms". https://arxiv.org/abs/2102.06959
+ * [1] Neri C, Schneider L. Euclidean affine functions and their
+ * application to calendar algorithms. Softw Pract Exper.
+ * 2023;53(4):937-970. doi: 10.1002/spe.3172
+ * https://doi.org/10.1002/spe.3172
*
* (*) The numbering of months follows rtc_time more closely and
* thus, is slightly different from [1].
*/
- udays = days + 719468;
-
u32tmp = 4 * udays + 3;
century = u32tmp / 146097;
day_of_century = u32tmp % 146097 / 4;
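
For readers following the rewritten rtc_time64_to_tm() comment: the shift by 719468 days moves the epoch from 1970-01-01 to 0000-03-01, so every later division operates on non-negative values, and the year is made to start in March so the leap day falls at the end of the computational year. The sketch below is not the kernel's exact fixed-point arithmetic, just a plain-C rendition of the same computational-calendar idea (Hinnant / Neri-Schneider style) that can be compiled on a host to sanity-check a conversion; all names are chosen for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* days since 1970-01-01 -> Gregorian y/m/d via the "year starts in March" calendar */
	static void civil_from_days(int64_t days, int *y, int *m, int *d)
	{
		int64_t z = days + 719468;			/* day 0 is now 0000-03-01 */
		int64_t era = (z >= 0 ? z : z - 146096) / 146097;
		unsigned doe = (unsigned)(z - era * 146097);	/* day of 400-year era */
		unsigned yoe = (doe - doe / 1460 + doe / 36524 - doe / 146096) / 365;
		unsigned doy = doe - (365 * yoe + yoe / 4 - yoe / 100);
		unsigned mp = (5 * doy + 2) / 153;		/* March == 0 */

		*d = doy - (153 * mp + 2) / 5 + 1;
		*m = mp < 10 ? mp + 3 : mp - 9;
		*y = (int)(yoe + era * 400) + (*m <= 2);	/* Jan/Feb belong to the next year */
	}

	int main(void)
	{
		int y, m, d;

		civil_from_days(0, &y, &m, &d);			/* expect 1970-01-01 */
		printf("%04d-%02d-%02d\n", y, m, d);
		return 0;
	}
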
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 5efbe69bf5ca..7205c59ff729 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -279,6 +279,13 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
if (tmp & DS1340_BIT_OSF)
return -EINVAL;
break;
+ case ds_1341:
+ ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &tmp);
+ if (ret)
+ return ret;
+ if (tmp & DS1337_BIT_OSF)
+ return -EINVAL;
+ break;
case ds_1388:
ret = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &tmp);
if (ret)
@@ -377,6 +384,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG,
DS1340_BIT_OSF, 0);
break;
+ case ds_1341:
+ regmap_update_bits(ds1307->regmap, DS1337_REG_STATUS,
+ DS1337_BIT_OSF, 0);
+ break;
case ds_1388:
regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
DS1388_BIT_OSF, 0);
@@ -1456,16 +1467,21 @@ static unsigned long ds3231_clk_sqw_recalc_rate(struct clk_hw *hw,
return ds3231_clk_sqw_rates[rate_sel];
}
-static long ds3231_clk_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ds3231_clk_sqw_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
for (i = ARRAY_SIZE(ds3231_clk_sqw_rates) - 1; i >= 0; i--) {
- if (ds3231_clk_sqw_rates[i] <= rate)
- return ds3231_clk_sqw_rates[i];
+ if (ds3231_clk_sqw_rates[i] <= req->rate) {
+ req->rate = ds3231_clk_sqw_rates[i];
+
+ return 0;
+ }
}
+ req->rate = ds3231_clk_sqw_rates[ARRAY_SIZE(ds3231_clk_sqw_rates) - 1];
+
return 0;
}
@@ -1525,7 +1541,7 @@ static const struct clk_ops ds3231_clk_sqw_ops = {
.unprepare = ds3231_clk_sqw_unprepare,
.is_prepared = ds3231_clk_sqw_is_prepared,
.recalc_rate = ds3231_clk_sqw_recalc_rate,
- .round_rate = ds3231_clk_sqw_round_rate,
+ .determine_rate = ds3231_clk_sqw_determine_rate,
.set_rate = ds3231_clk_sqw_set_rate,
};
@@ -1813,10 +1829,8 @@ static int ds1307_probe(struct i2c_client *client)
regmap_write(ds1307->regmap, DS1337_REG_CONTROL,
regs[0]);
- /* oscillator fault? clear flag, and warn */
+ /* oscillator fault? warn */
if (regs[1] & DS1337_BIT_OSF) {
- regmap_write(ds1307->regmap, DS1337_REG_STATUS,
- regs[1] & ~DS1337_BIT_OSF);
dev_warn(ds1307->dev, "SET TIME!\n");
}
break;
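
The .round_rate -> .determine_rate conversions in the RTC clkout providers (ds3231 above; hym8563, m41t80, max31335, nct3018y and pcf85063 below) all follow the same clk framework pattern: instead of returning the rounded frequency, the callback rewrites req->rate inside struct clk_rate_request and returns 0 (or a negative error). A generic sketch of that shape, using a hypothetical example_rates[] table rather than any specific driver's frequencies:

	#include <linux/clk-provider.h>

	/* hypothetical descending table of supported clkout frequencies */
	static const unsigned long example_rates[] = { 32768, 1024, 32, 1 };

	static int example_clkout_determine_rate(struct clk_hw *hw,
						 struct clk_rate_request *req)
	{
		int i;

		/* pick the highest supported rate not exceeding the request */
		for (i = 0; i < ARRAY_SIZE(example_rates); i++) {
			if (example_rates[i] <= req->rate) {
				req->rate = example_rates[i];
				return 0;
			}
		}

		/* request was below the lowest rate: clamp to the smallest one */
		req->rate = example_rates[ARRAY_SIZE(example_rates) - 1];
		return 0;
	}

Each driver's clk_ops then points .determine_rate at its converted function in place of .round_rate, exactly as the hunks below do; the exact fallback rate chosen when nothing fits differs per driver.
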
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 38e25f63597a..97423f1d0361 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -3,7 +3,7 @@
* An rtc driver for the Dallas/Maxim DS1685/DS1687 and related real-time
* chips.
*
- * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>.
+ * Copyright (C) 2011-2014 Joshua Kinard <linux@kumba.dev>.
* Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>.
*
* References:
@@ -1436,7 +1436,7 @@ EXPORT_SYMBOL_GPL(ds1685_rtc_poweroff);
/* ----------------------------------------------------------------------- */
-MODULE_AUTHOR("Joshua Kinard <kumba@gentoo.org>");
+MODULE_AUTHOR("Joshua Kinard <linux@kumba.dev>");
MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd-electronics.com>");
MODULE_DESCRIPTION("Dallas/Maxim DS1685/DS1687-series RTC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 63f11ea3589d..7a170c0f9710 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -285,14 +285,19 @@ static unsigned long hym8563_clkout_recalc_rate(struct clk_hw *hw,
return clkout_rates[ret];
}
-static long hym8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hym8563_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
- if (clkout_rates[i] <= rate)
- return clkout_rates[i];
+ if (clkout_rates[i] <= req->rate) {
+ req->rate = clkout_rates[i];
+
+ return 0;
+ }
+
+ req->rate = clkout_rates[0];
return 0;
}
@@ -363,7 +368,7 @@ static const struct clk_ops hym8563_clkout_ops = {
.unprepare = hym8563_clkout_unprepare,
.is_prepared = hym8563_clkout_is_prepared,
.recalc_rate = hym8563_clkout_recalc_rate,
- .round_rate = hym8563_clkout_round_rate,
+ .determine_rate = hym8563_clkout_determine_rate,
.set_rate = hym8563_clkout_set_rate,
};
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index c568639d2151..740cab013f59 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -72,7 +72,7 @@
static const struct i2c_device_id m41t80_id[] = {
{ "m41t62", M41T80_FEATURE_SQ | M41T80_FEATURE_SQ_ALT },
- { "m41t65", M41T80_FEATURE_HT | M41T80_FEATURE_WD },
+ { "m41t65", M41T80_FEATURE_WD },
{ "m41t80", M41T80_FEATURE_SQ },
{ "m41t81", M41T80_FEATURE_HT | M41T80_FEATURE_SQ},
{ "m41t81s", M41T80_FEATURE_HT | M41T80_FEATURE_BL | M41T80_FEATURE_SQ },
@@ -93,7 +93,7 @@ static const __maybe_unused struct of_device_id m41t80_of_match[] = {
},
{
.compatible = "st,m41t65",
- .data = (void *)(M41T80_FEATURE_HT | M41T80_FEATURE_WD)
+ .data = (void *)(M41T80_FEATURE_WD)
},
{
.compatible = "st,m41t80",
@@ -484,16 +484,17 @@ static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
return sqw_to_m41t80_data(hw)->freq;
}
-static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int m41t80_sqw_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate >= M41T80_SQW_MAX_FREQ)
- return M41T80_SQW_MAX_FREQ;
- if (rate >= M41T80_SQW_MAX_FREQ / 4)
- return M41T80_SQW_MAX_FREQ / 4;
- if (!rate)
- return 0;
- return 1 << ilog2(rate);
+ if (req->rate >= M41T80_SQW_MAX_FREQ)
+ req->rate = M41T80_SQW_MAX_FREQ;
+ else if (req->rate >= M41T80_SQW_MAX_FREQ / 4)
+ req->rate = M41T80_SQW_MAX_FREQ / 4;
+ else if (req->rate)
+ req->rate = 1 << ilog2(req->rate);
+
+ return 0;
}
static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -564,7 +565,7 @@ static const struct clk_ops m41t80_sqw_ops = {
.unprepare = m41t80_sqw_unprepare,
.is_prepared = m41t80_sqw_is_prepared,
.recalc_rate = m41t80_sqw_recalc_rate,
- .round_rate = m41t80_sqw_round_rate,
+ .determine_rate = m41t80_sqw_determine_rate,
.set_rate = m41t80_sqw_set_rate,
};
diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c
index a7bb37aaab9e..dfb5bad3a369 100644
--- a/drivers/rtc/rtc-max31335.c
+++ b/drivers/rtc/rtc-max31335.c
@@ -497,15 +497,17 @@ static unsigned long max31335_clkout_recalc_rate(struct clk_hw *hw,
return max31335_clkout_freq[reg & freq_mask];
}
-static long max31335_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int max31335_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int index;
- index = find_closest(rate, max31335_clkout_freq,
+ index = find_closest(req->rate, max31335_clkout_freq,
ARRAY_SIZE(max31335_clkout_freq));
- return max31335_clkout_freq[index];
+ req->rate = max31335_clkout_freq[index];
+
+ return 0;
}
static int max31335_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -554,7 +556,7 @@ static int max31335_clkout_is_enabled(struct clk_hw *hw)
static const struct clk_ops max31335_clkout_ops = {
.recalc_rate = max31335_clkout_recalc_rate,
- .round_rate = max31335_clkout_round_rate,
+ .determine_rate = max31335_clkout_determine_rate,
.set_rate = max31335_clkout_set_rate,
.enable = max31335_clkout_enable,
.disable = max31335_clkout_disable,
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
index 76c5f464b2da..cd4b1db902e9 100644
--- a/drivers/rtc/rtc-nct3018y.c
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -367,14 +367,19 @@ static unsigned long nct3018y_clkout_recalc_rate(struct clk_hw *hw,
return clkout_rates[flags];
}
-static long nct3018y_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int nct3018y_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
- if (clkout_rates[i] <= rate)
- return clkout_rates[i];
+ if (clkout_rates[i] <= req->rate) {
+ req->rate = clkout_rates[i];
+
+ return 0;
+ }
+
+ req->rate = clkout_rates[0];
return 0;
}
@@ -446,7 +451,7 @@ static const struct clk_ops nct3018y_clkout_ops = {
.unprepare = nct3018y_clkout_unprepare,
.is_prepared = nct3018y_clkout_is_prepared,
.recalc_rate = nct3018y_clkout_recalc_rate,
- .round_rate = nct3018y_clkout_round_rate,
+ .determine_rate = nct3018y_clkout_determine_rate,
.set_rate = nct3018y_clkout_set_rate,
};
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 4fa5c4ecdd5a..f643e0bd7351 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
+#include <linux/spi/spi.h>
/*
* Information for this driver was pulled from the following datasheets.
@@ -29,6 +30,9 @@
*
* https://www.microcrystal.com/fileadmin/Media/Products/RTC/App.Manual/RV-8263-C7_App-Manual.pdf
* RV8263 -- Rev. 1.0 — January 2019
+ *
+ * https://www.microcrystal.com/fileadmin/Media/Products/RTC/App.Manual/RV-8063-C7_App-Manual.pdf
+ * RV8063 -- Rev. 1.1 - October 2018
*/
#define PCF85063_REG_CTRL1 0x00 /* status */
@@ -401,14 +405,19 @@ static unsigned long pcf85063_clkout_recalc_rate(struct clk_hw *hw,
return clkout_rates[buf];
}
-static long pcf85063_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int pcf85063_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
- if (clkout_rates[i] <= rate)
- return clkout_rates[i];
+ if (clkout_rates[i] <= req->rate) {
+ req->rate = clkout_rates[i];
+
+ return 0;
+ }
+
+ req->rate = clkout_rates[0];
return 0;
}
@@ -482,7 +491,7 @@ static const struct clk_ops pcf85063_clkout_ops = {
.unprepare = pcf85063_clkout_unprepare,
.is_prepared = pcf85063_clkout_is_prepared,
.recalc_rate = pcf85063_clkout_recalc_rate,
- .round_rate = pcf85063_clkout_round_rate,
+ .determine_rate = pcf85063_clkout_determine_rate,
.set_rate = pcf85063_clkout_set_rate,
};
@@ -524,47 +533,12 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
}
#endif
-static const struct pcf85063_config config_pcf85063 = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x0a,
- },
-};
-
-static const struct pcf85063_config config_pcf85063tp = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x0a,
- },
-};
-
-static const struct pcf85063_config config_pcf85063a = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
-};
-
-static const struct pcf85063_config config_rv8263 = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
- .force_cap_7000 = 1,
-};
-
-static int pcf85063_probe(struct i2c_client *client)
+static int pcf85063_probe(struct device *dev, struct regmap *regmap, int irq,
+ const struct pcf85063_config *config)
{
struct pcf85063 *pcf85063;
unsigned int tmp;
int err;
- const struct pcf85063_config *config;
struct nvmem_config nvmem_cfg = {
.name = "pcf85063_nvram",
.reg_read = pcf85063_nvmem_read,
@@ -573,28 +547,22 @@ static int pcf85063_probe(struct i2c_client *client)
.size = 1,
};
- dev_dbg(&client->dev, "%s\n", __func__);
+ dev_dbg(dev, "%s\n", __func__);
- pcf85063 = devm_kzalloc(&client->dev, sizeof(struct pcf85063),
+ pcf85063 = devm_kzalloc(dev, sizeof(struct pcf85063),
GFP_KERNEL);
if (!pcf85063)
return -ENOMEM;
- config = i2c_get_match_data(client);
- if (!config)
- return -ENODEV;
-
- pcf85063->regmap = devm_regmap_init_i2c(client, &config->regmap);
- if (IS_ERR(pcf85063->regmap))
- return PTR_ERR(pcf85063->regmap);
+ pcf85063->regmap = regmap;
- i2c_set_clientdata(client, pcf85063);
+ dev_set_drvdata(dev, pcf85063);
err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp);
if (err)
- return dev_err_probe(&client->dev, err, "RTC chip is not present\n");
+ return dev_err_probe(dev, err, "RTC chip is not present\n");
- pcf85063->rtc = devm_rtc_allocate_device(&client->dev);
+ pcf85063->rtc = devm_rtc_allocate_device(dev);
if (IS_ERR(pcf85063->rtc))
return PTR_ERR(pcf85063->rtc);
@@ -605,19 +573,17 @@ static int pcf85063_probe(struct i2c_client *client)
* of the registers after the automatic power-on reset...
*/
if (tmp & PCF85063_REG_SC_OS) {
- dev_warn(&client->dev,
- "POR issue detected, sending a SW reset\n");
+ dev_warn(dev, "POR issue detected, sending a SW reset\n");
err = regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1,
PCF85063_REG_CTRL1_SWR);
if (err < 0)
- dev_warn(&client->dev,
- "SW reset failed, trying to continue\n");
+ dev_warn(dev, "SW reset failed, trying to continue\n");
}
- err = pcf85063_load_capacitance(pcf85063, client->dev.of_node,
+ err = pcf85063_load_capacitance(pcf85063, dev->of_node,
config->force_cap_7000 ? 7000 : 0);
if (err < 0)
- dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
+ dev_warn(dev, "failed to set xtal load capacitance: %d",
err);
pcf85063->rtc->ops = &pcf85063_rtc_ops;
@@ -627,13 +593,13 @@ static int pcf85063_probe(struct i2c_client *client)
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf85063->rtc->features);
clear_bit(RTC_FEATURE_ALARM, pcf85063->rtc->features);
- if (config->has_alarms && client->irq > 0) {
+ if (config->has_alarms && irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
- if (dev_fwnode(&client->dev))
+ if (dev_fwnode(dev))
irqflags = 0;
- err = devm_request_threaded_irq(&client->dev, client->irq,
+ err = devm_request_threaded_irq(dev, irq,
NULL, pcf85063_rtc_handle_irq,
irqflags | IRQF_ONESHOT,
"pcf85063", pcf85063);
@@ -642,8 +608,8 @@ static int pcf85063_probe(struct i2c_client *client)
"unable to request IRQ, alarms disabled\n");
} else {
set_bit(RTC_FEATURE_ALARM, pcf85063->rtc->features);
- device_init_wakeup(&client->dev, true);
- err = dev_pm_set_wake_irq(&client->dev, client->irq);
+ device_init_wakeup(dev, true);
+ err = dev_pm_set_wake_irq(dev, irq);
if (err)
dev_err(&pcf85063->rtc->dev,
"failed to enable irq wake\n");
@@ -661,6 +627,43 @@ static int pcf85063_probe(struct i2c_client *client)
return devm_rtc_register_device(pcf85063->rtc);
}
+#if IS_ENABLED(CONFIG_I2C)
+
+static const struct pcf85063_config config_pcf85063 = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
+ },
+};
+
+static const struct pcf85063_config config_pcf85063tp = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
+ },
+};
+
+static const struct pcf85063_config config_pcf85063a = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+};
+
+static const struct pcf85063_config config_rv8263 = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+ .force_cap_7000 = 1,
+};
+
static const struct i2c_device_id pcf85063_ids[] = {
{ "pca85073a", .driver_data = (kernel_ulong_t)&config_pcf85063a },
{ "pcf85063", .driver_data = (kernel_ulong_t)&config_pcf85063 },
@@ -683,16 +686,146 @@ static const struct of_device_id pcf85063_of_match[] = {
MODULE_DEVICE_TABLE(of, pcf85063_of_match);
#endif
+static int pcf85063_i2c_probe(struct i2c_client *client)
+{
+ const struct pcf85063_config *config;
+ struct regmap *regmap;
+
+ config = i2c_get_match_data(client);
+ if (!config)
+ return -ENODEV;
+
+ regmap = devm_regmap_init_i2c(client, &config->regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return pcf85063_probe(&client->dev, regmap, client->irq, config);
+}
+
static struct i2c_driver pcf85063_driver = {
.driver = {
.name = "rtc-pcf85063",
.of_match_table = of_match_ptr(pcf85063_of_match),
},
- .probe = pcf85063_probe,
+ .probe = pcf85063_i2c_probe,
.id_table = pcf85063_ids,
};
-module_i2c_driver(pcf85063_driver);
+static int pcf85063_register_driver(void)
+{
+ return i2c_add_driver(&pcf85063_driver);
+}
+
+static void pcf85063_unregister_driver(void)
+{
+ i2c_del_driver(&pcf85063_driver);
+}
+
+#else
+
+static int pcf85063_register_driver(void)
+{
+ return 0;
+}
+
+static void pcf85063_unregister_driver(void)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_I2C) */
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static const struct pcf85063_config config_rv8063 = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ .read_flag_mask = BIT(7) | BIT(5),
+ .write_flag_mask = BIT(5),
+ },
+ .has_alarms = 1,
+ .force_cap_7000 = 1,
+};
+
+static const struct spi_device_id rv8063_id[] = {
+ { "rv8063" },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, rv8063_id);
+
+static const struct of_device_id rv8063_of_match[] = {
+ { .compatible = "microcrystal,rv8063" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rv8063_of_match);
+
+static int rv8063_probe(struct spi_device *spi)
+{
+ const struct pcf85063_config *config = &config_rv8063;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &config->regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return pcf85063_probe(&spi->dev, regmap, spi->irq, config);
+}
+
+static struct spi_driver rv8063_driver = {
+ .driver = {
+ .name = "rv8063",
+ .of_match_table = rv8063_of_match,
+ },
+ .probe = rv8063_probe,
+ .id_table = rv8063_id,
+};
+
+static int __init rv8063_register_driver(void)
+{
+ return spi_register_driver(&rv8063_driver);
+}
+
+static void __exit rv8063_unregister_driver(void)
+{
+ spi_unregister_driver(&rv8063_driver);
+}
+
+#else
+
+static int __init rv8063_register_driver(void)
+{
+ return 0;
+}
+
+static void __exit rv8063_unregister_driver(void)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_SPI_MASTER) */
+
+static int __init pcf85063_init(void)
+{
+ int ret;
+
+ ret = pcf85063_register_driver();
+ if (ret)
+ return ret;
+
+ ret = rv8063_register_driver();
+ if (ret)
+ pcf85063_unregister_driver();
+
+ return ret;
+}
+module_init(pcf85063_init);
+
+static void __exit pcf85063_exit(void)
+{
+ rv8063_unregister_driver();
+ pcf85063_unregister_driver();
+}
+module_exit(pcf85063_exit);
MODULE_AUTHOR("Søren Andersen <san@rosetechnology.dk>");
MODULE_DESCRIPTION("PCF85063 RTC driver");
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index b2611697fa5e..4e61011fb7a9 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -330,14 +330,19 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
return clkout_rates[buf];
}
-static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int pcf8563_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
- if (clkout_rates[i] <= rate)
- return clkout_rates[i];
+ if (clkout_rates[i] <= req->rate) {
+ req->rate = clkout_rates[i];
+
+ return 0;
+ }
+
+ req->rate = clkout_rates[0];
return 0;
}
@@ -413,7 +418,7 @@ static const struct clk_ops pcf8563_clkout_ops = {
.unprepare = pcf8563_clkout_unprepare,
.is_prepared = pcf8563_clkout_is_prepared,
.recalc_rate = pcf8563_clkout_recalc_rate,
- .round_rate = pcf8563_clkout_round_rate,
+ .determine_rate = pcf8563_clkout_determine_rate,
.set_rate = pcf8563_clkout_set_rate,
};
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 868d1b1eb0f4..c2a531f0e125 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -731,14 +731,19 @@ static unsigned long rv3028_clkout_recalc_rate(struct clk_hw *hw,
return clkout_rates[clkout];
}
-static long rv3028_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int rv3028_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
- if (clkout_rates[i] <= rate)
- return clkout_rates[i];
+ if (clkout_rates[i] <= req->rate) {
+ req->rate = clkout_rates[i];
+
+ return 0;
+ }
+
+ req->rate = clkout_rates[0];
return 0;
}
@@ -802,7 +807,7 @@ static const struct clk_ops rv3028_clkout_ops = {
.unprepare = rv3028_clkout_unprepare,
.is_prepared = rv3028_clkout_is_prepared,
.recalc_rate = rv3028_clkout_recalc_rate,
- .round_rate = rv3028_clkout_round_rate,
+ .determine_rate = rv3028_clkout_determine_rate,
.set_rate = rv3028_clkout_set_rate,
};
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index 2c6a8918acba..b8376bd1d905 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -646,19 +646,24 @@ static unsigned long rv3032_clkout_recalc_rate(struct clk_hw *hw,
return clkout_xtal_rates[FIELD_GET(RV3032_CLKOUT2_FD_MSK, clkout)];
}
-static long rv3032_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int rv3032_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i, hfd;
- if (rate < RV3032_HFD_STEP)
+ if (req->rate < RV3032_HFD_STEP)
for (i = 0; i < ARRAY_SIZE(clkout_xtal_rates); i++)
- if (clkout_xtal_rates[i] <= rate)
- return clkout_xtal_rates[i];
+ if (clkout_xtal_rates[i] <= req->rate) {
+ req->rate = clkout_xtal_rates[i];
- hfd = DIV_ROUND_CLOSEST(rate, RV3032_HFD_STEP);
+ return 0;
+ }
+
+ hfd = DIV_ROUND_CLOSEST(req->rate, RV3032_HFD_STEP);
- return RV3032_HFD_STEP * clamp(hfd, 0, 8192);
+ req->rate = RV3032_HFD_STEP * clamp(hfd, 0, 8192);
+
+ return 0;
}
static int rv3032_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -738,7 +743,7 @@ static const struct clk_ops rv3032_clkout_ops = {
.unprepare = rv3032_clkout_unprepare,
.is_prepared = rv3032_clkout_is_prepared,
.recalc_rate = rv3032_clkout_recalc_rate,
- .round_rate = rv3032_clkout_round_rate,
+ .determine_rate = rv3032_clkout_determine_rate,
.set_rate = rv3032_clkout_set_rate,
};
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 5dd575865adf..79b2a16f15ad 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -549,25 +549,25 @@ static void s3c6410_rtc_irq(struct s3c_rtc *info, int mask)
writeb(mask, info->base + S3C2410_INTP);
}
-static struct s3c_rtc_data const s3c2410_rtc_data = {
+static const struct s3c_rtc_data s3c2410_rtc_data = {
.irq_handler = s3c24xx_rtc_irq,
.enable = s3c24xx_rtc_enable,
.disable = s3c24xx_rtc_disable,
};
-static struct s3c_rtc_data const s3c2416_rtc_data = {
+static const struct s3c_rtc_data s3c2416_rtc_data = {
.irq_handler = s3c24xx_rtc_irq,
.enable = s3c24xx_rtc_enable,
.disable = s3c24xx_rtc_disable,
};
-static struct s3c_rtc_data const s3c2443_rtc_data = {
+static const struct s3c_rtc_data s3c2443_rtc_data = {
.irq_handler = s3c24xx_rtc_irq,
.enable = s3c24xx_rtc_enable,
.disable = s3c24xx_rtc_disable,
};
-static struct s3c_rtc_data const s3c6410_rtc_data = {
+static const struct s3c_rtc_data s3c6410_rtc_data = {
.needs_src_clk = true,
.irq_handler = s3c6410_rtc_irq,
.enable = s3c24xx_rtc_enable,
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index f15ef3aa82a0..619800a00479 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -455,7 +455,7 @@ static void __exit sh_rtc_remove(struct platform_device *pdev)
clk_disable(rtc->clk);
}
-static int __maybe_unused sh_rtc_suspend(struct device *dev)
+static int sh_rtc_suspend(struct device *dev)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
@@ -465,7 +465,7 @@ static int __maybe_unused sh_rtc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused sh_rtc_resume(struct device *dev)
+static int sh_rtc_resume(struct device *dev)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
@@ -475,7 +475,7 @@ static int __maybe_unused sh_rtc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(sh_rtc_pm_ops, sh_rtc_suspend, sh_rtc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sh_rtc_pm_ops, sh_rtc_suspend, sh_rtc_resume);
static const struct of_device_id sh_rtc_of_match[] = {
{ .compatible = "renesas,sh-rtc", },
@@ -492,7 +492,7 @@ MODULE_DEVICE_TABLE(of, sh_rtc_of_match);
static struct platform_driver sh_rtc_platform_driver __refdata = {
.driver = {
.name = DRV_NAME,
- .pm = &sh_rtc_pm_ops,
+ .pm = pm_sleep_ptr(&sh_rtc_pm_ops),
.of_match_table = sh_rtc_of_match,
},
.remove = __exit_p(sh_rtc_remove),
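
The rtc-sh hunks above replace SIMPLE_DEV_PM_OPS plus __maybe_unused callbacks with DEFINE_SIMPLE_DEV_PM_OPS and pm_sleep_ptr(): when CONFIG_PM_SLEEP is disabled, pm_sleep_ptr() yields NULL and the callbacks are discarded as unreferenced, so the annotations become unnecessary. A generic, hedged sketch of that idiom (driver and function names are placeholders):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
        /* ... quiesce the device ... */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* ... restore the device ... */
        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
        .driver = {
                .name = "foo",
                /* NULL when CONFIG_PM_SLEEP=n, so foo_pm_ops and the two
                 * callbacks above are dropped as dead code. */
                .pm = pm_sleep_ptr(&foo_pm_ops),
        },
};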
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index ef8fb88aab48..d4ebf3eb54aa 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -393,7 +393,7 @@ static const struct pinmux_ops stm32_rtc_pinmux_ops = {
.strict = true,
};
-static struct pinctrl_desc stm32_rtc_pdesc = {
+static const struct pinctrl_desc stm32_rtc_pdesc = {
.name = DRIVER_NAME,
.pins = stm32_rtc_pinctrl_pins,
.npins = ARRAY_SIZE(stm32_rtc_pinctrl_pins),
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index e3062c4d3f2c..4ab05e105a76 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -24,8 +24,8 @@
static ssize_t
name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%s %s\n", dev_driver_string(dev->parent),
- dev_name(dev->parent));
+ return sysfs_emit(buf, "%s %s\n", dev_driver_string(dev->parent),
+ dev_name(dev->parent));
}
static DEVICE_ATTR_RO(name);
@@ -39,7 +39,7 @@ date_show(struct device *dev, struct device_attribute *attr, char *buf)
if (retval)
return retval;
- return sprintf(buf, "%ptRd\n", &tm);
+ return sysfs_emit(buf, "%ptRd\n", &tm);
}
static DEVICE_ATTR_RO(date);
@@ -53,7 +53,7 @@ time_show(struct device *dev, struct device_attribute *attr, char *buf)
if (retval)
return retval;
- return sprintf(buf, "%ptRt\n", &tm);
+ return sysfs_emit(buf, "%ptRt\n", &tm);
}
static DEVICE_ATTR_RO(time);
@@ -64,21 +64,17 @@ since_epoch_show(struct device *dev, struct device_attribute *attr, char *buf)
struct rtc_time tm;
retval = rtc_read_time(to_rtc_device(dev), &tm);
- if (retval == 0) {
- time64_t time;
-
- time = rtc_tm_to_time64(&tm);
- retval = sprintf(buf, "%lld\n", time);
- }
+ if (retval)
+ return retval;
- return retval;
+ return sysfs_emit(buf, "%lld\n", rtc_tm_to_time64(&tm));
}
static DEVICE_ATTR_RO(since_epoch);
static ssize_t
max_user_freq_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", to_rtc_device(dev)->max_user_freq);
+ return sysfs_emit(buf, "%d\n", to_rtc_device(dev)->max_user_freq);
}
static ssize_t
@@ -118,9 +114,9 @@ hctosys_show(struct device *dev, struct device_attribute *attr, char *buf)
if (rtc_hctosys_ret == 0 &&
strcmp(dev_name(&to_rtc_device(dev)->dev),
CONFIG_RTC_HCTOSYS_DEVICE) == 0)
- return sprintf(buf, "1\n");
+ return sysfs_emit(buf, "1\n");
#endif
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR_RO(hctosys);
@@ -128,7 +124,6 @@ static ssize_t
wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
ssize_t retval;
- time64_t alarm;
struct rtc_wkalrm alm;
/* Don't show disabled alarms. For uniformity, RTC alarms are
@@ -140,12 +135,13 @@ wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
* alarms after they trigger, to ensure one-shot semantics.
*/
retval = rtc_read_alarm(to_rtc_device(dev), &alm);
- if (retval == 0 && alm.enabled) {
- alarm = rtc_tm_to_time64(&alm.time);
- retval = sprintf(buf, "%lld\n", alarm);
- }
+ if (retval)
+ return retval;
- return retval;
+ if (alm.enabled)
+ return sysfs_emit(buf, "%lld\n", rtc_tm_to_time64(&alm.time));
+
+ return 0;
}
static ssize_t
@@ -222,10 +218,10 @@ offset_show(struct device *dev, struct device_attribute *attr, char *buf)
long offset;
retval = rtc_read_offset(to_rtc_device(dev), &offset);
- if (retval == 0)
- retval = sprintf(buf, "%ld\n", offset);
+ if (retval)
+ return retval;
- return retval;
+ return sysfs_emit(buf, "%ld\n", offset);
}
static ssize_t
@@ -246,8 +242,8 @@ static DEVICE_ATTR_RW(offset);
static ssize_t
range_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "[%lld,%llu]\n", to_rtc_device(dev)->range_min,
- to_rtc_device(dev)->range_max);
+ return sysfs_emit(buf, "[%lld,%llu]\n", to_rtc_device(dev)->range_min,
+ to_rtc_device(dev)->range_max);
}
static DEVICE_ATTR_RO(range);
@@ -302,11 +298,7 @@ static struct attribute_group rtc_attr_group = {
.is_visible = rtc_attr_is_visible,
.attrs = rtc_attrs,
};
-
-static const struct attribute_group *rtc_attr_groups[] = {
- &rtc_attr_group,
- NULL
-};
+__ATTRIBUTE_GROUPS(rtc_attr);
const struct attribute_group **rtc_get_dev_attribute_groups(void)
{
@@ -318,17 +310,21 @@ int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps)
size_t old_cnt = 0, add_cnt = 0, new_cnt;
const struct attribute_group **groups, **old;
- if (!grps)
+ if (grps) {
+ for (groups = grps; *groups; groups++)
+ add_cnt++;
+ /* No need to modify current groups if nothing new is provided */
+ if (add_cnt == 0)
+ return 0;
+ } else {
return -EINVAL;
+ }
groups = rtc->dev.groups;
if (groups)
for (; *groups; groups++)
old_cnt++;
- for (groups = grps; *groups; groups++)
- add_cnt++;
-
new_cnt = old_cnt + add_cnt + 1;
groups = devm_kcalloc(&rtc->dev, new_cnt, sizeof(*groups), GFP_KERNEL);
if (!groups)
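
The sysfs.c conversion above is the standard sprintf() to sysfs_emit() cleanup for show() callbacks: sysfs_emit() knows the buffer is a full sysfs page and refuses to write past it. A minimal hedged sketch of a show() routine in that style (the attribute is illustrative):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        int value = 42;         /* stand-in for data read from the device */

        /* Output is bounded to PAGE_SIZE; sysfs_emit() also warns if 'buf'
         * is not page-aligned, catching callers that pass an offset. */
        return sysfs_emit(buf, "%d\n", value);
}
static DEVICE_ATTR_RO(example);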
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/test_rtc_lib.c
index 0eebad1fe2a0..0eebad1fe2a0 100644
--- a/drivers/rtc/lib_test.c
+++ b/drivers/rtc/test_rtc_lib.c
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f2e42c1d51aa..98e334724a62 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -77,6 +77,13 @@ unsigned long sclp_console_full;
/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;
+static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int)
+{
+ if (sccb_int)
+ return __va(sccb_int);
+ return NULL;
+}
+
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
struct sclp_trace_entry e;
@@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb)
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
- struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
+ struct sccb_header *sccb = sclpint_to_sccb(sccb_int);
struct evbuf_header *evbuf;
u16 response;
@@ -659,7 +666,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
/* INT: Interrupt received (a=intparm, b=cmd) */
sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
- (struct sccb_header *)__va(finished_sccb),
+ sclpint_to_sccb(finished_sccb),
!ok_response(finished_sccb, active_cmd));
if (finished_sccb) {
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 88b625ba1978..4b7ffa840563 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -180,7 +180,7 @@ struct ap_card {
atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
-#define TAPQ_CARD_HWINFO_MASK 0xFEFF0000FFFF0F0FUL
+#define TAPQ_CARD_HWINFO_MASK 0xFFFF0000FFFF0F0FUL
#define ASSOC_IDX_INVALID 0x10000
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 28cf18955a08..726c8531b7d3 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -481,8 +481,7 @@ void aac_define_int_mode(struct aac_dev *dev)
pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
min_msix = 2;
i = pci_alloc_irq_vectors(dev->pdev,
- min_msix, msi_count,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ min_msix, msi_count, PCI_IRQ_MSIX);
if (i > 0) {
dev->msi_enabled = 1;
msi_count = i;
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index c2fdc6553e62..1199d701c3f5 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -323,8 +323,6 @@ enum fnic_state {
FNIC_IN_ETH_TRANS_FC_MODE,
};
-struct mempool;
-
enum fnic_role_e {
FNIC_ROLE_FCP_INITIATOR = 0,
};
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index adb9e7a94785..bcecb4911da9 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -252,7 +252,7 @@ static int sas_get_ata_command_set(struct domain_device *dev)
return ata_dev_classify(&tf);
}
-int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
+static int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
if (phy->attached_tproto & SAS_PROTOCOL_STP)
dev->tproto = phy->attached_tproto;
@@ -927,13 +927,7 @@ EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);
void sas_ata_wait_eh(struct domain_device *dev)
{
- struct ata_port *ap;
-
- if (!dev_is_sata(dev))
- return;
-
- ap = dev->sata_dev.ap;
- ata_port_wait_eh(ap);
+ ata_port_wait_eh(dev->sata_dev.ap);
}
void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 951bdc554a10..b07062db50b2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -406,7 +406,7 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
}
}
-void sas_unregister_domain_devices(struct asd_sas_port *port, int gone)
+void sas_unregister_domain_devices(struct asd_sas_port *port, bool gone)
{
struct domain_device *dev, *n;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 03d6ec1eb970..6706f2be8d27 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -44,7 +44,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr);
int sas_discover_root_expander(struct domain_device *dev);
int sas_ex_revalidate_domain(struct domain_device *dev);
-void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
+void sas_unregister_domain_devices(struct asd_sas_port *port, bool gone);
void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port);
void sas_discover_event(struct asd_sas_port *port, enum discover_event ev);
@@ -70,7 +70,7 @@ void sas_enable_revalidation(struct sas_ha_struct *ha);
void sas_queue_deferred_work(struct sas_ha_struct *ha);
void __sas_drain_work(struct sas_ha_struct *ha);
-void sas_deform_port(struct asd_sas_phy *phy, int gone);
+void sas_deform_port(struct asd_sas_phy *phy, bool gone);
void sas_porte_bytes_dmaed(struct work_struct *work);
void sas_porte_broadcast_rcvd(struct work_struct *work);
@@ -222,4 +222,78 @@ static inline void sas_put_device(struct domain_device *dev)
kref_put(&dev->kref, sas_free_device);
}
+#ifdef CONFIG_SCSI_SAS_ATA
+
+int sas_ata_init(struct domain_device *dev);
+void sas_ata_task_abort(struct sas_task *task);
+int sas_discover_sata(struct domain_device *dev);
+int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id);
+void sas_ata_strategy_handler(struct Scsi_Host *shost);
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q);
+void sas_ata_end_eh(struct ata_port *ap);
+void sas_ata_wait_eh(struct domain_device *dev);
+void sas_probe_sata(struct asd_sas_port *port);
+void sas_suspend_sata(struct asd_sas_port *port);
+void sas_resume_sata(struct asd_sas_port *port);
+
+#else
+
+static inline int sas_ata_init(struct domain_device *dev)
+{
+ return 0;
+}
+
+static inline void sas_ata_task_abort(struct sas_task *task)
+{
+}
+
+static inline void sas_ata_strategy_handler(struct Scsi_Host *shost)
+{
+}
+
+static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
+{
+}
+
+static inline void sas_ata_end_eh(struct ata_port *ap)
+{
+}
+
+static inline void sas_ata_wait_eh(struct domain_device *dev)
+{
+}
+
+static inline void sas_probe_sata(struct asd_sas_port *port)
+{
+}
+
+static inline void sas_suspend_sata(struct asd_sas_port *port)
+{
+}
+
+static inline void sas_resume_sata(struct asd_sas_port *port)
+{
+}
+
+static inline void sas_ata_disabled_notice(void)
+{
+ pr_notice_once("ATA device seen but CONFIG_SCSI_SAS_ATA=N\n");
+}
+
+static inline int sas_discover_sata(struct domain_device *dev)
+{
+ sas_ata_disabled_notice();
+ return -ENXIO;
+}
+
+static inline int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id)
+{
+ sas_ata_disabled_notice();
+ return -ENODEV;
+}
+
+#endif
+
#endif /* _SAS_INTERNAL_H_ */
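
The sas_internal.h hunk above moves the SAS ATA helpers behind the usual kernel pattern of a real prototype under the config option and a static inline stub otherwise, so callers never need #ifdef blocks. The general shape, hedged and with placeholder names (CONFIG_FOO and foo_do_work are not libsas symbols):

struct foo_dev;

#ifdef CONFIG_FOO
int foo_do_work(struct foo_dev *dev);
#else
static inline int foo_do_work(struct foo_dev *dev)
{
        /* Feature compiled out: report success (or a fixed error, as the
         * stubs above do for sas_discover_sata()) so callers stay clean. */
        return 0;
}
#endif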
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 57494ac97076..635835c28ecd 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -20,7 +20,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
struct asd_sas_phy *phy = ev->phy;
phy->error = 0;
- sas_deform_port(phy, 1);
+ sas_deform_port(phy, true);
}
static void sas_phye_oob_done(struct work_struct *work)
@@ -40,7 +40,7 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
- sas_deform_port(phy, 1);
+ sas_deform_port(phy, true);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
phy->error++;
@@ -85,7 +85,7 @@ static void sas_phye_resume_timeout(struct work_struct *work)
phy->error = 0;
phy->suspended = 0;
- sas_deform_port(phy, 1);
+ sas_deform_port(phy, true);
}
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index e3f2ed913419..de7556070048 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -113,7 +113,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (port) {
if (!phy_is_wideport_member(port, phy))
- sas_deform_port(phy, 0);
+ sas_deform_port(phy, false);
else if (phy->suspended) {
phy->suspended = 0;
sas_resume_port(phy);
@@ -206,7 +206,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
* This is called when the physical link to the other phy has been
* lost (on this phy), in Event thread context. We cannot delay here.
*/
-void sas_deform_port(struct asd_sas_phy *phy, int gone)
+void sas_deform_port(struct asd_sas_phy *phy, bool gone)
{
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
@@ -301,7 +301,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- sas_deform_port(phy, 1);
+ sas_deform_port(phy, true);
}
void sas_porte_timer_event(struct work_struct *work)
@@ -309,7 +309,7 @@ void sas_porte_timer_event(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- sas_deform_port(phy, 1);
+ sas_deform_port(phy, true);
}
void sas_porte_hard_reset(struct work_struct *work)
@@ -317,7 +317,7 @@ void sas_porte_hard_reset(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- sas_deform_port(phy, 1);
+ sas_deform_port(phy, true);
}
/* ---------- SAS port registration ---------- */
@@ -358,8 +358,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
for (i = 0; i < sas_ha->num_phys; i++)
if (sas_ha->sas_phy[i]->port)
- sas_deform_port(sas_ha->sas_phy[i], 0);
-
+ sas_deform_port(sas_ha->sas_phy[i], false);
}
const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2db8d9529b8f..7c4d7bb3a56f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -6280,7 +6280,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
phba->nvmeio_trc_on = 1;
phba->nvmeio_trc_output_idx = 0;
- phba->nvmeio_trc = NULL;
} else {
nvmeio_off:
phba->nvmeio_trc_size = 0;
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 2797aa75a689..aff6c9d5e7c2 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -666,7 +666,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
* Take early refcount for outstanding I/O requests we schedule during
* delete processing for unreg_vpi. Always keep this before
* scsi_remove_host() as we can no longer obtain a reference through
- * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL.
+ * scsi_host_get() after scsi_remove_host as shost is set to SHOST_DEL.
*/
if (!scsi_host_get(shost))
return VPORT_INVAL;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d7d8244dfedc..967af259118e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -10809,8 +10809,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
break;
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_pcie_topology_change_event(ioc, fw_event);
- ioc->current_event = NULL;
- return;
+ break;
}
out:
fw_event_work_put(fw_event);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a39f1da4ce47..a761c0aa5127 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
vfree(dst_addr);
+ if (IS_ERR(ep))
+ return NULL;
return ep;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 0847767d4d43..353cb60e1abe 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2674,8 +2674,10 @@ static int resp_rsup_tmfs(struct scsi_cmnd *scp,
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{ /* Read-Write Error Recovery page for mode_sense */
- unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
- 5, 0, 0xff, 0xff};
+ static const unsigned char err_recov_pg[] = {
+ 0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
+ 5, 0, 0xff, 0xff
+ };
memcpy(p, err_recov_pg, sizeof(err_recov_pg));
if (1 == pcontrol)
@@ -2685,8 +2687,10 @@ static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{ /* Disconnect-Reconnect page for mode_sense */
- unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0};
+ static const unsigned char disconnect_pg[] = {
+ 0x2, 0xe, 128, 128, 0, 10, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+ };
memcpy(p, disconnect_pg, sizeof(disconnect_pg));
if (1 == pcontrol)
@@ -2696,9 +2700,11 @@ static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{ /* Format device page for mode_sense */
- unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0x40, 0, 0, 0};
+ static const unsigned char format_pg[] = {
+ 0x3, 0x16, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0x40, 0, 0, 0
+ };
memcpy(p, format_pg, sizeof(format_pg));
put_unaligned_be16(sdebug_sectors_per, p + 10);
@@ -2716,10 +2722,14 @@ static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{ /* Caching page for mode_sense */
- unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
- unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
- 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
+ static const unsigned char ch_caching_pg[] = {
+ /* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ static const unsigned char d_caching_pg[] = {
+ 0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
+ 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0
+ };
if (SDEBUG_OPT_N_WCE & sdebug_opts)
caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
@@ -2738,8 +2748,10 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Control mode page for mode_sense */
unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
0, 0, 0, 0};
- unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
- 0, 0, 0x2, 0x4b};
+ static const unsigned char d_ctrl_m_pg[] = {
+ 0xa, 10, 2, 0, 0, 0, 0, 0,
+ 0, 0, 0x2, 0x4b
+ };
if (sdebug_dsense)
ctrl_m_pg[2] |= 0x4;
@@ -2794,10 +2806,14 @@ static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Informational Exceptions control mode page for mode_sense */
- unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
- 0, 0, 0x0, 0x0};
- unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
- 0, 0, 0x0, 0x0};
+ static const unsigned char ch_iec_m_pg[] = {
+ /* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
+ 0, 0, 0x0, 0x0
+ };
+ static const unsigned char d_iec_m_pg[] = {
+ 0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
+ 0, 0, 0x0, 0x0
+ };
memcpy(p, iec_m_pg, sizeof(iec_m_pg));
if (1 == pcontrol)
@@ -2809,8 +2825,9 @@ static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{ /* SAS SSP mode page - short format for mode_sense */
- unsigned char sas_sf_m_pg[] = {0x19, 0x6,
- 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
+ static const unsigned char sas_sf_m_pg[] = {
+ 0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
+ };
memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
if (1 == pcontrol)
@@ -2854,9 +2871,10 @@ static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{ /* SAS SSP shared protocol specific port mode subpage */
- unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- };
+ static const unsigned char sas_sha_m_pg[] = {
+ 0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ };
memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
if (1 == pcontrol)
@@ -2923,8 +2941,10 @@ static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
unsigned char dce)
{ /* Compression page for mode_sense (tape) */
- unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 00, 00};
+ static const unsigned char compression_pg[] = {
+ 0x0f, 14, 0x40, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0
+ };
memcpy(p, compression_pg, sizeof(compression_pg));
if (dce)
@@ -3282,9 +3302,10 @@ bad_pcode:
static int resp_temp_l_pg(unsigned char *arr)
{
- unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
- 0x0, 0x1, 0x3, 0x2, 0x0, 65,
- };
+ static const unsigned char temp_l_pg[] = {
+ 0x0, 0x0, 0x3, 0x2, 0x0, 38,
+ 0x0, 0x1, 0x3, 0x2, 0x0, 65,
+ };
memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
return sizeof(temp_l_pg);
@@ -3292,8 +3313,9 @@ static int resp_temp_l_pg(unsigned char *arr)
static int resp_ie_l_pg(unsigned char *arr)
{
- unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
- };
+ static const unsigned char ie_l_pg[] = {
+ 0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
+ };
memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
if (iec_m_pg[2] & 0x4) { /* TEST bit set */
@@ -3305,11 +3327,12 @@ static int resp_ie_l_pg(unsigned char *arr)
static int resp_env_rep_l_spg(unsigned char *arr)
{
- unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
- 0x0, 40, 72, 0xff, 45, 18, 0, 0,
- 0x1, 0x0, 0x23, 0x8,
- 0x0, 55, 72, 35, 55, 45, 0, 0,
- };
+ static const unsigned char env_rep_l_spg[] = {
+ 0x0, 0x0, 0x23, 0x8,
+ 0x0, 40, 72, 0xff, 45, 18, 0, 0,
+ 0x1, 0x0, 0x23, 0x8,
+ 0x0, 55, 72, 35, 55, 45, 0, 0,
+ };
memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
return sizeof(env_rep_l_spg);
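
The scsi_debug hunks above turn the mode-page and log-page templates into static const arrays, so each template lives once in .rodata instead of being rebuilt on the stack on every call. A hedged before/after sketch with a placeholder page:

#include <linux/string.h>

/* Before: the initializer is copied onto the stack on every call. */
static int resp_example_pg_old(unsigned char *p)
{
        unsigned char pg[] = { 0x1, 0xa, 0xc0, 11, 240, 0, 0, 0 };

        memcpy(p, pg, sizeof(pg));
        return sizeof(pg);
}

/* After: one read-only copy, shared by all callers. */
static int resp_example_pg(unsigned char *p)
{
        static const unsigned char pg[] = {
                0x1, 0xa, 0xc0, 11, 240, 0, 0, 0
        };

        memcpy(p, pg, sizeof(pg));
        return sizeof(pg);
}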
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 160c2f74c7e7..3c6e089e80c3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1900,7 +1900,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
return 0;
}
-
+EXPORT_SYMBOL(scsi_scan_host_selected);
static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 169af7d47ce7..15ba493d2138 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -265,7 +265,7 @@ show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
return show_shost_mode(supported_mode, buf);
}
-static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
+static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL);
static ssize_t
show_shost_active_mode(struct device *dev,
@@ -279,7 +279,7 @@ show_shost_active_mode(struct device *dev,
return show_shost_mode(shost->active_mode, buf);
}
-static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
+static DEVICE_ATTR(active_mode, S_IRUGO, show_shost_active_mode, NULL);
static int check_reset_type(const char *str)
{
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index c75a806496d6..743b4c792ceb 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2143,6 +2143,8 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
return 0;
iscsi_remove_conn(iscsi_dev_to_conn(dev));
+ iscsi_put_conn(iscsi_dev_to_conn(dev));
+
return 0;
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 351b028ef893..d69c7c444a31 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -40,6 +40,8 @@
#include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h"
+#include "scsi_priv.h"
+
struct sas_host_attrs {
struct list_head rphy_list;
struct mutex lock;
@@ -1683,32 +1685,66 @@ int scsi_is_sas_rphy(const struct device *dev)
}
EXPORT_SYMBOL(scsi_is_sas_rphy);
-
-/*
- * SCSI scan helper
- */
-
-static int sas_user_scan(struct Scsi_Host *shost, uint channel,
- uint id, u64 lun)
+static void scan_channel_zero(struct Scsi_Host *shost, uint id, u64 lun)
{
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
struct sas_rphy *rphy;
- mutex_lock(&sas_host->lock);
list_for_each_entry(rphy, &sas_host->rphy_list, list) {
if (rphy->identify.device_type != SAS_END_DEVICE ||
rphy->scsi_target_id == -1)
continue;
- if ((channel == SCAN_WILD_CARD || channel == 0) &&
- (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
+ if (id == SCAN_WILD_CARD || id == rphy->scsi_target_id) {
scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id,
lun, SCSI_SCAN_MANUAL);
}
}
- mutex_unlock(&sas_host->lock);
+}
- return 0;
+/*
+ * SCSI scan helper
+ */
+
+static int sas_user_scan(struct Scsi_Host *shost, uint channel,
+ uint id, u64 lun)
+{
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ int res = 0;
+ int i;
+
+ switch (channel) {
+ case 0:
+ mutex_lock(&sas_host->lock);
+ scan_channel_zero(shost, id, lun);
+ mutex_unlock(&sas_host->lock);
+ break;
+
+ case SCAN_WILD_CARD:
+ mutex_lock(&sas_host->lock);
+ scan_channel_zero(shost, id, lun);
+ mutex_unlock(&sas_host->lock);
+
+ for (i = 1; i <= shost->max_channel; i++) {
+ res = scsi_scan_host_selected(shost, i, id, lun,
+ SCSI_SCAN_MANUAL);
+ if (res)
+ goto exit_scan;
+ }
+ break;
+
+ default:
+ if (channel < shost->max_channel) {
+ res = scsi_scan_host_selected(shost, channel, id, lun,
+ SCSI_SCAN_MANUAL);
+ } else {
+ res = -EINVAL;
+ }
+ break;
+ }
+
+exit_scan:
+ return res;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4a68b2ab2804..5b8668accf8e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -4173,7 +4173,9 @@ static void sd_shutdown(struct device *dev)
if ((system_state != SYSTEM_RESTART &&
sdkp->device->manage_system_start_stop) ||
(system_state == SYSTEM_POWER_OFF &&
- sdkp->device->manage_shutdown)) {
+ sdkp->device->manage_shutdown) ||
+ (system_state == SYSTEM_RUNNING &&
+ sdkp->device->manage_runtime_start_stop)) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 710a3a03758b..8df1e8fa86a5 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -321,8 +321,8 @@ static int __init qe_add_gpiochips(void)
gc->direction_input = qe_gpio_dir_in;
gc->direction_output = qe_gpio_dir_out;
gc->get = qe_gpio_get;
- gc->set_rv = qe_gpio_set;
- gc->set_multiple_rv = qe_gpio_set_multiple;
+ gc->set = qe_gpio_set;
+ gc->set_multiple = qe_gpio_set_multiple;
ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
if (ret)
diff --git a/drivers/soc/qcom/ubwc_config.c b/drivers/soc/qcom/ubwc_config.c
index bd0a98aad9f3..15d373bff231 100644
--- a/drivers/soc/qcom/ubwc_config.c
+++ b/drivers/soc/qcom/ubwc_config.c
@@ -12,6 +12,10 @@
#include <linux/soc/qcom/ubwc.h>
+static const struct qcom_ubwc_cfg_data no_ubwc_data = {
+ /* no UBWC, no HBB */
+};
+
static const struct qcom_ubwc_cfg_data msm8937_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_1_0,
@@ -215,12 +219,20 @@ static const struct qcom_ubwc_cfg_data x1e80100_data = {
};
static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
+ { .compatible = "qcom,apq8016", .data = &no_ubwc_data },
+ { .compatible = "qcom,apq8026", .data = &no_ubwc_data },
+ { .compatible = "qcom,apq8074", .data = &no_ubwc_data },
{ .compatible = "qcom,apq8096", .data = &msm8998_data },
- { .compatible = "qcom,msm8917", .data = &msm8937_data },
+ { .compatible = "qcom,msm8226", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8916", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8917", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8937", .data = &msm8937_data },
+ { .compatible = "qcom,msm8929", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8939", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8953", .data = &msm8937_data },
- { .compatible = "qcom,msm8956", .data = &msm8937_data },
- { .compatible = "qcom,msm8976", .data = &msm8937_data },
+ { .compatible = "qcom,msm8956", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8974", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8976", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8996", .data = &msm8998_data },
{ .compatible = "qcom,msm8998", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290", .data = &qcm2290_data, },
@@ -233,7 +245,10 @@ static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
{ .compatible = "qcom,sc7280", .data = &sc7280_data, },
{ .compatible = "qcom,sc8180x", .data = &sc8180x_data, },
{ .compatible = "qcom,sc8280xp", .data = &sc8280xp_data, },
+ { .compatible = "qcom,sda660", .data = &msm8937_data },
+ { .compatible = "qcom,sdm450", .data = &msm8937_data },
{ .compatible = "qcom,sdm630", .data = &msm8937_data },
+ { .compatible = "qcom,sdm632", .data = &msm8937_data },
{ .compatible = "qcom,sdm636", .data = &msm8937_data },
{ .compatible = "qcom,sdm660", .data = &msm8937_data },
{ .compatible = "qcom,sdm670", .data = &sdm670_data, },
@@ -246,6 +261,8 @@ static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
{ .compatible = "qcom,sm6375", .data = &sm6350_data, },
{ .compatible = "qcom,sm7125", .data = &sc7180_data },
{ .compatible = "qcom,sm7150", .data = &sm7150_data, },
+ { .compatible = "qcom,sm7225", .data = &sm6350_data, },
+ { .compatible = "qcom,sm7325", .data = &sc7280_data, },
{ .compatible = "qcom,sm8150", .data = &sm8150_data, },
{ .compatible = "qcom,sm8250", .data = &sm8250_data, },
{ .compatible = "qcom,sm8350", .data = &sm8350_data, },
diff --git a/drivers/soc/renesas/pwc-rzv2m.c b/drivers/soc/renesas/pwc-rzv2m.c
index 4dbcb3d4a90c..6209168b3734 100644
--- a/drivers/soc/renesas/pwc-rzv2m.c
+++ b/drivers/soc/renesas/pwc-rzv2m.c
@@ -64,7 +64,7 @@ static const struct gpio_chip rzv2m_pwc_gc = {
.label = "gpio_rzv2m_pwc",
.owner = THIS_MODULE,
.get = rzv2m_pwc_gpio_get,
- .set_rv = rzv2m_pwc_gpio_set,
+ .set = rzv2m_pwc_gpio_set,
.direction_output = rzv2m_pwc_gpio_direction_output,
.can_sleep = false,
.ngpio = 2,
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 2a5f24ee858c..034a2a535a1e 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1232,7 +1232,7 @@ err:
}
static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
- struct device_node *np, bool off)
+ struct device_node *np)
{
struct device *dev = pg->pmc->dev;
int err;
@@ -1247,22 +1247,6 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
err = reset_control_acquire(pg->reset);
if (err < 0) {
pr_err("failed to acquire resets: %d\n", err);
- goto out;
- }
-
- if (off) {
- err = reset_control_assert(pg->reset);
- } else {
- err = reset_control_deassert(pg->reset);
- if (err < 0)
- goto out;
-
- reset_control_release(pg->reset);
- }
-
-out:
- if (err) {
- reset_control_release(pg->reset);
reset_control_put(pg->reset);
}
@@ -1308,20 +1292,43 @@ static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
goto set_available;
}
- err = tegra_powergate_of_get_resets(pg, np, off);
+ err = tegra_powergate_of_get_resets(pg, np);
if (err < 0) {
dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
goto remove_clks;
}
- if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
- if (off)
- WARN_ON(tegra_powergate_power_up(pg, true));
+ /*
+ * If the power-domain is off, then ensure the resets are asserted.
+ * If the power-domain is on, then power down to ensure that when it
+ * is turned on again, the power-domain, clocks and resets are all in
+ * the expected state.
+ */
+ if (off) {
+ err = reset_control_assert(pg->reset);
+ if (err) {
+ pr_err("failed to assert resets: %d\n", err);
+ goto remove_resets;
+ }
+ } else {
+ err = tegra_powergate_power_down(pg);
+ if (err) {
+ dev_err(dev, "failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
+ goto remove_resets;
+ }
+ }
+ /*
+ * If PM_GENERIC_DOMAINS is not enabled, power on the
+ * domain and skip the genpd registration.
+ */
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ WARN_ON(tegra_powergate_power_up(pg, true));
goto remove_resets;
}
- err = pm_genpd_init(&pg->genpd, NULL, off);
+ err = pm_genpd_init(&pg->genpd, NULL, true);
if (err < 0) {
dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
err);
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 7a671a786197..5fd311ee4107 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -499,6 +499,7 @@ static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_pa
break;
case ACP70_PCI_REV_ID:
case ACP71_PCI_REV_ID:
+ case ACP72_PCI_REV_ID:
frame_fmt_reg = acp70_sdw_dp_reg[p_params->num].frame_fmt_reg;
break;
default:
@@ -551,6 +552,7 @@ static int amd_sdw_transport_params(struct sdw_bus *bus,
break;
case ACP70_PCI_REV_ID:
case ACP71_PCI_REV_ID:
+ case ACP72_PCI_REV_ID:
frame_fmt_reg = acp70_sdw_dp_reg[params->port_num].frame_fmt_reg;
sample_int_reg = acp70_sdw_dp_reg[params->port_num].sample_int_reg;
hctrl_dp0_reg = acp70_sdw_dp_reg[params->port_num].hctrl_dp0_reg;
@@ -614,6 +616,7 @@ static int amd_sdw_port_enable(struct sdw_bus *bus,
break;
case ACP70_PCI_REV_ID:
case ACP71_PCI_REV_ID:
+ case ACP72_PCI_REV_ID:
lane_ctrl_ch_en_reg = acp70_sdw_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
break;
default:
@@ -931,6 +934,9 @@ static void amd_sdw_irq_thread(struct work_struct *work)
status_change_8to11 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
status_change_0to7 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
+ if (!status_change_0to7 && !status_change_8to11)
+ return;
+
dev_dbg(amd_manager->dev, "[SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
amd_manager->instance, status_change_0to7, status_change_8to11);
if (status_change_8to11 & AMD_SDW_WAKE_STAT_MASK)
@@ -1035,6 +1041,7 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
break;
case ACP70_PCI_REV_ID:
case ACP71_PCI_REV_ID:
+ case ACP72_PCI_REV_ID:
amd_manager->num_dout_ports = AMD_ACP70_SDW_MAX_TX_PORTS;
amd_manager->num_din_ports = AMD_ACP70_SDW_MAX_RX_PORTS;
break;
@@ -1074,6 +1081,7 @@ static void amd_sdw_manager_remove(struct platform_device *pdev)
int ret;
pm_runtime_disable(&pdev->dev);
+ cancel_work_sync(&amd_manager->amd_sdw_work);
amd_disable_sdw_interrupts(amd_manager);
sdw_bus_master_delete(&amd_manager->bus);
ret = amd_disable_sdw_manager(amd_manager);
@@ -1178,10 +1186,10 @@ static int __maybe_unused amd_pm_prepare(struct device *dev)
* device is not in runtime suspend state, observed that device alerts are missing
* without pm_prepare on AMD platforms in clockstop mode0.
*/
- if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
- ret = pm_request_resume(dev);
+ if (amd_manager->power_mode_mask) {
+ ret = pm_runtime_resume(dev);
if (ret < 0) {
- dev_err(bus->dev, "pm_request_resume failed: %d\n", ret);
+ dev_err(bus->dev, "pm_runtime_resume failed: %d\n", ret);
return 0;
}
}
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 68db4b67a86f..4fd5cac799c5 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -1753,15 +1753,15 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
/* Update the Slave driver */
if (slave_notify) {
+ if (slave->prop.use_domain_irq && slave->irq)
+ handle_nested_irq(slave->irq);
+
mutex_lock(&slave->sdw_dev_lock);
if (slave->probed) {
struct device *dev = &slave->dev;
struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
- if (slave->prop.use_domain_irq && slave->irq)
- handle_nested_irq(slave->irq);
-
if (drv->ops && drv->ops->interrupt_callback) {
slave_intr.sdca_cascade = sdca_cascade;
slave_intr.control_port = clear;
diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
index 3099ea074f10..230a51489486 100644
--- a/drivers/soundwire/debugfs.c
+++ b/drivers/soundwire/debugfs.c
@@ -291,6 +291,9 @@ static int cmd_go(void *data, u64 value)
finish_t = ktime_get();
+ dev_dbg(&slave->dev, "command completed, num_byte %zu status %d, time %lld ms\n",
+ num_bytes, ret, div_u64(finish_t - start_t, NSEC_PER_MSEC));
+
out:
if (fw)
release_firmware(fw);
@@ -298,9 +301,6 @@ out:
pm_runtime_mark_last_busy(&slave->dev);
pm_runtime_put(&slave->dev);
- dev_dbg(&slave->dev, "command completed, num_byte %zu status %d, time %lld ms\n",
- num_bytes, ret, div_u64(finish_t - start_t, NSEC_PER_MSEC));
-
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(cmd_go_fops, NULL,
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
index 5b31e1f69591..5d08364ad6d1 100644
--- a/drivers/soundwire/intel_ace2x.c
+++ b/drivers/soundwire/intel_ace2x.c
@@ -11,6 +11,7 @@
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
+#include <linux/string_choices.h>
#include <sound/hdaudio.h>
#include <sound/hda-mlink.h>
#include <sound/hda-sdw-bpt.h>
@@ -183,7 +184,7 @@ static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave *
return 0;
dev_err(cdns->dev, "%s: sdw_prepare_%s_dma_buffer failed %d\n",
- __func__, command ? "read" : "write", ret);
+ __func__, str_read_write(command), ret);
ret1 = hda_sdw_bpt_close(cdns->dev->parent, /* PCI device */
sdw->bpt_ctx.bpt_tx_stream, &sdw->bpt_ctx.dmab_tx_bdl,
@@ -245,7 +246,7 @@ static void intel_ace2x_bpt_close_stream(struct sdw_intel *sdw, struct sdw_slave
cdns->bus.bpt_stream = NULL;
}
-#define INTEL_BPT_MSG_BYTE_ALIGNMENT 32
+#define INTEL_BPT_MSG_BYTE_MIN 16
static int intel_ace2x_bpt_send_async(struct sdw_intel *sdw, struct sdw_slave *slave,
struct sdw_bpt_msg *msg)
@@ -253,9 +254,9 @@ static int intel_ace2x_bpt_send_async(struct sdw_intel *sdw, struct sdw_slave *s
struct sdw_cdns *cdns = &sdw->cdns;
int ret;
- if (msg->len % INTEL_BPT_MSG_BYTE_ALIGNMENT) {
- dev_err(cdns->dev, "BPT message length %d is not a multiple of %d bytes\n",
- msg->len, INTEL_BPT_MSG_BYTE_ALIGNMENT);
+ if (msg->len < INTEL_BPT_MSG_BYTE_MIN) {
+ dev_err(cdns->dev, "BPT message length %d is less than the minimum of %d bytes\n",
+ msg->len, INTEL_BPT_MSG_BYTE_MIN);
return -EINVAL;
}
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 10a602d4843a..6df2601fff90 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -65,6 +65,7 @@ static struct wake_capable_part wake_capable_list[] = {
{0x025d, 0x715},
{0x025d, 0x716},
{0x025d, 0x717},
+ {0x025d, 0x721},
{0x025d, 0x722},
};
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index 65afb28ef8fa..c69b78cd0b62 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -451,10 +451,10 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
"mipi-sdw-highPHY-capable");
prop->paging_support = mipi_device_property_read_bool(dev,
- "mipi-sdw-paging-support");
+ "mipi-sdw-paging-supported");
prop->bank_delay_support = mipi_device_property_read_bool(dev,
- "mipi-sdw-bank-delay-support");
+ "mipi-sdw-bank-delay-supported");
device_property_read_u32(dev,
"mipi-sdw-port15-read-behavior", &prop->p15_behave);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 0f45e3404756..bd2b293b44f2 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -1622,9 +1622,9 @@ static int qcom_swrm_probe(struct platform_device *pdev)
if (ret)
goto err_master_add;
- dev_info(dev, "Qualcomm Soundwire controller v%x.%x.%x Registered\n",
- (ctrl->version >> 24) & 0xff, (ctrl->version >> 16) & 0xff,
- ctrl->version & 0xffff);
+ dev_dbg(dev, "Qualcomm Soundwire controller v%x.%x.%x registered\n",
+ (ctrl->version >> 24) & 0xff, (ctrl->version >> 16) & 0xff,
+ ctrl->version & 0xffff);
pm_runtime_set_autosuspend_delay(dev, 3000);
pm_runtime_use_autosuspend(dev);
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index a4bea742b5d9..38c9dbd35606 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1510,7 +1510,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
if (ret < 0) {
dev_err(bus->dev, "Prepare port(s) failed ret = %d\n",
ret);
- return ret;
+ goto restore_params;
}
}
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index b28a840b3b04..14307dd800b7 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -295,7 +295,7 @@ static struct spi_board_info *cs42l43_create_bridge_amp(struct cs42l43_spi *priv
struct spi_board_info *info;
if (spkid >= 0) {
- props = devm_kmalloc(priv->dev, sizeof(*props), GFP_KERNEL);
+ props = devm_kcalloc(priv->dev, 2, sizeof(*props), GFP_KERNEL);
if (!props)
return NULL;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 67d4000c3cef..313e444a34f3 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -330,13 +330,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
}
if (config.speed_hz > perclk_rate / 2) {
- dev_err(fsl_lpspi->dev,
- "per-clk should be at least two times of transfer speed");
- return -EINVAL;
+ div = 2;
+ } else {
+ div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
}
- div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
-
for (prescale = 0; prescale <= prescale_max; prescale++) {
scldiv = div / (1 << prescale) - 2;
if (scldiv >= 0 && scldiv < 256) {
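The change above clamps the divider to 2 when the requested speed exceeds half of the peripheral clock, instead of rejecting the transfer; the prescaler loop then picks the first prescale/SCLDIV pair that fits. A standalone sketch of the same arithmetic, assuming an 8-step prescaler and using illustrative clock rates (not the driver's actual register programming):

#include <stdio.h>

/* Mirrors the divider selection: div is clamped to 2 for very fast requests,
 * then the first prescale whose SCLDIV value fits in 8 bits is chosen. */
static int pick_dividers(unsigned int perclk_hz, unsigned int speed_hz,
			 int *prescale_out, int *scldiv_out)
{
	unsigned int div;

	if (speed_hz > perclk_hz / 2)
		div = 2;
	else
		div = (perclk_hz + speed_hz - 1) / speed_hz;	/* DIV_ROUND_UP */

	for (int prescale = 0; prescale <= 7; prescale++) {
		int scldiv = (int)(div / (1u << prescale)) - 2;

		if (scldiv >= 0 && scldiv < 256) {
			*prescale_out = prescale;
			*scldiv_out = scldiv;
			return 0;
		}
	}
	return -1;	/* requested speed too low for this peripheral clock */
}

int main(void)
{
	int prescale, scldiv;

	/* Example: 80 MHz peripheral clock, 50 MHz requested -> div clamps to 2. */
	if (!pick_dividers(80000000, 50000000, &prescale, &scldiv))
		printf("prescale=%d scldiv=%d\n", prescale, scldiv);
	return 0;
}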
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index d3b7e857b377..064b99204d9a 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -265,6 +265,9 @@ static bool spi_mem_internal_supports_op(struct spi_mem *mem,
*/
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
+ /* Make sure the operation frequency is correct before going further */
+ spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);
+
if (spi_mem_check_op(op))
return false;
@@ -577,6 +580,7 @@ EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq);
* spi_mem_calc_op_duration() - Derives the theoretical length (in ns) of an
* operation. This helps finding the best variant
* among a list of possible choices.
+ * @mem: the SPI memory
* @op: the operation to benchmark
*
* Some chips have per-op frequency limitations, PCBs usually have their own
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
index a8c4eb1cbde1..0ceaad7dba3c 100644
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -210,13 +210,21 @@ static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
struct qpic_ecc *qecc = snandc->qspi->ecc;
- if (section > 1)
- return -ERANGE;
-
- oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
- oobregion->offset = mtd->oobsize - oobregion->length;
+ switch (section) {
+ case 0:
+ oobregion->offset = 0;
+ oobregion->length = qecc->bytes * (qecc->steps - 1) +
+ qecc->bbm_size;
+ return 0;
+ case 1:
+ oobregion->offset = qecc->bytes * (qecc->steps - 1) +
+ qecc->bbm_size +
+ qecc->steps * 4;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ return 0;
+ }
- return 0;
+ return -ERANGE;
}
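The rewritten layout above exposes two OOB sections instead of a single trailing region: section 0 covers the ECC/BBM bytes of all but the last codeword, and section 1 covers whatever remains after the per-step free bytes. A standalone sketch of the resulting offsets, using made-up geometry values purely for illustration:

#include <stdio.h>

struct oobregion { unsigned int offset, length; };

/* Mirrors the two-section split; the parameters are illustrative only. */
static int ecc_region(unsigned int oobsize, unsigned int bytes,
		      unsigned int steps, unsigned int bbm_size,
		      int section, struct oobregion *r)
{
	switch (section) {
	case 0:
		r->offset = 0;
		r->length = bytes * (steps - 1) + bbm_size;
		return 0;
	case 1:
		r->offset = bytes * (steps - 1) + bbm_size + steps * 4;
		r->length = oobsize - r->offset;
		return 0;
	}
	return -1;
}

int main(void)
{
	struct oobregion r;

	/* Example: 4 codewords, 20 ECC bytes per step, 2-byte BBM, 128-byte OOB. */
	for (int s = 0; s < 2; s++) {
		ecc_region(128, 20, 4, 2, s, &r);
		printf("section %d: offset %u length %u\n", s, r.offset, r.length);
	}
	return 0;
}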
static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
@@ -1196,7 +1204,7 @@ static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
- FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
+ FIELD_PREP(CW_PER_PAGE_MASK, 0);
cfg1 = ecc_cfg->cfg1;
ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 49ab4c515156..c07c61dc4938 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -378,7 +378,7 @@ static void spi_st_remove(struct platform_device *pdev)
pinctrl_pm_select_sleep_state(&pdev->dev);
}
-static int __maybe_unused spi_st_runtime_suspend(struct device *dev)
+static int spi_st_runtime_suspend(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct spi_st *spi_st = spi_controller_get_devdata(host);
@@ -391,7 +391,7 @@ static int __maybe_unused spi_st_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused spi_st_runtime_resume(struct device *dev)
+static int spi_st_runtime_resume(struct device *dev)
{
struct spi_controller *host = dev_get_drvdata(dev);
struct spi_st *spi_st = spi_controller_get_devdata(host);
@@ -428,8 +428,8 @@ static int __maybe_unused spi_st_resume(struct device *dev)
}
static const struct dev_pm_ops spi_st_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
- SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
+ RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
};
static const struct of_device_id stm_spi_match[] = {
@@ -441,7 +441,7 @@ MODULE_DEVICE_TABLE(of, stm_spi_match);
static struct platform_driver spi_st_driver = {
.driver = {
.name = "spi-st",
- .pm = pm_sleep_ptr(&spi_st_pm),
+ .pm = pm_ptr(&spi_st_pm),
.of_match_table = of_match_ptr(stm_spi_match),
},
.probe = spi_st_probe,
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 1a40c4866ce1..33b78c537520 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -70,7 +70,7 @@ static int spi_xcomm_gpio_add(struct spi_xcomm *spi_xcomm)
return 0;
spi_xcomm->gc.get_direction = spi_xcomm_gpio_get_direction;
- spi_xcomm->gc.set_rv = spi_xcomm_gpio_set_value;
+ spi_xcomm->gc.set = spi_xcomm_gpio_set_value;
spi_xcomm->gc.can_sleep = 1;
spi_xcomm->gc.base = -1;
spi_xcomm->gc.ngpio = 1;
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index e1f5f0a9c8a2..905657c925bc 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -225,7 +225,7 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
chip->request = ssb_gpio_chipco_request;
chip->free = ssb_gpio_chipco_free;
chip->get = ssb_gpio_chipco_get_value;
- chip->set_rv = ssb_gpio_chipco_set_value;
+ chip->set = ssb_gpio_chipco_set_value;
chip->direction_input = ssb_gpio_chipco_direction_input;
chip->direction_output = ssb_gpio_chipco_direction_output;
#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
@@ -422,7 +422,7 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
chip->label = "ssb_extif_gpio";
chip->owner = THIS_MODULE;
chip->get = ssb_gpio_extif_get_value;
- chip->set_rv = ssb_gpio_extif_set_value;
+ chip->set = ssb_gpio_extif_set_value;
chip->direction_input = ssb_gpio_extif_direction_input;
chip->direction_output = ssb_gpio_extif_direction_output;
#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 1280530c8987..ac62b932e6a4 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -551,7 +551,7 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
gpio->direction_input = gb_gpio_direction_input;
gpio->direction_output = gb_gpio_direction_output;
gpio->get = gb_gpio_get;
- gpio->set_rv = gb_gpio_set;
+ gpio->set = gb_gpio_set;
gpio->set_config = gb_gpio_set_config;
gpio->base = -1; /* Allocate base dynamically */
gpio->ngpio = ggc->line_max + 1;
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 43f47e3aa448..ec7bc6e30228 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -257,11 +257,41 @@ static int iscsi_get_pr_transport_id_len(
return len;
}
-static char *iscsi_parse_pr_out_transport_id(
+static void sas_parse_pr_out_transport_id(char *buf, char *i_str)
+{
+ char hex[17] = {};
+
+ bin2hex(hex, buf + 4, 8);
+ snprintf(i_str, TRANSPORT_IQN_LEN, "naa.%s", hex);
+}
+
+static void srp_parse_pr_out_transport_id(char *buf, char *i_str)
+{
+ char hex[33] = {};
+
+ bin2hex(hex, buf + 8, 16);
+ snprintf(i_str, TRANSPORT_IQN_LEN, "0x%s", hex);
+}
+
+static void fcp_parse_pr_out_transport_id(char *buf, char *i_str)
+{
+ snprintf(i_str, TRANSPORT_IQN_LEN, "%8phC", buf + 8);
+}
+
+static void sbp_parse_pr_out_transport_id(char *buf, char *i_str)
+{
+ char hex[17] = {};
+
+ bin2hex(hex, buf + 8, 8);
+ snprintf(i_str, TRANSPORT_IQN_LEN, "%s", hex);
+}
+
+static bool iscsi_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
char *buf,
u32 *out_tid_len,
- char **port_nexus_ptr)
+ char **port_nexus_ptr,
+ char *i_str)
{
char *p;
int i;
@@ -282,7 +312,7 @@ static char *iscsi_parse_pr_out_transport_id(
if ((format_code != 0x00) && (format_code != 0x40)) {
pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
- return NULL;
+ return false;
}
/*
* If the caller wants the TransportID Length, we set that value for the
@@ -306,7 +336,7 @@ static char *iscsi_parse_pr_out_transport_id(
pr_err("Unable to locate \",i,0x\" separator"
" for Initiator port identifier: %s\n",
&buf[4]);
- return NULL;
+ return false;
}
*p = '\0'; /* Terminate iSCSI Name */
p += 5; /* Skip over ",i,0x" separator */
@@ -339,7 +369,8 @@ static char *iscsi_parse_pr_out_transport_id(
} else
*port_nexus_ptr = NULL;
- return &buf[4];
+ strscpy(i_str, &buf[4], TRANSPORT_IQN_LEN);
+ return true;
}
int target_get_pr_transport_id_len(struct se_node_acl *nacl,
@@ -387,33 +418,35 @@ int target_get_pr_transport_id(struct se_node_acl *nacl,
}
}
-const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
- char *buf, u32 *out_tid_len, char **port_nexus_ptr)
+bool target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+ char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str)
{
- u32 offset;
-
switch (tpg->proto_id) {
case SCSI_PROTOCOL_SAS:
/*
* Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
* for initiator ports using SCSI over SAS Serial SCSI Protocol.
*/
- offset = 4;
+ sas_parse_pr_out_transport_id(buf, i_str);
break;
- case SCSI_PROTOCOL_SBP:
case SCSI_PROTOCOL_SRP:
+ srp_parse_pr_out_transport_id(buf, i_str);
+ break;
case SCSI_PROTOCOL_FCP:
- offset = 8;
+ fcp_parse_pr_out_transport_id(buf, i_str);
+ break;
+ case SCSI_PROTOCOL_SBP:
+ sbp_parse_pr_out_transport_id(buf, i_str);
break;
case SCSI_PROTOCOL_ISCSI:
return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len,
- port_nexus_ptr);
+ port_nexus_ptr, i_str);
default:
pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id);
- return NULL;
+ return false;
}
*port_nexus_ptr = NULL;
*out_tid_len = 24;
- return buf + offset;
+ return true;
}
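With the reworked helpers the initiator string is written into a caller-supplied buffer of TRANSPORT_IQN_LEN bytes and success is signalled by the bool return instead of a pointer into the TransportID buffer. A minimal caller-side sketch (the function name and logging are hypothetical, not part of the patch):

/* Hypothetical caller: shows only the new out-parameter contract. */
static int example_parse_tid(struct se_portal_group *tpg, char *tid_buf)
{
	char i_str[TRANSPORT_IQN_LEN];
	char *iport_ptr = NULL;
	u32 tid_len = 0;

	if (!target_parse_pr_out_transport_id(tpg, tid_buf, &tid_len,
					      &iport_ptr, i_str))
		return -EINVAL;	/* unknown proto_id or malformed TransportID */

	pr_debug("initiator %s, tid_len %u, isid %s\n",
		 i_str, tid_len, iport_ptr ? iport_ptr : "(none)");
	return 0;
}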
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 73564efd11d2..66c292b7d74b 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -64,6 +64,7 @@ static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *nam
pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
+ ib_dev->ibd_exclusive = true;
ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
GFP_KERNEL);
@@ -95,6 +96,7 @@ static int iblock_configure_device(struct se_device *dev)
struct block_device *bd;
struct blk_integrity *bi;
blk_mode_t mode = BLK_OPEN_READ;
+ void *holder = ib_dev;
unsigned int max_write_zeroes_sectors;
int ret;
@@ -109,15 +111,18 @@ static int iblock_configure_device(struct se_device *dev)
goto out;
}
- pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
- ib_dev->ibd_udev_path);
+ pr_debug("IBLOCK: Claiming struct block_device: %s: %d\n",
+ ib_dev->ibd_udev_path, ib_dev->ibd_exclusive);
if (!ib_dev->ibd_readonly)
mode |= BLK_OPEN_WRITE;
else
dev->dev_flags |= DF_READ_ONLY;
- bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
+ if (!ib_dev->ibd_exclusive)
+ holder = NULL;
+
+ bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, holder,
NULL);
if (IS_ERR(bdev_file)) {
ret = PTR_ERR(bdev_file);
@@ -560,13 +565,14 @@ fail:
}
enum {
- Opt_udev_path, Opt_readonly, Opt_force, Opt_err
+ Opt_udev_path, Opt_readonly, Opt_force, Opt_exclusive, Opt_err,
};
static match_table_t tokens = {
{Opt_udev_path, "udev_path=%s"},
{Opt_readonly, "readonly=%d"},
{Opt_force, "force=%d"},
+ {Opt_exclusive, "exclusive=%d"},
{Opt_err, NULL}
};
@@ -576,7 +582,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, token;
+ int ret = 0, token, tmp_exclusive;
unsigned long tmp_readonly;
opts = kstrdup(page, GFP_KERNEL);
@@ -623,6 +629,22 @@ static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
ib_dev->ibd_readonly = tmp_readonly;
pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
break;
+ case Opt_exclusive:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = kstrtoint(arg_p, 0, &tmp_exclusive);
+ kfree(arg_p);
+ if (ret < 0) {
+ pr_err("kstrtoul() failed for exclusive=\n");
+ goto out;
+ }
+ ib_dev->ibd_exclusive = tmp_exclusive;
+ pr_debug("IBLOCK: exclusive: %d\n",
+ ib_dev->ibd_exclusive);
+ break;
case Opt_force:
break;
default:
@@ -647,6 +669,7 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
bl += sprintf(b + bl, " UDEV PATH: %s",
ib_dev->ibd_udev_path);
bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
+ bl += sprintf(b + bl, " exclusive: %d\n", ib_dev->ibd_exclusive);
bl += sprintf(b + bl, " ");
if (bd) {
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 91f6f4280666..e2f28a69a11c 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -34,6 +34,7 @@ struct iblock_dev {
struct block_device *ibd_bd;
struct file *ibd_bdev_file;
bool ibd_readonly;
+ bool ibd_exclusive;
struct iblock_dev_plug *ibd_plug;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 408be26d2e9b..20aab1f50565 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -103,8 +103,8 @@ int target_get_pr_transport_id_len(struct se_node_acl *nacl,
int target_get_pr_transport_id(struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg, int *format_code,
unsigned char *buf);
-const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
- char *buf, u32 *out_tid_len, char **port_nexus_ptr);
+bool target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+ char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str);
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 70905805cb17..83e172c92238 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1478,11 +1478,12 @@ core_scsi3_decode_spec_i_port(
LIST_HEAD(tid_dest_list);
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
unsigned char *buf, *ptr, proto_ident;
- const unsigned char *i_str = NULL;
+ unsigned char i_str[TRANSPORT_IQN_LEN];
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
u32 tpdl, tid_len = 0;
u32 dest_rtpi = 0;
+ bool tid_found;
/*
* Allocate a struct pr_transport_id_holder and setup the
@@ -1571,9 +1572,9 @@ core_scsi3_decode_spec_i_port(
dest_rtpi = tmp_lun->lun_tpg->tpg_rtpi;
iport_ptr = NULL;
- i_str = target_parse_pr_out_transport_id(tmp_tpg,
- ptr, &tid_len, &iport_ptr);
- if (!i_str)
+ tid_found = target_parse_pr_out_transport_id(tmp_tpg,
+ ptr, &tid_len, &iport_ptr, i_str);
+ if (!tid_found)
continue;
/*
* Determine if this SCSI device server requires that
@@ -3153,13 +3154,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
- const unsigned char *initiator_str;
+ unsigned char initiator_str[TRANSPORT_IQN_LEN];
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { };
u32 tid_len, tmp_tid_len;
int new_reg = 0, type, scope, matching_iname;
sense_reason_t ret;
unsigned short rtpi;
unsigned char proto_ident;
+ bool tid_found;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
@@ -3278,9 +3280,9 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
- initiator_str = target_parse_pr_out_transport_id(dest_se_tpg,
- &buf[24], &tmp_tid_len, &iport_ptr);
- if (!initiator_str) {
+ tid_found = target_parse_pr_out_transport_id(dest_se_tpg,
+ &buf[24], &tmp_tid_len, &iport_ptr, initiator_str);
+ if (!tid_found) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
ret = TCM_INVALID_PARAMETER_LIST;
diff --git a/drivers/tty/serial/8250/8250_rsa.c b/drivers/tty/serial/8250/8250_rsa.c
index d34093cc03ad..12a65b79583c 100644
--- a/drivers/tty/serial/8250/8250_rsa.c
+++ b/drivers/tty/serial/8250/8250_rsa.c
@@ -147,7 +147,7 @@ void rsa_enable(struct uart_8250_port *up)
if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
serial_out(up, UART_RSA_FRR, 0);
}
-EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_enable, "8250_base");
+EXPORT_SYMBOL_FOR_MODULES(rsa_enable, "8250_base");
/*
* Attempts to turn off the RSA FIFO and resets the RSA board back to 115kbps compat mode. It is
@@ -179,7 +179,7 @@ void rsa_disable(struct uart_8250_port *up)
up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
uart_port_unlock_irq(&up->port);
}
-EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_disable, "8250_base");
+EXPORT_SYMBOL_FOR_MODULES(rsa_disable, "8250_base");
void rsa_autoconfig(struct uart_8250_port *up)
{
@@ -192,7 +192,7 @@ void rsa_autoconfig(struct uart_8250_port *up)
if (__rsa_enable(up))
up->port.type = PORT_RSA;
}
-EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_autoconfig, "8250_base");
+EXPORT_SYMBOL_FOR_MODULES(rsa_autoconfig, "8250_base");
void rsa_reset(struct uart_8250_port *up)
{
@@ -201,7 +201,7 @@ void rsa_reset(struct uart_8250_port *up)
serial_out(up, UART_RSA_FRR, 0);
}
-EXPORT_SYMBOL_GPL_FOR_MODULES(rsa_reset, "8250_base");
+EXPORT_SYMBOL_FOR_MODULES(rsa_reset, "8250_base");
#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
#ifndef MODULE
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 541c790c0109..ce260e9949c3 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1414,7 +1414,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
s->gpio.direction_input = max310x_gpio_direction_input;
s->gpio.get = max310x_gpio_get;
s->gpio.direction_output= max310x_gpio_direction_output;
- s->gpio.set_rv = max310x_gpio_set;
+ s->gpio.set = max310x_gpio_set;
s->gpio.set_config = max310x_gpio_set_config;
s->gpio.base = -1;
s->gpio.ngpio = devtype->nr * 4;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 5ea8aadb6e69..3f38fba8f6ea 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1425,7 +1425,7 @@ static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s)
s->gpio.direction_input = sc16is7xx_gpio_direction_input;
s->gpio.get = sc16is7xx_gpio_get;
s->gpio.direction_output = sc16is7xx_gpio_direction_output;
- s->gpio.set_rv = sc16is7xx_gpio_set;
+ s->gpio.set = sc16is7xx_gpio_set;
s->gpio.base = -1;
s->gpio.ngpio = s->devtype->nr_gpio;
s->gpio.can_sleep = 1;
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 00948378a719..4bd7d491e3c5 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -5,6 +5,7 @@
#include <linux/string.h>
#include <linux/bitfield.h>
#include <linux/unaligned.h>
+#include <linux/string_choices.h>
#include <ufs/ufs.h>
#include <ufs/unipro.h>
@@ -1516,7 +1517,7 @@ static ssize_t _name##_show(struct device *dev, \
ret = -EINVAL; \
goto out; \
} \
- ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
+ ret = sysfs_emit(buf, "%s\n", str_true_false(flag)); \
out: \
up(&hba->host_sem); \
return ret; \
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index fd8015ed36a4..9a43102b2b21 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -364,6 +364,34 @@ void ufshcd_disable_irq(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
+/**
+ * ufshcd_enable_intr - enable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val | intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
+/**
+ * ufshcd_disable_intr - disable interrupts
+ * @hba: per adapter instance
+ * @intrs: interrupt bits
+ */
+static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
+{
+ u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ u32 new_val = old_val & ~intrs;
+
+ if (new_val != old_val)
+ ufshcd_writel(hba, new_val, REG_INTERRUPT_ENABLE);
+}
+
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
if (!ufshcd_is_wb_allowed(hba))
@@ -1275,7 +1303,7 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
*
* Return: 0 upon success; -EBUSY upon timeout.
*/
-static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
+static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba,
u64 wait_timeout_us)
{
int ret = 0;
@@ -1403,7 +1431,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
- ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
+ ufshcd_wait_for_pending_cmds(hba, timeout_us)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
@@ -2596,6 +2624,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
*/
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
+ unsigned long flags;
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
@@ -2605,6 +2634,10 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -2682,32 +2715,6 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_enable_intr - enable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set |= intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_disable_intr - disable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- set &= ~intrs;
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
* ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request
* descriptor content
* @hba: per adapter instance
@@ -3192,7 +3199,8 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
}
/*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
@@ -3268,7 +3276,6 @@ retry:
}
}
- WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3287,7 +3294,8 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
}
/*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
const u32 tag, int timeout)
@@ -3310,7 +3318,8 @@ static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
* @cmd_type: specifies the type (NOP, Query...)
* @timeout: timeout in milliseconds
*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
@@ -3356,6 +3365,10 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
(*request)->upiu_req.selector = selector;
}
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
@@ -3376,7 +3389,6 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
__func__, opcode, idn, ret, retries);
- WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
@@ -3388,7 +3400,8 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @index: flag index to access
* @flag_res: the flag value after the query request completes
*
- * Return: 0 for success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
@@ -3444,7 +3457,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
ufshcd_dev_man_unlock(hba);
- WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
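Several query helpers now document the same three-way return convention: 0 on success, a positive value when the device itself reported an OCS error, and a negative errno for other failures. A minimal sketch of how a caller might tell the cases apart (the wrapper name and logging are illustrative, not part of the patch):

/* Illustrative wrapper only: separates the three documented outcomes. */
static int example_set_flag(struct ufs_hba *hba, enum flag_idn idn)
{
	int ret;

	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, idn, 0, NULL);
	if (ret > 0) {
		/* The device answered but rejected the request (OCS error). */
		dev_warn(hba->dev, "setting flag %d rejected by device: %d\n",
			 idn, ret);
		return -EIO;
	}
	if (ret < 0) {
		/* Host-side/transport failure (timeout, -EBUSY, ...). */
		dev_err(hba->dev, "setting flag %d failed: %d\n", idn, ret);
		return ret;
	}
	return 0;
}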
@@ -3457,8 +3469,9 @@ out_unlock:
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
- * Return: 0 upon success; < 0 upon failure.
-*/
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
@@ -3506,7 +3519,6 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
ufshcd_dev_man_unlock(hba);
- WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3521,8 +3533,9 @@ out_unlock:
* @attr_val: the attribute value after the query request
* completes
*
- * Return: 0 for success; < 0 upon failure.
-*/
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
u32 *attr_val)
@@ -3544,12 +3557,12 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
dev_err(hba->dev,
"%s: query attribute, idn %d, failed with error %d after %d retries\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
- WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
/*
- * Return: 0 if successful; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,
@@ -3608,7 +3621,6 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
ufshcd_dev_man_unlock(hba);
- WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3625,7 +3637,8 @@ out_unlock:
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
*
- * Return: 0 for success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@@ -3643,7 +3656,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
break;
}
- WARN_ONCE(err > 0, "Incorrect return value %d > 0\n", err);
return err;
}
@@ -3656,7 +3668,8 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return: 0 in case of success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
@@ -3723,7 +3736,6 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
out:
if (is_kmalloc)
kfree(desc_buf);
- WARN_ONCE(ret > 0, "Incorrect return value %d > 0\n", ret);
return ret;
}
@@ -4318,7 +4330,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
unsigned long flags;
u8 status;
int ret;
- bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -4329,15 +4340,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
goto out_unlock;
}
hba->uic_async_done = &uic_async_done;
- if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
- ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
- /*
- * Make sure UIC command completion interrupt is disabled before
- * issuing UIC command.
- */
- ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- reenable_intr = true;
- }
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, cmd);
if (ret) {
@@ -4381,9 +4384,7 @@ out:
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
- if (reenable_intr)
- ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
- if (ret) {
+ if (ret && !hba->pm_op_in_progress) {
ufshcd_set_link_broken(hba);
ufshcd_schedule_eh_work(hba);
}
@@ -4391,6 +4392,14 @@ out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
+ /*
+ * If the h8 exit fails during the runtime resume process, it becomes
+ * stuck and cannot be recovered through the error handler. To fix
+ * this, use link recovery instead of the error handler.
+ */
+ if (ret && hba->pm_op_in_progress)
+ ret = ufshcd_link_recovery(hba);
+
return ret;
}
@@ -4405,28 +4414,17 @@ int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
+ if (uic_cmd->argument1 != UIC_ARG_MIB(PA_PWRMODE) ||
+ uic_cmd->command != UIC_CMD_DME_SET)
+ return ufshcd_send_uic_cmd(hba, uic_cmd);
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
ufshcd_hold(hba);
-
- if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
- uic_cmd->command == UIC_CMD_DME_SET) {
- ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
- goto out;
- }
-
- mutex_lock(&hba->uic_cmd_mutex);
- ufshcd_add_delay_before_dme_cmd(hba);
-
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
- if (!ret)
- ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
-
- mutex_unlock(&hba->uic_cmd_mutex);
-
-out:
+ ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
ufshcd_release(hba);
+
return ret;
}
@@ -4788,7 +4786,8 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
*
* Set fDeviceInit flag and poll until device toggles it.
*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
@@ -5142,7 +5141,8 @@ out:
* not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
* and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
*
- * Return: 0 upon success; < 0 upon failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
@@ -5566,9 +5566,9 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
irqreturn_t retval = IRQ_NONE;
struct uic_command *cmd;
- spin_lock(hba->host->host_lock);
+ guard(spinlock_irqsave)(hba->host->host_lock);
cmd = hba->active_uic_cmd;
- if (WARN_ON_ONCE(!cmd))
+ if (!cmd)
goto unlock;
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
@@ -5593,8 +5593,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
unlock:
- spin_unlock(hba->host->host_lock);
-
return retval;
}
@@ -5876,7 +5874,8 @@ static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
* as the device is allowed to manage its own way of handling background
* operations.
*
- * Return: zero on success, non-zero on failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
@@ -5915,7 +5914,8 @@ out:
* host is idle so that BKOPS are managed effectively without any negative
* impacts.
*
- * Return: zero on success, non-zero on failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
@@ -6065,6 +6065,10 @@ out:
__func__, err);
}
+/*
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
+ */
int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
{
struct utp_upiu_query_v4_0 *upiu_resp;
@@ -6927,7 +6931,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
bool queue_eh_work = false;
irqreturn_t retval = IRQ_NONE;
- spin_lock(hba->host->host_lock);
+ guard(spinlock_irqsave)(hba->host->host_lock);
hba->errors |= UFSHCD_ERROR_MASK & intr_status;
if (hba->errors & INT_FATAL_ERRORS) {
@@ -6986,7 +6990,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
*/
hba->errors = 0;
hba->uic_error = 0;
- spin_unlock(hba->host->host_lock);
+
return retval;
}
@@ -7145,14 +7149,19 @@ static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
struct ufs_hba *hba = __hba;
+ u32 intr_status, enabled_intr_status;
/* Move interrupt handling to thread when MCQ & ESI are not enabled */
if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
return IRQ_WAKE_THREAD;
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+
/* Directly handle interrupts since MCQ ESI handlers do the hard job */
- return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) &
- ufshcd_readl(hba, REG_INTERRUPT_ENABLE));
+ return ufshcd_sl_intr(hba, enabled_intr_status);
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
@@ -7456,7 +7465,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
* @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
* @dir: DMA direction
*
- * Return: zero on success, non-zero on failure.
+ * Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
+ * < 0 if another error occurred.
*/
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
@@ -10523,8 +10533,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = devm_add_action_or_reset(dev, ufshcd_devres_release,
host);
if (err)
- return dev_err_probe(dev, err,
- "failed to add ufshcd dealloc action\n");
+ return err;
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 182f58d0c9db..f902ce08c95a 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -50,6 +50,7 @@ static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
static const struct of_device_id ufs_mtk_of_match[] = {
{ .compatible = "mediatek,mt8183-ufshci" },
+ { .compatible = "mediatek,mt8195-ufshci" },
{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
@@ -96,49 +97,59 @@ static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+ return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
}
static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
+ return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL;
}
static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+ return host->caps & UFS_MTK_CAP_BROKEN_VCC;
}
static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
+ return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO;
}
static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_TX_SKEW_FIX);
+ return host->caps & UFS_MTK_CAP_TX_SKEW_FIX;
}
static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_RTFF_MTCMOS);
+ return host->caps & UFS_MTK_CAP_RTFF_MTCMOS;
}
static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- return (host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM);
+ return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM;
+}
+
+static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+
+ return mclk->ufs_sel_clki &&
+ mclk->ufs_sel_max_clki &&
+ mclk->ufs_sel_min_clki;
}
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
@@ -267,6 +278,13 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
+
+ /* DDR_EN setting */
+ if (host->ip_ver >= IP_VER_MT6989) {
+ ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
+ 0x453000, REG_UFS_MMIO_OPT_CTRL_0);
+ }
+
}
return 0;
@@ -344,7 +362,16 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
+ /*
+ * If the clock-on request times out, assume the clock is off and notify
+ * the TFA to apply the clock-off setting (keep DIFN disabled, release the
+ * resource).
+ * If the clock-off request times out, assume the clock will turn off
+ * eventually and clear ref_clk_enabled directly (keep DIFN disabled, keep
+ * the resource).
+ */
+ if (on)
+ ufs_mtk_ref_clk_notify(false, POST_CHANGE, res);
+ else
+ host->ref_clk_enabled = false;
return -ETIMEDOUT;
@@ -663,6 +690,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
+ if (of_property_read_bool(np, "mediatek,ufs-broken-rtc"))
+ host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
@@ -779,6 +809,91 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
return ret;
}
+static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct blk_mq_tag_set *tag_set = &hba->host->tag_set;
+ struct blk_mq_queue_map *map = &tag_set->map[HCTX_TYPE_DEFAULT];
+ unsigned int nr = map->nr_queues;
+ unsigned int q_index;
+
+ q_index = map->mq_map[cpu];
+ if (q_index >= nr) {
+ dev_err(hba->dev, "hwq index %d exceed %d\n",
+ q_index, nr);
+ return MTK_MCQ_INVALID_IRQ;
+ }
+
+ return host->mcq_intr_info[q_index].irq;
+}
+
+static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
+{
+ unsigned int irq, _cpu;
+ int ret;
+
+ irq = ufs_mtk_mcq_get_irq(hba, cpu);
+ if (irq == MTK_MCQ_INVALID_IRQ) {
+ dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
+ return;
+ }
+
+ /* force migrate irq of cpu0 to cpu3 */
+ _cpu = (cpu == 0) ? 3 : cpu;
+ ret = irq_set_affinity(irq, cpumask_of(_cpu));
+ if (ret) {
+ dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n",
+ irq, _cpu);
+ return;
+ }
+ dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu);
+}
+
+static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver)
+{
+ bool is_legacy = false;
+
+ switch (hw_ip_ver) {
+ case IP_LEGACY_VER_MT6893:
+ case IP_LEGACY_VER_MT6781:
+ /* can add other legacy chipset ID here accordingly */
+ is_legacy = true;
+ break;
+ default:
+ break;
+ }
+ dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
+
+ return is_legacy;
+}
+
+/*
+ * The HW version format changed from 01MMmmmm to 1MMMmmmm starting with
+ * project MT6878. To allow correct version comparison, the version number
+ * is rewritten by software for the following projects.
+ * IP_VER_MT6983 0x00360000 to 0x10360000
+ * IP_VER_MT6897 0x01440000 to 0x10440000
+ * IP_VER_MT6989 0x01450000 to 0x10450000
+ * IP_VER_MT6991 0x01460000 to 0x10460000
+ */
+static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 hw_ip_ver;
+
+ hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+
+ if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) ||
+ ((hw_ip_ver & (0xFF << 24)) == 0)) {
+ hw_ip_ver &= ~(0xFF << 24);
+ hw_ip_ver |= (0x1 << 28);
+ }
+
+ host->ip_ver = hw_ip_ver;
+
+ host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver);
+}
+
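The function above rewrites any version whose top byte is 0x00 or 0x01 so that it compares correctly against the new 1MMMmmmm numbering. A standalone sketch of the same transformation applied to the values listed in its comment (illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the normalisation: old-format values (top byte 0x00 or 0x01)
 * get the top byte cleared and bit 28 set. */
static uint32_t normalize_ip_ver(uint32_t hw_ip_ver)
{
	uint32_t top = hw_ip_ver & (0xFFu << 24);

	if (top == (0x1u << 24) || top == 0) {
		hw_ip_ver &= ~(0xFFu << 24);
		hw_ip_ver |= 0x1u << 28;
	}
	return hw_ip_ver;
}

int main(void)
{
	const uint32_t samples[] = { 0x00360000, 0x01440000, 0x01450000, 0x01460000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08x -> 0x%08x\n", samples[i], normalize_ip_ver(samples[i]));
	return 0;
}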
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -818,8 +933,10 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
- struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki, *clki_tmp;
+ struct device *dev = hba->dev;
+ struct regulator *reg;
+ u32 volt;
/*
* Find private clocks and store them in struct ufs_mtk_clk.
@@ -837,15 +954,57 @@ static void ufs_mtk_init_clocks(struct ufs_hba *hba)
host->mclk.ufs_sel_min_clki = clki;
clk_disable_unprepare(clki->clk);
list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde")) {
+ host->mclk.ufs_fde_clki = clki;
+ } else if (!strcmp(clki->name, "ufs_fde_max_src")) {
+ host->mclk.ufs_fde_max_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_fde_min_src")) {
+ host->mclk.ufs_fde_min_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
}
}
- if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
- !mclk->ufs_sel_min_clki) {
+ list_for_each_entry(clki, head, list) {
+ dev_info(hba->dev, "clk \"%s\" present", clki->name);
+ }
+
+ if (!ufs_mtk_is_clk_scale_ready(hba)) {
hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
dev_info(hba->dev,
"%s: Clk-scaling not ready. Feature disabled.",
__func__);
+ return;
+ }
+
+ /*
+ * Get the vcore regulator by default if the DT provides these settings,
+ * regardless of whether clock scaling is supported (it may be disabled by
+ * the customer).
+ */
+ reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+ if (IS_ERR(reg)) {
+ dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+ PTR_ERR(reg));
+ return;
+ }
+
+ if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
+ &volt)) {
+ dev_info(dev, "failed to get clk-scale-up-vcore-min");
+ return;
+ }
+
+ host->mclk.reg_vcore = reg;
+ host->mclk.vcore_volt = volt;
+
+ /* If default boot is max gear, request vcore */
+ if (reg && volt && host->clk_scale_up) {
+ if (regulator_set_voltage(reg, volt, INT_MAX)) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ }
}
}
@@ -1014,13 +1173,17 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clk scaling*/
hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ host->clk_scale_up = true; /* default is max freq */
/* Set runtime pm delay to replace default */
shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
+
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
- hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+ if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC)
+ hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
+
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
@@ -1050,7 +1213,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
- host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+ ufs_mtk_get_hw_ip_version(hba);
goto out;
@@ -1505,6 +1668,13 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
+ unsigned int cpu;
+
+ if (hba->mcq_enabled) {
+ /* Iterate all cpus to set affinity for mcq irqs */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ ufs_mtk_mcq_set_irq_affinity(hba, cpu);
+ }
if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
@@ -1598,24 +1768,30 @@ static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
hba->vps->ondemand_data.downdifferential = 20;
}
-/**
- * ufs_mtk_clk_scale - Internal clk scaling operation
- *
- * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
- * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
- * Max and min clocks rate of ufs_sel defined in dts should match rate of
- * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
- * This prevent changing rate of pll clock that is shared between modules.
- *
- * @hba: per adapter instance
- * @scale_up: True for scaling up and false for scaling down
- */
-static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki = mclk->ufs_sel_clki;
- int ret = 0;
+ struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki;
+ struct regulator *reg;
+ int volt, ret = 0;
+ bool clk_bind_vcore = false;
+ bool clk_fde_scale = false;
+
+ if (!hba->clk_scaling.is_initialized)
+ return;
+
+ if (!clki || !fde_clki)
+ return;
+
+ reg = host->mclk.reg_vcore;
+ volt = host->mclk.vcore_volt;
+ if (reg && volt != 0)
+ clk_bind_vcore = true;
+
+ if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
+ clk_fde_scale = true;
ret = clk_prepare_enable(clki->clk);
if (ret) {
@@ -1624,21 +1800,109 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
return;
}
+ if (clk_fde_scale) {
+ ret = clk_prepare_enable(fde_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "fde clk_prepare_enable() fail, ret: %d\n", ret);
+ return;
+ }
+ }
+
if (scale_up) {
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, volt, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set vcore to %d\n", volt);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
- clki->curr_freq = clki->max_freq;
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ }
+
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_max_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ }
+ }
} else {
+ if (clk_fde_scale) {
+ ret = clk_set_parent(fde_clki->clk,
+ mclk->ufs_fde_min_clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set fde clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
- clki->curr_freq = clki->min_freq;
- }
+ if (ret) {
+ dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
+ ret);
+ goto out;
+ }
- if (ret) {
- dev_info(hba->dev,
- "Failed to set ufs_sel_clki, ret: %d\n", ret);
+ if (clk_bind_vcore) {
+ ret = regulator_set_voltage(reg, 0, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to MIN\n");
+ }
+ }
}
+out:
clk_disable_unprepare(clki->clk);
+ if (clk_fde_scale)
+ clk_disable_unprepare(fde_clki->clk);
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * The MTK platform supports clk scaling by switching the parent of ufs_sel
+ * (a mux). ufs_sel feeds ufs_ck, which goes directly to the UFS hardware.
+ * The max and min clock rates of ufs_sel defined in the DT should match the
+ * rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
+ * This prevents changing the rate of a PLL clock that is shared between
+ * modules.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+
+ if (host->clk_scale_up == scale_up)
+ goto out;
+
+ if (scale_up)
+ _ufs_mtk_clk_scale(hba, true);
+ else
+ _ufs_mtk_clk_scale(hba, false);
+
+ host->clk_scale_up = scale_up;
+
+ /* Must always be set before clk_set_rate() */
+ if (scale_up)
+ clki->curr_freq = clki->max_freq;
+ else
+ clki->curr_freq = clki->min_freq;
+out:
trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 05d76a6bd772..e46dc5fa209d 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -133,6 +133,8 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_DISABLE_MCQ = 1 << 8,
/* Control MTCMOS with RTFF */
UFS_MTK_CAP_RTFF_MTCMOS = 1 << 9,
+
+ UFS_MTK_CAP_MCQ_BROKEN_RTC = 1 << 10,
};
struct ufs_mtk_crypt_cfg {
@@ -147,6 +149,11 @@ struct ufs_mtk_clk {
struct ufs_clk_info *ufs_sel_clki; /* Mux */
struct ufs_clk_info *ufs_sel_max_clki; /* Max src */
struct ufs_clk_info *ufs_sel_min_clki; /* Min src */
+ struct ufs_clk_info *ufs_fde_clki; /* Mux */
+ struct ufs_clk_info *ufs_fde_max_clki; /* Max src */
+ struct ufs_clk_info *ufs_fde_min_clki; /* Min src */
+ struct regulator *reg_vcore;
+ int vcore_volt;
};
struct ufs_mtk_hw_ver {
@@ -176,9 +183,11 @@ struct ufs_mtk_host {
bool mphy_powered_on;
bool unipro_lpm;
bool ref_clk_enabled;
+ bool clk_scale_up;
u16 ref_clk_ungating_wait_us;
u16 ref_clk_gating_wait_us;
u32 ip_ver;
+ bool legacy_ip_ver;
bool mcq_set_intr;
bool is_mcq_intr_enabled;
@@ -192,4 +201,27 @@ struct ufs_mtk_host {
/* MTK RTT support number */
#define MTK_MAX_NUM_RTT 2
+/* UFSHCI MTK ip version value */
+enum {
+ /* UFSHCI 3.1 */
+ IP_VER_MT6983 = 0x10360000,
+ IP_VER_MT6878 = 0x10420200,
+
+ /* UFSHCI 4.0 */
+ IP_VER_MT6897 = 0x10440000,
+ IP_VER_MT6989 = 0x10450000,
+ IP_VER_MT6899 = 0x10450100,
+ IP_VER_MT6991_A0 = 0x10460000,
+ IP_VER_MT6991_B0 = 0x10470000,
+ IP_VER_MT6993 = 0x10480000,
+
+ IP_VER_NONE = 0xFFFFFFFF
+};
+
+enum ip_ver_legacy {
+ IP_LEGACY_VER_MT6781 = 0x10380000,
+ IP_LEGACY_VER_MT6879 = 0x10360000,
+ IP_LEGACY_VER_MT6893 = 0x20160706
+};
+
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 2a72e7c1d131..9574fdc2bb0f 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -532,6 +532,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
goto out_disable_phy;
}
+ ret = phy_calibrate(phy);
+ if (ret) {
+ dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret);
+ goto out_disable_phy;
+ }
+
ufs_qcom_select_unipro_mode(host);
return 0;
@@ -726,26 +732,17 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
if (status == PRE_CHANGE)
return 0;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
+ if (!ufs_qcom_is_link_active(hba))
ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- /* reset the connected UFS device during power down */
- ufs_qcom_device_reset_ctrl(hba, true);
- } else if (!ufs_qcom_is_link_active(hba)) {
- ufs_qcom_disable_lane_clks(host);
- }
+ /* reset the connected UFS device during power down */
+ if (ufs_qcom_is_link_off(hba) && host->device_reset)
+ ufs_qcom_device_reset_ctrl(hba, true);
return ufs_qcom_ice_suspend(host);
}
@@ -753,26 +750,11 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
int err;
- if (ufs_qcom_is_link_off(hba)) {
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed PHY power on: %d\n",
- __func__, err);
- return err;
- }
-
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
-
- } else if (!ufs_qcom_is_link_active(hba)) {
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
- }
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err)
+ return err;
return ufs_qcom_ice_resume(host);
}
@@ -1151,12 +1133,20 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
+ * Certain clocks come from the PHY, so they need to be managed together
+ * with the controller clocks, which also gives better power savings. Hence
+ * keep the phy_power_off/on calls in ufs_qcom_setup_clocks so that the
+ * PHY's regulators and clocks are turned on/off along with the UFS clocks.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy;
+ int err;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1166,6 +1156,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (!host)
return 0;
+ phy = host->generic_phy;
+
switch (status) {
case PRE_CHANGE:
if (on) {
@@ -1175,10 +1167,22 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
+
+ err = phy_power_off(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power off failed, ret=%d\n", err);
+ return err;
+ }
}
break;
case POST_CHANGE:
if (on) {
+ err = phy_power_on(phy);
+ if (err) {
+ dev_err(hba->dev, "phy power on failed, ret = %d\n", err);
+ return err;
+ }
+
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1894,7 +1898,6 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
return 0;
}
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
struct devfreq_dev_profile *p,
struct devfreq_simple_ondemand_data *d)
@@ -1906,13 +1909,6 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
hba->clk_scaling.suspend_on_no_request = true;
}
-#else
-static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
- struct devfreq_dev_profile *p,
- struct devfreq_simple_ondemand_data *data)
-{
-}
-#endif
/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
@@ -2074,17 +2070,6 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
-{
- for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
- devm_free_irq(q->hba->dev, q->irq, q->hba);
-
- platform_device_msi_free_irqs_all(uqi->hba->dev);
- devm_kfree(uqi->hba->dev, uqi);
-}
-
-DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))
-
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -2099,18 +2084,18 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
- struct ufs_qcom_irq *qi __free(ufs_qcom_irq) =
- devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
- if (!qi)
- return -ENOMEM;
- /* Preset so __free() has a pointer to hba in all error paths */
- qi[0].hba = hba;
-
ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
if (ret) {
- dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
- return ret;
+ dev_warn(hba->dev, "Failed to allocate platform MSI (%d), continuing without ESI\n", ret);
+ return ret; /* The caller treats this failure as ESI being unavailable */
+ }
+
+ struct ufs_qcom_irq *qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+
+ if (!qi) {
+ platform_device_msi_free_irqs_all(hba->dev);
+ return -ENOMEM;
}
for (int idx = 0; idx < nr_irqs; idx++) {
@@ -2121,15 +2106,17 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
IRQF_SHARED, "qcom-mcq-esi", qi + idx);
if (ret) {
- dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
+ dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n",
__func__, qi[idx].irq, ret);
- qi[idx].irq = 0;
+ /* Free previously allocated IRQs */
+ for (int j = 0; j < idx; j++)
+ devm_free_irq(hba->dev, qi[j].irq, qi + j);
+ platform_device_msi_free_irqs_all(hba->dev);
+ devm_kfree(hba->dev, qi);
return ret;
}
}
- retain_and_null_ptr(qi);
-
if (host->hw_ver.major >= 6) {
ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
REG_UFS_CFG3);
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 996387906aa1..b87e03777395 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -22,17 +22,12 @@
#define MAX_SUPP_MAC 64
-struct ufs_host {
- void (*late_init)(struct ufs_hba *hba);
-};
-
enum intel_ufs_dsm_func_id {
INTEL_DSM_FNS = 0,
INTEL_DSM_RESET = 1,
};
struct intel_host {
- struct ufs_host ufs_host;
u32 dsm_fns;
u32 active_ltr;
u32 idle_ltr;
@@ -408,8 +403,14 @@ static int ufs_intel_ehl_init(struct ufs_hba *hba)
return ufs_intel_common_init(hba);
}
-static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
+static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
+ int err;
+
+ hba->nop_out_timeout = 200;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+ err = ufs_intel_common_init(hba);
/* LKF always needs a full reset, so set PM accordingly */
if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
hba->spm_lvl = UFS_PM_LVL_6;
@@ -418,19 +419,6 @@ static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
hba->spm_lvl = UFS_PM_LVL_5;
hba->rpm_lvl = UFS_PM_LVL_5;
}
-}
-
-static int ufs_intel_lkf_init(struct ufs_hba *hba)
-{
- struct ufs_host *ufs_host;
- int err;
-
- hba->nop_out_timeout = 200;
- hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
- hba->caps |= UFSHCD_CAP_CRYPTO;
- err = ufs_intel_common_init(hba);
- ufs_host = ufshcd_get_variant(hba);
- ufs_host->late_init = ufs_intel_lkf_late_init;
return err;
}
@@ -444,6 +432,8 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
+ hba->rpm_lvl = UFS_PM_LVL_2;
+ hba->spm_lvl = UFS_PM_LVL_2;
hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
return ufs_intel_common_init(hba);
}
@@ -574,7 +564,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct ufs_host *ufs_host;
struct ufs_hba *hba;
void __iomem *mmio_base;
int err;
@@ -607,10 +596,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- ufs_host = ufshcd_get_variant(hba);
- if (ufs_host && ufs_host->late_init)
- ufs_host->late_init(hba);
-
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
@@ -645,6 +630,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x4D47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ } /* terminate list */
};
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index e1ec9b38f5b9..d7c2a1a3c271 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -338,7 +338,8 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
schedule_work(&ci->usb_phy->chg_work);
break;
case CI_HDRC_CONTROLLER_PULLUP_EVENT:
- if (ci->role == CI_ROLE_GADGET)
+ if (ci->role == CI_ROLE_GADGET &&
+ ci->gadget.speed == USB_SPEED_HIGH)
imx_usbmisc_pullup(data->usbmisc_data,
ci->gadget.connected);
break;
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 3d20c5e76c6a..b1418885707c 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -1068,15 +1068,24 @@ static void usbmisc_imx7d_pullup(struct imx_usbmisc_data *data, bool on)
unsigned long flags;
u32 val;
+ if (on)
+ return;
+
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
- if (!on) {
- val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK;
- val |= MX7D_USBNC_USB_CTRL2_OPMODE(1);
- val |= MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN;
- } else {
- val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN;
- }
+ val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK;
+ val |= MX7D_USBNC_USB_CTRL2_OPMODE(1);
+ val |= MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN;
+ writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ /* Hold this state for at least one micro-frame so the host sees the disconnect */
+ usleep_range(125, 150);
+
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK;
+ val |= MX7D_USBNC_USB_CTRL2_OPMODE(0);
+ val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN;
writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2);
spin_unlock_irqrestore(&usbmisc->lock, flags);
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 03771bbc6c01..9dd79769cad1 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1636,7 +1636,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
- unsigned long flags;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1654,14 +1653,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
/*
- * Only collect coverage in the softirq context and disable interrupts
- * to avoid scenarios with nested remote coverage collection sections
- * that KCOV does not support.
- * See the comment next to kcov_remote_start_usb_softirq() for details.
+ * This function can be called in task context inside another remote
+ * coverage collection section, but kcov doesn't support that kind of
+ * recursion yet. Only collect coverage in softirq context for now.
*/
- flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
+ kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- kcov_remote_stop_softirq(flags);
+ kcov_remote_stop_softirq();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
@@ -1719,10 +1717,10 @@ static void usb_giveback_urb_bh(struct work_struct *work)
* @urb: urb being returned to the USB device driver.
* @status: completion status code for the URB.
*
- * Context: atomic. The completion callback is invoked in caller's context.
- * For HCDs with HCD_BH flag set, the completion callback is invoked in BH
- * context (except for URBs submitted to the root hub which always complete in
- * caller's context).
+ * Context: atomic. The completion callback is invoked either in a work queue
+ * (BH) context or in the caller's context, depending on whether the HCD_BH
+ * flag is set in the @hcd structure, except that URBs submitted to the
+ * root hub always complete in BH context.
*
* This hands the URB from HCD to its USB device driver, using its
* completion function. The HCD has freed all per-urb resources
@@ -2166,7 +2164,7 @@ static struct urb *request_single_step_set_feature_urb(
urb->complete = usb_ehset_completion;
urb->status = -EINPROGRESS;
urb->actual_length = 0;
- urb->transfer_flags = URB_DIR_IN;
+ urb->transfer_flags = URB_DIR_IN | URB_NO_TRANSFER_DMA_MAP;
usb_get_urb(urb);
atomic_inc(&urb->use_count);
atomic_inc(&urb->dev->urbnum);
@@ -2230,9 +2228,15 @@ int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
/* Complete remaining DATA and STATUS stages using the same URB */
urb->status = -EINPROGRESS;
+ urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
usb_get_urb(urb);
atomic_inc(&urb->use_count);
atomic_inc(&urb->dev->urbnum);
+ if (map_urb_for_dma(hcd, urb, GFP_KERNEL)) {
+ usb_put_urb(urb);
+ goto out1;
+ }
+
retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 0);
if (!retval && !wait_for_completion_timeout(&done,
msecs_to_jiffies(2000))) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ff0ff95d5cca..f5bc53875330 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -371,6 +371,7 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
/* SanDisk Corp. SanDisk 3.2Gen1 */
+ { USB_DEVICE(0x0781, 0x5596), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT },
/* SanDisk Extreme 55AE */
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 54a4ee2b90b7..39c72cb52ce7 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -41,6 +41,7 @@
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
+#define PCI_DEVICE_ID_INTEL_WCL 0x4d7e
#define PCI_DEVICE_ID_INTEL_ADL 0x460e
#define PCI_DEVICE_ID_INTEL_ADL_PCH 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLN 0x465e
@@ -431,6 +432,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGPLP, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, TGPH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, JSP, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, WCL, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, ADL, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, ADL_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, ADLN, &dwc3_pci_intel_swnode) },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 666ac432f52d..b4229aa13f37 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -288,7 +288,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
DWC3_TRBCTL_CONTROL_SETUP, false);
ret = dwc3_ep0_start_trans(dep);
- WARN_ON(ret < 0);
+ if (ret < 0)
+ dev_err(dwc->dev, "ep0 out start transfer failed: %d\n", ret);
+
for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) {
struct dwc3_ep *dwc3_ep;
@@ -1061,7 +1063,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
ret = dwc3_ep0_start_trans(dep);
}
- WARN_ON(ret < 0);
+ if (ret < 0)
+ dev_err(dwc->dev,
+ "ep0 data phase start transfer failed: %d\n", ret);
}
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
@@ -1078,7 +1082,12 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
- WARN_ON(dwc3_ep0_start_control_status(dep));
+ int ret;
+
+ ret = dwc3_ep0_start_control_status(dep);
+ if (ret)
+ dev_err(dwc->dev,
+ "ep0 status phase start transfer failed: %d\n", ret);
}
static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
@@ -1121,7 +1130,10 @@ void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
- WARN_ON_ONCE(ret);
+ if (ret)
+ dev_err_ratelimited(dwc->dev,
+ "ep0 data phase end transfer failed: %d\n", ret);
+
dep->resource_index = 0;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 25db36c63951..554f997eb8c4 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1772,7 +1772,11 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
dep->flags |= DWC3_EP_DELAY_STOP;
return 0;
}
- WARN_ON_ONCE(ret);
+
+ if (ret)
+ dev_err_ratelimited(dep->dwc->dev,
+ "end transfer failed: %d\n", ret);
+
dep->resource_index = 0;
if (!interrupt)
@@ -3777,6 +3781,15 @@ static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event)
{
+ /*
+ * During a device-initiated disconnect, a late xferNotReady event can
+ * be generated after the End Transfer command resets the event filter,
+ * but before the controller is halted. Ignore it to prevent a new
+ * transfer from starting.
+ */
+ if (!dep->dwc->connected)
+ return;
+
dwc3_gadget_endpoint_frame_from_event(dep, event);
/*
@@ -4039,7 +4052,9 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
dep->flags &= ~DWC3_EP_STALL;
ret = dwc3_send_clear_stall_ep_cmd(dep);
- WARN_ON_ONCE(ret);
+ if (ret)
+ dev_err_ratelimited(dwc->dev,
+ "failed to clear STALL on %s\n", dep->name);
}
}
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 2957316fd3d0..1d3085cc9d22 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -502,6 +502,7 @@ struct tegra_xudc {
struct clk_bulk_data *clks;
bool device_mode;
+ bool current_device_mode;
struct work_struct usb_role_sw_work;
struct phy **usb3_phy;
@@ -715,6 +716,8 @@ static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
USB_ROLE_DEVICE);
+
+ xudc->current_device_mode = true;
}
static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
@@ -725,6 +728,8 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
dev_dbg(xudc->dev, "device mode off\n");
+ xudc->current_device_mode = false;
+
connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);
reinit_completion(&xudc->disconnect_complete);
@@ -4044,10 +4049,10 @@ static int __maybe_unused tegra_xudc_resume(struct device *dev)
spin_lock_irqsave(&xudc->lock, flags);
xudc->suspended = false;
+ if (xudc->device_mode != xudc->current_device_mode)
+ schedule_work(&xudc->usb_role_sw_work);
spin_unlock_irqrestore(&xudc->lock, flags);
- schedule_work(&xudc->usb_role_sw_work);
-
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 92bb84f8132a..b3a59ce1b3f4 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -704,8 +704,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
if (!xhci->devs[i])
continue;
- retval = xhci_disable_slot(xhci, i);
- xhci_free_virt_device(xhci, i);
+ retval = xhci_disable_and_free_slot(xhci, i);
if (retval)
xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
i, retval);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 07289333a1e8..81eaad87a3d9 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -865,21 +865,20 @@ free_tts:
* will be manipulated by the configure endpoint, allocate device, or update
* hub functions while this function is removing the TT entries from the list.
*/
-void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev,
+ int slot_id)
{
- struct xhci_virt_device *dev;
int i;
int old_active_eps = 0;
/* Slot ID 0 is reserved */
- if (slot_id == 0 || !xhci->devs[slot_id])
+ if (slot_id == 0 || !dev)
return;
- dev = xhci->devs[slot_id];
-
- xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
- if (!dev)
- return;
+ /* If device ctx array still points to _this_ device, clear it */
+ if (dev->out_ctx &&
+ xhci->dcbaa->dev_context_ptrs[slot_id] == cpu_to_le64(dev->out_ctx->dma))
+ xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
trace_xhci_free_virt_device(dev);
@@ -920,8 +919,9 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
dev->udev->slot_id = 0;
if (dev->rhub_port && dev->rhub_port->slot_id == slot_id)
dev->rhub_port->slot_id = 0;
- kfree(xhci->devs[slot_id]);
- xhci->devs[slot_id] = NULL;
+ if (xhci->devs[slot_id] == dev)
+ xhci->devs[slot_id] = NULL;
+ kfree(dev);
}
/*
@@ -962,7 +962,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i
out:
/* we are now at a leaf device */
xhci_debugfs_remove_slot(xhci, slot_id);
- xhci_free_virt_device(xhci, slot_id);
+ xhci_free_virt_device(xhci, vdev, slot_id);
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
index 620f8f0febb8..86df80399c9f 100644
--- a/drivers/usb/host/xhci-pci-renesas.c
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -47,8 +47,9 @@
#define RENESAS_ROM_ERASE_MAGIC 0x5A65726F
#define RENESAS_ROM_WRITE_MAGIC 0x53524F4D
-#define RENESAS_RETRY 10000
-#define RENESAS_DELAY 10
+#define RENESAS_RETRY 50000 /* 50000 * RENESAS_DELAY ~= 500ms */
+#define RENESAS_CHIP_ERASE_RETRY 500000 /* 500000 * RENESAS_DELAY ~= 5s */
+#define RENESAS_DELAY 10
#define RENESAS_FW_NAME "renesas_usb_fw.mem"
@@ -407,7 +408,7 @@ static void renesas_rom_erase(struct pci_dev *pdev)
/* sleep a bit while ROM is erased */
msleep(20);
- for (i = 0; i < RENESAS_RETRY; i++) {
+ for (i = 0; i < RENESAS_CHIP_ERASE_RETRY; i++) {
retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS,
&status);
status &= RENESAS_ROM_STATUS_ERASE;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ecd757d482c5..4f8f5aab109d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1592,7 +1592,8 @@ static void xhci_handle_cmd_enable_slot(int slot_id, struct xhci_command *comman
command->slot_id = 0;
}
-static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
+static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id,
+ u32 cmd_comp_code)
{
struct xhci_virt_device *virt_dev;
struct xhci_slot_ctx *slot_ctx;
@@ -1607,6 +1608,10 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
/* Delete default control endpoint resources */
xhci_free_device_endpoint_resources(xhci, virt_dev, true);
+ if (cmd_comp_code == COMP_SUCCESS) {
+ xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
+ xhci->devs[slot_id] = NULL;
+ }
}
static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id)
@@ -1856,7 +1861,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code);
break;
case TRB_DISABLE_SLOT:
- xhci_handle_cmd_disable_slot(xhci, slot_id);
+ xhci_handle_cmd_disable_slot(xhci, slot_id, cmd_comp_code);
break;
case TRB_CONFIG_EP:
if (!cmd->completion)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 47151ca527bf..742c23826e17 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -309,6 +309,7 @@ int xhci_enable_interrupter(struct xhci_interrupter *ir)
return -EINVAL;
iman = readl(&ir->ir_set->iman);
+ iman &= ~IMAN_IP;
iman |= IMAN_IE;
writel(iman, &ir->ir_set->iman);
@@ -325,6 +326,7 @@ int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
return -EINVAL;
iman = readl(&ir->ir_set->iman);
+ iman &= ~IMAN_IP;
iman &= ~IMAN_IE;
writel(iman, &ir->ir_set->iman);
@@ -3932,8 +3934,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
* Obtaining a new device slot to inform the xHCI host that
* the USB device has been reset.
*/
- ret = xhci_disable_slot(xhci, udev->slot_id);
- xhci_free_virt_device(xhci, udev->slot_id);
+ ret = xhci_disable_and_free_slot(xhci, udev->slot_id);
if (!ret) {
ret = xhci_alloc_dev(hcd, udev);
if (ret == 1)
@@ -4090,7 +4091,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_disable_slot(xhci, udev->slot_id);
spin_lock_irqsave(&xhci->lock, flags);
- xhci_free_virt_device(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, virt_dev, udev->slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
}
@@ -4139,6 +4140,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
return 0;
}
+int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id)
+{
+ struct xhci_virt_device *vdev = xhci->devs[slot_id];
+ int ret;
+
+ ret = xhci_disable_slot(xhci, slot_id);
+ xhci_free_virt_device(xhci, vdev, slot_id);
+ return ret;
+}
+
/*
* Checks if we have enough host controller resources for the default control
* endpoint.
@@ -4245,8 +4256,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
return 1;
disable_slot:
- xhci_disable_slot(xhci, udev->slot_id);
- xhci_free_virt_device(xhci, udev->slot_id);
+ xhci_disable_and_free_slot(xhci, udev->slot_id);
return 0;
}
@@ -4382,8 +4392,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
mutex_unlock(&xhci->mutex);
- ret = xhci_disable_slot(xhci, udev->slot_id);
- xhci_free_virt_device(xhci, udev->slot_id);
+ ret = xhci_disable_and_free_slot(xhci, udev->slot_id);
if (!ret) {
if (xhci_alloc_dev(hcd, udev) == 1)
xhci_setup_addressable_virt_dev(xhci, udev);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a20f4e7cd43a..85d5b964bf1e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1791,7 +1791,7 @@ void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
-void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
+void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev, int slot_id);
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
@@ -1888,6 +1888,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
+int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index cfa1d68c7919..36b25418b214 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1962,7 +1962,7 @@ static int cp210x_gpio_init(struct usb_serial *serial)
priv->gc.direction_input = cp210x_gpio_direction_input;
priv->gc.direction_output = cp210x_gpio_direction_output;
priv->gc.get = cp210x_gpio_get;
- priv->gc.set_rv = cp210x_gpio_set;
+ priv->gc.set = cp210x_gpio_set;
priv->gc.set_config = cp210x_gpio_set_config;
priv->gc.init_valid_mask = cp210x_gpio_init_valid_mask;
priv->gc.owner = THIS_MODULE;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7737285a84ba..49666c33b41f 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2150,9 +2150,9 @@ static int ftdi_gpio_init(struct usb_serial_port *port)
priv->gc.direction_output = ftdi_gpio_direction_output;
priv->gc.init_valid_mask = ftdi_gpio_init_valid_mask;
priv->gc.get = ftdi_gpio_get;
- priv->gc.set_rv = ftdi_gpio_set;
+ priv->gc.set = ftdi_gpio_set;
priv->gc.get_multiple = ftdi_gpio_get_multiple;
- priv->gc.set_multiple_rv = ftdi_gpio_set_multiple;
+ priv->gc.set_multiple = ftdi_gpio_set_multiple;
priv->gc.owner = THIS_MODULE;
priv->gc.parent = &serial->interface->dev;
priv->gc.base = -1;
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 7dea28c2b8ee..cb5bbb19060e 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -252,7 +252,7 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun,
return USB_STOR_TRANSPORT_ERROR;
}
- residue = bcs->Residue;
+ residue = le32_to_cpu(bcs->Residue);
if (bcs->Tag != us->tag)
return USB_STOR_TRANSPORT_ERROR;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 54f0b1c83317..dfa5276a5a43 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -934,6 +934,13 @@ UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SANE_SENSE ),
+/* Added by Maël GUERIN <mael.guerin@murena.io> */
+UNUSUAL_DEV( 0x0603, 0x8611, 0x0000, 0xffff,
+ "Novatek",
+ "NTK96550-based camera",
+ USB_SC_SCSI, USB_PR_BULK, NULL,
+ US_FL_BULK_IGNORE_TAG ),
+
/*
* Reported by Hanno Boeck <hanno@gmx.de>
* Taken from the Lycoris Kernel
@@ -1494,6 +1501,28 @@ UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_WP_DETECT ),
+/*
+ * Reported by Zenm Chen <zenmchen@gmail.com>
+ * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch
+ * the device into Wi-Fi mode.
+ */
+UNUSUAL_DEV( 0x0bda, 0x1a2b, 0x0000, 0xffff,
+ "Realtek",
+ "DISK",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_DEVICE ),
+
+/*
+ * Reported by Zenm Chen <zenmchen@gmail.com>
+ * Ignore driver CD mode, otherwise usb_modeswitch may fail to switch
+ * the device into Wi-Fi mode.
+ */
+UNUSUAL_DEV( 0x0bda, 0xa192, 0x0000, 0xffff,
+ "Realtek",
+ "DISK",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_DEVICE ),
+
UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
"Maxtor",
"USB to SATA",
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index a4ff2403ddd6..870a71f953f6 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1485,6 +1485,9 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
struct fusb302_chip *chip = dev_id;
unsigned long flags;
+ /* Disable our level triggered IRQ until our irq_work has cleared it */
+ disable_irq_nosync(chip->gpio_int_n_irq);
+
spin_lock_irqsave(&chip->irq_lock, flags);
if (chip->irq_suspended)
chip->irq_while_suspended = true;
@@ -1627,6 +1630,7 @@ static void fusb302_irq_work(struct work_struct *work)
}
done:
mutex_unlock(&chip->lock);
+ enable_irq(chip->gpio_int_n_irq);
}
static int init_gpio(struct fusb302_chip *chip)
@@ -1751,10 +1755,9 @@ static int fusb302_probe(struct i2c_client *client)
goto destroy_workqueue;
}
- ret = devm_request_threaded_irq(dev, chip->gpio_int_n_irq,
- NULL, fusb302_irq_intn,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW,
- "fsc_interrupt_int_n", chip);
+ ret = request_irq(chip->gpio_int_n_irq, fusb302_irq_intn,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "fsc_interrupt_int_n", chip);
if (ret < 0) {
dev_err(dev, "cannot request IRQ for GPIO Int_N, ret=%d", ret);
goto tcpm_unregister_port;
@@ -1779,6 +1782,7 @@ static void fusb302_remove(struct i2c_client *client)
struct fusb302_chip *chip = i2c_get_clientdata(client);
disable_irq_wake(chip->gpio_int_n_irq);
+ free_irq(chip->gpio_int_n_irq, chip);
cancel_work_sync(&chip->irq_work);
cancel_delayed_work_sync(&chip->bc_lvl_handler);
tcpm_unregister_port(chip->tcpm_port);
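For reference, a minimal sketch of the pattern the fusb302 change above adopts: a level-triggered interrupt is masked with disable_irq_nosync() in the hard handler and re-enabled only after the deferred work has serviced and cleared the interrupt source. The names my_chip, my_irq_handler and my_work_fn are illustrative only, not the driver's code.

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>
    #include <linux/kernel.h>

    struct my_chip {
            int irq;
            struct work_struct work;
    };

    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
            struct my_chip *chip = dev_id;

            /* Mask the level-triggered line so it cannot refire immediately */
            disable_irq_nosync(chip->irq);
            schedule_work(&chip->work);
            return IRQ_HANDLED;
    }

    static void my_work_fn(struct work_struct *work)
    {
            struct my_chip *chip = container_of(work, struct my_chip, work);

            /* ... talk to the device (e.g. over I2C) and clear the source ... */

            /* Unmask only once the interrupt source is quiesced */
            enable_irq(chip->irq);
    }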
diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
index 0cdda06592fd..af8da6dc60ae 100644
--- a/drivers/usb/typec/tcpm/maxim_contaminant.c
+++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
@@ -188,6 +188,11 @@ static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *ven
if (ret < 0)
return ret;
+ /* Disable low power mode */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL,
+ FIELD_PREP(CCLPMODESEL,
+ LOW_POWER_MODE_DISABLE));
+
/* Sleep to allow comparators settle */
usleep_range(5000, 6000);
ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC1);
@@ -324,6 +329,39 @@ static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
return 0;
}
+static int max_contaminant_enable_toggling(struct max_tcpci_chip *chip)
+{
+ struct regmap *regmap = chip->data.regmap;
+ int ret;
+
+ /* Disable dry detection if enabled. */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL,
+ FIELD_PREP(CCLPMODESEL,
+ LOW_POWER_MODE_DISABLE));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCONNDRY, 0);
+ if (ret)
+ return ret;
+
+ ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP |
+ FIELD_PREP(TCPC_ROLE_CTRL_CC1,
+ TCPC_ROLE_CTRL_CC_RD) |
+ FIELD_PREP(TCPC_ROLE_CTRL_CC2,
+ TCPC_ROLE_CTRL_CC_RD));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL,
+ TCPC_TCPC_CTRL_EN_LK4CONN_ALRT,
+ TCPC_TCPC_CTRL_EN_LK4CONN_ALRT);
+ if (ret)
+ return ret;
+
+ return max_tcpci_write8(chip, TCPC_COMMAND, TCPC_CMD_LOOK4CONNECTION);
+}
+
bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce,
bool *cc_handled)
{
@@ -340,6 +378,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
if (ret < 0)
return false;
+ if (cc_status & TCPC_CC_STATUS_TOGGLING) {
+ if (chip->contaminant_state == DETECTED)
+ return true;
+ return false;
+ }
+
if (chip->contaminant_state == NOT_DETECTED || chip->contaminant_state == SINK) {
if (!disconnect_while_debounce)
msleep(100);
@@ -372,6 +416,12 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
max_contaminant_enable_dry_detection(chip);
return true;
}
+
+ ret = max_contaminant_enable_toggling(chip);
+ if (ret)
+ dev_err(chip->dev,
+ "Failed to enable toggling, ret=%d",
+ ret);
}
} else if (chip->contaminant_state == DETECTED) {
if (!(cc_status & TCPC_CC_STATUS_TOGGLING)) {
@@ -379,6 +429,14 @@ bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect
if (chip->contaminant_state == DETECTED) {
max_contaminant_enable_dry_detection(chip);
return true;
+ } else {
+ ret = max_contaminant_enable_toggling(chip);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to enable toggling, ret=%d",
+ ret);
+ return true;
+ }
}
}
}
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h
index 76270d5c2838..b33540a42a95 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.h
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.h
@@ -21,6 +21,7 @@
#define CCOVPDIS BIT(6)
#define SBURPCTRL BIT(5)
#define CCLPMODESEL GENMASK(4, 3)
+#define LOW_POWER_MODE_DISABLE 0
#define ULTRA_LOW_POWER_MODE 1
#define CCRPCTRL GENMASK(2, 0)
#define UA_1_SRC 1
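As an aside, a standalone illustration (not driver code) of the GENMASK()/FIELD_PREP() idiom the maxim_contaminant hunks above rely on; the EX_* macros simply mirror the header values added here.

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define EX_CCLPMODESEL                  GENMASK(4, 3)   /* bits 4:3 of the CC control register */
    #define EX_LOW_POWER_MODE_DISABLE       0
    #define EX_ULTRA_LOW_POWER_MODE         1

    static u8 example_lpmode_field(void)
    {
            /*
             * FIELD_PREP() shifts the value into the field selected by the mask:
             * FIELD_PREP(GENMASK(4, 3), 1) == 0x08, FIELD_PREP(GENMASK(4, 3), 0) == 0x00.
             * Passing the same mask to regmap_update_bits() means only bits 4:3
             * of the register get rewritten; the remaining bits stay untouched.
             */
            return FIELD_PREP(EX_CCLPMODESEL, EX_ULTRA_LOW_POWER_MODE);
    }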
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 61424342c096..c7a20278bc3c 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -908,6 +908,9 @@ void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;
+ if (!mres->wq_gc)
+ return;
+
atomic_set(&mres->shutdown, 1);
flush_delayed_work(&mres->gc_dwork_ent);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index cccc49a08a1a..0ed2fc28e1ce 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2491,7 +2491,7 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
}
mvq = &ndev->vqs[idx];
- ndev->needs_teardown = num != mvq->num_ent;
+ ndev->needs_teardown |= num != mvq->num_ent;
mvq->num_ent = num;
}
@@ -3432,15 +3432,17 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
ndev = to_mlx5_vdpa_ndev(mvdev);
+ /* Functions called here should be able to work with
+ * uninitialized resources.
+ */
free_fixed_resources(ndev);
mlx5_vdpa_clean_mrs(mvdev);
mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
- mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
-
if (!is_zero_ether_addr(ndev->config.mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
}
+ mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx);
mlx5_vdpa_free_resources(&ndev->mvdev);
free_irqs(ndev);
kfree(ndev->event_cbs);
@@ -3888,6 +3890,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
mvdev->actual_features =
(device_features & BIT_ULL(VIRTIO_F_VERSION_1));
+ mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
+
ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
if (!ndev->vqs || !ndev->event_cbs) {
@@ -3960,8 +3964,6 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->rqt_size = 1;
}
- mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx);
-
ndev->mvdev.mlx_features = device_features;
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 6a9a37351310..04620bb77203 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -2216,6 +2216,7 @@ static void vduse_exit(void)
cdev_del(&vduse_ctrl_cdev);
unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
class_unregister(&vduse_class);
+ idr_destroy(&vduse_idr);
}
module_exit(vduse_exit);
diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
index 281a8dc3ed49..480cac3a0c27 100644
--- a/drivers/vfio/device_cdev.c
+++ b/drivers/vfio/device_cdev.c
@@ -60,22 +60,50 @@ static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
spin_unlock(&df->kvm_ref_lock);
}
+static int vfio_df_check_token(struct vfio_device *device,
+ const struct vfio_device_bind_iommufd *bind)
+{
+ uuid_t uuid;
+
+ if (!device->ops->match_token_uuid) {
+ if (bind->flags & VFIO_DEVICE_BIND_FLAG_TOKEN)
+ return -EINVAL;
+ return 0;
+ }
+
+ if (!(bind->flags & VFIO_DEVICE_BIND_FLAG_TOKEN))
+ return device->ops->match_token_uuid(device, NULL);
+
+ if (copy_from_user(&uuid, u64_to_user_ptr(bind->token_uuid_ptr),
+ sizeof(uuid)))
+ return -EFAULT;
+ return device->ops->match_token_uuid(device, &uuid);
+}
+
long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
struct vfio_device_bind_iommufd __user *arg)
{
+ const u32 VALID_FLAGS = VFIO_DEVICE_BIND_FLAG_TOKEN;
struct vfio_device *device = df->device;
struct vfio_device_bind_iommufd bind;
unsigned long minsz;
+ u32 user_size;
int ret;
static_assert(__same_type(arg->out_devid, df->devid));
minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);
- if (copy_from_user(&bind, arg, minsz))
- return -EFAULT;
+ ret = get_user(user_size, &arg->argsz);
+ if (ret)
+ return ret;
+ if (user_size < minsz)
+ return -EINVAL;
+ ret = copy_struct_from_user(&bind, minsz, arg, user_size);
+ if (ret)
+ return ret;
- if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
+ if (bind.iommufd < 0 || bind.flags & ~VALID_FLAGS)
return -EINVAL;
/* BIND_IOMMUFD only allowed for cdev fds */
@@ -93,6 +121,10 @@ long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
goto out_unlock;
}
+ ret = vfio_df_check_token(device, &bind);
+ if (ret)
+ goto out_unlock;
+
df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
if (IS_ERR(df->iommufd)) {
ret = PTR_ERR(df->iommufd);
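A brief sketch of the extensible-ioctl copy pattern the vfio_df_ioctl_bind_iommufd() rework above follows: read argsz first, reject anything shorter than the known minimum, then let copy_struct_from_user() reconcile the kernel's and userspace's views of the struct. The struct and function names here are hypothetical.

    #include <linux/uaccess.h>
    #include <linux/stddef.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    struct my_ioctl_arg {
            __u32 argsz;            /* userspace sets this to sizeof() its struct */
            __u32 flags;
            __s32 fd;
            __u32 out_id;
            /* future kernels may append fields here */
    };

    static long handle_my_ioctl(struct my_ioctl_arg __user *uarg)
    {
            unsigned long minsz = offsetofend(struct my_ioctl_arg, out_id);
            struct my_ioctl_arg karg;
            u32 user_size;
            int ret;

            if (get_user(user_size, &uarg->argsz))
                    return -EFAULT;
            if (user_size < minsz)
                    return -EINVAL;         /* too short to hold the required fields */

            /*
             * Copies min(sizeof(karg), user_size) bytes, zero-fills the rest of
             * karg, and returns -E2BIG if a newer userspace passed non-zero
             * trailing bytes the kernel does not understand.
             */
            ret = copy_struct_from_user(&karg, sizeof(karg), uarg, user_size);
            if (ret)
                    return ret;

            return karg.flags ? -EINVAL : 0;        /* no flags supported in this sketch */
    }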
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index c321d442f0da..c376a6279de0 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -192,11 +192,10 @@ static int vfio_df_group_open(struct vfio_device_file *df)
* implies they expected translation to exist
*/
if (!capable(CAP_SYS_RAWIO) ||
- vfio_iommufd_device_has_compat_ioas(device, df->iommufd))
+ vfio_iommufd_device_has_compat_ioas(device, df->iommufd)) {
ret = -EPERM;
- else
- ret = 0;
- goto out_put_kvm;
+ goto out_put_kvm;
+ }
}
ret = vfio_df_open(df);
diff --git a/drivers/vfio/iommufd.c b/drivers/vfio/iommufd.c
index c8c3a2d53f86..a38d262c6028 100644
--- a/drivers/vfio/iommufd.c
+++ b/drivers/vfio/iommufd.c
@@ -25,6 +25,10 @@ int vfio_df_iommufd_bind(struct vfio_device_file *df)
lockdep_assert_held(&vdev->dev_set->lock);
+ /* Returns 0 to permit device opening under noiommu mode */
+ if (vfio_device_is_noiommu(vdev))
+ return 0;
+
return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
}
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 2149f49aeec7..397f5e445136 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1583,6 +1583,7 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 5b919a0b2524..a92b095b90f6 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -1523,8 +1523,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev,
log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size);
max_msg_size = (1ULL << log_max_msg_size);
/* The RQ must hold at least 4 WQEs/messages for successful QP creation */
- if (rq_size < 4 * max_msg_size)
- rq_size = 4 * max_msg_size;
+ if (rq_size < 4ULL * max_msg_size)
+ rq_size = 4ULL * max_msg_size;
memset(tracker, 0, sizeof(*tracker));
tracker->uar = mlx5_get_uars_page(mdev);
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 93f894fe60d2..7ec47e736a8e 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -1372,6 +1372,7 @@ static const struct vfio_device_ops mlx5vf_pci_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
index e5ac39c4cc6b..d95761dcdd58 100644
--- a/drivers/vfio/pci/nvgrace-gpu/main.c
+++ b/drivers/vfio/pci/nvgrace-gpu/main.c
@@ -696,6 +696,7 @@ static const struct vfio_device_ops nvgrace_gpu_pci_ops = {
.mmap = nvgrace_gpu_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
@@ -715,6 +716,7 @@ static const struct vfio_device_ops nvgrace_gpu_pci_core_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
index 76a80ae7087b..f3ccb0008f67 100644
--- a/drivers/vfio/pci/pds/vfio_dev.c
+++ b/drivers/vfio/pci/pds/vfio_dev.c
@@ -201,9 +201,11 @@ static const struct vfio_device_ops pds_vfio_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
};
const struct vfio_device_ops *pds_vfio_ops_info(void)
diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c
index 845ed15b6771..a19b68043eb2 100644
--- a/drivers/vfio/pci/qat/main.c
+++ b/drivers/vfio/pci/qat/main.c
@@ -614,6 +614,7 @@ static const struct vfio_device_ops qat_vf_pci_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
@@ -675,6 +676,8 @@ static const struct pci_device_id qat_vf_vfio_pci_table[] = {
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) },
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) },
+ /* Intel QAT GEN6 6xxx VF device */
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4949) },
{}
};
MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);
@@ -696,5 +699,5 @@ module_pci_driver(qat_vf_vfio_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xin Zeng <xin.zeng@intel.com>");
-MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family");
+MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT device family");
MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 5ba39f7623bb..ac10f14417f2 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -138,6 +138,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 31bdb9110cc0..7dcf5439dedc 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1818,9 +1818,13 @@ void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
}
EXPORT_SYMBOL_GPL(vfio_pci_core_request);
-static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
- bool vf_token, uuid_t *uuid)
+int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
+ const uuid_t *uuid)
+
{
+ struct vfio_pci_core_device *vdev =
+ container_of(core_vdev, struct vfio_pci_core_device, vdev);
+
/*
* There's always some degree of trust or collaboration between SR-IOV
* PF and VFs, even if just that the PF hosts the SR-IOV capability and
@@ -1851,7 +1855,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
bool match;
if (!pf_vdev) {
- if (!vf_token)
+ if (!uuid)
return 0; /* PF is not vfio-pci, no VF token */
pci_info_ratelimited(vdev->pdev,
@@ -1859,7 +1863,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
return -EINVAL;
}
- if (!vf_token) {
+ if (!uuid) {
pci_info_ratelimited(vdev->pdev,
"VF token required to access device\n");
return -EACCES;
@@ -1877,7 +1881,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
} else if (vdev->vf_token) {
mutex_lock(&vdev->vf_token->lock);
if (vdev->vf_token->users) {
- if (!vf_token) {
+ if (!uuid) {
mutex_unlock(&vdev->vf_token->lock);
pci_info_ratelimited(vdev->pdev,
"VF token required to access device\n");
@@ -1890,12 +1894,12 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
"Incorrect VF token provided for device\n");
return -EACCES;
}
- } else if (vf_token) {
+ } else if (uuid) {
uuid_copy(&vdev->vf_token->uuid, uuid);
}
mutex_unlock(&vdev->vf_token->lock);
- } else if (vf_token) {
+ } else if (uuid) {
pci_info_ratelimited(vdev->pdev,
"VF token incorrectly provided, not a PF or VF\n");
return -EINVAL;
@@ -1903,6 +1907,7 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
return 0;
}
+EXPORT_SYMBOL_GPL(vfio_pci_core_match_token_uuid);
#define VF_TOKEN_ARG "vf_token="
@@ -1949,7 +1954,8 @@ int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
}
}
- ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
+ ret = core_vdev->ops->match_token_uuid(core_vdev,
+ vf_token ? &uuid : NULL);
if (ret)
return ret;
@@ -2146,7 +2152,7 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
return -EBUSY;
}
- if (pci_is_root_bus(pdev->bus)) {
+ if (pci_is_root_bus(pdev->bus) || pdev->is_virtfn) {
ret = vfio_assign_device_set(&vdev->vdev, vdev);
} else if (!pci_probe_reset_slot(pdev->slot)) {
ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index ef490a4545f4..988b6919c2c3 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -437,8 +437,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
bool vfio_pci_is_intel_display(struct pci_dev *pdev)
{
- return (pdev->vendor == PCI_VENDOR_ID_INTEL) &&
- ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY);
+ return (pdev->vendor == PCI_VENDOR_ID_INTEL) && pci_is_display(pdev);
}
int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
diff --git a/drivers/vfio/pci/virtio/main.c b/drivers/vfio/pci/virtio/main.c
index 515fe1b9f94d..8084f3e36a9f 100644
--- a/drivers/vfio/pci/virtio/main.c
+++ b/drivers/vfio/pci/virtio/main.c
@@ -94,6 +94,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_lm_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
@@ -114,6 +115,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_tran_lm_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
@@ -134,6 +136,7 @@ static const struct vfio_device_ops virtiovf_vfio_pci_ops = {
.mmap = vfio_pci_core_mmap,
.request = vfio_pci_core_request,
.match = vfio_pci_core_match,
+ .match_token_uuid = vfio_pci_core_match_token_uuid,
.bind_iommufd = vfio_iommufd_physical_bind,
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 1136d7ac6b59..f8d68fe77b41 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -647,6 +647,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
while (npage) {
if (!batch->size) {
+ /*
+ * Large mappings may take a while to repeatedly refill
+ * the batch, so conditionally relinquish the CPU when
+ * needed to avoid stalls.
+ */
+ cond_resched();
+
/* Empty batch, so refill it. */
ret = vaddr_get_pfns(mm, vaddr, npage, dma->prot,
&pfn, batch);
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 1fd261efc582..5046cae05222 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -583,7 +583,8 @@ void vfio_df_close(struct vfio_device_file *df)
lockdep_assert_held(&device->dev_set->lock);
- vfio_assert_device_open(device);
+ if (!vfio_assert_device_open(device))
+ return;
if (device->open_count == 1)
vfio_df_device_last_close(df);
device->open_count--;
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 020d4fbb947c..bc0f38574497 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -95,4 +95,22 @@ config VHOST_CROSS_ENDIAN_LEGACY
If unsure, say "N".
+config VHOST_ENABLE_FORK_OWNER_CONTROL
+ bool "Enable VHOST_ENABLE_FORK_OWNER_CONTROL"
+ default y
+ help
+ This option enables two IOCTLs: VHOST_SET_FORK_FROM_OWNER and
+ VHOST_GET_FORK_FROM_OWNER. These allow userspace applications
+ to modify the vhost worker mode for vhost devices.
+
+ It also exposes the module parameter 'fork_from_owner_default',
+ which allows users to configure the default mode for vhost workers.
+
+ With this option enabled (the default), users can change the worker
+ thread mode as needed. If it is disabled (n), the related IOCTLs and
+ module parameter are unavailable.
+
+ If unsure, say "Y".
+
endif
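A hypothetical userspace sketch of the IOCTLs this help text describes. It assumes, as with most VHOST_* ioctls, that the mode is exchanged as a byte-sized value through a pointer argument; the authoritative definitions live in the matching <linux/vhost.h> uapi header.

    #include <linux/vhost.h>
    #include <sys/ioctl.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Query the current worker mode, then request a new one. */
    static int set_fork_from_owner(int vhost_fd, uint8_t mode)
    {
            uint8_t cur;

            if (ioctl(vhost_fd, VHOST_GET_FORK_FROM_OWNER, &cur) < 0) {
                    perror("VHOST_GET_FORK_FROM_OWNER");
                    return -1;
            }
            printf("current worker mode: %u\n", cur);

            if (ioctl(vhost_fd, VHOST_SET_FORK_FROM_OWNER, &mode) < 0) {
                    perror("VHOST_SET_FORK_FROM_OWNER");
                    return -1;
            }
            return 0;
    }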
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index bfb774c273ea..c6508fe0d5c8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -74,7 +74,8 @@ static const u64 vhost_net_features[VIRTIO_FEATURES_DWORDS] = {
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
(1ULL << VIRTIO_F_ACCESS_PLATFORM) |
- (1ULL << VIRTIO_F_RING_RESET),
+ (1ULL << VIRTIO_F_RING_RESET) |
+ (1ULL << VIRTIO_F_IN_ORDER),
VIRTIO_BIT(VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO) |
VIRTIO_BIT(VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO),
};
@@ -98,6 +99,7 @@ struct vhost_net_ubuf_ref {
atomic_t refcount;
wait_queue_head_t wait;
struct vhost_virtqueue *vq;
+ struct rcu_head rcu;
};
#define VHOST_NET_BATCH 64
@@ -249,9 +251,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
- int r = atomic_sub_return(1, &ubufs->refcount);
+ int r;
+
+ rcu_read_lock();
+ r = atomic_sub_return(1, &ubufs->refcount);
if (unlikely(!r))
wake_up(&ubufs->wait);
+ rcu_read_unlock();
return r;
}
@@ -264,7 +270,7 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
vhost_net_ubuf_put_and_wait(ubufs);
- kfree(ubufs);
+ kfree_rcu(ubufs, rcu);
}
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
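The ubuf refcount hunk above pairs an rcu_read_lock() around the "drop reference, maybe wake_up()" step with kfree_rcu() on the object that embeds the wait queue, so a racing waker can never touch freed memory. A minimal sketch of that shape with illustrative names (my_ref); initialisation of the refcount and wait queue is assumed to happen elsewhere.

    #include <linux/rcupdate.h>
    #include <linux/atomic.h>
    #include <linux/wait.h>
    #include <linux/slab.h>

    struct my_ref {
            atomic_t refcount;
            wait_queue_head_t wait;
            struct rcu_head rcu;
    };

    static void my_ref_put(struct my_ref *r)
    {
            rcu_read_lock();
            /* The wake_up() below stays safe: freeing is deferred past this section */
            if (atomic_dec_and_test(&r->refcount))
                    wake_up(&r->wait);
            rcu_read_unlock();
    }

    static void my_ref_put_wait_and_free(struct my_ref *r)
    {
            my_ref_put(r);
            wait_event(r->wait, !atomic_read(&r->refcount));
            kfree_rcu(r, rcu);      /* actual kfree happens after all RCU readers finish */
    }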
@@ -376,7 +382,8 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net,
while (j) {
add = min(UIO_MAXIOV - nvq->done_idx, j);
vhost_add_used_and_signal_n(vq->dev, vq,
- &vq->heads[nvq->done_idx], add);
+ &vq->heads[nvq->done_idx],
+ NULL, add);
nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
j -= add;
}
@@ -451,7 +458,8 @@ static int vhost_net_enable_vq(struct vhost_net *n,
return vhost_poll_start(poll, sock->file);
}
-static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
+static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq,
+ unsigned int count)
{
struct vhost_virtqueue *vq = &nvq->vq;
struct vhost_dev *dev = vq->dev;
@@ -459,7 +467,8 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
if (!nvq->done_idx)
return;
- vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+ vhost_add_used_and_signal_n(dev, vq, vq->heads,
+ vq->nheads, count);
nvq->done_idx = 0;
}
@@ -468,6 +477,8 @@ static void vhost_tx_batch(struct vhost_net *net,
struct socket *sock,
struct msghdr *msghdr)
{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
struct tun_msg_ctl ctl = {
.type = TUN_MSG_PTR,
.num = nvq->batched_xdp,
@@ -475,6 +486,11 @@ static void vhost_tx_batch(struct vhost_net *net,
};
int i, err;
+ if (in_order) {
+ vq->heads[0].len = 0;
+ vq->nheads[0] = nvq->done_idx;
+ }
+
if (nvq->batched_xdp == 0)
goto signal_used;
@@ -496,7 +512,7 @@ static void vhost_tx_batch(struct vhost_net *net,
}
signal_used:
- vhost_net_signal_used(nvq);
+ vhost_net_signal_used(nvq, in_order ? 1 : nvq->done_idx);
nvq->batched_xdp = 0;
}
@@ -750,6 +766,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
bool busyloop_intr;
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
do {
busyloop_intr = false;
@@ -786,11 +803,13 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
break;
}
- /* We can't build XDP buff, go for single
- * packet path but let's flush batched
- * packets.
- */
- vhost_tx_batch(net, nvq, sock, &msg);
+ if (nvq->batched_xdp) {
+ /* We can't build XDP buff, go for single
+ * packet path but let's flush batched
+ * packets.
+ */
+ vhost_tx_batch(net, nvq, sock, &msg);
+ }
msg.msg_control = NULL;
} else {
if (tx_can_batch(vq, total_len))
@@ -811,8 +830,12 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
pr_debug("Truncated TX packet: len %d != %zd\n",
err, len);
done:
- vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
- vq->heads[nvq->done_idx].len = 0;
+ if (in_order) {
+ vq->heads[0].id = cpu_to_vhost32(vq, head);
+ } else {
+ vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
+ vq->heads[nvq->done_idx].len = 0;
+ }
++nvq->done_idx;
} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
@@ -991,7 +1014,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
}
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
- bool *busyloop_intr)
+ bool *busyloop_intr, unsigned int count)
{
struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
@@ -1001,7 +1024,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
if (!len && rvq->busyloop_timeout) {
/* Flush batched heads first */
- vhost_net_signal_used(rnvq);
+ vhost_net_signal_used(rnvq, count);
/* Both tx vq and rx socket were polled here */
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
@@ -1013,7 +1036,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
/* This is a multi-buffer version of vhost_get_desc, that works if
* vq has read descriptors only.
- * @vq - the relevant virtqueue
+ * @nvq - the relevant vhost_net virtqueue
* @datalen - data length we'll be reading
* @iovcount - returned count of io vectors we fill
* @log - vhost log
@@ -1021,14 +1044,17 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
* @quota - headcount quota, 1 for big buffer
* returns number of buffer heads allocated, negative on error
*/
-static int get_rx_bufs(struct vhost_virtqueue *vq,
+static int get_rx_bufs(struct vhost_net_virtqueue *nvq,
struct vring_used_elem *heads,
+ u16 *nheads,
int datalen,
unsigned *iovcount,
struct vhost_log *log,
unsigned *log_num,
unsigned int quota)
{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
unsigned int out, in;
int seg = 0;
int headcount = 0;
@@ -1065,14 +1091,16 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
nlogs += *log_num;
log += *log_num;
}
- heads[headcount].id = cpu_to_vhost32(vq, d);
len = iov_length(vq->iov + seg, in);
- heads[headcount].len = cpu_to_vhost32(vq, len);
- datalen -= len;
+ if (!in_order) {
+ heads[headcount].id = cpu_to_vhost32(vq, d);
+ heads[headcount].len = cpu_to_vhost32(vq, len);
+ }
++headcount;
+ datalen -= len;
seg += in;
}
- heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
+
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
@@ -1082,6 +1110,15 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
r = UIO_MAXIOV + 1;
goto err;
}
+
+ if (!in_order)
+ heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
+ else {
+ heads[0].len = cpu_to_vhost32(vq, len + datalen);
+ heads[0].id = cpu_to_vhost32(vq, d);
+ nheads[0] = headcount;
+ }
+
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
@@ -1094,6 +1131,8 @@ static void handle_rx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_virtqueue *vq = &nvq->vq;
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+ unsigned int count = 0;
unsigned in, log;
struct vhost_log *vq_log;
struct msghdr msg = {
@@ -1141,12 +1180,13 @@ static void handle_rx(struct vhost_net *net)
do {
sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
- &busyloop_intr);
+ &busyloop_intr, count);
if (!sock_len)
break;
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
- headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
+ headcount = get_rx_bufs(nvq, vq->heads + count,
+ vq->nheads + count,
vhost_len, &in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
@@ -1222,8 +1262,11 @@ static void handle_rx(struct vhost_net *net)
goto out;
}
nvq->done_idx += headcount;
- if (nvq->done_idx > VHOST_NET_BATCH)
- vhost_net_signal_used(nvq);
+ count += in_order ? 1 : headcount;
+ if (nvq->done_idx > VHOST_NET_BATCH) {
+ vhost_net_signal_used(nvq, count);
+ count = 0;
+ }
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len,
vq->iov, in);
@@ -1235,7 +1278,7 @@ static void handle_rx(struct vhost_net *net)
else if (!sock_len)
vhost_net_enable_vq(net, vq);
out:
- vhost_net_signal_used(nvq);
+ vhost_net_signal_used(nvq, count);
mutex_unlock(&vq->mutex);
}
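
/*
 * Editorial aside, not part of the patch: a rough standalone sketch of
 * the bookkeeping the net.c hunks above introduce. With VIRTIO_F_IN_ORDER
 * only one used element is produced per completed buffer chain, and
 * vq->nheads[] records how many descriptors that element covers, so
 * "count" counts used entries rather than descriptor heads. The demo_*
 * names are invented for illustration; this is not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

/* One completed packet consumed 'headcount' descriptors. */
static unsigned int demo_account(bool in_order, unsigned int count,
				 unsigned short *nheads, unsigned int headcount)
{
	if (in_order) {
		nheads[count] = headcount;	/* one used entry per chain */
		return count + 1;
	}
	return count + headcount;		/* one used entry per head */
}

int main(void)
{
	unsigned short nheads[8] = { 0 };
	unsigned int count = 0;

	count = demo_account(true, count, nheads, 3);
	count = demo_account(true, count, nheads, 1);
	printf("in-order: %u used entries covering %u and %u descriptors\n",
	       count, nheads[0], nheads[1]);
	return 0;
}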
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c12a0d4e6386..abf51332a5c5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -71,7 +71,7 @@ static int vhost_scsi_set_inline_sg_cnt(const char *buf,
if (ret)
return ret;
- if (ret > VHOST_SCSI_PREALLOC_SGLS) {
+ if (cnt > VHOST_SCSI_PREALLOC_SGLS) {
pr_err("Max inline_sg_cnt is %u\n", VHOST_SCSI_PREALLOC_SGLS);
return -EINVAL;
}
@@ -152,7 +152,7 @@ struct vhost_scsi_nexus {
struct vhost_scsi_tpg {
/* Vhost port target portal group tag for TCM */
u16 tport_tpgt;
- /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
+ /* Used to track number of TPG Port/Lun Links wrt to explicit I_T Nexus shutdown */
int tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
int tv_tpg_vhost_count;
@@ -311,12 +311,12 @@ static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
mutex_lock(&vq->mutex);
- /* store old infight */
+ /* store old inflight */
idx = vs->vqs[i].inflight_idx;
if (old_inflight)
old_inflight[i] = &vs->vqs[i].inflights[idx];
- /* setup new infight */
+ /* setup new inflight */
vs->vqs[i].inflight_idx = idx ^ 1;
new_inflight = &vs->vqs[i].inflights[idx ^ 1];
kref_init(&new_inflight->kref);
@@ -1226,10 +1226,8 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
/* validated at handler entry */
vs_tpg = vhost_vq_get_backend(vq);
tpg = READ_ONCE(vs_tpg[*vc->target]);
- if (unlikely(!tpg)) {
- vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
+ if (unlikely(!tpg))
goto out;
- }
}
if (tpgp)
@@ -1249,7 +1247,7 @@ vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs,
if (!in_iovs_cnt)
return 0;
/*
- * Initiator's normally just put the virtio_scsi_cmd_resp in the first
+ * Initiators normally just put the virtio_scsi_cmd_resp in the first
* iov, but just in case they wedged in some data with it we check for
* greater than or equal to the response struct.
*/
@@ -1457,7 +1455,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
cmd = vhost_scsi_get_cmd(vq, tag);
if (IS_ERR(cmd)) {
ret = PTR_ERR(cmd);
- vq_err(vq, "vhost_scsi_get_tag failed %dd\n", ret);
+ vq_err(vq, "vhost_scsi_get_tag failed %d\n", ret);
goto err;
}
cmd->tvc_vq = vq;
@@ -2609,7 +2607,7 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
return -ENOMEM;
}
/*
- * Since we are running in 'demo mode' this call with generate a
+ * Since we are running in 'demo mode' this call will generate a
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
@@ -2915,7 +2913,7 @@ static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
- "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
+ " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine);
}
@@ -2983,13 +2981,13 @@ out_vhost_scsi_deregister:
vhost_scsi_deregister();
out:
return ret;
-};
+}
static void vhost_scsi_exit(void)
{
target_unregister_template(&vhost_scsi_ops);
vhost_scsi_deregister();
-};
+}
MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
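
/*
 * Editorial aside on the first scsi.c hunk: the fix range-checks the
 * parsed value (cnt) instead of re-testing the parse function's return
 * code. A minimal sketch of that pattern follows; the hunk does not show
 * the parsing call itself, so the kstrtouint() use and the demo_* names
 * are assumptions.
 */
#include <linux/kstrtox.h>
#include <linux/errno.h>

static int demo_set_limit(const char *buf, unsigned int max, unsigned int *out)
{
	unsigned int cnt;
	int ret = kstrtouint(buf, 10, &cnt);

	if (ret)
		return ret;
	/* Range-check the parsed value, not 'ret'. */
	if (cnt > max)
		return -EINVAL;
	*out = cnt;
	return 0;
}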
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1094256a943c..8570fdf2e14a 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
+#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
@@ -41,6 +42,13 @@ static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
"Maximum number of iotlb entries. (default: 2048)");
+static bool fork_from_owner_default = VHOST_FORK_OWNER_TASK;
+
+#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL
+module_param(fork_from_owner_default, bool, 0444);
+MODULE_PARM_DESC(fork_from_owner_default,
+ "Set task mode as the default(default: Y)");
+#endif
enum {
VHOST_MEMORY_F_LOG = 0x1,
@@ -242,7 +250,7 @@ static void vhost_worker_queue(struct vhost_worker *worker,
* test_and_set_bit() implies a memory barrier.
*/
llist_add(&work->node, &worker->work_list);
- vhost_task_wake(worker->vtsk);
+ worker->ops->wakeup(worker);
}
}
@@ -364,6 +372,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->avail = NULL;
vq->used = NULL;
vq->last_avail_idx = 0;
+ vq->next_avail_head = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
vq->signalled_used = 0;
@@ -388,6 +397,44 @@ static void vhost_vq_reset(struct vhost_dev *dev,
__vhost_vq_meta_reset(vq);
}
+static int vhost_run_work_kthread_list(void *data)
+{
+ struct vhost_worker *worker = data;
+ struct vhost_work *work, *work_next;
+ struct vhost_dev *dev = worker->dev;
+ struct llist_node *node;
+
+ kthread_use_mm(dev->mm);
+
+ for (;;) {
+ /* mb paired w/ kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ break;
+ }
+ node = llist_del_all(&worker->work_list);
+ if (!node)
+ schedule();
+
+ node = llist_reverse_order(node);
+ /* make sure flag is seen after deletion */
+ smp_wmb();
+ llist_for_each_entry_safe(work, work_next, node, node) {
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
+ __set_current_state(TASK_RUNNING);
+ kcov_remote_start_common(worker->kcov_handle);
+ work->fn(work);
+ kcov_remote_stop();
+ cond_resched();
+ }
+ }
+ kthread_unuse_mm(dev->mm);
+
+ return 0;
+}
+
static bool vhost_run_work_list(void *data)
{
struct vhost_worker *worker = data;
@@ -455,6 +502,8 @@ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
vq->log = NULL;
kfree(vq->heads);
vq->heads = NULL;
+ kfree(vq->nheads);
+ vq->nheads = NULL;
}
/* Helper to allocate iovec buffers for all vqs. */
@@ -472,7 +521,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
GFP_KERNEL);
vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
GFP_KERNEL);
- if (!vq->indirect || !vq->log || !vq->heads)
+ vq->nheads = kmalloc_array(dev->iov_limit, sizeof(*vq->nheads),
+ GFP_KERNEL);
+ if (!vq->indirect || !vq->log || !vq->heads || !vq->nheads)
goto err_nomem;
}
return 0;
@@ -552,6 +603,7 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->byte_weight = byte_weight;
dev->use_worker = use_worker;
dev->msg_handler = msg_handler;
+ dev->fork_owner = fork_from_owner_default;
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);
INIT_LIST_HEAD(&dev->pending_list);
@@ -563,6 +615,7 @@ void vhost_dev_init(struct vhost_dev *dev,
vq->log = NULL;
vq->indirect = NULL;
vq->heads = NULL;
+ vq->nheads = NULL;
vq->dev = dev;
mutex_init(&vq->mutex);
vhost_vq_reset(dev, vq);
@@ -581,6 +634,46 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
+struct vhost_attach_cgroups_struct {
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+ struct vhost_attach_cgroups_struct *s;
+
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+ s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_task_to_cgroups(struct vhost_worker *worker)
+{
+ struct vhost_attach_cgroups_struct attach;
+ int saved_cnt;
+
+ attach.owner = current;
+
+ vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_worker_queue(worker, &attach.work);
+
+ mutex_lock(&worker->mutex);
+
+ /*
+ * Temporarily raise attachment_cnt to INT_MAX so the
+ * attachment_cnt check in __vhost_worker_flush does not
+ * skip the flush.
+ */
+ saved_cnt = worker->attachment_cnt;
+ worker->attachment_cnt = INT_MAX;
+ __vhost_worker_flush(worker);
+ worker->attachment_cnt = saved_cnt;
+
+ mutex_unlock(&worker->mutex);
+
+ return attach.ret;
+}
+
/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
@@ -594,10 +687,10 @@ static void vhost_attach_mm(struct vhost_dev *dev)
if (dev->use_worker) {
dev->mm = get_task_mm(current);
} else {
- /* vDPA device does not use worker thead, so there's
- * no need to hold the address space for mm. This help
+ /* vDPA device does not use worker thread, so there's
+ * no need to hold the address space for mm. This helps
* to avoid deadlock in the case of mmap() which may
- * held the refcnt of the file and depends on release
+ * hold the refcnt of the file and depends on release
* method to remove vma.
*/
dev->mm = current->mm;
@@ -626,7 +719,7 @@ static void vhost_worker_destroy(struct vhost_dev *dev,
WARN_ON(!llist_empty(&worker->work_list));
xa_erase(&dev->worker_xa, worker->id);
- vhost_task_stop(worker->vtsk);
+ worker->ops->stop(worker);
kfree(worker);
}
@@ -649,42 +742,115 @@ static void vhost_workers_free(struct vhost_dev *dev)
xa_destroy(&dev->worker_xa);
}
+static void vhost_task_wakeup(struct vhost_worker *worker)
+{
+ return vhost_task_wake(worker->vtsk);
+}
+
+static void vhost_kthread_wakeup(struct vhost_worker *worker)
+{
+ wake_up_process(worker->kthread_task);
+}
+
+static void vhost_task_do_stop(struct vhost_worker *worker)
+{
+ return vhost_task_stop(worker->vtsk);
+}
+
+static void vhost_kthread_do_stop(struct vhost_worker *worker)
+{
+ kthread_stop(worker->kthread_task);
+}
+
+static int vhost_task_worker_create(struct vhost_worker *worker,
+ struct vhost_dev *dev, const char *name)
+{
+ struct vhost_task *vtsk;
+ u32 id;
+ int ret;
+
+ vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
+ worker, name);
+ if (IS_ERR(vtsk))
+ return PTR_ERR(vtsk);
+
+ worker->vtsk = vtsk;
+ vhost_task_start(vtsk);
+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ if (ret < 0) {
+ vhost_task_do_stop(worker);
+ return ret;
+ }
+ worker->id = id;
+ return 0;
+}
+
+static int vhost_kthread_worker_create(struct vhost_worker *worker,
+ struct vhost_dev *dev, const char *name)
+{
+ struct task_struct *task;
+ u32 id;
+ int ret;
+
+ task = kthread_create(vhost_run_work_kthread_list, worker, "%s", name);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ worker->kthread_task = task;
+ wake_up_process(task);
+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ if (ret < 0)
+ goto stop_worker;
+
+ ret = vhost_attach_task_to_cgroups(worker);
+ if (ret)
+ goto stop_worker;
+
+ worker->id = id;
+ return 0;
+
+stop_worker:
+ vhost_kthread_do_stop(worker);
+ return ret;
+}
+
+static const struct vhost_worker_ops kthread_ops = {
+ .create = vhost_kthread_worker_create,
+ .stop = vhost_kthread_do_stop,
+ .wakeup = vhost_kthread_wakeup,
+};
+
+static const struct vhost_worker_ops vhost_task_ops = {
+ .create = vhost_task_worker_create,
+ .stop = vhost_task_do_stop,
+ .wakeup = vhost_task_wakeup,
+};
+
static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
struct vhost_worker *worker;
- struct vhost_task *vtsk;
char name[TASK_COMM_LEN];
int ret;
- u32 id;
+ const struct vhost_worker_ops *ops = dev->fork_owner ? &vhost_task_ops :
+ &kthread_ops;
worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
if (!worker)
return NULL;
worker->dev = dev;
+ worker->ops = ops;
snprintf(name, sizeof(name), "vhost-%d", current->pid);
- vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
- worker, name);
- if (IS_ERR(vtsk))
- goto free_worker;
-
mutex_init(&worker->mutex);
init_llist_head(&worker->work_list);
worker->kcov_handle = kcov_common_handle();
- worker->vtsk = vtsk;
-
- vhost_task_start(vtsk);
-
- ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ ret = ops->create(worker, dev, name);
if (ret < 0)
- goto stop_worker;
- worker->id = id;
+ goto free_worker;
return worker;
-stop_worker:
- vhost_task_stop(vtsk);
free_worker:
kfree(worker);
return NULL;
@@ -731,7 +897,7 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
* We don't want to call synchronize_rcu for every vq during setup
* because it will slow down VM startup. If we haven't done
* VHOST_SET_VRING_KICK and not done the driver specific
- * SET_ENDPOINT/RUNNUNG then we can skip the sync since there will
+ * SET_ENDPOINT/RUNNING then we can skip the sync since there will
* not be any works queued for scsi and net.
*/
mutex_lock(&vq->mutex);
@@ -865,6 +1031,14 @@ long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
switch (ioctl) {
/* dev worker ioctls */
case VHOST_NEW_WORKER:
+ /*
+ * vhost_tasks will account for worker threads under the parent's
+ * NPROC value but kthreads do not. To avoid userspace overflowing
+ * the system with worker threads, fork_owner must be true.
+ */
+ if (!dev->fork_owner)
+ return -EFAULT;
+
ret = vhost_new_worker(dev, &state);
if (!ret && copy_to_user(argp, &state, sizeof(state)))
ret = -EFAULT;
@@ -982,6 +1156,7 @@ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
vhost_dev_cleanup(dev);
+ dev->fork_owner = fork_from_owner_default;
dev->umem = umem;
/* We don't need VQ locks below since vhost_dev_cleanup makes sure
* VQs aren't running.
@@ -1990,14 +2165,15 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
break;
}
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
- vq->last_avail_idx = s.num & 0xffff;
+ vq->next_avail_head = vq->last_avail_idx =
+ s.num & 0xffff;
vq->last_used_idx = (s.num >> 16) & 0xffff;
} else {
if (s.num > 0xffff) {
r = -EINVAL;
break;
}
- vq->last_avail_idx = s.num;
+ vq->next_avail_head = vq->last_avail_idx = s.num;
}
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
@@ -2135,6 +2311,45 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
goto done;
}
+#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL
+ if (ioctl == VHOST_SET_FORK_FROM_OWNER) {
+ /* Only allow modification before owner is set */
+ if (vhost_dev_has_owner(d)) {
+ r = -EBUSY;
+ goto done;
+ }
+ u8 fork_owner_val;
+
+ if (get_user(fork_owner_val, (u8 __user *)argp)) {
+ r = -EFAULT;
+ goto done;
+ }
+ if (fork_owner_val != VHOST_FORK_OWNER_TASK &&
+ fork_owner_val != VHOST_FORK_OWNER_KTHREAD) {
+ r = -EINVAL;
+ goto done;
+ }
+ d->fork_owner = !!fork_owner_val;
+ r = 0;
+ goto done;
+ }
+ if (ioctl == VHOST_GET_FORK_FROM_OWNER) {
+ u8 fork_owner_val = d->fork_owner;
+
+ if (fork_owner_val != VHOST_FORK_OWNER_TASK &&
+ fork_owner_val != VHOST_FORK_OWNER_KTHREAD) {
+ r = -EINVAL;
+ goto done;
+ }
+ if (put_user(fork_owner_val, (u8 __user *)argp)) {
+ r = -EFAULT;
+ goto done;
+ }
+ r = 0;
+ goto done;
+ }
+#endif
+
/* You must be the owner to do anything else */
r = vhost_dev_check_owner(d);
if (r)
@@ -2590,11 +2805,12 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num)
{
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
struct vring_desc desc;
unsigned int i, head, found = 0;
u16 last_avail_idx = vq->last_avail_idx;
__virtio16 ring_head;
- int ret, access;
+ int ret, access, c = 0;
if (vq->avail_idx == vq->last_avail_idx) {
ret = vhost_get_avail_idx(vq);
@@ -2605,17 +2821,21 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
return vq->num;
}
- /* Grab the next descriptor number they're advertising, and increment
- * the index we've seen. */
- if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
- vq_err(vq, "Failed to read head: idx %d address %p\n",
- last_avail_idx,
- &vq->avail->ring[last_avail_idx % vq->num]);
- return -EFAULT;
+ if (in_order)
+ head = vq->next_avail_head & (vq->num - 1);
+ else {
+ /* Grab the next descriptor number they're
+ * advertising, and increment the index we've seen. */
+ if (unlikely(vhost_get_avail_head(vq, &ring_head,
+ last_avail_idx))) {
+ vq_err(vq, "Failed to read head: idx %d address %p\n",
+ last_avail_idx,
+ &vq->avail->ring[last_avail_idx % vq->num]);
+ return -EFAULT;
+ }
+ head = vhost16_to_cpu(vq, ring_head);
}
- head = vhost16_to_cpu(vq, ring_head);
-
/* If their number is silly, that's an error. */
if (unlikely(head >= vq->num)) {
vq_err(vq, "Guest says index %u > %u is available",
@@ -2658,6 +2878,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
"in indirect descriptor at idx %d\n", i);
return ret;
}
+ ++c;
continue;
}
@@ -2693,10 +2914,12 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
}
*out_num += ret;
}
+ ++c;
} while ((i = next_desc(vq, &desc)) != -1);
/* On success, increment avail index. */
vq->last_avail_idx++;
+ vq->next_avail_head += c;
/* Assume notifications from guest are disabled at this point,
* if they aren't we would need to update avail_event index. */
@@ -2720,8 +2943,9 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
cpu_to_vhost32(vq, head),
cpu_to_vhost32(vq, len)
};
+ u16 nheads = 1;
- return vhost_add_used_n(vq, &heads, 1);
+ return vhost_add_used_n(vq, &heads, &nheads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
@@ -2757,10 +2981,9 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
return 0;
}
-/* After we've used one of their buffers, we tell them about it. We'll then
- * want to notify the guest, using eventfd. */
-int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
- unsigned count)
+static int vhost_add_used_n_ooo(struct vhost_virtqueue *vq,
+ struct vring_used_elem *heads,
+ unsigned count)
{
int start, n, r;
@@ -2773,7 +2996,72 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
heads += n;
count -= n;
}
- r = __vhost_add_used_n(vq, heads, count);
+ return __vhost_add_used_n(vq, heads, count);
+}
+
+static int vhost_add_used_n_in_order(struct vhost_virtqueue *vq,
+ struct vring_used_elem *heads,
+ const u16 *nheads,
+ unsigned count)
+{
+ vring_used_elem_t __user *used;
+ u16 old, new = vq->last_used_idx;
+ int start, i;
+
+ if (!nheads)
+ return -EINVAL;
+
+ start = vq->last_used_idx & (vq->num - 1);
+ used = vq->used->ring + start;
+
+ for (i = 0; i < count; i++) {
+ if (vhost_put_used(vq, &heads[i], start, 1)) {
+ vq_err(vq, "Failed to write used");
+ return -EFAULT;
+ }
+ start += nheads[i];
+ new += nheads[i];
+ if (start >= vq->num)
+ start -= vq->num;
+ }
+
+ if (unlikely(vq->log_used)) {
+ /* Make sure data is seen before log. */
+ smp_wmb();
+ /* Log used ring entry write. */
+ log_used(vq, ((void __user *)used - (void __user *)vq->used),
+ (vq->num - start) * sizeof *used);
+ if (start + count > vq->num)
+ log_used(vq, 0,
+ (start + count - vq->num) * sizeof *used);
+ }
+
+ old = vq->last_used_idx;
+ vq->last_used_idx = new;
+ /* If the driver never bothers to signal in a very long while,
+ * used index might wrap around. If that happens, invalidate
+ * signalled_used index we stored. TODO: make sure driver
+ * signals at least once in 2^16 and remove this. */
+ if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
+ vq->signalled_used_valid = false;
+ return 0;
+}
+
+/* After we've used one of their buffers, we tell them about it. We'll then
+ * want to notify the guest, using eventfd. */
+int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+ u16 *nheads, unsigned count)
+{
+ bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+ int r;
+
+ if (!in_order || !nheads)
+ r = vhost_add_used_n_ooo(vq, heads, count);
+ else
+ r = vhost_add_used_n_in_order(vq, heads, nheads, count);
+
+ if (r < 0)
+ return r;
/* Make sure buffer is written before we update index. */
smp_wmb();
@@ -2853,14 +3141,16 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
- struct vring_used_elem *heads, unsigned count)
+ struct vring_used_elem *heads,
+ u16 *nheads,
+ unsigned count)
{
- vhost_add_used_n(vq, heads, count);
+ vhost_add_used_n(vq, heads, nheads, count);
vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
-/* return true if we're sure that avaiable ring is empty */
+/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
int r;
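
/*
 * Editorial aside, not part of the patch: a minimal, userspace-compilable
 * sketch of the ops-table pattern vhost_worker_create() now uses to pick
 * a backend. Each backend supplies create/stop/wakeup callbacks and the
 * caller only dispatches through the table. A single stand-in backend is
 * shown; a second table would model the kthread case. All demo_* names
 * are invented.
 */
#include <stdio.h>

struct demo_worker;

struct demo_worker_ops {
	int  (*create)(struct demo_worker *w, const char *name);
	void (*stop)(struct demo_worker *w);
	void (*wakeup)(struct demo_worker *w);
};

struct demo_worker {
	const struct demo_worker_ops *ops;
	const char *name;
};

static int demo_create(struct demo_worker *w, const char *name)
{
	w->name = name;
	printf("created worker %s\n", name);
	return 0;
}

static void demo_stop(struct demo_worker *w)   { printf("stopped %s\n", w->name); }
static void demo_wakeup(struct demo_worker *w) { printf("woke %s\n", w->name); }

static const struct demo_worker_ops demo_task_ops = {
	.create = demo_create,
	.stop   = demo_stop,
	.wakeup = demo_wakeup,
};

int main(void)
{
	struct demo_worker w = { .ops = &demo_task_ops };

	if (w.ops->create(&w, "vhost-1234"))
		return 1;
	w.ops->wakeup(&w);
	w.ops->stop(&w);
	return 0;
}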
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d1aed35c4b07..621a6d9a8791 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -26,7 +26,18 @@ struct vhost_work {
unsigned long flags;
};
+struct vhost_worker;
+struct vhost_dev;
+
+struct vhost_worker_ops {
+ int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
+ const char *name);
+ void (*stop)(struct vhost_worker *worker);
+ void (*wakeup)(struct vhost_worker *worker);
+};
+
struct vhost_worker {
+ struct task_struct *kthread_task;
struct vhost_task *vtsk;
struct vhost_dev *dev;
/* Used to serialize device wide flushing with worker swapping. */
@@ -36,6 +47,7 @@ struct vhost_worker {
u32 id;
int attachment_cnt;
bool killed;
+ const struct vhost_worker_ops *ops;
};
/* Poll a file (eventfd or socket) */
@@ -103,6 +115,8 @@ struct vhost_virtqueue {
* Values are limited to 0x7fff, and the high bit is used as
* a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_avail_idx;
+ /* Next avail ring head when VIRTIO_F_IN_ORDER is negotiated */
+ u16 next_avail_head;
/* Caches available index value from user. */
u16 avail_idx;
@@ -129,6 +143,7 @@ struct vhost_virtqueue {
struct iovec iotlb_iov[64];
struct iovec *indirect;
struct vring_used_elem *heads;
+ u16 *nheads;
/* Protected by virtqueue mutex. */
struct vhost_iotlb *umem;
struct vhost_iotlb *iotlb;
@@ -176,6 +191,16 @@ struct vhost_dev {
int byte_weight;
struct xarray worker_xa;
bool use_worker;
+ /*
+ * If fork_owner is true we use vhost_tasks to create
+ * the worker so all settings/limits like cgroups, NPROC,
+ * scheduler, etc are inherited from the owner. If false,
+ * we use kthreads and only attach to the same cgroups
+ * as the owner for compat with older kernels.
+ * The default comes from fork_from_owner_default and is
+ * true unless overridden.
+ */
+ bool fork_owner;
int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg);
};
@@ -213,11 +238,12 @@ bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
- unsigned count);
+ u16 *nheads, unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
- struct vring_used_elem *heads, unsigned count);
+ struct vring_used_elem *heads, u16 *nheads,
+ unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
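
/*
 * Editorial aside: a hedged userspace sketch of how the new fork_owner
 * control above might be driven. The ioctl is only compiled in when
 * CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL is set and must be issued before
 * the device gets an owner (otherwise the kernel returns -EBUSY). The
 * VHOST_SET_FORK_FROM_OWNER and VHOST_FORK_OWNER_KTHREAD names are assumed
 * to come from a sufficiently new uapi <linux/vhost.h>; /dev/vhost-net is
 * just an example device node.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
	__u8 mode = VHOST_FORK_OWNER_KTHREAD;	/* assumed uapi constant */
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Must precede VHOST_SET_OWNER; afterwards the kernel returns -EBUSY. */
	if (ioctl(fd, VHOST_SET_FORK_FROM_OWNER, &mode))
		perror("VHOST_SET_FORK_FROM_OWNER");
	else if (ioctl(fd, VHOST_SET_OWNER))
		perror("VHOST_SET_OWNER");

	close(fd);
	return 0;
}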
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index bbce65452701..9f27c3f6091b 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -780,22 +780,6 @@ ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
EXPORT_SYMBOL(vringh_iov_push_user);
/**
- * vringh_abandon_user - we've decided not to handle the descriptor(s).
- * @vrh: the vring.
- * @num: the number of descriptors to put back (ie. num
- * vringh_get_user() to undo).
- *
- * The next vringh_get_user() will return the old descriptor(s) again.
- */
-void vringh_abandon_user(struct vringh *vrh, unsigned int num)
-{
- /* We only update vring_avail_event(vr) when we want to be notified,
- * so we haven't changed that yet. */
- vrh->last_avail_idx -= num;
-}
-EXPORT_SYMBOL(vringh_abandon_user);
-
-/**
* vringh_complete_user - we've finished with descriptor, publish it.
* @vrh: the vring.
* @head: the head as filled in by vringh_getdesc_user.
@@ -900,20 +884,6 @@ static inline int putused_kern(const struct vringh *vrh,
return 0;
}
-static inline int xfer_kern(const struct vringh *vrh, void *src,
- void *dst, size_t len)
-{
- memcpy(dst, src, len);
- return 0;
-}
-
-static inline int kern_xfer(const struct vringh *vrh, void *dst,
- void *src, size_t len)
-{
- memcpy(dst, src, len);
- return 0;
-}
-
/**
* vringh_init_kern - initialize a vringh for a kernelspace vring.
* @vrh: the vringh to initialize.
@@ -999,51 +969,6 @@ int vringh_getdesc_kern(struct vringh *vrh,
EXPORT_SYMBOL(vringh_getdesc_kern);
/**
- * vringh_iov_pull_kern - copy bytes from vring_iov.
- * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
- * @dst: the place to copy.
- * @len: the maximum length to copy.
- *
- * Returns the bytes copied <= len or a negative errno.
- */
-ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
-{
- return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
-}
-EXPORT_SYMBOL(vringh_iov_pull_kern);
-
-/**
- * vringh_iov_push_kern - copy bytes into vring_iov.
- * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
- * @src: the place to copy from.
- * @len: the maximum length to copy.
- *
- * Returns the bytes copied <= len or a negative errno.
- */
-ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
- const void *src, size_t len)
-{
- return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
-}
-EXPORT_SYMBOL(vringh_iov_push_kern);
-
-/**
- * vringh_abandon_kern - we've decided not to handle the descriptor(s).
- * @vrh: the vring.
- * @num: the number of descriptors to put back (ie. num
- * vringh_get_kern() to undo).
- *
- * The next vringh_get_kern() will return the old descriptor(s) again.
- */
-void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
-{
- /* We only update vring_avail_event(vr) when we want to be notified,
- * so we haven't changed that yet. */
- vrh->last_avail_idx -= num;
-}
-EXPORT_SYMBOL(vringh_abandon_kern);
-
-/**
* vringh_complete_kern - we've finished with descriptor, publish it.
* @vrh: the vring.
* @head: the head as filled in by vringh_getdesc_kern.
@@ -1535,23 +1460,6 @@ ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
EXPORT_SYMBOL(vringh_iov_push_iotlb);
/**
- * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
- * @vrh: the vring.
- * @num: the number of descriptors to put back (ie. num
- * vringh_get_iotlb() to undo).
- *
- * The next vringh_get_iotlb() will return the old descriptor(s) again.
- */
-void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
-{
- /* We only update vring_avail_event(vr) when we want to be notified,
- * so we haven't changed that yet.
- */
- vrh->last_avail_idx -= num;
-}
-EXPORT_SYMBOL(vringh_abandon_iotlb);
-
-/**
* vringh_complete_iotlb - we've finished with descriptor, publish it.
* @vrh: the vring.
* @head: the head as filled in by vringh_getdesc_iotlb.
@@ -1572,32 +1480,6 @@ int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
EXPORT_SYMBOL(vringh_complete_iotlb);
/**
- * vringh_notify_enable_iotlb - we want to know if something changes.
- * @vrh: the vring.
- *
- * This always enables notifications, but returns false if there are
- * now more buffers available in the vring.
- */
-bool vringh_notify_enable_iotlb(struct vringh *vrh)
-{
- return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
-}
-EXPORT_SYMBOL(vringh_notify_enable_iotlb);
-
-/**
- * vringh_notify_disable_iotlb - don't tell us if something changes.
- * @vrh: the vring.
- *
- * This is our normal running state: we disable and then only enable when
- * we're going to sleep.
- */
-void vringh_notify_disable_iotlb(struct vringh *vrh)
-{
- __vringh_notify_disable(vrh, putu16_iotlb);
-}
-EXPORT_SYMBOL(vringh_notify_disable_iotlb);
-
-/**
* vringh_need_notify_iotlb - must we tell the other side about used buffers?
* @vrh: the vring we've called vringh_complete_iotlb() on.
*
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 802153e23073..ae01457ea2cd 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -344,6 +344,10 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
len = iov_length(vq->iov, out);
+ if (len < VIRTIO_VSOCK_SKB_HEADROOM ||
+ len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
+ return NULL;
+
/* len contains both payload and hdr */
skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
if (!skb)
@@ -367,18 +371,15 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
return skb;
/* The pkt is too big or the length in the header is invalid */
- if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
- payload_len + sizeof(*hdr) > len) {
+ if (payload_len + sizeof(*hdr) > len) {
kfree_skb(skb);
return NULL;
}
- virtio_vsock_skb_rx_put(skb);
+ virtio_vsock_skb_put(skb, payload_len);
- nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
- if (nbytes != payload_len) {
- vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
- payload_len, nbytes);
+ if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
+ vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
kfree_skb(skb);
return NULL;
}
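
/*
 * Editorial aside on the vsock hunk above: a tiny illustrative check of
 * the new early bound; the descriptor length must cover at least the
 * packet header and at most the maximum payload plus headroom. The
 * constants and demo_* names are stand-ins, not the kernel macros.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_HEADROOM    44u		/* stand-in for VIRTIO_VSOCK_SKB_HEADROOM */
#define DEMO_MAX_PAYLOAD (64u * 1024)	/* stand-in for VIRTIO_VSOCK_MAX_PKT_BUF_SIZE */

static bool demo_len_ok(size_t len)
{
	return len >= DEMO_HEADROOM && len <= DEMO_MAX_PAYLOAD + DEMO_HEADROOM;
}

int main(void)
{
	printf("%d %d %d\n", demo_len_ok(8), demo_len_ok(1500),
	       demo_len_ok(DEMO_MAX_PAYLOAD + DEMO_HEADROOM + 1));
	return 0;
}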
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index f9cdbf8c53e3..37bd18730fe0 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1168,7 +1168,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
c->vc_screenbuf_size - delta);
c->vc_origin = vga_vram_end - c->vc_screenbuf_size;
vga_rolled_over = 0;
- } else if (oldo - delta >= (unsigned long)c->vc_screenbuf)
+ } else
c->vc_origin -= delta;
c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char,
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 55c6686f091e..c21484d15f0c 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -660,7 +660,7 @@ config FB_ATMEL
config FB_NVIDIA
tristate "nVidia Framebuffer Support"
- depends on FB && PCI
+ depends on FB && PCI && HAS_IOPORT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index b38c3b776bce..006638eefa41 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -16,7 +16,7 @@ config FB_DEVICE
default FB
help
Say Y here if you want the legacy /dev/fb* device file and
- interfaces within sysfs anc procfs. It is only required if you
+ interfaces within sysfs and procfs. It is only required if you
have userspace programs that depend on fbdev for graphics output.
This does not affect the framebuffer console. If unsure, say N.
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index d8eab4859fd4..55f5731e94c3 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -135,9 +135,9 @@ static int logo_shown = FBCON_LOGO_CANSHOW;
/* console mappings */
static unsigned int first_fb_vc;
static unsigned int last_fb_vc = MAX_NR_CONSOLES - 1;
-static int fbcon_is_default = 1;
+static bool fbcon_is_default = true;
static int primary_device = -1;
-static int fbcon_has_console_bind;
+static bool fbcon_has_console_bind;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
static int map_override;
@@ -172,7 +172,7 @@ static const struct consw fb_con;
#define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
-static int fbcon_cursor_noblink;
+static bool fbcon_cursor_blink = true;
#define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1)
@@ -289,16 +289,16 @@ static bool fbcon_skip_panic(struct fb_info *info)
#endif
}
-static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
+static inline bool fbcon_is_active(struct vc_data *vc, struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- return (info->state != FBINFO_STATE_RUNNING ||
- vc->vc_mode != KD_TEXT || ops->graphics || fbcon_skip_panic(info));
+ return info->state == FBINFO_STATE_RUNNING &&
+ vc->vc_mode == KD_TEXT && !ops->graphics && !fbcon_skip_panic(info);
}
static int get_color(struct vc_data *vc, struct fb_info *info,
- u16 c, int is_fg)
+ u16 c, bool is_fg)
{
int depth = fb_get_color_depth(&info->var, &info->fix);
int color = 0;
@@ -364,6 +364,16 @@ static int get_color(struct vc_data *vc, struct fb_info *info,
return color;
}
+static int get_fg_color(struct vc_data *vc, struct fb_info *info, u16 c)
+{
+ return get_color(vc, info, c, true);
+}
+
+static int get_bg_color(struct vc_data *vc, struct fb_info *info, u16 c)
+{
+ return get_color(vc, info, c, false);
+}
+
static void fb_flashcursor(struct work_struct *work)
{
struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work);
@@ -395,8 +405,9 @@ static void fb_flashcursor(struct work_struct *work)
c = scr_readw((u16 *) vc->vc_pos);
enable = ops->cursor_flash && !ops->cursor_state.enable;
- ops->cursor(vc, info, enable, get_color(vc, info, c, 1),
- get_color(vc, info, c, 0));
+ ops->cursor(vc, info, enable,
+ get_fg_color(vc, info, c),
+ get_bg_color(vc, info, c));
console_unlock();
queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
@@ -407,7 +418,7 @@ static void fbcon_add_cursor_work(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- if (!fbcon_cursor_noblink)
+ if (fbcon_cursor_blink)
queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
ops->cur_blink_jiffies);
}
@@ -464,7 +475,7 @@ static int __init fb_console_setup(char *this_opt)
last_fb_vc = simple_strtoul(options, &options, 10) - 1;
if (last_fb_vc < first_fb_vc || last_fb_vc >= MAX_NR_CONSOLES)
last_fb_vc = MAX_NR_CONSOLES - 1;
- fbcon_is_default = 0;
+ fbcon_is_default = false;
continue;
}
@@ -559,7 +570,7 @@ static int do_fbcon_takeover(int show_logo)
con2fb_map[i] = -1;
info_idx = -1;
} else {
- fbcon_has_console_bind = 1;
+ fbcon_has_console_bind = true;
}
return err;
@@ -826,7 +837,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
fg_vc->vc_rows);
}
- update_screen(vc_cons[fg_console].d);
+ if (fg_console != unit)
+ update_screen(vc_cons[fg_console].d);
}
/**
@@ -1267,7 +1279,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
struct fbcon_display *p = &fb_display[vc->vc_num];
u_int y_break;
- if (fbcon_is_inactive(vc, info))
+ if (!fbcon_is_active(vc, info))
return;
if (!height || !width)
@@ -1311,10 +1323,10 @@ static void fbcon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
- if (!fbcon_is_inactive(vc, info))
+ if (fbcon_is_active(vc, info))
ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
- get_color(vc, info, scr_readw(s), 1),
- get_color(vc, info, scr_readw(s), 0));
+ get_fg_color(vc, info, scr_readw(s)),
+ get_bg_color(vc, info, scr_readw(s)));
}
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
@@ -1322,7 +1334,7 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
- if (!fbcon_is_inactive(vc, info))
+ if (fbcon_is_active(vc, info))
ops->clear_margins(vc, info, margin_color, bottom_only);
}
@@ -1334,7 +1346,7 @@ static void fbcon_cursor(struct vc_data *vc, bool enable)
ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
- if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
+ if (!fbcon_is_active(vc, info) || vc->vc_deccm != 1)
return;
if (vc->vc_cursor_type & CUR_SW)
@@ -1347,8 +1359,9 @@ static void fbcon_cursor(struct vc_data *vc, bool enable)
if (!ops->cursor)
return;
- ops->cursor(vc, info, enable, get_color(vc, info, c, 1),
- get_color(vc, info, c, 0));
+ ops->cursor(vc, info, enable,
+ get_fg_color(vc, info, c),
+ get_bg_color(vc, info, c));
}
static int scrollback_phys_max = 0;
@@ -1363,6 +1376,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
struct vc_data *svc;
struct fbcon_ops *ops = info->fbcon_par;
int rows, cols;
+ unsigned long ret = 0;
p = &fb_display[unit];
@@ -1413,11 +1427,10 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
cols /= vc->vc_font.width;
rows /= vc->vc_font.height;
- vc_resize(vc, cols, rows);
+ ret = vc_resize(vc, cols, rows);
- if (con_is_visible(vc)) {
+ if (con_is_visible(vc) && !ret)
update_screen(vc);
- }
}
static __inline__ void ywrap_up(struct vc_data *vc, int count)
@@ -1740,7 +1753,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
- if (fbcon_is_inactive(vc, info))
+ if (!fbcon_is_active(vc, info))
return;
if (!width || !height)
@@ -1764,7 +1777,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
struct fbcon_display *p = &fb_display[vc->vc_num];
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
- if (fbcon_is_inactive(vc, info))
+ if (!fbcon_is_active(vc, info))
return true;
fbcon_cursor(vc, false);
@@ -2148,7 +2161,7 @@ static bool fbcon_switch(struct vc_data *vc)
fbcon_del_cursor_work(old_info);
}
- if (fbcon_is_inactive(vc, info) ||
+ if (!fbcon_is_active(vc, info) ||
ops->blank_state != FB_BLANK_UNBLANK)
fbcon_del_cursor_work(info);
else
@@ -2188,7 +2201,7 @@ static bool fbcon_switch(struct vc_data *vc)
scrollback_max = 0;
scrollback_current = 0;
- if (!fbcon_is_inactive(vc, info)) {
+ if (fbcon_is_active(vc, info)) {
ops->var.xoffset = ops->var.yoffset = p->yscroll = 0;
ops->update_start(info);
}
@@ -2244,7 +2257,7 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
}
}
- if (!fbcon_is_inactive(vc, info)) {
+ if (fbcon_is_active(vc, info)) {
if (ops->blank_state != blank) {
ops->blank_state = blank;
fbcon_cursor(vc, !blank);
@@ -2258,7 +2271,7 @@ static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
update_screen(vc);
}
- if (mode_switch || fbcon_is_inactive(vc, info) ||
+ if (mode_switch || !fbcon_is_active(vc, info) ||
ops->blank_state != FB_BLANK_UNBLANK)
fbcon_del_cursor_work(info);
else
@@ -2588,7 +2601,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
int i, j, k, depth;
u8 val;
- if (fbcon_is_inactive(vc, info))
+ if (!fbcon_is_active(vc, info))
return;
if (!con_is_visible(vc))
@@ -2688,7 +2701,7 @@ static void fbcon_modechanged(struct fb_info *info)
scrollback_max = 0;
scrollback_current = 0;
- if (!fbcon_is_inactive(vc, info)) {
+ if (fbcon_is_active(vc, info)) {
ops->var.xoffset = ops->var.yoffset = p->yscroll = 0;
ops->update_start(info);
}
@@ -2806,7 +2819,7 @@ static void fbcon_unbind(void)
fbcon_is_default);
if (!ret)
- fbcon_has_console_bind = 0;
+ fbcon_has_console_bind = false;
}
#else
static inline void fbcon_unbind(void) {}
@@ -3257,8 +3270,9 @@ static ssize_t cursor_blink_store(struct device *device,
const char *buf, size_t count)
{
struct fb_info *info;
- int blink, idx;
char **last = NULL;
+ bool blink;
+ int idx;
console_lock();
idx = con2fb_map[fg_console];
@@ -3274,10 +3288,10 @@ static ssize_t cursor_blink_store(struct device *device,
blink = simple_strtoul(buf, last, 0);
if (blink) {
- fbcon_cursor_noblink = 0;
+ fbcon_cursor_blink = true;
fbcon_add_cursor_work(info);
} else {
- fbcon_cursor_noblink = 1;
+ fbcon_cursor_blink = false;
fbcon_del_cursor_work(info);
}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index dfcf5e4d1d4c..53f1719b1ae1 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -449,6 +449,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (!registered_fb[i])
break;
+ if (i >= FB_MAX)
+ return -ENXIO;
+
if (!fb_info->modelist.prev || !fb_info->modelist.next)
INIT_LIST_HEAD(&fb_info->modelist);
diff --git a/drivers/video/fbdev/core/svgalib.c b/drivers/video/fbdev/core/svgalib.c
index d6053af749f6..0e0ce4e024d9 100644
--- a/drivers/video/fbdev/core/svgalib.c
+++ b/drivers/video/fbdev/core/svgalib.c
@@ -20,7 +20,6 @@
#include <asm/types.h>
#include <asm/io.h>
-
/* Write a CRT register value spread across multiple registers */
void svga_wcrt_multi(void __iomem *regbase, const struct vga_regset *regset, u32 value)
{
@@ -32,12 +31,13 @@ void svga_wcrt_multi(void __iomem *regbase, const struct vga_regset *regset, u32
while (bitnum <= regset->highbit) {
bitval = 1 << bitnum;
regval = regval & ~bitval;
- if (value & 1) regval = regval | bitval;
- bitnum ++;
+ if (value & 1)
+ regval = regval | bitval;
+ bitnum++;
value = value >> 1;
}
vga_wcrt(regbase, regset->regnum, regval);
- regset ++;
+ regset++;
}
}
@@ -52,12 +52,13 @@ void svga_wseq_multi(void __iomem *regbase, const struct vga_regset *regset, u32
while (bitnum <= regset->highbit) {
bitval = 1 << bitnum;
regval = regval & ~bitval;
- if (value & 1) regval = regval | bitval;
- bitnum ++;
+ if (value & 1)
+ regval = regval | bitval;
+ bitnum++;
value = value >> 1;
}
vga_wseq(regbase, regset->regnum, regval);
- regset ++;
+ regset++;
}
}
@@ -67,15 +68,13 @@ static unsigned int svga_regset_size(const struct vga_regset *regset)
while (regset->regnum != VGA_REGSET_END_VAL) {
count += regset->highbit - regset->lowbit + 1;
- regset ++;
+ regset++;
}
return 1 << count;
}
-
/* ------------------------------------------------------------------------- */
-
/* Set graphics controller registers to sane values */
void svga_set_default_gfx_regs(void __iomem *regbase)
{
@@ -103,7 +102,7 @@ void svga_set_default_atc_regs(void __iomem *regbase)
vga_w(regbase, VGA_ATT_W, 0x00);
/* All standard ATC registers (AR00 - AR14) */
- for (count = 0; count <= 0xF; count ++)
+ for (count = 0; count <= 0xF; count++)
svga_wattr(regbase, count, count);
svga_wattr(regbase, VGA_ATC_MODE, 0x01);
@@ -188,10 +187,8 @@ void svga_dump_var(struct fb_var_screeninfo *var, int node)
}
#endif /* 0 */
-
/* ------------------------------------------------------------------------- */
-
void svga_settile(struct fb_info *info, struct fb_tilemap *map)
{
const u8 *font = map->data;
@@ -230,7 +227,7 @@ void svga_tilecopy(struct fb_info *info, struct fb_tilearea *area)
((area->sy == area->dy) && (area->sx > area->dx))) {
src = fb + area->sx * colstride + area->sy * rowstride;
dst = fb + area->dx * colstride + area->dy * rowstride;
- } else {
+ } else {
src = fb + (area->sx + area->width - 1) * colstride
+ (area->sy + area->height - 1) * rowstride;
dst = fb + (area->dx + area->width - 1) * colstride
@@ -238,7 +235,7 @@ void svga_tilecopy(struct fb_info *info, struct fb_tilearea *area)
colstride = -colstride;
rowstride = -rowstride;
- }
+ }
for (dy = 0; dy < area->height; dy++) {
u16 __iomem *src2 = src;
@@ -285,19 +282,19 @@ void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit)
u8 __iomem *fb = (u8 __iomem *)info->screen_base;
fb += blit->sx * colstride + blit->sy * rowstride;
- i=0;
- for (dy=0; dy < blit->height; dy ++) {
+ i = 0;
+ for (dy = 0; dy < blit->height; dy++) {
u8 __iomem *fb2 = fb;
- for (dx = 0; dx < blit->width; dx ++) {
+ for (dx = 0; dx < blit->width; dx++) {
fb_writeb(blit->indices[i], fb2);
fb_writeb(attr, fb2 + 1);
fb2 += colstride;
- i ++;
- if (i == blit->length) return;
+ i++;
+ if (i == blit->length)
+ return;
}
fb += rowstride;
}
-
}
/* Set cursor in text (tileblit) mode */
@@ -309,15 +306,15 @@ void svga_tilecursor(void __iomem *regbase, struct fb_info *info, struct fb_tile
+ (cursor->sy + (info->var.yoffset / 16))
* (info->var.xres_virtual / 8);
- if (! cursor -> mode)
+ if (!cursor->mode)
return;
svga_wcrt_mask(regbase, 0x0A, 0x20, 0x20); /* disable cursor */
- if (cursor -> shape == FB_TILE_CURSOR_NONE)
+ if (cursor->shape == FB_TILE_CURSOR_NONE)
return;
- switch (cursor -> shape) {
+ switch (cursor->shape) {
case FB_TILE_CURSOR_UNDERLINE:
cs = 0x0d;
break;
@@ -375,7 +372,6 @@ EXPORT_SYMBOL(svga_get_caps);
/* ------------------------------------------------------------------------- */
-
/*
* Compute PLL settings (M, N, R)
* F_VCO = (F_BASE * M) / N
@@ -386,7 +382,7 @@ int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u
u16 am, an, ar;
u32 f_vco, f_current, delta_current, delta_best;
- pr_debug("fb%d: ideal frequency: %d kHz\n", node, (unsigned int) f_wanted);
+ pr_debug("fb%d: ideal frequency: %d kHz\n", node, (unsigned int)f_wanted);
ar = pll->r_max;
f_vco = f_wanted << ar;
@@ -417,7 +413,7 @@ int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u
while ((am <= pll->m_max) && (an <= pll->n_max)) {
f_current = (pll->f_base * am) / an;
- delta_current = abs_diff (f_current, f_vco);
+ delta_current = abs_diff(f_current, f_vco);
if (delta_current < delta_best) {
delta_best = delta_current;
@@ -425,58 +421,55 @@ int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u
*n = an;
}
- if (f_current <= f_vco) {
- am ++;
- } else {
- an ++;
- }
+ if (f_current <= f_vco)
+ am++;
+ else
+ an++;
}
f_current = (pll->f_base * *m) / *n;
- pr_debug("fb%d: found frequency: %d kHz (VCO %d kHz)\n", node, (int) (f_current >> ar), (int) f_current);
- pr_debug("fb%d: m = %d n = %d r = %d\n", node, (unsigned int) *m, (unsigned int) *n, (unsigned int) *r);
+ pr_debug("fb%d: found frequency: %d kHz (VCO %d kHz)\n", node, (int)(f_current >> ar), (int)f_current);
+ pr_debug("fb%d: m = %d n = %d r = %d\n", node, (unsigned int)*m, (unsigned int)*n, (unsigned int)*r);
return 0;
}
-
/* ------------------------------------------------------------------------- */
-
/* Check CRT timing values */
int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node)
{
u32 value;
- var->xres = (var->xres+7)&~7;
- var->left_margin = (var->left_margin+7)&~7;
- var->right_margin = (var->right_margin+7)&~7;
- var->hsync_len = (var->hsync_len+7)&~7;
+ var->xres = (var->xres + 7) & ~7;
+ var->left_margin = (var->left_margin + 7) & ~7;
+ var->right_margin = (var->right_margin + 7) & ~7;
+ var->hsync_len = (var->hsync_len + 7) & ~7;
/* Check horizontal total */
value = var->xres + var->left_margin + var->right_margin + var->hsync_len;
- if (((value / 8) - 5) >= svga_regset_size (tm->h_total_regs))
+ if (((value / 8) - 5) >= svga_regset_size(tm->h_total_regs))
return -EINVAL;
/* Check horizontal display and blank start */
value = var->xres;
- if (((value / 8) - 1) >= svga_regset_size (tm->h_display_regs))
+ if (((value / 8) - 1) >= svga_regset_size(tm->h_display_regs))
return -EINVAL;
- if (((value / 8) - 1) >= svga_regset_size (tm->h_blank_start_regs))
+ if (((value / 8) - 1) >= svga_regset_size(tm->h_blank_start_regs))
return -EINVAL;
/* Check horizontal sync start */
value = var->xres + var->right_margin;
- if (((value / 8) - 1) >= svga_regset_size (tm->h_sync_start_regs))
+ if (((value / 8) - 1) >= svga_regset_size(tm->h_sync_start_regs))
return -EINVAL;
/* Check horizontal blank end (or length) */
value = var->left_margin + var->right_margin + var->hsync_len;
- if ((value == 0) || ((value / 8) >= svga_regset_size (tm->h_blank_end_regs)))
+ if ((value == 0) || ((value / 8) >= svga_regset_size(tm->h_blank_end_regs)))
return -EINVAL;
/* Check horizontal sync end (or length) */
value = var->hsync_len;
- if ((value == 0) || ((value / 8) >= svga_regset_size (tm->h_sync_end_regs)))
+ if ((value == 0) || ((value / 8) >= svga_regset_size(tm->h_sync_end_regs)))
return -EINVAL;
/* Check vertical total */
@@ -498,12 +491,12 @@ int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screenin
/* Check vertical blank end (or length) */
value = var->upper_margin + var->lower_margin + var->vsync_len;
- if ((value == 0) || (value >= svga_regset_size (tm->v_blank_end_regs)))
+ if ((value == 0) || (value >= svga_regset_size(tm->v_blank_end_regs)))
return -EINVAL;
/* Check vertical sync end (or length) */
value = var->vsync_len;
- if ((value == 0) || (value >= svga_regset_size (tm->v_sync_end_regs)))
+ if ((value == 0) || (value >= svga_regset_size(tm->v_sync_end_regs)))
return -EINVAL;
return 0;
@@ -597,18 +590,15 @@ void svga_set_timings(void __iomem *regbase, const struct svga_timing_regs *tm,
vga_w(regbase, VGA_MIS_W, regval);
}
-
/* ------------------------------------------------------------------------- */
-
static inline int match_format(const struct svga_fb_format *frm,
struct fb_var_screeninfo *var)
{
int i = 0;
int stored = -EINVAL;
- while (frm->bits_per_pixel != SVGA_FORMAT_END_VAL)
- {
+ while (frm->bits_per_pixel != SVGA_FORMAT_END_VAL) {
if ((var->bits_per_pixel == frm->bits_per_pixel) &&
(var->red.length <= frm->red.length) &&
(var->green.length <= frm->green.length) &&
@@ -648,7 +638,6 @@ int svga_match_format(const struct svga_fb_format *frm,
return i;
}
-
EXPORT_SYMBOL(svga_wcrt_multi);
EXPORT_SYMBOL(svga_wseq_multi);
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index f30da32cdaed..a077bf346bdf 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -996,8 +996,13 @@ static int imxfb_probe(struct platform_device *pdev)
info->fix.smem_start = fbi->map_dma;
INIT_LIST_HEAD(&info->modelist);
- for (i = 0; i < fbi->num_modes; i++)
- fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
+ for (i = 0; i < fbi->num_modes; i++) {
+ ret = fb_add_videomode(&fbi->mode[i].mode, &info->modelist);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add videomode\n");
+ goto failed_cmap;
+ }
+ }
/*
* This makes sure that our colour bitfield
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 08ee8baa79f8..c8b1dfa456a3 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -679,7 +679,8 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if ((err = pci_enable_device(pdev))) {
+ err = pcim_enable_device(pdev);
+ if (err) {
printk(KERN_WARNING "kyrofb: Can't enable pdev: %d\n", err);
return err;
}
@@ -688,6 +689,10 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!info)
return -ENOMEM;
+ err = pcim_request_all_regions(pdev, "kyrofb");
+ if (err)
+ goto out_free_fb;
+
currentpar = info->par;
kyro_fix.smem_start = pci_resource_start(pdev, 0);
@@ -696,13 +701,15 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
kyro_fix.mmio_len = pci_resource_len(pdev, 1);
currentpar->regbase = deviceInfo.pSTGReg =
- ioremap(kyro_fix.mmio_start, kyro_fix.mmio_len);
+ devm_ioremap(&pdev->dev, kyro_fix.mmio_start,
+ kyro_fix.mmio_len);
if (!currentpar->regbase)
goto out_free_fb;
- info->screen_base = pci_ioremap_wc_bar(pdev, 0);
+ info->screen_base = devm_ioremap_wc(&pdev->dev, kyro_fix.smem_start,
+ kyro_fix.smem_len);
if (!info->screen_base)
- goto out_unmap_regs;
+ goto out_free_fb;
if (!nomtrr)
currentpar->wc_cookie = arch_phys_wc_add(kyro_fix.smem_start,
@@ -737,7 +744,7 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fb_memset_io(info->screen_base, 0, size);
if (register_framebuffer(info) < 0)
- goto out_unmap;
+ goto out_free_fb;
fb_info(info, "%s frame buffer device, at %dx%d@%d using %ldk/%ldk of VRAM\n",
info->fix.id,
@@ -748,10 +755,6 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-out_unmap:
- iounmap(info->screen_base);
-out_unmap_regs:
- iounmap(currentpar->regbase);
out_free_fb:
framebuffer_release(info);
@@ -773,9 +776,6 @@ static void kyrofb_remove(struct pci_dev *pdev)
deviceInfo.ulNextFreeVidMem = 0;
deviceInfo.ulOverlayOffset = 0;
- iounmap(info->screen_base);
- iounmap(par->regbase);
-
arch_phys_wc_del(par->wc_cookie);
unregister_framebuffer(info);
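
/*
 * Editorial aside: the kyrofb conversion above moves to managed PCI and
 * ioremap helpers. The sketch below (a hypothetical demo_probe(), not
 * kyrofb code) shows the resulting shape: the mappings are tied to the
 * device's lifetime, so the unwind labels and the iounmap() calls in
 * remove() disappear.
 */
#include <linux/pci.h>
#include <linux/io.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_request_all_regions(pdev, "demo");
	if (err)
		return err;

	regs = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!regs)
		return -ENOMEM;

	/* No iounmap()/release on later errors or in remove(): devm handles it. */
	return 0;
}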
diff --git a/drivers/video/fbdev/nvidia/nv_local.h b/drivers/video/fbdev/nvidia/nv_local.h
index 68e508daa417..93aff35305a9 100644
--- a/drivers/video/fbdev/nvidia/nv_local.h
+++ b/drivers/video/fbdev/nvidia/nv_local.h
@@ -80,7 +80,7 @@
(par)->dmaFree -= ((size) + 1); \
}
-#if defined(__i386__)
+#if defined(__i386__) && !defined(CONFIG_UML)
#define _NV_FENCE() outb(0, 0x3D0);
#else
#define _NV_FENCE() mb();
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index be95fcddce4c..1893815dc67f 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -21,9 +21,9 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/of_clk.h>
#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
#include <linux/parser.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
@@ -134,7 +134,7 @@ struct simplefb_params {
static int simplefb_parse_dt(struct platform_device *pdev,
struct simplefb_params *params)
{
- struct device_node *np = pdev->dev.of_node, *mem;
+ struct device_node *np = pdev->dev.of_node;
int ret;
const char *format;
int i;
@@ -174,19 +174,10 @@ static int simplefb_parse_dt(struct platform_device *pdev,
return -EINVAL;
}
- mem = of_parse_phandle(np, "memory-region", 0);
- if (mem) {
- ret = of_address_to_resource(mem, 0, &params->memory);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to parse memory-region\n");
- of_node_put(mem);
- return ret;
- }
-
+ ret = of_reserved_mem_region_to_resource(np, 0, &params->memory);
+ if (!ret) {
if (of_property_present(np, "reg"))
dev_warn(&pdev->dev, "preferring \"memory-region\" over \"reg\" property\n");
-
- of_node_put(mem);
} else {
memset(&params->memory, 0, sizeof(params->memory));
}
diff --git a/drivers/video/fbdev/via/via-gpio.c b/drivers/video/fbdev/via/via-gpio.c
index 72302384bf77..45c0a4a6f85c 100644
--- a/drivers/video/fbdev/via/via-gpio.c
+++ b/drivers/video/fbdev/via/via-gpio.c
@@ -145,7 +145,7 @@ static struct viafb_gpio_cfg viafb_gpio_config = {
.label = "VIAFB onboard GPIO",
.owner = THIS_MODULE,
.direction_output = via_gpio_dir_out,
- .set_rv = via_gpio_set,
+ .set = via_gpio_set,
.direction_input = via_gpio_dir_input,
.get = via_gpio_get,
.base = -1,
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index d2b3ae7113ab..b01ec99106cd 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -116,13 +116,11 @@ e_free:
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
+ struct snp_derived_key_resp *derived_key_resp __free(kfree) = NULL;
struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
- struct snp_derived_key_resp derived_key_resp = {0};
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_guest_req req = {};
int rc, resp_len;
- /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
- u8 buf[64 + 16];
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
@@ -132,8 +130,9 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
* response payload. Make sure that it has enough space to cover the
* authtag.
*/
- resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize;
- if (sizeof(buf) < resp_len)
+ resp_len = sizeof(derived_key_resp->data) + mdesc->ctx->authsize;
+ derived_key_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
+ if (!derived_key_resp)
return -ENOMEM;
derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
@@ -149,23 +148,21 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
req.vmpck_id = mdesc->vmpck_id;
req.req_buf = derived_key_req;
req.req_sz = sizeof(*derived_key_req);
- req.resp_buf = buf;
+ req.resp_buf = derived_key_resp;
req.resp_sz = resp_len;
req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;
rc = snp_send_guest_request(mdesc, &req);
arg->exitinfo2 = req.exitinfo2;
- if (rc)
- return rc;
-
- memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
- if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
- sizeof(derived_key_resp)))
- rc = -EFAULT;
+ if (!rc) {
+ if (copy_to_user((void __user *)arg->resp_data, derived_key_resp,
+ sizeof(derived_key_resp->data)))
+ rc = -EFAULT;
+ }
/* The response buffer contains the sensitive data, explicitly clear it. */
- memzero_explicit(buf, sizeof(buf));
- memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
+ memzero_explicit(derived_key_resp, sizeof(*derived_key_resp));
+
return rc;
}
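
The sev-guest change drops the fixed 64+16 byte stack buffer in favour of a heap buffer sized from the message context's authsize, and leans on the scope-based __free(kfree) cleanup so both request and response buffers are released on every return path. A minimal sketch of that pattern with hypothetical sizes:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: allocate a response buffer that is freed automatically on return. */
static int example_do_request(size_t payload_len, size_t authsize)
{
	u8 *resp __free(kfree) = NULL;
	size_t resp_len = payload_len + authsize;

	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/* ... issue the request, copy payload_len bytes to the caller ... */

	/* wipe sensitive data before the automatic kfree() */
	memzero_explicit(resp, resp_len);
	return 0;
}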
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 5c48788cdbec..a09eb4d62f82 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -147,7 +147,7 @@ EXPORT_SYMBOL_GPL(virtio_config_changed);
/**
* virtio_config_driver_disable - disable config change reporting by drivers
- * @dev: the device to reset
+ * @dev: the device to disable
*
* This is only allowed to be called by a driver and disabling can't
* be nested.
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(virtio_config_driver_disable);
/**
* virtio_config_driver_enable - enable config change reporting by drivers
- * @dev: the device to reset
+ * @dev: the device to enable
*
* This is only allowed to be called by a driver and enabling can't
* be nested.
@@ -512,7 +512,7 @@ out:
* On error, the caller must call put_device on &@dev->dev (and not kfree),
* as another code path may have obtained a reference to @dev.
*
- * Returns: 0 on suceess, -error on failure
+ * Returns: 0 on success, -error on failure
*/
int register_virtio_device(struct virtio_device *dev)
{
@@ -536,6 +536,7 @@ int register_virtio_device(struct virtio_device *dev)
goto out_ida_remove;
spin_lock_init(&dev->config_lock);
+ dev->config_driver_disabled = false;
dev->config_core_enabled = false;
dev->config_change_pending = false;
diff --git a/drivers/virtio/virtio_dma_buf.c b/drivers/virtio/virtio_dma_buf.c
index 3fe1d03b0645..95c10632f84a 100644
--- a/drivers/virtio/virtio_dma_buf.c
+++ b/drivers/virtio/virtio_dma_buf.c
@@ -36,6 +36,8 @@ EXPORT_SYMBOL(virtio_dma_buf_export);
/**
* virtio_dma_buf_attach - mandatory attach callback for virtio dma-bufs
+ * @dma_buf: [in] buffer to attach
+ * @attach: [in] attachment structure
*/
int virtio_dma_buf_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index a5d63269f20b..d0728285b6ce 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -360,11 +360,15 @@ static int virtinput_freeze(struct virtio_device *vdev)
{
struct virtio_input *vi = vdev->priv;
unsigned long flags;
+ void *buf;
spin_lock_irqsave(&vi->lock, flags);
vi->ready = false;
spin_unlock_irqrestore(&vi->lock, flags);
+ virtio_reset_device(vdev);
+ while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+ kfree(buf);
vdev->config->del_vqs(vdev);
return 0;
}
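
The virtio_input freeze path now resets the device and reclaims buffers still posted on the status queue before deleting the virtqueues, so those allocations are not leaked across suspend. A minimal sketch of that teardown order for a generic virtio driver (hypothetical helper, single queue):

#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Sketch: stop the device, reclaim in-flight buffers, then delete the vqs. */
static void example_teardown(struct virtio_device *vdev, struct virtqueue *vq)
{
	void *buf;

	virtio_reset_device(vdev);		/* device stops using buffers */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);			/* reclaim what was still queued */
	vdev->config->del_vqs(vdev);		/* now safe to free the rings */
}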
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 5d78c2d572ab..b152a1eca05a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -65,7 +65,6 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_mmio.h>
@@ -88,22 +87,8 @@ struct virtio_mmio_device {
void __iomem *base;
unsigned long version;
-
- /* a list of queues so we can dispatch IRQs */
- spinlock_t lock;
- struct list_head virtqueues;
-};
-
-struct virtio_mmio_vq_info {
- /* the actual virtqueue */
- struct virtqueue *vq;
-
- /* the list node for the virtqueues list */
- struct list_head node;
};
-
-
/* Configuration interface */
static u64 vm_get_features(struct virtio_device *vdev)
@@ -300,9 +285,8 @@ static bool vm_notify_with_data(struct virtqueue *vq)
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
struct virtio_mmio_device *vm_dev = opaque;
- struct virtio_mmio_vq_info *info;
+ struct virtqueue *vq;
unsigned long status;
- unsigned long flags;
irqreturn_t ret = IRQ_NONE;
/* Read and acknowledge interrupts */
@@ -315,10 +299,8 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
}
if (likely(status & VIRTIO_MMIO_INT_VRING)) {
- spin_lock_irqsave(&vm_dev->lock, flags);
- list_for_each_entry(info, &vm_dev->virtqueues, node)
- ret |= vring_interrupt(irq, info->vq);
- spin_unlock_irqrestore(&vm_dev->lock, flags);
+ virtio_device_for_each_vq(&vm_dev->vdev, vq)
+ ret |= vring_interrupt(irq, vq);
}
return ret;
@@ -329,14 +311,8 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)
static void vm_del_vq(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
- struct virtio_mmio_vq_info *info = vq->priv;
- unsigned long flags;
unsigned int index = vq->index;
- spin_lock_irqsave(&vm_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vm_dev->lock, flags);
-
/* Select and deactivate the queue */
writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
if (vm_dev->version == 1) {
@@ -347,8 +323,6 @@ static void vm_del_vq(struct virtqueue *vq)
}
vring_del_virtqueue(vq);
-
- kfree(info);
}
static void vm_del_vqs(struct virtio_device *vdev)
@@ -375,9 +349,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
bool (*notify)(struct virtqueue *vq);
- struct virtio_mmio_vq_info *info;
struct virtqueue *vq;
- unsigned long flags;
unsigned int num;
int err;
@@ -399,13 +371,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
goto error_available;
}
- /* Allocate and fill out our active queue description */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto error_kmalloc;
- }
-
num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
if (num == 0) {
err = -ENOENT;
@@ -463,13 +428,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in
writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
}
- vq->priv = info;
- info->vq = vq;
-
- spin_lock_irqsave(&vm_dev->lock, flags);
- list_add(&info->node, &vm_dev->virtqueues);
- spin_unlock_irqrestore(&vm_dev->lock, flags);
-
return vq;
error_bad_pfn:
@@ -481,8 +439,6 @@ error_new_virtqueue:
writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
}
- kfree(info);
-error_kmalloc:
error_available:
return ERR_PTR(err);
}
@@ -627,8 +583,6 @@ static int virtio_mmio_probe(struct platform_device *pdev)
vm_dev->vdev.dev.release = virtio_mmio_release_dev;
vm_dev->vdev.config = &virtio_mmio_config_ops;
vm_dev->pdev = pdev;
- INIT_LIST_HEAD(&vm_dev->virtqueues);
- spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vm_dev->base)) {
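
The virtio_mmio rework removes the driver-private virtqueue list and its spinlock; the interrupt handler now walks the core's per-device vq list through virtio_device_for_each_vq(). A minimal sketch of that dispatch loop, assuming the same iterator (example handler only):

#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

/* Sketch: fan one interrupt out to every virtqueue of a device. */
static irqreturn_t example_vring_irq(int irq, struct virtio_device *vdev)
{
	irqreturn_t ret = IRQ_NONE;
	struct virtqueue *vq;

	virtio_device_for_each_vq(vdev, vq)
		ret |= vring_interrupt(irq, vq);

	return ret;
}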
diff --git a/drivers/virtio/virtio_pci_legacy_dev.c b/drivers/virtio/virtio_pci_legacy_dev.c
index 677d1f68bc9b..bbbf89c22880 100644
--- a/drivers/virtio/virtio_pci_legacy_dev.c
+++ b/drivers/virtio/virtio_pci_legacy_dev.c
@@ -140,9 +140,9 @@ EXPORT_SYMBOL_GPL(vp_legacy_set_status);
* vp_legacy_queue_vector - set the MSIX vector for a specific virtqueue
* @ldev: the legacy virtio-pci device
* @index: queue index
- * @vector: the config vector
+ * @vector: the queue vector
*
- * Returns the config vector read from the device
+ * Returns the queue vector read from the device
*/
u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
u16 index, u16 vector)
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index d665f8f73ea8..9e503b7a58d8 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -546,9 +546,9 @@ EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
* vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
* @mdev: the modern virtio-pci device
* @index: queue index
- * @vector: the config vector
+ * @vector: the queue vector
*
- * Returns the config vector read from the device
+ * Returns the queue vector read from the device
*/
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
u16 index, u16 vector)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 4397392bfef0..f5062061c408 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2296,6 +2296,10 @@ static inline int virtqueue_add(struct virtqueue *_vq,
* at the same time (except where noted).
*
* Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
+ *
+ * NB: ENOSPC is a special code that is only returned on an attempt to add a
+ * buffer to a full VQ. It indicates that some buffers are outstanding and that
+ * the operation can be retried after some buffers have been used.
*/
int virtqueue_add_sgs(struct virtqueue *_vq,
struct scatterlist *sgs[],
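
The new virtio_ring comment spells out that -ENOSPC from virtqueue_add_sgs() means only that the ring is full and the add can be retried once used buffers are reaped. A minimal sketch of a caller treating it as back-pressure rather than a hard error (hypothetical submit helper):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Sketch: treat -ENOSPC as "queue full, try again later", not a failure. */
static int example_submit(struct virtqueue *vq, struct scatterlist *out_sg,
			  void *token)
{
	struct scatterlist *sgs[] = { out_sg };
	int rc;

	rc = virtqueue_add_sgs(vq, sgs, 1, 0, token, GFP_ATOMIC);
	if (rc == -ENOSPC)
		return -EBUSY;	/* caller re-queues after used buffers are reaped */
	if (rc < 0)
		return rc;	/* real failure (e.g. -ENOMEM, -EIO) */

	virtqueue_kick(vq);
	return 0;
}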
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index a7b297dae489..657b07a60788 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -28,19 +28,6 @@ struct virtio_vdpa_device {
struct virtio_device vdev;
struct vdpa_device *vdpa;
u64 features;
-
- /* The lock to protect virtqueue list */
- spinlock_t lock;
- /* List of virtio_vdpa_vq_info */
- struct list_head virtqueues;
-};
-
-struct virtio_vdpa_vq_info {
- /* the actual virtqueue */
- struct virtqueue *vq;
-
- /* the list node for the virtqueues list */
- struct list_head node;
};
static inline struct virtio_vdpa_device *
@@ -135,9 +122,9 @@ static irqreturn_t virtio_vdpa_config_cb(void *private)
static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
{
- struct virtio_vdpa_vq_info *info = private;
+ struct virtqueue *vq = private;
- return vring_interrupt(0, info->vq);
+ return vring_interrupt(0, vq);
}
static struct virtqueue *
@@ -145,18 +132,15 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name, bool ctx)
{
- struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
struct device *dma_dev;
const struct vdpa_config_ops *ops = vdpa->config;
- struct virtio_vdpa_vq_info *info;
bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify;
struct vdpa_callback cb;
struct virtqueue *vq;
u64 desc_addr, driver_addr, device_addr;
/* Assume split virtqueue, switch to packed if necessary */
struct vdpa_vq_state state = {0};
- unsigned long flags;
u32 align, max_num, min_num = 1;
bool may_reduce_num = true;
int err;
@@ -179,10 +163,6 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (ops->get_vq_ready(vdpa, index))
return ERR_PTR(-ENOENT);
- /* Allocate and fill out our active queue description */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
if (ops->get_vq_size)
max_num = ops->get_vq_size(vdpa, index);
else
@@ -217,7 +197,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
/* Setup virtqueue callback */
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
- cb.private = info;
+ cb.private = vq;
cb.trigger = NULL;
ops->set_vq_cb(vdpa, index, &cb);
ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
@@ -248,13 +228,6 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
ops->set_vq_ready(vdpa, index, 1);
- vq->priv = info;
- info->vq = vq;
-
- spin_lock_irqsave(&vd_dev->lock, flags);
- list_add(&info->node, &vd_dev->virtqueues);
- spin_unlock_irqrestore(&vd_dev->lock, flags);
-
return vq;
err_vq:
@@ -263,7 +236,6 @@ error_new_virtqueue:
ops->set_vq_ready(vdpa, index, 0);
/* VDPA driver should make sure vq is stopped here */
WARN_ON(ops->get_vq_ready(vdpa, index));
- kfree(info);
return ERR_PTR(err);
}
@@ -272,20 +244,12 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq)
struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
struct vdpa_device *vdpa = vd_dev->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
- struct virtio_vdpa_vq_info *info = vq->priv;
unsigned int index = vq->index;
- unsigned long flags;
-
- spin_lock_irqsave(&vd_dev->lock, flags);
- list_del(&info->node);
- spin_unlock_irqrestore(&vd_dev->lock, flags);
/* Select and deactivate the queue (best effort) */
ops->set_vq_ready(vdpa, index, 0);
vring_del_virtqueue(vq);
-
- kfree(info);
}
static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
@@ -502,8 +466,6 @@ static int virtio_vdpa_probe(struct vdpa_device *vdpa)
vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
vd_dev->vdev.config = &virtio_vdpa_config_ops;
vd_dev->vdpa = vdpa;
- INIT_LIST_HEAD(&vd_dev->virtqueues);
- spin_lock_init(&vd_dev->lock);
vd_dev->vdev.id.device = ops->get_device_id(vdpa);
if (vd_dev->vdev.id.device == 0)
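
virtio_vdpa gets the same simplification: the per-queue info struct goes away and the vDPA callback's private cookie is the struct virtqueue itself, which vring_interrupt() consumes directly. A minimal sketch of wiring up such a callback (hypothetical example_* helpers):

#include <linux/interrupt.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

/* Sketch: the callback cookie is the virtqueue, no wrapper struct needed. */
static irqreturn_t example_vq_cb(void *private)
{
	struct virtqueue *vq = private;

	return vring_interrupt(0, vq);
}

static void example_wire_cb(struct vdpa_device *vdpa, u16 index,
			    struct virtqueue *vq)
{
	struct vdpa_callback cb = {
		.callback = example_vq_cb,
		.private  = vq,
	};

	vdpa->config->set_vq_cb(vdpa, index, &cb);
}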
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 26efca9ae0e7..c3fbb6068c52 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -644,6 +644,8 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
} else {
wdd->timeout = DW_WDT_DEFAULT_SECONDS;
watchdog_init_timeout(wdd, 0, dev);
+ /* Limit timeout value to hardware constraints. */
+ dw_wdt_set_timeout(wdd, wdd->timeout);
}
platform_set_drvdata(pdev, dw_wdt);
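
The dw_wdt fix runs whatever timeout watchdog_init_timeout() settled on back through the driver's set_timeout op, so it is clamped to the hardware's supported range before the watchdog is registered. A minimal sketch of the idea for a generic probe (hypothetical default value):

#include <linux/device.h>
#include <linux/watchdog.h>

/* Sketch: clamp a configured timeout to hardware limits at probe time. */
static void example_init_timeout(struct watchdog_device *wdd,
				 struct device *dev)
{
	wdd->timeout = 30;			/* driver default, in seconds */
	watchdog_init_timeout(wdd, 0, dev);	/* honour DT/module parameter */
	if (wdd->ops->set_timeout)
		wdd->ops->set_timeout(wdd, wdd->timeout); /* round to what HW supports */
}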
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 9ab769aa0244..4ab3405ef8e6 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -577,7 +577,11 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
/* Check that the heartbeat value is within its range;
if not reset to the default */
if (iTCO_wdt_set_timeout(&p->wddev, heartbeat)) {
- iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
+ ret = iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
+ if (ret != 0) {
+ dev_err(dev, "Failed to set watchdog timeout (%d)\n", WATCHDOG_TIMEOUT);
+ return ret;
+ }
dev_info(dev, "timeout value out of range, using %d\n",
WATCHDOG_TIMEOUT);
heartbeat = WATCHDOG_TIMEOUT;
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index a1e23dce8810..3b8488c86a2f 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -22,11 +22,13 @@
#include <linux/bits.h>
#include <linux/dmi.h>
+#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/kernel.h>
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/printk.h>
#include <linux/types.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index c0b2a9c5250d..97bcd32bade5 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -300,7 +300,7 @@ static void rwdt_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int __maybe_unused rwdt_suspend(struct device *dev)
+static int rwdt_suspend(struct device *dev)
{
struct rwdt_priv *priv = dev_get_drvdata(dev);
@@ -310,7 +310,7 @@ static int __maybe_unused rwdt_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused rwdt_resume(struct device *dev)
+static int rwdt_resume(struct device *dev)
{
struct rwdt_priv *priv = dev_get_drvdata(dev);
@@ -320,7 +320,7 @@ static int __maybe_unused rwdt_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume);
static const struct of_device_id rwdt_ids[] = {
{ .compatible = "renesas,rcar-gen2-wdt", },
@@ -334,7 +334,7 @@ static struct platform_driver rwdt_driver = {
.driver = {
.name = "renesas_wdt",
.of_match_table = rwdt_ids,
- .pm = &rwdt_pm_ops,
+ .pm = pm_sleep_ptr(&rwdt_pm_ops),
},
.probe = rwdt_probe,
.remove = rwdt_remove,
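
The renesas_wdt hunks convert from SIMPLE_DEV_PM_OPS with __maybe_unused callbacks to DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr(), so the callbacks are always visible to the compiler and simply discarded when CONFIG_PM_SLEEP is off. A minimal sketch of the modern form (hypothetical driver):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restart the hardware */
	return 0;
}

/* Callbacks are referenced unconditionally, so no __maybe_unused is needed. */
static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-wdt",
		/* evaluates to NULL when CONFIG_PM_SLEEP is disabled */
		.pm = pm_sleep_ptr(&example_pm_ops),
	},
};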
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index d1f9ce4100a8..be7d7db47591 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -214,7 +214,6 @@ static int rti_wdt_probe(struct platform_device *pdev)
struct rti_wdt_device *wdt;
struct clk *clk;
u32 last_ping = 0;
- struct device_node *node;
u32 reserved_mem_size;
struct resource res;
u32 *vaddr;
@@ -299,15 +298,8 @@ static int rti_wdt_probe(struct platform_device *pdev)
}
}
- node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
- if (node) {
- ret = of_address_to_resource(node, 0, &res);
- of_node_put(node);
- if (ret) {
- dev_err(dev, "No memory address assigned to the region.\n");
- goto err_iomap;
- }
-
+ ret = of_reserved_mem_region_to_resource(pdev->dev.of_node, 0, &res);
+ if (!ret) {
/*
* If reserved memory is defined for watchdog reset cause.
* Readout the Power-on(PON) reason and pass to bootstatus.
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index 5f23913ce3b4..6ce1bfb39064 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -75,11 +75,17 @@
#define SBSA_GWDT_VERSION_MASK 0xF
#define SBSA_GWDT_VERSION_SHIFT 16
+#define SBSA_GWDT_IMPL_MASK 0x7FF
+#define SBSA_GWDT_IMPL_SHIFT 0
+#define SBSA_GWDT_IMPL_MEDIATEK 0x426
+
/**
* struct sbsa_gwdt - Internal representation of the SBSA GWDT
* @wdd: kernel watchdog_device structure
* @clk: store the System Counter clock frequency, in Hz.
* @version: store the architecture version
+ * @need_ws0_race_workaround:
+ * indicate whether to adjust wdd->timeout to avoid a race with WS0
* @refresh_base: Virtual address of the watchdog refresh frame
* @control_base: Virtual address of the watchdog control frame
*/
@@ -87,6 +93,7 @@ struct sbsa_gwdt {
struct watchdog_device wdd;
u32 clk;
int version;
+ bool need_ws0_race_workaround;
void __iomem *refresh_base;
void __iomem *control_base;
};
@@ -161,6 +168,31 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
*/
sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
+ /*
+ * Some watchdog hardware has a race condition where it will ignore
+ * sbsa_gwdt_keepalive() if it is called at the exact moment that a
+ * timeout occurs and WS0 is being asserted. Unfortunately, the default
+ * behavior of the watchdog core is very likely to trigger this race
+ * when action=0 because it programs WOR to be half of the desired
+ * timeout, and watchdog_next_keepalive() chooses the exact same time to
+ * send keepalive pings.
+ *
+ * This triggers a race where sbsa_gwdt_keepalive() can be called right
+ * as WS0 is being asserted, and affected hardware will ignore that
+ * write and continue to assert WS0. After another (timeout / 2)
+ * seconds, the same race happens again. If the driver wins then the
+ * explicit refresh will reset WS0 to false but if the hardware wins,
+ * then WS1 is asserted and the system resets.
+ *
+ * Avoid the problem by scheduling keepalive heartbeats one second later
+ * than the WOR timeout.
+ *
+ * This workaround might not be needed in a future revision of the
+ * hardware.
+ */
+ if (gwdt->need_ws0_race_workaround)
+ wdd->min_hw_heartbeat_ms = timeout * 500 + 1000;
+
return 0;
}
@@ -202,12 +234,15 @@ static int sbsa_gwdt_keepalive(struct watchdog_device *wdd)
static void sbsa_gwdt_get_version(struct watchdog_device *wdd)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
- int ver;
+ int iidr, ver, impl;
- ver = readl(gwdt->control_base + SBSA_GWDT_W_IIDR);
- ver = (ver >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK;
+ iidr = readl(gwdt->control_base + SBSA_GWDT_W_IIDR);
+ ver = (iidr >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK;
+ impl = (iidr >> SBSA_GWDT_IMPL_SHIFT) & SBSA_GWDT_IMPL_MASK;
gwdt->version = ver;
+ gwdt->need_ws0_race_workaround =
+ !action && (impl == SBSA_GWDT_IMPL_MEDIATEK);
}
static int sbsa_gwdt_start(struct watchdog_device *wdd)
@@ -299,6 +334,15 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
else
wdd->max_hw_heartbeat_ms = GENMASK_ULL(47, 0) / gwdt->clk * 1000;
+ if (gwdt->need_ws0_race_workaround) {
+ /*
+ * A timeout of 3 seconds means that WOR will be set to 1.5
+ * seconds and the heartbeat will be scheduled every 2.5
+ * seconds.
+ */
+ wdd->min_timeout = 3;
+ }
+
status = readl(cf_base + SBSA_GWDT_WCS);
if (status & SBSA_GWDT_WCS_WS1) {
dev_warn(dev, "System reset by WDT.\n");
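
The sbsa_gwdt workaround keys off the implementer field of W_IIDR and, when active, keeps keepalive pings one second behind the programmed WOR period (timeout * 500 ms + 1000 ms), so with the new 3-second minimum timeout WOR is 1.5 s and pings arrive every 2.5 s. A minimal sketch of the decode and the arithmetic, mirroring the constants in the hunk (example_* names are made up):

#include <linux/types.h>

#define EXAMPLE_GWDT_IMPL_MASK		0x7FF
#define EXAMPLE_GWDT_IMPL_SHIFT		0
#define EXAMPLE_GWDT_IMPL_MEDIATEK	0x426

/* Sketch: decide whether the WS0 race workaround applies. */
static bool example_needs_ws0_workaround(u32 iidr, bool action)
{
	u32 impl = (iidr >> EXAMPLE_GWDT_IMPL_SHIFT) & EXAMPLE_GWDT_IMPL_MASK;

	return !action && impl == EXAMPLE_GWDT_IMPL_MEDIATEK;
}

/* Sketch: WOR fires at timeout/2; keep pings 1 s behind it (3 s -> 2500 ms). */
static unsigned int example_min_hw_heartbeat_ms(unsigned int timeout_s)
{
	return timeout_s * 500 + 1000;
}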
diff --git a/drivers/watchdog/watchdog_core.h b/drivers/watchdog/watchdog_core.h
index 5b35a8439e26..ab825d9f9248 100644
--- a/drivers/watchdog/watchdog_core.h
+++ b/drivers/watchdog/watchdog_core.h
@@ -24,8 +24,14 @@
* This material is provided "AS-IS" and at no charge.
*/
-#include <linux/hrtimer.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/hrtimer_types.h>
+#include <linux/init.h>
#include <linux/kthread.h>
+#include <linux/mutex_types.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
#define MAX_DOGS 32 /* Maximum number of watchdog devices */
diff --git a/drivers/watchdog/watchdog_pretimeout.c b/drivers/watchdog/watchdog_pretimeout.c
index e5295c990fa1..2526436dc74d 100644
--- a/drivers/watchdog/watchdog_pretimeout.c
+++ b/drivers/watchdog/watchdog_pretimeout.c
@@ -7,6 +7,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include <linux/watchdog.h>
#include "watchdog_core.h"
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index fcc1ba02e75b..5c6e3fa001d8 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -302,6 +302,9 @@ static int ziirave_firm_verify(struct watchdog_device *wdd,
const u16 len = be16_to_cpu(rec->len);
const u32 addr = be32_to_cpu(rec->addr);
+ if (len > sizeof(data))
+ return -EINVAL;
+
if (ziirave_firm_addr_readonly(addr))
continue;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 3c9da446b85d..528682bf0c7f 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -718,26 +718,6 @@ int xs_watch_msg(struct xs_watch_event *event)
return 0;
}
-/*
- * Certain older XenBus toolstack cannot handle reading values that are
- * not populated. Some Xen 3.4 installation are incapable of doing this
- * so if we are running on anything older than 4 do not attempt to read
- * control/platform-feature-xs_reset_watches.
- */
-static bool xen_strict_xenbus_quirk(void)
-{
-#ifdef CONFIG_X86
- uint32_t eax, ebx, ecx, edx, base;
-
- base = xen_cpuid_base();
- cpuid(base + 1, &eax, &ebx, &ecx, &edx);
-
- if ((eax >> 16) < 4)
- return true;
-#endif
- return false;
-
-}
static void xs_reset_watches(void)
{
int err;
@@ -745,9 +725,6 @@ static void xs_reset_watches(void)
if (!xen_hvm_domain() || xen_initial_domain())
return;
- if (xen_strict_xenbus_quirk())
- return;
-
if (!xenbus_read_unsigned("control",
"platform-feature-xs_reset_watches", 0))
return;