Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/qaic/qaic_data.c | 9
-rw-r--r-- drivers/acpi/Makefile | 4
-rw-r--r-- drivers/acpi/numa/srat.c | 1
-rw-r--r-- drivers/acpi/processor_idle.c | 2
-rw-r--r-- drivers/ata/libata-core.c | 14
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 5
-rw-r--r-- drivers/base/arch_topology.c | 26
-rw-r--r-- drivers/base/devtmpfs.c | 153
-rw-r--r-- drivers/base/power/runtime.c | 4
-rw-r--r-- drivers/block/null_blk/main.c | 11
-rw-r--r-- drivers/block/virtio_blk.c | 5
-rw-r--r-- drivers/bluetooth/Kconfig | 12
-rw-r--r-- drivers/bluetooth/btusb.c | 41
-rw-r--r-- drivers/char/hw_random/timeriomem-rng.c | 3
-rw-r--r-- drivers/char/random.c | 6
-rw-r--r-- drivers/clk/qcom/dispcc-sm8750.c | 2
-rw-r--r-- drivers/clk/samsung/clk-gs101.c | 8
-rw-r--r-- drivers/clk/samsung/clk-pll.c | 7
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 2
-rw-r--r-- drivers/clocksource/hyperv_timer.c | 4
-rw-r--r-- drivers/cpufreq/Kconfig.x86 | 12
-rw-r--r-- drivers/cpufreq/cpufreq.c | 38
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 16
-rw-r--r-- drivers/cpuidle/Makefile | 3
-rw-r--r-- drivers/devfreq/event/rockchip-dfi.c | 3
-rw-r--r-- drivers/dpll/dpll_core.c | 2
-rw-r--r-- drivers/edac/Kconfig | 30
-rw-r--r-- drivers/edac/Makefile | 3
-rw-r--r-- drivers/edac/amd64_edac.c | 52
-rw-r--r-- drivers/edac/debugfs.c | 5
-rwxr-xr-x drivers/edac/ecs.c | 205
-rw-r--r-- drivers/edac/edac_device.c | 185
-rw-r--r-- drivers/edac/i10nm_base.c | 2
-rw-r--r-- drivers/edac/i5400_edac.c | 3
-rw-r--r-- drivers/edac/i7300_edac.c | 7
-rw-r--r-- drivers/edac/ie31200_edac.c | 641
-rw-r--r-- drivers/edac/igen6_edac.c | 41
-rwxr-xr-x drivers/edac/mem_repair.c | 359
-rw-r--r-- drivers/edac/pnd2_edac.c | 4
-rwxr-xr-x drivers/edac/scrub.c | 209
-rw-r--r-- drivers/edac/skx_common.c | 33
-rw-r--r-- drivers/edac/skx_common.h | 11
-rw-r--r-- drivers/edac/xgene_edac.c | 17
-rw-r--r-- drivers/firmware/efi/libstub/randomalloc.c | 4
-rw-r--r-- drivers/firmware/imx/imx-scu.c | 1
-rw-r--r-- drivers/firmware/qcom/qcom_qseecom_uefisecapp.c | 18
-rw-r--r-- drivers/firmware/qcom/qcom_scm.c | 4
-rw-r--r-- drivers/firmware/smccc/kvm_guest.c | 66
-rw-r--r-- drivers/gpio/gpiolib-cdev.c | 15
-rw-r--r-- drivers/gpio/gpiolib.c | 35
-rw-r--r-- drivers/gpio/gpiolib.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 43
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 677
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm | 82
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 64
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 94
-rw-r--r-- drivers/gpu/drm/display/drm_dp_mst_topology.c | 40
-rw-r--r-- drivers/gpu/drm/drm_atomic_uapi.c | 4
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 4
-rw-r--r-- drivers/gpu/drm/drm_panic_qr.rs | 16
-rw-r--r-- drivers/gpu/drm/gma500/mid_bios.c | 5
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/sched_policy.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_fence.c | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_io_utils.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vce.c | 2
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 11
-rw-r--r-- drivers/gpu/drm/v3d/v3d_sched.c | 23
-rw-r--r-- drivers/gpu/drm/vkms/vkms_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_dma_buf.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_pc.c | 53
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_submit.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_hmm.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_oa.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_pm.c | 13
-rw-r--r-- drivers/gpu/drm/xe/xe_vm.c | 3
-rw-r--r-- drivers/gpu/host1x/dev.c | 6
-rw-r--r-- drivers/hv/Kconfig | 17
-rw-r--r-- drivers/hv/Makefile | 4
-rw-r--r-- drivers/hv/hv.c | 94
-rw-r--r-- drivers/hv/hv_common.c | 198
-rw-r--r-- drivers/hv/hv_proc.c | 195
-rw-r--r-- drivers/hv/mshv.h | 30
-rw-r--r-- drivers/hv/mshv_common.c | 161
-rw-r--r-- drivers/hv/mshv_eventfd.c | 833
-rw-r--r-- drivers/hv/mshv_eventfd.h | 71
-rw-r--r-- drivers/hv/mshv_irq.c | 124
-rw-r--r-- drivers/hv/mshv_portid_table.c | 83
-rw-r--r-- drivers/hv/mshv_root.h | 311
-rw-r--r-- drivers/hv/mshv_root_hv_call.c | 849
-rw-r--r-- drivers/hv/mshv_root_main.c | 2307
-rw-r--r-- drivers/hv/mshv_synic.c | 665
-rw-r--r-- drivers/hv/vmbus_drv.c | 67
-rw-r--r-- drivers/hwmon/nct6775-core.c | 4
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm4x-core.c | 2
-rw-r--r-- drivers/hwtracing/stm/heartbeat.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-ali1535.c | 12
-rw-r--r-- drivers/i2c/busses/i2c-ali15x3.c | 12
-rw-r--r-- drivers/i2c/busses/i2c-amd-mp2-pci.c | 5
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-omap.c | 26
-rw-r--r-- drivers/i2c/busses/i2c-sis630.c | 12
-rw-r--r-- drivers/idle/Makefile | 5
-rw-r--r-- drivers/idle/intel_idle.c | 14
-rw-r--r-- drivers/iio/adc/ti-tsc2046.c | 4
-rw-r--r-- drivers/iio/trigger/iio-trig-hrtimer.c | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 6
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 3
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 2
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 10
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 6
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.c | 9
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.h | 12
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 3
-rw-r--r-- drivers/infiniband/hw/bnxt_re/roce_hsi.h | 3
-rw-r--r-- drivers/infiniband/hw/hfi1/init.c | 5
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_alloc.c | 4
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_cq.c | 1
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hem.c | 16
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_main.c | 2
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_qp.c | 20
-rw-r--r-- drivers/infiniband/hw/mlx5/ah.c | 14
-rw-r--r-- drivers/infiniband/sw/rdmavt/qp.c | 5
-rw-r--r-- drivers/infiniband/sw/rxe/rxe.c | 25
-rw-r--r-- drivers/input/joystick/xpad.c | 39
-rw-r--r-- drivers/input/misc/iqs7222.c | 50
-rw-r--r-- drivers/input/serio/i8042-acpipnpio.h | 111
-rw-r--r-- drivers/input/touchscreen/ads7846.c | 2
-rw-r--r-- drivers/input/touchscreen/goodix_berlin_core.c | 26
-rw-r--r-- drivers/input/touchscreen/imagis.c | 9
-rw-r--r-- drivers/input/touchscreen/wdt87xx_i2c.c | 2
-rw-r--r-- drivers/iommu/hyperv-iommu.c | 8
-rw-r--r-- drivers/irqchip/Kconfig | 20
-rw-r--r-- drivers/irqchip/Makefile | 1
-rw-r--r-- drivers/irqchip/irq-apple-aic.c | 8
-rw-r--r-- drivers/irqchip/irq-davinci-cp-intc.c | 57
-rw-r--r-- drivers/irqchip/irq-gic-v2m.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3-its-msi-parent.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 23
-rw-r--r-- drivers/irqchip/irq-gic-v3-mbi.c | 1
-rw-r--r-- drivers/irqchip/irq-imx-irqsteer.c | 14
-rw-r--r-- drivers/irqchip/irq-imx-mu-msi.c | 1
-rw-r--r-- drivers/irqchip/irq-loongson-pch-msi.c | 1
-rw-r--r-- drivers/irqchip/irq-msi-lib.c | 11
-rw-r--r-- drivers/irqchip/irq-mvebu-gicp.c | 1
-rw-r--r-- drivers/irqchip/irq-mvebu-odmi.c | 1
-rw-r--r-- drivers/irqchip/irq-mvebu-sei.c | 1
-rw-r--r-- drivers/irqchip/irq-renesas-rzg2l.c | 53
-rw-r--r-- drivers/irqchip/irq-renesas-rzv2h.c | 198
-rw-r--r-- drivers/irqchip/irq-riscv-aplic-direct.c | 24
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-early.c | 14
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-platform.c | 215
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-state.c | 151
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-state.h | 12
-rw-r--r-- drivers/irqchip/irq-sg2042-msi.c | 249
-rw-r--r-- drivers/irqchip/irq-sunxi-nmi.c | 85
-rw-r--r-- drivers/leds/leds-st1202.c | 21
-rw-r--r-- drivers/leds/trigger/ledtrig-pattern.c | 4
-rw-r--r-- drivers/mailbox/mailbox.c | 4
-rw-r--r-- drivers/md/dm-flakey.c | 2
-rw-r--r-- drivers/media/cec/core/cec-pin.c | 3
-rw-r--r-- drivers/media/dvb-frontends/rtl2832_sdr.c | 2
-rw-r--r-- drivers/media/pci/cx88/cx88-input.c | 3
-rw-r--r-- drivers/media/platform/chips-media/wave5/wave5-vpu.c | 4
-rw-r--r-- drivers/media/rc/pwm-ir-tx.c | 3
-rw-r--r-- drivers/memory/omap-gpmc.c | 20
-rw-r--r-- drivers/message/fusion/mptsas.c | 8
-rw-r--r-- drivers/misc/mei/Kconfig | 2
-rw-r--r-- drivers/misc/vcpu_stall_detector.c | 3
-rw-r--r-- drivers/mmc/host/atmel-mci.c | 4
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 3
-rw-r--r-- drivers/mmc/host/sdhci-brcmstb.c | 10
-rw-r--r-- drivers/net/bonding/bond_options.c | 55
-rw-r--r-- drivers/net/can/flexcan/flexcan-core.c | 18
-rw-r--r-- drivers/net/can/m_can/m_can.c | 9
-rw-r--r-- drivers/net/can/rcar/rcar_canfd.c | 28
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c | 8
-rw-r--r-- drivers/net/can/usb/ucan.c | 43
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 59
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 13
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 3
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 5
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 18
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_arfs.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_eswitch.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lag.c | 27
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 18
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c | 52
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c | 42
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_packet.c | 4
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c | 25
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_en.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 8
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/rtase/rtase_main.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 41
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_common.c | 5
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 6
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 2
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_stats.c | 4
-rw-r--r-- drivers/net/ieee802154/at86rf230.c | 4
-rw-r--r-- drivers/net/mctp/mctp-i2c.c | 5
-rw-r--r-- drivers/net/mctp/mctp-i3c.c | 5
-rw-r--r-- drivers/net/phy/nxp-c45-tja11xx.c | 68
-rw-r--r-- drivers/net/phy/phy_link_topology.c | 2
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 3
-rw-r--r-- drivers/net/usb/lan78xx.c | 4
-rw-r--r-- drivers/net/virtio_net.c | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 11
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | 4
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800mmio.c | 2
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 2
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 4
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.c | 6
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_imem.c | 24
-rw-r--r-- drivers/ntb/msi.c | 22
-rw-r--r-- drivers/ntb/test/ntb_pingpong.c | 3
-rw-r--r-- drivers/nvme/host/apple.c | 3
-rw-r--r-- drivers/nvme/host/core.c | 12
-rw-r--r-- drivers/nvme/host/pci.c | 18
-rw-r--r-- drivers/nvme/target/pci-epf.c | 28
-rw-r--r-- drivers/pci/Kconfig | 6
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 17
-rw-r--r-- drivers/pci/controller/vmd.c | 20
-rw-r--r-- drivers/pci/msi/api.c | 6
-rw-r--r-- drivers/pci/msi/msi.c | 204
-rw-r--r-- drivers/pci/pci.h | 9
-rw-r--r-- drivers/pci/tph.c | 44
-rw-r--r-- drivers/perf/apple_m1_cpu_pmu.c | 105
-rw-r--r-- drivers/perf/arm-ccn.c | 5
-rw-r--r-- drivers/perf/arm-cmn.c | 5
-rw-r--r-- drivers/perf/arm_cspmu/ampere_cspmu.c | 32
-rw-r--r-- drivers/perf/arm_cspmu/arm_cspmu.c | 81
-rw-r--r-- drivers/perf/arm_cspmu/arm_cspmu.h | 57
-rw-r--r-- drivers/perf/arm_cspmu/nvidia_cspmu.c | 22
-rw-r--r-- drivers/perf/arm_pmu.c | 8
-rw-r--r-- drivers/perf/arm_pmuv3.c | 11
-rw-r--r-- drivers/perf/arm_v7_pmu.c | 50
-rw-r--r-- drivers/perf/dwc_pcie_pmu.c | 51
-rw-r--r-- drivers/perf/marvell_cn10k_ddr_pmu.c | 4
-rw-r--r-- drivers/perf/thunderx2_pmu.c | 5
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm281xx.c | 2
-rw-r--r-- drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c | 3
-rw-r--r-- drivers/pinctrl/spacemit/Kconfig | 3
-rw-r--r-- drivers/pinctrl/spacemit/pinctrl-k1.c | 2
-rw-r--r-- drivers/platform/surface/surface_aggregator_registry.c | 5
-rw-r--r-- drivers/platform/x86/amd/hsmp/Kconfig | 2
-rw-r--r-- drivers/platform/x86/amd/hsmp/acpi.c | 7
-rw-r--r-- drivers/platform/x86/amd/hsmp/hsmp.c | 1
-rw-r--r-- drivers/platform/x86/amd/hsmp/hsmp.h | 3
-rw-r--r-- drivers/platform/x86/amd/hsmp/plat.c | 36
-rw-r--r-- drivers/platform/x86/amd/pmf/spc.c | 2
-rw-r--r-- drivers/platform/x86/amd/pmf/tee-if.c | 36
-rw-r--r-- drivers/pmdomain/amlogic/meson-secure-pwrc.c | 2
-rw-r--r-- drivers/power/reset/ltc2952-poweroff.c | 8
-rw-r--r-- drivers/power/supply/ab8500_chargalg.c | 9
-rw-r--r-- drivers/powercap/idle_inject.c | 3
-rw-r--r-- drivers/powercap/intel_rapl_common.c | 5
-rw-r--r-- drivers/pps/generators/pps_gen_parport.c | 3
-rw-r--r-- drivers/regulator/core.c | 88
-rw-r--r-- drivers/regulator/dummy.c | 2
-rw-r--r-- drivers/regulator/rtq2208-regulator.c | 216
-rw-r--r-- drivers/reset/reset-microchip-sparx5.c | 19
-rw-r--r-- drivers/rtc/class.c | 3
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 3
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 23
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 9
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_transport.c | 8
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_transport.c | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_mr.h | 4
-rw-r--r-- drivers/scsi/scsi_debug.c | 4
-rw-r--r-- drivers/soc/hisilicon/kunpeng_hccs.c | 4
-rw-r--r-- drivers/soc/imx/soc-imx8m.c | 26
-rw-r--r-- drivers/soc/qcom/pdr_interface.c | 8
-rw-r--r-- drivers/soc/qcom/pmic_glink.c | 2
-rw-r--r-- drivers/soc/ti/ti_sci_inta_msi.c | 10
-rw-r--r-- drivers/spi/atmel-quadspi.c | 5
-rw-r--r-- drivers/spi/spi-microchip-core.c | 41
-rw-r--r-- drivers/spi/spi.c | 6
-rw-r--r-- drivers/staging/media/atomisp/include/linux/atomisp_platform.h | 4
-rw-r--r-- drivers/staging/media/ipu3/include/uapi/intel-ipu3.h | 3
-rw-r--r-- drivers/thermal/intel/intel_tcc.c | 2
-rw-r--r-- drivers/thunderbolt/tunnel.c | 11
-rw-r--r-- drivers/thunderbolt/tunnel.h | 2
-rw-r--r-- drivers/tty/serial/8250/8250_bcm7271.c | 3
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 10
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 9
-rw-r--r-- drivers/tty/serial/imx.c | 8
-rw-r--r-- drivers/tty/serial/sh-sci.c | 3
-rw-r--r-- drivers/tty/serial/xilinx_uartps.c | 8
-rw-r--r-- drivers/ufs/host/ufs-qcom.c | 75
-rw-r--r-- drivers/usb/chipidea/otg_fsm.c | 3
-rw-r--r-- drivers/usb/dwc2/hcd_queue.c | 3
-rw-r--r-- drivers/usb/fotg210/fotg210-hcd.c | 3
-rw-r--r-- drivers/usb/gadget/function/f_ncm.c | 3
-rw-r--r-- drivers/usb/gadget/udc/dummy_hcd.c | 6
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 3
-rw-r--r-- drivers/usb/musb/musb_cppi41.c | 4
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 14
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 13
-rw-r--r-- drivers/usb/serial/option.c | 48
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 24
-rw-r--r-- drivers/vfio/group.c | 16
-rw-r--r-- drivers/video/fbdev/hyperv_fb.c | 52
-rw-r--r-- drivers/virt/coco/sev-guest/sev-guest.c | 1
-rw-r--r-- drivers/virtio/virtio_mem.c | 4
-rw-r--r-- drivers/watchdog/softdog.c | 8
-rw-r--r-- drivers/watchdog/watchdog_dev.c | 4
-rw-r--r-- drivers/watchdog/watchdog_hrtimer_pretimeout.c | 4
-rw-r--r-- drivers/xen/pci.c | 32
-rw-r--r-- drivers/xen/platform-pci.c | 4
-rw-r--r-- drivers/xen/xen-pciback/pci_stub.c | 20
-rw-r--r-- drivers/xen/xen-pciback/pciback.h | 2
-rw-r--r-- drivers/xen/xenfs/xensyms.c | 4
374 files changed, 11297 insertions(+), 2863 deletions(-)
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index c20eb63750f5..43aba57b48f0 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -172,9 +172,10 @@ static void free_slice(struct kref *kref)
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
- int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
+ unsigned int len, nents, offf, offl;
struct sg_table *sgt;
+ size_t total_len;
int ret, j;
/* find out number of relevant nents needed for this mem */
@@ -182,6 +183,8 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
sgf = NULL;
sgl = NULL;
nents = 0;
+ offf = 0;
+ offl = 0;
size = size ? size : PAGE_SIZE;
for_each_sgtable_dma_sg(sgt_in, sg, j) {
@@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
+ u64 total;
int i;
for (i = 0; i < count; i++) {
@@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;
- if (slice_ent[i].offset + slice_ent[i].size > total_size)
+ if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
+ total > total_size)
return -EINVAL;
}
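
Note: the new bounds check above is the standard overflow-safe idiom; on u64, "offset + size" can wrap around and falsely pass a "> total_size" comparison, so the sum is validated first. A minimal standalone sketch of the pattern (the helper name and its use are illustrative, not part of this patch):

	#include <linux/overflow.h>
	#include <linux/types.h>

	/* Hypothetical helper: true only if [offset, offset + size) fits
	 * inside total without the addition wrapping around. */
	static bool range_fits(u64 offset, u64 size, u64 total)
	{
		u64 end;

		/* check_add_overflow() stores the sum and returns true on wrap */
		if (check_add_overflow(offset, size, &end))
			return false;

		return end <= total;
	}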
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 40208a0f5dfb..797070fc9a3f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -5,6 +5,10 @@
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+ifdef CONFIG_TRACE_BRANCH_PROFILING
+CFLAGS_processor_idle.o += -DDISABLE_BRANCH_PROFILING
+endif
+
#
# ACPI Boot-Time Table Parsing
#
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 00ac0d7bb8c9..ce815d7cb8f6 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -51,6 +51,7 @@ int node_to_pxm(int node)
return PXM_INVAL;
return node_to_pxm_map[node];
}
+EXPORT_SYMBOL_GPL(node_to_pxm);
static void __acpi_map_pxm_to_node(int pxm, int node)
{
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 698897b29de2..586cc7d1d8aa 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -590,6 +590,8 @@ static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
raw_safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
+ } else if (cx->entry_method == ACPI_CSTATE_FFH) {
+ acpi_processor_ffh_play_dead(cx);
} else
return;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c085dd81ebe7..d956735e2a76 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2845,6 +2845,10 @@ int ata_dev_configure(struct ata_device *dev)
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->quirks |= ATA_QUIRK_NOLPM;
+ if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI &&
+ ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
+ dev->quirks |= ATA_QUIRK_NOLPM;
+
if (ap->flags & ATA_FLAG_NO_LPM)
dev->quirks |= ATA_QUIRK_NOLPM;
@@ -3897,6 +3901,7 @@ static const char * const ata_quirk_names[] = {
[__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
[__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
[__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
+ [__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
[__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog",
[__ATA_QUIRK_NO_LOG_DIR] = "nologdir",
[__ATA_QUIRK_NO_FUA] = "nofua",
@@ -4142,13 +4147,16 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
ATA_QUIRK_ZERO_AFTER_TRIM },
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI, },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM },
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index dce24806a052..2d32125c16fd 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -935,9 +935,8 @@ static int octeon_cf_probe(struct platform_device *pdev)
ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
/* True IDE mode needs a timer to poll for not-busy. */
- hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cf_port->delayed_finish.function = octeon_cf_delayed_finish;
+ hrtimer_setup(&cf_port->delayed_finish, octeon_cf_delayed_finish, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
} else {
/* 16 bit but not True IDE */
base = cs0 + 0x800;
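
Note: this is the first of several hunks in this diff (base/power/runtime.c, null_blk, timeriomem-rng, rockchip-dfi, and others) converting the two-step hrtimer initialization to hrtimer_setup(), which binds the callback at init time. A minimal sketch of the before/after pattern, with a hypothetical device and callback:

	#include <linux/hrtimer.h>

	struct my_dev {
		struct hrtimer timer;
	};

	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;
	}

	static void my_dev_timer_init(struct my_dev *d)
	{
		/* Old two-step pattern, removed throughout this diff:
		 *   hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		 *   d->timer.function = my_timer_fn;
		 * New pattern: clock, mode and callback are set in one call.
		 */
		hrtimer_setup(&d->timer, my_timer_fn, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
	}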
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 3ebe77566788..af0029d30dbe 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_smt.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
@@ -28,7 +29,7 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
-DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
@@ -293,13 +294,15 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1;
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
@@ -506,6 +509,10 @@ core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/* Used to enable the SMT control */
+static unsigned int max_smt_thread_num = 1;
+
/*
* This function returns the logic cpu number of the node.
* There are basically three kinds of return values:
@@ -565,6 +572,8 @@ static int __init parse_core(struct device_node *core, int package_id,
i++;
} while (1);
+ max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i);
+
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
if (!leaf) {
@@ -677,6 +686,17 @@ static int __init parse_socket(struct device_node *socket)
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
+ /*
+ * Reset max_smt_thread_num to 1 on failure: the framework must be
+ * told that SMT is not supported, but max_smt_thread_num may already
+ * hold the SMT thread count of the cores that were successfully
+ * parsed.
+ */
+ if (ret)
+ max_smt_thread_num = 1;
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+
return ret;
}
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b848764ef018..6dd1a8860f1c 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -63,22 +63,6 @@ __setup("devtmpfs.mount=", mount_param);
static struct vfsmount *mnt;
-static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct super_block *s = mnt->mnt_sb;
- int err;
-
- atomic_inc(&s->s_active);
- down_write(&s->s_umount);
- err = reconfigure_single(s, flags, data);
- if (err < 0) {
- deactivate_locked_super(s);
- return ERR_PTR(err);
- }
- return dget(s->s_root);
-}
-
static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
@@ -89,9 +73,40 @@ static struct file_system_type internal_fs_type = {
.kill_sb = kill_litter_super,
};
+/* Simply take a ref on the existing mount */
+static int devtmpfs_get_tree(struct fs_context *fc)
+{
+ struct super_block *sb = mnt->mnt_sb;
+
+ atomic_inc(&sb->s_active);
+ down_write(&sb->s_umount);
+ fc->root = dget(sb->s_root);
+ return 0;
+}
+
+/* Ops are filled in during init depending on underlying shmem or ramfs type */
+struct fs_context_operations devtmpfs_context_ops = {};
+
+/* Call the underlying initialization and set to our ops */
+static int devtmpfs_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+#ifdef CONFIG_TMPFS
+ ret = shmem_init_fs_context(fc);
+#else
+ ret = ramfs_init_fs_context(fc);
+#endif
+ if (ret < 0)
+ return ret;
+
+ fc->ops = &devtmpfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
- .mount = public_dev_mount,
+ .init_fs_context = devtmpfs_init_fs_context,
};
static int devtmpfs_submit_req(struct req *req, const char *tmp)
@@ -160,18 +175,17 @@ static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
- int err;
dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
- if (!err)
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
+ if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
done_path_create(&path, dentry);
- return err;
+ return PTR_ERR_OR_ZERO(dentry);
}
static int create_path(const char *nodepath)
@@ -245,15 +259,12 @@ static int dev_rmdir(const char *name)
dentry = kern_path_locked(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry);
- else
- err = -EPERM;
- } else {
- err = -ENOENT;
- }
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry);
+ else
+ err = -EPERM;
+
dput(dentry);
inode_unlock(d_inode(parent.dentry));
path_put(&parent);
@@ -310,6 +321,8 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
+ struct kstat stat;
+ struct path p;
int deleted = 0;
int err;
@@ -317,32 +330,28 @@ static int handle_remove(const char *nodename, struct device *dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- struct kstat stat;
- struct path p = {.mnt = parent.mnt, .dentry = dentry};
- err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
- AT_STATX_SYNC_AS_STAT);
- if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
- struct iattr newattrs;
- /*
- * before unlinking this node, reset permissions
- * of possible references like hardlinks
- */
- newattrs.ia_uid = GLOBAL_ROOT_UID;
- newattrs.ia_gid = GLOBAL_ROOT_GID;
- newattrs.ia_mode = stat.mode & ~0777;
- newattrs.ia_valid =
- ATTR_UID|ATTR_GID|ATTR_MODE;
- inode_lock(d_inode(dentry));
- notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
- inode_unlock(d_inode(dentry));
- err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry, NULL);
- if (!err || err == -ENOENT)
- deleted = 1;
- }
- } else {
- err = -ENOENT;
+ p.mnt = parent.mnt;
+ p.dentry = dentry;
+ err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
+ AT_STATX_SYNC_AS_STAT);
+ if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ inode_lock(d_inode(dentry));
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
+ inode_unlock(d_inode(dentry));
+ err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry, NULL);
+ if (!err || err == -ENOENT)
+ deleted = 1;
}
dput(dentry);
inode_unlock(d_inode(parent.dentry));
@@ -443,6 +452,31 @@ static int __ref devtmpfsd(void *p)
}
/*
+ * Get the underlying (shmem/ramfs) context ops to build ours
+ */
+static int devtmpfs_configure_context(void)
+{
+ struct fs_context *fc;
+
+ fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
+ MS_RMT_MASK);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ /* Set up devtmpfs_context_ops based on underlying type */
+ devtmpfs_context_ops.free = fc->ops->free;
+ devtmpfs_context_ops.dup = fc->ops->dup;
+ devtmpfs_context_ops.parse_param = fc->ops->parse_param;
+ devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
+ devtmpfs_context_ops.get_tree = &devtmpfs_get_tree;
+ devtmpfs_context_ops.reconfigure = fc->ops->reconfigure;
+
+ put_fs_context(fc);
+
+ return 0;
+}
+
+/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
@@ -456,6 +490,13 @@ int __init devtmpfs_init(void)
pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
+
+ err = devtmpfs_configure_context();
+ if (err) {
+ pr_err("unable to configure devtmpfs type %d\n", err);
+ return err;
+ }
+
err = register_filesystem(&dev_fs_type);
if (err) {
pr_err("unable to register devtmpfs type %d\n", err);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2ee45841486b..425c43b2d478 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1764,8 +1764,8 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- dev->power.suspend_timer.function = pm_suspend_timer_fn;
+ hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index d94ef37480bd..da1ecbf988b8 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1426,8 +1426,7 @@ static void nullb_setup_bwtimer(struct nullb *nullb)
{
ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
- hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- nullb->bw_timer.function = nullb_bwtimer_fn;
+ hrtimer_setup(&nullb->bw_timer, nullb_bwtimer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
@@ -1549,8 +1548,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
- blk_mq_end_request_batch))
+ if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
+ blk_mq_end_request_batch))
blk_mq_end_request(req, cmd->error);
nr++;
}
@@ -1604,8 +1603,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
- hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cmd->timer.function = null_cmd_timer_expired;
+ hrtimer_setup(&cmd->timer, null_cmd_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
cmd->error = BLK_STS_OK;
cmd->nq = nq;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a61ec35f426..91cde76a4b3e 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1207,11 +1207,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
+ u8 status = virtblk_vbr_status(vbr);
found++;
if (!blk_mq_complete_request_remote(req) &&
- !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
- virtblk_complete_batch))
+ !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
+ virtblk_complete_batch))
virtblk_request_done(req);
}
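
Note: this hunk and the null_blk one above make the same correction: the third argument of blk_mq_add_to_batch() is a boolean "request failed" flag, so each driver compares its own status word against its own OK value instead of passing the raw status through. A hedged sketch of the calling convention, with hypothetical driver helpers and status constant:

	/* Inside a hypothetical driver's completion poll loop; MY_STATUS_OK,
	 * my_status() and the completion helpers are illustrative. */
	u8 status = my_status(vbr);

	if (!blk_mq_complete_request_remote(req) &&
	    !blk_mq_add_to_batch(req, iob, status != MY_STATUS_OK,
				 my_complete_batch))
		my_request_done(req);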
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4ab32abf0f48..7771edf54fb3 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -56,6 +56,18 @@ config BT_HCIBTUSB_POLL_SYNC
Say Y here to enable USB poll_sync for Bluetooth USB devices by
default.
+config BT_HCIBTUSB_AUTO_ISOC_ALT
+ bool "Automatically adjust alternate setting for Isoc endpoints"
+ depends on BT_HCIBTUSB
+ default y if CHROME_PLATFORMS
+ help
+ Say Y here to automatically adjust the alternate setting for
+ HCI_USER_CHANNEL whenever a SCO link is established.
+
+ When enabled, btusb intercepts HCI_EV_SYNC_CONN_COMPLETE packets
+ and configures the isoc endpoint alternate setting automatically
+ when HCI_USER_CHANNEL is in use.
+
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2a8d91963c63..a0fc465458b2 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -34,6 +34,7 @@ static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC);
static bool reset = true;
+static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT);
static struct usb_driver btusb_driver;
@@ -1085,6 +1086,42 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}
+static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *) skb->data;
+ struct hci_ev_sync_conn_complete *ev =
+ (void *) skb->data + sizeof(*hdr);
+ struct hci_dev *hdev = data->hdev;
+ unsigned int notify_air_mode;
+
+ if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
+ return;
+
+ if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
+ return;
+
+ if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status)
+ return;
+
+ switch (ev->air_mode) {
+ case BT_CODEC_CVSD:
+ notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD;
+ break;
+
+ case BT_CODEC_TRANSPARENT:
+ notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP;
+ break;
+
+ default:
+ return;
+ }
+
+ bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode);
+ data->sco_num = 1;
+ data->air_mode = notify_air_mode;
+ schedule_work(&data->work);
+}
+
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
@@ -1092,6 +1129,10 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
schedule_delayed_work(&data->rx_work, 0);
}
+ /* Configure altsetting for HCI_USER_CHANNEL on SCO connected */
+ if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL))
+ btusb_sco_connected(data, skb);
+
return data->recv_event(data->hdev, skb);
}
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 7174bfccc7b3..b95f6d0f17ed 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -152,8 +152,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->period = ns_to_ktime(period * NSEC_PER_USEC);
init_completion(&priv->completion);
- hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->timer.function = timeriomem_rng_trigger;
+ hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
priv->rng_ops.name = dev_name(&pdev->dev);
priv->rng_ops.read = timeriomem_rng_read;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2581186fa61b..92cbd24a36d8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -278,7 +278,7 @@ static void crng_reseed(struct work_struct *work)
WRITE_ONCE(base_crng.generation, next_gen);
#ifdef CONFIG_VDSO_GETRANDOM
/* base_crng.generation's invalid value is ULONG_MAX, while
- * _vdso_rng_data.generation's invalid value is 0, so add one to the
+ * vdso_k_rng_data->generation's invalid value is 0, so add one to the
* former to arrive at the latter. Use smp_store_release so that this
* is ordered with the write above to base_crng.generation. Pairs with
* the smp_rmb() before the syscall in the vDSO code.
@@ -290,7 +290,7 @@ static void crng_reseed(struct work_struct *work)
* because the vDSO side only checks whether the value changed, without
* actually using or interpreting the value.
*/
- smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1);
+ smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1);
#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
@@ -743,7 +743,7 @@ static void __cold _credit_init_bits(size_t bits)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
- WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true);
+ WRITE_ONCE(vdso_k_rng_data->is_ready, true);
#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c
index 0358dff91da5..e9bca179998b 100644
--- a/drivers/clk/qcom/dispcc-sm8750.c
+++ b/drivers/clk/qcom/dispcc-sm8750.c
@@ -827,7 +827,6 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
&disp_cc_mdss_byte0_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ops,
},
};
@@ -842,7 +841,6 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
&disp_cc_mdss_byte1_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ops,
},
};
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 86b39edba122..08b867ae3ed9 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -382,17 +382,9 @@ static const unsigned long cmu_top_clk_regs[] __initconst = {
EARLY_WAKEUP_DPU_DEST,
EARLY_WAKEUP_CSIS_DEST,
EARLY_WAKEUP_SW_TRIG_APM,
- EARLY_WAKEUP_SW_TRIG_APM_SET,
- EARLY_WAKEUP_SW_TRIG_APM_CLEAR,
EARLY_WAKEUP_SW_TRIG_CLUSTER0,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR,
EARLY_WAKEUP_SW_TRIG_DPU,
- EARLY_WAKEUP_SW_TRIG_DPU_SET,
- EARLY_WAKEUP_SW_TRIG_DPU_CLEAR,
EARLY_WAKEUP_SW_TRIG_CSIS,
- EARLY_WAKEUP_SW_TRIG_CSIS_SET,
- EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR,
CLK_CON_MUX_MUX_CLKCMU_BO_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 2e94bba6c396..023a25af73c4 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -206,6 +206,7 @@ static const struct clk_ops samsung_pll3000_clk_ops = {
*/
/* Maximum lock time can be 270 * PDIV cycles */
#define PLL35XX_LOCK_FACTOR (270)
+#define PLL142XX_LOCK_FACTOR (150)
#define PLL35XX_MDIV_MASK (0x3FF)
#define PLL35XX_PDIV_MASK (0x3F)
@@ -272,7 +273,11 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
}
/* Set PLL lock time. */
- writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
+ if (pll->type == pll_142xx)
+ writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR,
+ pll->lock_reg);
+ else
+ writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
pll->lock_reg);
/* Change PLL PMS values */
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 808f259781fd..981a578043a5 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -842,7 +842,7 @@ static u64 __arch_timer_check_delta(void)
{},
};
- if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+ if (is_midr_in_range_list(broken_cval_midrs)) {
pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
return CLOCKSOURCE_MASK(31);
}
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index f00019b078a7..09549451dd51 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -582,7 +582,7 @@ static void __init hv_init_tsc_clocksource(void)
* mapped.
*/
tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
- if (hv_root_partition)
+ if (hv_root_partition())
tsc_pfn = tsc_msr.pfn;
else
tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
@@ -627,7 +627,7 @@ void __init hv_remap_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return;
- if (!hv_root_partition) {
+ if (!hv_root_partition()) {
WARN(1, "%s: attempt to remap TSC page in guest partition\n",
__func__);
return;
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 97c2d4f15d76..2c5c228408bf 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -340,3 +340,15 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK
option lets the probing code bypass some of those checks if the
parameter "relaxed_check=1" is passed to the module.
+config CPUFREQ_ARCH_CUR_FREQ
+ default y
+ bool "Current frequency derived from HW provided feedback"
+ help
+ This determines whether the scaling_cur_freq sysfs attribute returns
+ the last requested frequency or a more precise value based on
+ hardware-provided feedback (such as architected counters).
+ Given that a more precise frequency can now be provided via the
+ cpuinfo_avg_freq attribute, enabling this option keeps
+ scaling_cur_freq reporting a counter-based frequency, for
+ compatibility reasons.
+
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 30ffbddc7ece..0ce79fed8e55 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -729,18 +729,26 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
-__weak unsigned int arch_freq_get_on_cpu(int cpu)
+__weak int arch_freq_get_on_cpu(int cpu)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
+{
+ return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
}
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
- unsigned int freq;
+ int freq;
+
+ freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
+ ? arch_freq_get_on_cpu(policy->cpu)
+ : 0;
- freq = arch_freq_get_on_cpu(policy->cpu);
- if (freq)
+ if (freq > 0)
ret = sysfs_emit(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
@@ -785,6 +793,19 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
}
/*
+ * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
+ */
+static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int avg_freq = arch_freq_get_on_cpu(policy->cpu);
+
+ if (avg_freq > 0)
+ return sysfs_emit(buf, "%u\n", avg_freq);
+ return avg_freq != 0 ? avg_freq : -EINVAL;
+}
+
+/*
* show_scaling_governor - show the current policy for the specified CPU
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
@@ -946,6 +967,7 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_avg_freq);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
@@ -1073,6 +1095,12 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
+ if (cpufreq_avg_freq_supported(policy)) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
+ if (ret)
+ return ret;
+ }
+
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
return ret;
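
Note: the contract of the hook changes in this file: arch_freq_get_on_cpu() now returns int, with a positive frequency on success, a negative errno on transient failure, and -EOPNOTSUPP as the sentinel for "no hardware feedback on this CPU", which gates creation of the cpuinfo_avg_freq attribute. A hedged sketch of what an architecture override might look like (both helpers are hypothetical):

	/* Hypothetical architecture implementation overriding the __weak stub */
	int arch_freq_get_on_cpu(int cpu)
	{
		if (!my_cpu_has_freq_counters(cpu))
			return -EOPNOTSUPP;	/* attribute is never created */

		/* counter-based average; may fail transiently */
		return my_compute_avg_freq(cpu);
	}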
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9c4cc01fd51a..f06b9bc99945 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2200,28 +2200,20 @@ static int knl_get_turbo_pstate(int cpu)
return ret;
}
-static void hybrid_get_type(void *data)
-{
- u8 *cpu_type = data;
-
- *cpu_type = get_this_hybrid_cpu_type();
-}
-
static int hwp_get_cpu_scaling(int cpu)
{
if (hybrid_scaling_factor) {
- u8 cpu_type = 0;
-
- smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ u8 cpu_type = c->topo.intel_type;
/*
* Return the hybrid scaling factor for P-cores and use the
* default core scaling for E-cores.
*/
- if (cpu_type == 0x40)
+ if (cpu_type == INTEL_CPU_TYPE_CORE)
return hybrid_scaling_factor;
- if (cpu_type == 0x20)
+ if (cpu_type == INTEL_CPU_TYPE_ATOM)
return core_get_scaling();
}
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index d103342b7cfc..1de9e92c5b0f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,6 +3,9 @@
# Makefile for cpuidle.
#
+# Branch profiling isn't noinstr-safe
+ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index e2a1e4463b6f..0470d7c175f4 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -642,8 +642,7 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
if (ret)
return ret;
- hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dfi->timer.function = rockchip_dfi_timer;
+ hrtimer_setup(&dfi->timer, rockchip_dfi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
switch (dfi->ddr_type) {
case ROCKCHIP_DDRTYPE_LPDDR2:
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 32019dc33cca..1877201d1aa9 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -505,7 +505,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err_xa_alloc;
return pin;
err_xa_alloc:
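
Note: the one-character change matters because xa_alloc_cyclic() has a three-way return: 0 on success, 1 when the allocation succeeded after the index wrapped, and a negative errno on failure; "if (ret)" treated the benign wrap case as an error. A minimal sketch of the corrected pattern, with a hypothetical entry type:

	#include <linux/xarray.h>

	struct my_entry {
		u32 id;
	};

	static DEFINE_XARRAY_ALLOC(ids);
	static u32 next_id;

	static int register_entry(struct my_entry *entry)
	{
		int ret = xa_alloc_cyclic(&ids, &entry->id, entry, xa_limit_32b,
					  &next_id, GFP_KERNEL);

		if (ret < 0)	/* 1 (wrapped) is success, not an error */
			return ret;

		return 0;
	}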
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 2051a7c944a5..19ad3c3b675d 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -75,6 +75,34 @@ config EDAC_GHES
In doubt, say 'Y'.
+config EDAC_SCRUB
+ bool "EDAC scrub feature"
+ help
+ The EDAC scrub feature is optional and is designed to control the
+ memory scrubbers in the system. The common sysfs scrub interface
+ abstracts the control of various arbitrary scrubbing functionalities
+ into a unified set of functions.
+ Say 'y/n' to enable/disable the EDAC scrub feature.
+
+config EDAC_ECS
+ bool "EDAC ECS (Error Check Scrub) feature"
+ help
+ The EDAC ECS feature is optional and is designed to control on-die
+ error check scrub (e.g., DDR5 ECS) in the system. The common sysfs
+ ECS interface abstracts the control of various ECS functionalities
+ into a unified set of functions.
+ Say 'y/n' to enable/disable the EDAC ECS feature.
+
+config EDAC_MEM_REPAIR
+ bool "EDAC memory repair feature"
+ help
+ The EDAC memory repair feature is optional and is designed to control
+ memory devices with repair features, such as Post Package Repair
+ (PPR), memory sparing, etc. The common sysfs memory repair interface
+ abstracts the control of various memory repair functionalities into
+ a unified set of functions.
+ Say 'y/n' to enable/disable the EDAC memory repair feature.
+
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
@@ -168,7 +196,7 @@ config EDAC_I3200
config EDAC_IE31200
tristate "Intel e312xx"
- depends on PCI && X86
+ depends on PCI && X86 && X86_MCE_INTEL
help
Support for error detection and correction on the Intel
E3-1200 based DRAM controllers.
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 89789ba8275f..a8f2d8f6c894 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -12,6 +12,9 @@ edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o
edac_core-y += edac_module.o edac_device_sysfs.o wq.o
edac_core-$(CONFIG_EDAC_DEBUG) += debugfs.o
+edac_core-$(CONFIG_EDAC_SCRUB) += scrub.o
+edac_core-$(CONFIG_EDAC_ECS) += ecs.o
+edac_core-$(CONFIG_EDAC_MEM_REPAIR) += mem_repair.o
ifdef CONFIG_PCI
edac_core-y += edac_pci.o edac_pci_sysfs.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8414ceb43e4a..90f0eb7cc5b9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ras.h>
+#include <linux/string_choices.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>
#include <asm/amd_node.h>
@@ -1171,22 +1172,21 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
}
- edac_dbg(1, "All DIMMs support ECC:%s\n",
- (dclr & BIT(19)) ? "yes" : "no");
+ edac_dbg(1, "All DIMMs support ECC: %s\n", str_yes_no(dclr & BIT(19)));
edac_dbg(1, " PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
+ str_enabled_disabled(dclr & BIT(8)));
if (pvt->fam == 0x10)
edac_dbg(1, " DCT 128bit mode width: %s\n",
(dclr & BIT(11)) ? "128b" : "64b");
edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
+ str_yes_no(dclr & BIT(12)),
+ str_yes_no(dclr & BIT(13)),
+ str_yes_no(dclr & BIT(14)),
+ str_yes_no(dclr & BIT(15)));
}
#define CS_EVEN_PRIMARY BIT(0)
@@ -1353,14 +1353,14 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
- i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
- (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cap_hi & BIT(30)),
+ str_yes_no(umc->umc_cap_hi & BIT(31)));
edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
- i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cfg & BIT(12)));
edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(6)));
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(7)));
umc_debug_display_dimm_sizes(pvt, i);
}
@@ -1371,11 +1371,11 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
edac_dbg(1, " NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_DCT_DUAL));
edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_SECDED),
+ str_yes_no(pvt->nbcap & NBCAP_CHIPKILL));
debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
@@ -1398,7 +1398,7 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
- edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+ edac_dbg(1, " DramHoleValid: %s\n", str_yes_no(dhar_valid(pvt)));
amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
@@ -2027,15 +2027,15 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
edac_dbg(0, " Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
+ str_yes_no(dct_high_range_enabled(pvt)));
edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
+ str_enabled_disabled(dct_data_intlv_enabled(pvt)),
+ str_yes_no(dct_memory_cleared(pvt)));
edac_dbg(0, " channel interleave: %s, "
"interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
+ str_enabled_disabled(dct_interleave_enabled(pvt)),
dct_sel_interleave_addr(pvt));
}
@@ -3208,8 +3208,7 @@ static bool nb_mce_bank_enabled_on_node(u16 nid)
nbe = reg->l & MSR_MCGCTL_NBE;
edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
+ cpu, reg->q, str_enabled_disabled(nbe));
if (!nbe)
goto out;
@@ -3353,12 +3352,9 @@ static bool dct_ecc_enabled(struct amd64_pvt *pvt)
edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, nid);
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, str_enabled_disabled(ecc_en));
- if (!ecc_en || !nb_mce_en)
- return false;
- else
- return true;
+ return ecc_en && nb_mce_en;
}
static bool umc_ecc_enabled(struct amd64_pvt *pvt)
@@ -3378,7 +3374,7 @@ static bool umc_ecc_enabled(struct amd64_pvt *pvt)
}
}
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, str_enabled_disabled(ecc_en));
return ecc_en;
}
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
index 4804332d9946..8195fc9c9354 100644
--- a/drivers/edac/debugfs.c
+++ b/drivers/edac/debugfs.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/string_choices.h>
+
#include "edac_module.h"
static struct dentry *edac_debugfs;
@@ -22,7 +25,7 @@ static ssize_t edac_fake_inject_write(struct file *file,
"Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
errcount,
(type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
- errcount > 1 ? "s" : "",
+ str_plural(errcount),
mci->fake_inject_layer[0],
mci->fake_inject_layer[1],
mci->fake_inject_layer[2]
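
Note: the amd64_edac and debugfs hunks are part of the same cleanup: <linux/string_choices.h> provides helpers that replace open-coded ternaries. A short sketch of the equivalences being relied on (the flag bits are illustrative):

	#include <linux/bits.h>
	#include <linux/printk.h>
	#include <linux/string_choices.h>

	static void demo(unsigned long flags, int errcount)
	{
		/* str_yes_no(v) is v ? "yes" : "no" */
		pr_debug("ECC capable: %s\n", str_yes_no(flags & BIT(0)));

		/* str_enabled_disabled(v) is v ? "enabled" : "disabled" */
		pr_debug("parity: %s\n", str_enabled_disabled(flags & BIT(1)));

		/* str_plural(n) is "" for n == 1, "s" otherwise */
		pr_debug("%d error%s\n", errcount, str_plural(errcount));
	}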
diff --git a/drivers/edac/ecs.c b/drivers/edac/ecs.c
new file mode 100755
index 000000000000..1d51838a60c1
--- /dev/null
+++ b/drivers/edac/ecs.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic ECS driver is designed to support control of on-die error
+ * check scrub (e.g., DDR5 ECS). The common sysfs ECS interface abstracts
+ * the control of various ECS functionalities into a unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+#define EDAC_ECS_FRU_NAME "ecs_fru"
+
+enum edac_ecs_attributes {
+ ECS_LOG_ENTRY_TYPE,
+ ECS_MODE,
+ ECS_RESET,
+ ECS_THRESHOLD,
+ ECS_MAX_ATTRS
+};
+
+struct edac_ecs_dev_attr {
+ struct device_attribute dev_attr;
+ int fru_id;
+};
+
+struct edac_ecs_fru_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_ecs_dev_attr dev_attr[ECS_MAX_ATTRS];
+ struct attribute *ecs_attrs[ECS_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+struct edac_ecs_context {
+ u16 num_media_frus;
+ struct edac_ecs_fru_context *fru_ctxs;
+};
+
+#define TO_ECS_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_ecs_dev_attr, dev_attr)
+
+#define EDAC_ECS_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_ECS_ATTR_SHOW(log_entry_type, get_log_entry_type, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(mode, get_mode, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(threshold, get_threshold, u32, "%u\n")
+
+#define EDAC_ECS_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_ECS_ATTR_STORE(log_entry_type, set_log_entry_type, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(mode, set_mode, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(reset, reset, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(threshold, set_threshold, unsigned long, kstrtoul)
+
+static umode_t ecs_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops;
+
+ switch (attr_id) {
+ case ECS_LOG_ENTRY_TYPE:
+ if (ops->get_log_entry_type) {
+ if (ops->set_log_entry_type)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_MODE:
+ if (ops->get_mode) {
+ if (ops->set_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_RESET:
+ if (ops->reset)
+ return a->mode;
+ break;
+ case ECS_THRESHOLD:
+ if (ops->get_threshold) {
+ if (ops->set_threshold)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_ECS_ATTR_RO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_WO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_RW(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .fru_id = _fru_id })
+
+static int ecs_create_desc(struct device *ecs_dev, const struct attribute_group **attr_groups,
+ u16 num_media_frus)
+{
+ struct edac_ecs_context *ecs_ctx;
+ u32 fru;
+
+ ecs_ctx = devm_kzalloc(ecs_dev, sizeof(*ecs_ctx), GFP_KERNEL);
+ if (!ecs_ctx)
+ return -ENOMEM;
+
+ ecs_ctx->num_media_frus = num_media_frus;
+ ecs_ctx->fru_ctxs = devm_kcalloc(ecs_dev, num_media_frus,
+ sizeof(*ecs_ctx->fru_ctxs),
+ GFP_KERNEL);
+ if (!ecs_ctx->fru_ctxs)
+ return -ENOMEM;
+
+ for (fru = 0; fru < num_media_frus; fru++) {
+ struct edac_ecs_fru_context *fru_ctx = &ecs_ctx->fru_ctxs[fru];
+ struct attribute_group *group = &fru_ctx->group;
+ int i;
+
+ fru_ctx->dev_attr[ECS_LOG_ENTRY_TYPE] = EDAC_ECS_ATTR_RW(log_entry_type, fru);
+ fru_ctx->dev_attr[ECS_MODE] = EDAC_ECS_ATTR_RW(mode, fru);
+ fru_ctx->dev_attr[ECS_RESET] = EDAC_ECS_ATTR_WO(reset, fru);
+ fru_ctx->dev_attr[ECS_THRESHOLD] = EDAC_ECS_ATTR_RW(threshold, fru);
+
+ for (i = 0; i < ECS_MAX_ATTRS; i++)
+ fru_ctx->ecs_attrs[i] = &fru_ctx->dev_attr[i].dev_attr.attr;
+
+ sprintf(fru_ctx->name, "%s%d", EDAC_ECS_FRU_NAME, fru);
+ group->name = fru_ctx->name;
+ group->attrs = fru_ctx->ecs_attrs;
+ group->is_visible = ecs_attr_visible;
+
+ attr_groups[fru] = group;
+ }
+
+ return 0;
+}
+
+/**
+ * edac_ecs_get_desc - get EDAC ECS descriptors
+ * @ecs_dev: client device that supports the ECS feature
+ * @attr_groups: pointer to attribute group container
+ * @num_media_frus: number of media FRUs in the device
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups, u16 num_media_frus)
+{
+ if (!ecs_dev || !attr_groups || !num_media_frus)
+ return -EINVAL;
+
+ return ecs_create_desc(ecs_dev, attr_groups, num_media_frus);
+}
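
The callback prototypes consumed by the show/store macros above follow from their call sites: getters take (struct device *dev, void *drv_data, int fru_id, type *val) and setters take the value directly. A minimal, hypothetical client sketch (the my_*/MY_* names are illustrative, not part of this patch):

    /*
     * Hypothetical client of the ECS interface, assuming the device
     * exposes a per-FRU mode register.
     */
    static int my_get_mode(struct device *dev, void *drv_data,
                           int fru_id, u32 *mode)
    {
            struct my_ecs *ecs = drv_data;          /* assumed driver type */

            *mode = readl(ecs->regs + MY_ECS_MODE(fru_id));
            return 0;
    }

    static const struct edac_ecs_ops my_ecs_ops = {
            .get_mode = my_get_mode,
            /*
             * .set_mode left NULL: ecs_attr_visible() degrades the
             * "mode" attribute to read-only (0444).
             */
    };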
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 621dc2a5d034..0734909b08a4 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -570,3 +570,188 @@ void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
block ? block->name : "N/A", count, msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
+
+static void edac_dev_release(struct device *dev)
+{
+ struct edac_dev_feat_ctx *ctx = container_of(dev, struct edac_dev_feat_ctx, dev);
+
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+ kfree(ctx->dev.groups);
+ kfree(ctx);
+}
+
+static const struct device_type edac_dev_type = {
+ .name = "edac_dev",
+ .release = edac_dev_release,
+};
+
+static void edac_dev_unreg(void *data)
+{
+ device_unregister(data);
+}
+
+/**
+ * edac_dev_register - register device for RAS features with EDAC
+ * @parent: parent device.
+ * @name: name of the device folder under /sys/bus/edac/devices/,
+ *        derived from the parent device,
+ *        e.g. /sys/bus/edac/devices/cxl_mem0/
+ * @private: parent driver's data to store in the context if any.
+ * @num_features: number of RAS features to register.
+ * @ras_features: list of RAS features to register.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_dev_register(struct device *parent, char *name,
+ void *private, int num_features,
+ const struct edac_dev_feature *ras_features)
+{
+ const struct attribute_group **ras_attr_groups;
+ struct edac_dev_data *dev_data;
+ struct edac_dev_feat_ctx *ctx;
+ int mem_repair_cnt = 0;
+ int attr_gcnt = 0;
+ int ret = -ENOMEM;
+ int scrub_cnt = 0;
+ int feat;
+
+ if (!parent || !name || !num_features || !ras_features)
+ return -EINVAL;
+
+ /* Two-pass parse: first count the attribute groups, then populate them. */
+ for (feat = 0; feat < num_features; feat++) {
+ switch (ras_features[feat].ft_type) {
+ case RAS_FEAT_SCRUB:
+ attr_gcnt++;
+ scrub_cnt++;
+ break;
+ case RAS_FEAT_ECS:
+ attr_gcnt += ras_features[feat].ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ attr_gcnt++;
+ mem_repair_cnt++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ras_attr_groups = kcalloc(attr_gcnt + 1, sizeof(*ras_attr_groups), GFP_KERNEL);
+ if (!ras_attr_groups)
+ goto ctx_free;
+
+ if (scrub_cnt) {
+ ctx->scrub = kcalloc(scrub_cnt, sizeof(*ctx->scrub), GFP_KERNEL);
+ if (!ctx->scrub)
+ goto groups_free;
+ }
+
+ if (mem_repair_cnt) {
+ ctx->mem_repair = kcalloc(mem_repair_cnt, sizeof(*ctx->mem_repair), GFP_KERNEL);
+ if (!ctx->mem_repair)
+ goto data_mem_free;
+ }
+
+ attr_gcnt = 0;
+ scrub_cnt = 0;
+ mem_repair_cnt = 0;
+ for (feat = 0; feat < num_features; feat++, ras_features++) {
+ switch (ras_features->ft_type) {
+ case RAS_FEAT_SCRUB:
+ if (!ras_features->scrub_ops || scrub_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->scrub[scrub_cnt];
+ dev_data->instance = scrub_cnt;
+ dev_data->scrub_ops = ras_features->scrub_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_scrub_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ scrub_cnt++;
+ attr_gcnt++;
+ break;
+ case RAS_FEAT_ECS:
+ if (!ras_features->ecs_ops) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->ecs;
+ dev_data->ecs_ops = ras_features->ecs_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_ecs_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->ecs_info.num_media_frus);
+ if (ret)
+ goto data_mem_free;
+
+ attr_gcnt += ras_features->ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ if (!ras_features->mem_repair_ops ||
+ mem_repair_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->mem_repair[mem_repair_cnt];
+ dev_data->instance = mem_repair_cnt;
+ dev_data->mem_repair_ops = ras_features->mem_repair_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_mem_repair_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ mem_repair_cnt++;
+ attr_gcnt++;
+ break;
+ default:
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+ }
+
+ ctx->dev.parent = parent;
+ ctx->dev.bus = edac_get_sysfs_subsys();
+ ctx->dev.type = &edac_dev_type;
+ ctx->dev.groups = ras_attr_groups;
+ ctx->private = private;
+ dev_set_drvdata(&ctx->dev, ctx);
+
+ ret = dev_set_name(&ctx->dev, "%s", name);
+ if (ret)
+ goto data_mem_free;
+
+ ret = device_register(&ctx->dev);
+ if (ret) {
+ put_device(&ctx->dev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, edac_dev_unreg, &ctx->dev);
+
+data_mem_free:
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+groups_free:
+ kfree(ras_attr_groups);
+ctx_free:
+ kfree(ctx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(edac_dev_register);
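
A feature driver then hands all of its RAS features to the single entry point above. A hypothetical caller sketch (the names, ops, and private context are illustrative, not part of this patch):

    static int my_ras_probe(struct device *parent, void *my_ctx)
    {
            char name[] = "my_dev0";
            struct edac_dev_feature feats[] = {
                    {
                            .ft_type   = RAS_FEAT_SCRUB,
                            .instance  = 0,      /* must match scrub ordering */
                            .scrub_ops = &my_scrub_ops, /* assumed elsewhere */
                            .ctx       = my_ctx,
                    },
            };

            /*
             * On success this creates /sys/bus/edac/devices/my_dev0/scrub0/
             * and ties the new device's lifetime to @parent via
             * devm_add_action_or_reset().
             */
            return edac_dev_register(parent, name, NULL,
                                     ARRAY_SIZE(feats), feats);
    }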
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index f45d849d3f15..355a977019e9 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -751,6 +751,8 @@ static int i10nm_get_ddr_munits(void)
continue;
} else {
d->imc[lmc].mdev = mdev;
+ if (res_cfg->type == SPR)
+ skx_set_mc_mapping(d, i, lmc);
lmc++;
}
}
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 49b4499269fb..b5cf25905b05 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -899,7 +900,7 @@ static void decode_mtr(int slot_row, u16 mtr)
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 61adaa872ba7..69068f8d0cad 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -620,7 +621,7 @@ static int decode_mtr(struct i7300_pvt *pvt,
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
@@ -871,9 +872,9 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
edac_dbg(0, "Error detection is %s\n",
- IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_ECC_ENABLED(pvt->mc_settings)));
edac_dbg(0, "Retry is %s\n",
- IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_RETRY_ENABLED(pvt->mc_settings)));
/* Get Memory Interleave Range registers */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 4fc16922dc1a..204834149579 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -51,6 +51,7 @@
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <asm/mce.h>
#include "edac_module.h"
#define EDAC_MOD_STR "ie31200_edac"
@@ -84,44 +85,23 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9 0x3ec6
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10 0x3eca
-/* Test if HB is for Skylake or later. */
-#define DEVICE_ID_SKYLAKE_OR_LATER(did) \
- (((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_12) || \
- (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
- PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
-
-#define IE31200_DIMMS 4
-#define IE31200_RANKS 8
-#define IE31200_RANKS_PER_CHANNEL 4
+/* Raptor Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630
+
+#define IE31200_RANKS_PER_CHANNEL 8
#define IE31200_DIMMS_PER_CHANNEL 2
#define IE31200_CHANNELS 2
+#define IE31200_IMC_NUM 2
/* Intel IE31200 register addresses - device 0 function 0 - DRAM Controller */
#define IE31200_MCHBAR_LOW 0x48
#define IE31200_MCHBAR_HIGH 0x4c
-#define IE31200_MCHBAR_MASK GENMASK_ULL(38, 15)
-#define IE31200_MMR_WINDOW_SIZE BIT(15)
/*
* Error Status Register (16b)
*
- * 15 reserved
- * 14 Isochronous TBWRR Run Behind FIFO Full
- * (ITCV)
- * 13 Isochronous TBWRR Run Behind FIFO Put
- * (ITSTV)
- * 12 reserved
- * 11 MCH Thermal Sensor Event
- * for SMI/SCI/SERR (GTSE)
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 reserved
- * 7 DRAM Throttle Flag (DTF)
- * 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
@@ -130,68 +110,60 @@
#define IE31200_ERRSTS_CE BIT(0)
#define IE31200_ERRSTS_BITS (IE31200_ERRSTS_UE | IE31200_ERRSTS_CE)
-/*
- * Channel 0 ECC Error Log (64b)
- *
- * 63:48 Error Column Address (ERRCOL)
- * 47:32 Error Row Address (ERRROW)
- * 31:29 Error Bank Address (ERRBANK)
- * 28:27 Error Rank Address (ERRRANK)
- * 26:24 reserved
- * 23:16 Error Syndrome (ERRSYND)
- * 15: 2 reserved
- * 1 Multiple Bit Error Status (MERRSTS)
- * 0 Correctable Error Status (CERRSTS)
- */
-
-#define IE31200_C0ECCERRLOG 0x40c8
-#define IE31200_C1ECCERRLOG 0x44c8
-#define IE31200_C0ECCERRLOG_SKL 0x4048
-#define IE31200_C1ECCERRLOG_SKL 0x4448
-#define IE31200_ECCERRLOG_CE BIT(0)
-#define IE31200_ECCERRLOG_UE BIT(1)
-#define IE31200_ECCERRLOG_RANK_BITS GENMASK_ULL(28, 27)
-#define IE31200_ECCERRLOG_RANK_SHIFT 27
-#define IE31200_ECCERRLOG_SYNDROME_BITS GENMASK_ULL(23, 16)
-#define IE31200_ECCERRLOG_SYNDROME_SHIFT 16
-
-#define IE31200_ECCERRLOG_SYNDROME(log) \
- ((log & IE31200_ECCERRLOG_SYNDROME_BITS) >> \
- IE31200_ECCERRLOG_SYNDROME_SHIFT)
-
#define IE31200_CAPID0 0xe4
#define IE31200_CAPID0_PDCD BIT(4)
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-#define IE31200_MAD_DIMM_0_OFFSET 0x5004
-#define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C
-#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
-#define IE31200_MAD_DIMM_A_RANK BIT(17)
-#define IE31200_MAD_DIMM_A_RANK_SHIFT 17
-#define IE31200_MAD_DIMM_A_RANK_SKL BIT(10)
-#define IE31200_MAD_DIMM_A_RANK_SKL_SHIFT 10
-#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
-#define IE31200_MAD_DIMM_A_WIDTH_SHIFT 19
-#define IE31200_MAD_DIMM_A_WIDTH_SKL GENMASK_ULL(9, 8)
-#define IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT 8
-
-/* Skylake reports 1GB increments, everything else is 256MB */
-#define IE31200_PAGES(n, skl) \
- (n << (28 + (2 * skl) - PAGE_SHIFT))
+/* Non-constant mask variant of FIELD_GET() */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
+struct res_config {
+ enum mem_type mtype;
+ bool cmci;
+ int imc_num;
+ /* Host MMIO configuration register */
+ u64 reg_mchbar_mask;
+ u64 reg_mchbar_window_size;
+ /* ECC error log register */
+ u64 reg_eccerrlog_offset[IE31200_CHANNELS];
+ u64 reg_eccerrlog_ce_mask;
+ u64 reg_eccerrlog_ce_ovfl_mask;
+ u64 reg_eccerrlog_ue_mask;
+ u64 reg_eccerrlog_ue_ovfl_mask;
+ u64 reg_eccerrlog_rank_mask;
+ u64 reg_eccerrlog_syndrome_mask;
+ /* MSR to clear ECC error log register */
+ u32 msr_clear_eccerrlog_offset;
+ /* DIMM characteristics register */
+ u64 reg_mad_dimm_size_granularity;
+ u64 reg_mad_dimm_offset[IE31200_CHANNELS];
+ u32 reg_mad_dimm_size_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_rank_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_width_mask[IE31200_DIMMS_PER_CHANNEL];
+};
+
struct ie31200_priv {
void __iomem *window;
void __iomem *c0errlog;
void __iomem *c1errlog;
+ struct res_config *cfg;
+ struct mem_ctl_info *mci;
+ struct pci_dev *pdev;
+ struct device dev;
};
+static struct ie31200_pvt {
+ struct ie31200_priv *priv[IE31200_IMC_NUM];
+} ie31200_pvt;
+
enum ie31200_chips {
IE31200 = 0,
+ IE31200_1 = 1,
};
struct ie31200_dev_info {
@@ -202,18 +174,22 @@ struct ie31200_error_info {
u16 errsts;
u16 errsts2;
u64 eccerrlog[IE31200_CHANNELS];
+ u64 erraddr;
};
static const struct ie31200_dev_info ie31200_devs[] = {
[IE31200] = {
.ctl_name = "IE31200"
},
+ [IE31200_1] = {
+ .ctl_name = "IE31200_1"
+ },
};
struct dimm_data {
- u8 size; /* in multiples of 256MB, except Skylake is 1GB */
- u8 dual_rank : 1,
- x16_width : 2; /* 0 means x8 width */
+ u64 size; /* in bytes */
+ u8 ranks;
+ enum dev_type dtype;
};
static int how_many_channels(struct pci_dev *pdev)
@@ -251,29 +227,54 @@ static bool ecc_capable(struct pci_dev *pdev)
return true;
}
-static int eccerrlog_row(u64 log)
-{
- return ((log & IE31200_ECCERRLOG_RANK_BITS) >>
- IE31200_ECCERRLOG_RANK_SHIFT);
-}
+#define mci_to_pci_dev(mci) (((struct ie31200_priv *)(mci)->pvt_info)->pdev)
static void ie31200_clear_error_info(struct mem_ctl_info *mci)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
+
+ /*
+ * The PCI ERRSTS register is deprecated. Write the MSR to clear
+ * the ECC error log registers in all memory controllers.
+ */
+ if (cfg->msr_clear_eccerrlog_offset) {
+ if (wrmsr_safe(cfg->msr_clear_eccerrlog_offset,
+ cfg->reg_eccerrlog_ce_mask |
+ cfg->reg_eccerrlog_ce_ovfl_mask |
+ cfg->reg_eccerrlog_ue_mask |
+ cfg->reg_eccerrlog_ue_ovfl_mask, 0) < 0)
+ ie31200_printk(KERN_ERR, "Failed to write the ECC-clearing MSR\n");
+
+ return;
+ }
+
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
- pci_write_bits16(to_pci_dev(mci->pdev), IE31200_ERRSTS,
+ pci_write_bits16(mci_to_pci_dev(mci), IE31200_ERRSTS,
IE31200_ERRSTS_BITS, IE31200_ERRSTS_BITS);
}
static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = mci_to_pci_dev(mci);
struct ie31200_priv *priv = mci->pvt_info;
- pdev = to_pci_dev(mci->pdev);
+ /*
+ * The PCI ERRSTS register is deprecated; read the MMIO-mapped
+ * ECC error log registers directly.
+ */
+ if (priv->cfg->msr_clear_eccerrlog_offset) {
+ info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
+ if (nr_channels == 2)
+ info->eccerrlog[1] = lo_hi_readq(priv->c1errlog);
+
+ ie31200_clear_error_info(mci);
+ return;
+ }
/*
* This is a mess because there is no atomic way to read all the
@@ -309,46 +310,56 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
static void ie31200_process_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
int channel;
u64 log;
- if (!(info->errsts & IE31200_ERRSTS_BITS))
- return;
+ if (!cfg->msr_clear_eccerrlog_offset) {
+ if (!(info->errsts & IE31200_ERRSTS_BITS))
+ return;
- if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
- -1, -1, -1, "UE overwrote CE", "");
- info->errsts = info->errsts2;
+ if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "");
+ info->errsts = info->errsts2;
+ }
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
- if (log & IE31200_ECCERRLOG_UE) {
+ if (log & cfg->reg_eccerrlog_ue_mask) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- 0, 0, 0,
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0, 0,
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 UE", "");
- } else if (log & IE31200_ECCERRLOG_CE) {
+ } else if (log & cfg->reg_eccerrlog_ce_mask) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- 0, 0,
- IE31200_ECCERRLOG_SYNDROME(log),
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0,
+ field_get(cfg->reg_eccerrlog_syndrome_mask, log),
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 CE", "");
}
}
}
-static void ie31200_check(struct mem_ctl_info *mci)
+static void __ie31200_check(struct mem_ctl_info *mci, struct mce *mce)
{
struct ie31200_error_info info;
+ info.erraddr = mce ? mce->addr : 0;
ie31200_get_and_clear_error_info(mci, &info);
ie31200_process_error_info(mci, &info);
}
-static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
+static void ie31200_check(struct mem_ctl_info *mci)
+{
+ __ie31200_check(mci, NULL);
+}
+
+static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
union {
u64 mchbar;
@@ -361,7 +372,8 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
pci_read_config_dword(pdev, IE31200_MCHBAR_LOW, &u.mchbar_low);
pci_read_config_dword(pdev, IE31200_MCHBAR_HIGH, &u.mchbar_high);
- u.mchbar &= IE31200_MCHBAR_MASK;
+ u.mchbar &= cfg->reg_mchbar_mask;
+ u.mchbar += cfg->reg_mchbar_window_size * mc;
if (u.mchbar != (resource_size_t)u.mchbar) {
ie31200_printk(KERN_ERR, "mmio space beyond accessible range (0x%llx)\n",
@@ -369,7 +381,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, cfg->reg_mchbar_window_size);
if (!window)
ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
@@ -377,155 +389,108 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return window;
}
-static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int dimm,
+ struct res_config *cfg)
{
- dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
- dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
- (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
+ dd->size = field_get(cfg->reg_mad_dimm_size_mask[dimm], addr_decode) * cfg->reg_mad_dimm_size_granularity;
+ dd->ranks = field_get(cfg->reg_mad_dimm_rank_mask[dimm], addr_decode) + 1;
+ dd->dtype = field_get(cfg->reg_mad_dimm_width_mask[dimm], addr_decode) + DEV_X8;
}
-static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void ie31200_get_dimm_config(struct mem_ctl_info *mci, void __iomem *window,
+ struct res_config *cfg, int mc)
{
- dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
- dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
-}
+ struct dimm_data dimm_info;
+ struct dimm_info *dimm;
+ unsigned long nr_pages;
+ u32 addr_decode;
+ int i, j, k;
-static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
- bool skl)
-{
- if (skl)
- __skl_populate_dimm_info(dd, addr_decode, chan);
- else
- __populate_dimm_info(dd, addr_decode, chan);
-}
+ for (i = 0; i < IE31200_CHANNELS; i++) {
+ addr_decode = readl(window + cfg->reg_mad_dimm_offset[i]);
+ edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
+ for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
+ populate_dimm_info(&dimm_info, addr_decode, j, cfg);
+ edac_dbg(0, "mc: %d, channel: %d, dimm: %d, size: %lld MiB, ranks: %d, DRAM chip type: %d\n",
+ mc, i, j, dimm_info.size >> 20,
+ dimm_info.ranks,
+ dimm_info.dtype);
-static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+ nr_pages = MiB_TO_PAGES(dimm_info.size >> 20);
+ if (nr_pages == 0)
+ continue;
+
+ nr_pages = nr_pages / dimm_info.ranks;
+ for (k = 0; k < dimm_info.ranks; k++) {
+ dimm = edac_get_dimm(mci, (j * dimm_info.ranks) + k, i, 0);
+ dimm->nr_pages = nr_pages;
+ edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
+ dimm->grain = 8; /* just a guess */
+ dimm->mtype = cfg->mtype;
+ dimm->dtype = dimm_info.dtype;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
+ }
+ }
+}
+
+static int ie31200_register_mci(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
- int i, j, ret;
- struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
- void __iomem *window;
struct ie31200_priv *priv;
- u32 addr_decode, mad_offset;
-
- /*
- * Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
- * this logic when adding new CPU support.
- */
- bool skl = DEVICE_ID_SKYLAKE_OR_LATER(pdev->device);
-
- edac_dbg(0, "MC:\n");
-
- if (!ecc_capable(pdev)) {
- ie31200_printk(KERN_INFO, "No ECC support\n");
- return -ENODEV;
- }
+ struct mem_ctl_info *mci;
+ void __iomem *window;
+ int ret;
nr_channels = how_many_channels(pdev);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = IE31200_DIMMS;
+ layers[0].size = IE31200_RANKS_PER_CHANNEL;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers,
sizeof(struct ie31200_priv));
if (!mci)
return -ENOMEM;
- window = ie31200_map_mchbar(pdev);
+ window = ie31200_map_mchbar(pdev, cfg, mc);
if (!window) {
ret = -ENODEV;
goto fail_free;
}
edac_dbg(3, "MC: init mci\n");
- mci->pdev = &pdev->dev;
- if (skl)
- mci->mtype_cap = MEM_FLAG_DDR4;
- else
- mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->mtype_cap = BIT(cfg->mtype);
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
+ mci->ctl_name = ie31200_devs[mc].ctl_name;
mci->dev_name = pci_name(pdev);
- mci->edac_check = ie31200_check;
+ mci->edac_check = cfg->cmci ? NULL : ie31200_check;
mci->ctl_page_to_phys = NULL;
priv = mci->pvt_info;
priv->window = window;
- if (skl) {
- priv->c0errlog = window + IE31200_C0ECCERRLOG_SKL;
- priv->c1errlog = window + IE31200_C1ECCERRLOG_SKL;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET_SKL;
- } else {
- priv->c0errlog = window + IE31200_C0ECCERRLOG;
- priv->c1errlog = window + IE31200_C1ECCERRLOG;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET;
- }
-
- /* populate DIMM info */
- for (i = 0; i < IE31200_CHANNELS; i++) {
- addr_decode = readl(window + mad_offset +
- (i * 4));
- edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
- for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
- populate_dimm_info(&dimm_info[i][j], addr_decode, j,
- skl);
- edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
- dimm_info[i][j].size,
- dimm_info[i][j].dual_rank,
- dimm_info[i][j].x16_width);
- }
- }
-
+ priv->c0errlog = window + cfg->reg_eccerrlog_offset[0];
+ priv->c1errlog = window + cfg->reg_eccerrlog_offset[1];
+ priv->cfg = cfg;
+ priv->mci = mci;
+ priv->pdev = pdev;
+ device_initialize(&priv->dev);
/*
- * The dram rank boundary (DRB) reg values are boundary addresses
- * for each DRAM rank with a granularity of 64MB. DRB regs are
- * cumulative; the last one will contain the total memory
- * contained in all ranks.
+ * The EDAC core uses mci->pdev (a pointer to a struct device) as
+ * the memory controller ID. These SoCs can attach more than one
+ * memory controller to a single pci_dev, so pci_dev->dev alone is
+ * not unique per controller.
+ *
+ * To make mci->pdev unique, assign pci_dev->dev to mci->pdev for
+ * the first memory controller and a unique priv->dev to mci->pdev
+ * for each additional memory controller.
*/
- for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
- for (j = 0; j < IE31200_CHANNELS; j++) {
- struct dimm_info *dimm;
- unsigned long nr_pages;
-
- nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
- if (nr_pages == 0)
- continue;
-
- if (dimm_info[j][i].dual_rank) {
- nr_pages = nr_pages / 2;
- dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* just a guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- dimm = edac_get_dimm(mci, i * 2, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* same guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- }
+ mci->pdev = mc ? &priv->dev : &pdev->dev;
+ ie31200_get_dimm_config(mci, window, cfg, mc);
ie31200_clear_error_info(mci);
if (edac_mc_add_mc(mci)) {
@@ -534,16 +499,115 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
goto fail_unmap;
}
- /* get this far and it's successful */
- edac_dbg(3, "MC: success\n");
+ ie31200_pvt.priv[mc] = priv;
return 0;
-
fail_unmap:
iounmap(window);
-
fail_free:
edac_mc_free(mci);
+ return ret;
+}
+
+static void mce_check(struct mce *mce)
+{
+ struct ie31200_priv *priv;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+
+ __ie31200_check(priv->mci, mce);
+ }
+}
+
+static int mce_handler(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ char *type;
+
+ if (mce->kflags & MCE_HANDLED_CEC)
+ return NOTIFY_DONE;
+
+ /*
+ * Ignore unless this is a memory-related error.
+ * Don't check MCI_STATUS_ADDRV since it's not set on some CPUs.
+ */
+ if ((mce->status & 0xefff) >> 7 != 1)
+ return NOTIFY_DONE;
+
+ type = mce->mcgstatus & MCG_STATUS_MCIP ? "Exception" : "Event";
+
+ edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
+ mce->extcpu, type, mce->mcgstatus,
+ mce->bank, mce->status);
+ edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
+ edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
+ edac_dbg(0, "MISC 0x%llx\n", mce->misc);
+ edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
+ mce->cpuvendor, mce->cpuid, mce->time,
+ mce->socketid, mce->apicid);
+
+ mce_check(mce);
+ mce->kflags |= MCE_HANDLED_EDAC;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ie31200_mce_dec = {
+ .notifier_call = mce_handler,
+ .priority = MCE_PRIO_EDAC,
+};
+
+static void ie31200_unregister_mcis(void)
+{
+ struct ie31200_priv *priv;
+ struct mem_ctl_info *mci;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+ mci = priv->mci;
+ edac_mc_del_mc(mci->pdev);
+ iounmap(priv->window);
+ edac_mc_free(mci);
+ }
+}
+
+static int ie31200_probe1(struct pci_dev *pdev, struct res_config *cfg)
+{
+ int i, ret;
+
+ edac_dbg(0, "MC:\n");
+
+ if (!ecc_capable(pdev)) {
+ ie31200_printk(KERN_INFO, "No ECC support\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cfg->imc_num; i++) {
+ ret = ie31200_register_mci(pdev, cfg, i);
+ if (ret)
+ goto fail_register;
+ }
+
+ if (cfg->cmci) {
+ mce_register_decode_chain(&ie31200_mce_dec);
+ edac_op_state = EDAC_OPSTATE_INT;
+ } else {
+ edac_op_state = EDAC_OPSTATE_POLL;
+ }
+
+ /* Getting this far means success. */
+ edac_dbg(3, "MC: success\n");
+ return 0;
+
+fail_register:
+ ie31200_unregister_mcis();
return ret;
}
@@ -555,7 +619,7 @@ static int ie31200_init_one(struct pci_dev *pdev,
edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
- rc = ie31200_probe1(pdev, ent->driver_data);
+ rc = ie31200_probe1(pdev, (struct res_config *)ent->driver_data);
if (rc == 0 && !mci_pdev)
mci_pdev = pci_dev_get(pdev);
@@ -564,43 +628,112 @@ static int ie31200_init_one(struct pci_dev *pdev,
static void ie31200_remove_one(struct pci_dev *pdev)
{
- struct mem_ctl_info *mci;
- struct ie31200_priv *priv;
+ struct ie31200_priv *priv = ie31200_pvt.priv[0];
edac_dbg(0, "\n");
pci_dev_put(mci_pdev);
mci_pdev = NULL;
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
- priv = mci->pvt_info;
- iounmap(priv->window);
- edac_mc_free(mci);
+ if (priv->cfg->cmci)
+ mce_unregister_decode_chain(&ie31200_mce_dec);
+ ie31200_unregister_mcis();
}
+static struct res_config snb_cfg = {
+ .mtype = MEM_DDR3,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x40c8,
+ .reg_eccerrlog_offset[1] = 0x44c8,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(28),
+ .reg_mad_dimm_offset[0] = 0x5004,
+ .reg_mad_dimm_offset[1] = 0x5008,
+ .reg_mad_dimm_size_mask[0] = GENMASK(7, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(15, 8),
+ .reg_mad_dimm_rank_mask[0] = BIT(17),
+ .reg_mad_dimm_rank_mask[1] = BIT(18),
+ .reg_mad_dimm_width_mask[0] = BIT(19),
+ .reg_mad_dimm_width_mask[1] = BIT(20),
+};
+
+static struct res_config skl_cfg = {
+ .mtype = MEM_DDR4,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x4048,
+ .reg_eccerrlog_offset[1] = 0x4448,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(30),
+ .reg_mad_dimm_offset[0] = 0x500c,
+ .reg_mad_dimm_offset[1] = 0x5010,
+ .reg_mad_dimm_size_mask[0] = GENMASK(5, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(21, 16),
+ .reg_mad_dimm_rank_mask[0] = BIT(10),
+ .reg_mad_dimm_rank_mask[1] = BIT(26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(9, 8),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
+static struct res_config rpl_s_cfg = {
+ .mtype = MEM_DDR5,
+ .cmci = true,
+ .imc_num = 2,
+ .reg_mchbar_mask = GENMASK_ULL(41, 17),
+ .reg_mchbar_window_size = BIT_ULL(16),
+ .reg_eccerrlog_offset[0] = 0xe048,
+ .reg_eccerrlog_offset[1] = 0xe848,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ce_ovfl_mask = BIT_ULL(1),
+ .reg_eccerrlog_ue_mask = BIT_ULL(2),
+ .reg_eccerrlog_ue_ovfl_mask = BIT_ULL(3),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .msr_clear_eccerrlog_offset = 0x791,
+ .reg_mad_dimm_offset[0] = 0xd80c,
+ .reg_mad_dimm_offset[1] = 0xd810,
+ .reg_mad_dimm_size_granularity = BIT_ULL(29),
+ .reg_mad_dimm_size_mask[0] = GENMASK(6, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(22, 16),
+ .reg_mad_dimm_rank_mask[0] = GENMASK(10, 9),
+ .reg_mad_dimm_rank_mask[1] = GENMASK(27, 26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(8, 7),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
static const struct pci_device_id ie31200_pci_tbl[] = {
- { PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_12), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_1), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_2), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_3), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_4), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_5), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_6), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_7), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_11), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_12), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_1), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_2), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_3), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_4), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_5), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_6), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_7), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg },
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
@@ -617,12 +750,10 @@ static int __init ie31200_init(void)
int pci_rc, i;
edac_dbg(3, "MC:\n");
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
pci_rc = pci_register_driver(&ie31200_driver);
if (pci_rc < 0)
- goto fail0;
+ return pci_rc;
if (!mci_pdev) {
ie31200_registered = 0;
@@ -633,11 +764,13 @@ static int __init ie31200_init(void)
if (mci_pdev)
break;
}
+
if (!mci_pdev) {
edac_dbg(0, "ie31200 pci_get_device fail\n");
pci_rc = -ENODEV;
- goto fail1;
+ goto fail0;
}
+
pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
if (pci_rc < 0) {
edac_dbg(0, "ie31200 init fail\n");
@@ -645,12 +778,12 @@ static int __init ie31200_init(void)
goto fail1;
}
}
- return 0;
+ return 0;
fail1:
- pci_unregister_driver(&ie31200_driver);
-fail0:
pci_dev_put(mci_pdev);
+fail0:
+ pci_unregister_driver(&ie31200_driver);
return pci_rc;
}
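
The run-time field_get() introduced near the top of this file is what lets one code path serve every res_config table: unlike FIELD_GET(), its mask need not be a compile-time constant. A worked instance (values illustrative):

    u64 log  = 0x18000000;              /* bits 28:27 set */
    u64 mask = GENMASK_ULL(28, 27);     /* rank field of the ECC log */
    int rank = field_get(mask, log);    /* (log & mask) >> (ffs(mask) - 1) == 3 */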
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index fdf3a84fe698..5807517ee32d 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -125,7 +125,7 @@
#define MEM_SLICE_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define MEM_SLICE_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
-static struct res_config {
+static const struct res_config {
bool machine_check;
int num_imc;
u32 imc_base;
@@ -472,7 +472,7 @@ static u64 rpl_p_err_addr(u64 ecclog)
return ECC_ERROR_LOG_ADDR45(ecclog);
}
-static struct res_config ehl_cfg = {
+static const struct res_config ehl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xdc00,
@@ -482,7 +482,7 @@ static struct res_config ehl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static struct res_config icl_cfg = {
+static const struct res_config icl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xd800,
@@ -492,7 +492,7 @@ static struct res_config icl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static struct res_config tgl_cfg = {
+static const struct res_config tgl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0x5000,
@@ -506,7 +506,7 @@ static struct res_config tgl_cfg = {
.err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
};
-static struct res_config adl_cfg = {
+static const struct res_config adl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -517,7 +517,7 @@ static struct res_config adl_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config adl_n_cfg = {
+static const struct res_config adl_n_cfg = {
.machine_check = true,
.num_imc = 1,
.imc_base = 0xd800,
@@ -528,7 +528,7 @@ static struct res_config adl_n_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config rpl_p_cfg = {
+static const struct res_config rpl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -540,7 +540,7 @@ static struct res_config rpl_p_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config mtl_ps_cfg = {
+static const struct res_config mtl_ps_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -551,7 +551,7 @@ static struct res_config mtl_ps_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config mtl_p_cfg = {
+static const struct res_config mtl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -785,13 +785,22 @@ static u64 ecclog_read_and_clear(struct igen6_imc *imc)
{
u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);
- if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) {
- /* Clear CE/UE bits by writing 1s */
- writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
- return ecclog;
- }
+ /*
+ * Quirk: The ECC_ERROR_LOG register of certain SoCs may contain
+ * the invalid value ~0, which would result in a flood of invalid
+ * error reports in polling mode. Skip it.
+ */
+ if (ecclog == ~0)
+ return 0;
- return 0;
+ /* Neither a CE nor a UE. Skip it. */
+ if (!(ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)))
+ return 0;
+
+ /* Clear CE/UE bits by writing 1s */
+ writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
+
+ return ecclog;
}
static void errsts_clear(struct igen6_imc *imc)
@@ -1374,7 +1383,7 @@ static void unregister_err_handler(void)
unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
}
-static void opstate_set(struct res_config *cfg, const struct pci_device_id *ent)
+static void opstate_set(const struct res_config *cfg, const struct pci_device_id *ent)
{
/*
* Quirk: Certain SoCs' error reporting interrupts don't work.
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
new file mode 100644
index 000000000000..3b1a845457b0
--- /dev/null
+++ b/drivers/edac/mem_repair.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC memory repair driver is designed to control memory
+ * devices with memory repair features, such as Post Package Repair
+ * (PPR) and memory sparing. The common sysfs memory repair interface
+ * abstracts the control of various arbitrary memory repair
+ * functionalities into a unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_mem_repair_attributes {
+ MR_TYPE,
+ MR_PERSIST_MODE,
+ MR_SAFE_IN_USE,
+ MR_HPA,
+ MR_MIN_HPA,
+ MR_MAX_HPA,
+ MR_DPA,
+ MR_MIN_DPA,
+ MR_MAX_DPA,
+ MR_NIBBLE_MASK,
+ MR_BANK_GROUP,
+ MR_BANK,
+ MR_RANK,
+ MR_ROW,
+ MR_COLUMN,
+ MR_CHANNEL,
+ MR_SUB_CHANNEL,
+ MEM_DO_REPAIR,
+ MR_MAX_ATTRS
+};
+
+struct edac_mem_repair_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_mem_repair_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_mem_repair_dev_attr mem_repair_dev_attr[MR_MAX_ATTRS];
+ struct attribute *mem_repair_attrs[MR_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+#define TO_MR_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_mem_repair_dev_attr, dev_attr)
+
+#define MR_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+MR_ATTR_SHOW(repair_type, get_repair_type, const char *, "%s\n")
+MR_ATTR_SHOW(persist_mode, get_persist_mode, bool, "%u\n")
+MR_ATTR_SHOW(repair_safe_when_in_use, get_repair_safe_when_in_use, bool, "%u\n")
+MR_ATTR_SHOW(hpa, get_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_hpa, get_min_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_hpa, get_max_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(dpa, get_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_dpa, get_min_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_dpa, get_max_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(nibble_mask, get_nibble_mask, u32, "0x%x\n")
+MR_ATTR_SHOW(bank_group, get_bank_group, u32, "%u\n")
+MR_ATTR_SHOW(bank, get_bank, u32, "%u\n")
+MR_ATTR_SHOW(rank, get_rank, u32, "%u\n")
+MR_ATTR_SHOW(row, get_row, u32, "0x%x\n")
+MR_ATTR_SHOW(column, get_column, u32, "%u\n")
+MR_ATTR_SHOW(channel, get_channel, u32, "%u\n")
+MR_ATTR_SHOW(sub_channel, get_sub_channel, u32, "%u\n")
+
+#define MR_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_ATTR_STORE(persist_mode, set_persist_mode, unsigned long, kstrtoul)
+MR_ATTR_STORE(hpa, set_hpa, u64, kstrtou64)
+MR_ATTR_STORE(dpa, set_dpa, u64, kstrtou64)
+MR_ATTR_STORE(nibble_mask, set_nibble_mask, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank_group, set_bank_group, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank, set_bank, unsigned long, kstrtoul)
+MR_ATTR_STORE(rank, set_rank, unsigned long, kstrtoul)
+MR_ATTR_STORE(row, set_row, unsigned long, kstrtoul)
+MR_ATTR_STORE(column, set_column, unsigned long, kstrtoul)
+MR_ATTR_STORE(channel, set_channel, unsigned long, kstrtoul)
+MR_ATTR_STORE(sub_channel, set_sub_channel, unsigned long, kstrtoul)
+
+#define MR_DO_OP(attrib, cb) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops; \
+ unsigned long data; \
+ int ret; \
+ \
+ ret = kstrtoul(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_DO_OP(repair, do_repair)
+
+static umode_t mem_repair_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ u8 inst = TO_MR_DEV_ATTR(dev_attr)->instance;
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops;
+
+ switch (attr_id) {
+ case MR_TYPE:
+ if (ops->get_repair_type)
+ return a->mode;
+ break;
+ case MR_PERSIST_MODE:
+ if (ops->get_persist_mode) {
+ if (ops->set_persist_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SAFE_IN_USE:
+ if (ops->get_repair_safe_when_in_use)
+ return a->mode;
+ break;
+ case MR_HPA:
+ if (ops->get_hpa) {
+ if (ops->set_hpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_HPA:
+ if (ops->get_min_hpa)
+ return a->mode;
+ break;
+ case MR_MAX_HPA:
+ if (ops->get_max_hpa)
+ return a->mode;
+ break;
+ case MR_DPA:
+ if (ops->get_dpa) {
+ if (ops->set_dpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_DPA:
+ if (ops->get_min_dpa)
+ return a->mode;
+ break;
+ case MR_MAX_DPA:
+ if (ops->get_max_dpa)
+ return a->mode;
+ break;
+ case MR_NIBBLE_MASK:
+ if (ops->get_nibble_mask) {
+ if (ops->set_nibble_mask)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK_GROUP:
+ if (ops->get_bank_group) {
+ if (ops->set_bank_group)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK:
+ if (ops->get_bank) {
+ if (ops->set_bank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_RANK:
+ if (ops->get_rank) {
+ if (ops->set_rank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_ROW:
+ if (ops->get_row) {
+ if (ops->set_row)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_COLUMN:
+ if (ops->get_column) {
+ if (ops->set_column)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_CHANNEL:
+ if (ops->get_channel) {
+ if (ops->set_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SUB_CHANNEL:
+ if (ops->get_sub_channel) {
+ if (ops->set_sub_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MEM_DO_REPAIR:
+ if (ops->do_repair)
+ return a->mode;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define MR_ATTR_RO(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .instance = _instance })
+
+#define MR_ATTR_WO(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .instance = _instance })
+
+#define MR_ATTR_RW(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .instance = _instance })
+
+static int mem_repair_create_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{
+ struct edac_mem_repair_context *ctx;
+ struct attribute_group *group;
+ int i;
+ struct edac_mem_repair_dev_attr dev_attr[] = {
+ [MR_TYPE] = MR_ATTR_RO(repair_type, instance),
+ [MR_PERSIST_MODE] = MR_ATTR_RW(persist_mode, instance),
+ [MR_SAFE_IN_USE] = MR_ATTR_RO(repair_safe_when_in_use, instance),
+ [MR_HPA] = MR_ATTR_RW(hpa, instance),
+ [MR_MIN_HPA] = MR_ATTR_RO(min_hpa, instance),
+ [MR_MAX_HPA] = MR_ATTR_RO(max_hpa, instance),
+ [MR_DPA] = MR_ATTR_RW(dpa, instance),
+ [MR_MIN_DPA] = MR_ATTR_RO(min_dpa, instance),
+ [MR_MAX_DPA] = MR_ATTR_RO(max_dpa, instance),
+ [MR_NIBBLE_MASK] = MR_ATTR_RW(nibble_mask, instance),
+ [MR_BANK_GROUP] = MR_ATTR_RW(bank_group, instance),
+ [MR_BANK] = MR_ATTR_RW(bank, instance),
+ [MR_RANK] = MR_ATTR_RW(rank, instance),
+ [MR_ROW] = MR_ATTR_RW(row, instance),
+ [MR_COLUMN] = MR_ATTR_RW(column, instance),
+ [MR_CHANNEL] = MR_ATTR_RW(channel, instance),
+ [MR_SUB_CHANNEL] = MR_ATTR_RW(sub_channel, instance),
+ [MEM_DO_REPAIR] = MR_ATTR_WO(repair, instance)
+ };
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < MR_MAX_ATTRS; i++) {
+ memcpy(&ctx->mem_repair_dev_attr[i],
+ &dev_attr[i], sizeof(dev_attr[i]));
+ ctx->mem_repair_attrs[i] =
+ &ctx->mem_repair_dev_attr[i].dev_attr.attr;
+ }
+
+ sprintf(ctx->name, "%s%d", "mem_repair", instance);
+ group = &ctx->group;
+ group->name = ctx->name;
+ group->attrs = ctx->mem_repair_attrs;
+ group->is_visible = mem_repair_attr_visible;
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_mem_repair_get_desc - get EDAC memory repair descriptors
+ * @dev: client device with memory repair feature
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's memory repair instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!dev || !attr_groups)
+ return -EINVAL;
+
+ return mem_repair_create_desc(dev, attr_groups, instance);
+}
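
As in the ECS and scrub interfaces, attribute visibility is derived purely from which ops a driver fills in. A hypothetical client sketch for the write-only repair trigger, with the prototype inferred from the MR_DO_OP() macro above (the my_* names and the trigger value check are illustrative, not part of this patch):

    /* Invoked when userspace writes to .../mem_repairN/repair. */
    static int my_do_repair(struct device *dev, void *drv_data,
                            unsigned long val)
    {
            struct my_repair *mr = drv_data;    /* assumed driver type */

            if (val != 1)                       /* assumed trigger value */
                    return -EINVAL;

            return my_issue_ppr(mr);            /* assumed hardware op */
    }

    static const struct edac_mem_repair_ops my_repair_ops = {
            .get_repair_type = my_get_repair_type,  /* assumed elsewhere */
            .do_repair       = my_do_repair,
    };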
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index f93f2f2b1cf2..af14c8a3279f 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -372,7 +372,7 @@ static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
struct b_cr_asym_mem_region1_mchbar *as1,
struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
- const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
+ static const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
int mask = 0;
if (as2way->asym_2way_interleave_enable)
@@ -489,7 +489,7 @@ static int dnv_get_registers(void)
*/
static int get_registers(void)
{
- const int intlv[] = { 10, 11, 12, 12 };
+ static const int intlv[] = { 10, 11, 12, 12 };
if (RD_REG(&tolud, b_cr_tolud_pci) ||
RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
diff --git a/drivers/edac/scrub.c b/drivers/edac/scrub.c
new file mode 100644
index 000000000000..e421d3ebd959
--- /dev/null
+++ b/drivers/edac/scrub.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC scrub driver controls the memory scrubbers in the
+ * system. The common sysfs scrub interface abstracts the control of
+ * various arbitrary scrubbing functionalities into a unified set of
+ * functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_scrub_attributes {
+ SCRUB_ADDRESS,
+ SCRUB_SIZE,
+ SCRUB_ENABLE_BACKGROUND,
+ SCRUB_MIN_CYCLE_DURATION,
+ SCRUB_MAX_CYCLE_DURATION,
+ SCRUB_CUR_CYCLE_DURATION,
+ SCRUB_MAX_ATTRS
+};
+
+struct edac_scrub_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_scrub_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_scrub_dev_attr scrub_dev_attr[SCRUB_MAX_ATTRS];
+ struct attribute *scrub_attrs[SCRUB_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+#define TO_SCRUB_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_scrub_dev_attr, dev_attr)
+
+#define EDAC_SCRUB_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_SCRUB_ATTR_SHOW(addr, read_addr, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(size, read_size, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(enable_background, get_enabled_bg, bool, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(min_cycle_duration, get_min_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(max_cycle_duration, get_max_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(current_cycle_duration, get_cycle_duration, u32, "%u\n")
+
+#define EDAC_SCRUB_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_SCRUB_ATTR_STORE(addr, write_addr, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(size, write_size, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(enable_background, set_enabled_bg, unsigned long, kstrtoul)
+EDAC_SCRUB_ATTR_STORE(current_cycle_duration, set_cycle_duration, unsigned long, kstrtoul)
+
+static umode_t scrub_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ u8 inst = TO_SCRUB_DEV_ATTR(dev_attr)->instance;
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops;
+
+ switch (attr_id) {
+ case SCRUB_ADDRESS:
+ if (ops->read_addr) {
+ if (ops->write_addr)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_SIZE:
+ if (ops->read_size) {
+ if (ops->write_size)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_ENABLE_BACKGROUND:
+ if (ops->get_enabled_bg) {
+ if (ops->set_enabled_bg)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_MIN_CYCLE_DURATION:
+ if (ops->get_min_cycle)
+ return a->mode;
+ break;
+ case SCRUB_MAX_CYCLE_DURATION:
+ if (ops->get_max_cycle)
+ return a->mode;
+ break;
+ case SCRUB_CUR_CYCLE_DURATION:
+ if (ops->get_cycle_duration) {
+ if (ops->set_cycle_duration)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_SCRUB_ATTR_RO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_WO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_RW(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .instance = _instance })
+
+static int scrub_create_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ struct edac_scrub_context *scrub_ctx;
+ struct attribute_group *group;
+ int i;
+ struct edac_scrub_dev_attr dev_attr[] = {
+ [SCRUB_ADDRESS] = EDAC_SCRUB_ATTR_RW(addr, instance),
+ [SCRUB_SIZE] = EDAC_SCRUB_ATTR_RW(size, instance),
+ [SCRUB_ENABLE_BACKGROUND] = EDAC_SCRUB_ATTR_RW(enable_background, instance),
+ [SCRUB_MIN_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(min_cycle_duration, instance),
+ [SCRUB_MAX_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(max_cycle_duration, instance),
+ [SCRUB_CUR_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RW(current_cycle_duration, instance)
+ };
+
+ scrub_ctx = devm_kzalloc(scrub_dev, sizeof(*scrub_ctx), GFP_KERNEL);
+ if (!scrub_ctx)
+ return -ENOMEM;
+
+ group = &scrub_ctx->group;
+ for (i = 0; i < SCRUB_MAX_ATTRS; i++) {
+ memcpy(&scrub_ctx->scrub_dev_attr[i], &dev_attr[i], sizeof(dev_attr[i]));
+ scrub_ctx->scrub_attrs[i] = &scrub_ctx->scrub_dev_attr[i].dev_attr.attr;
+ }
+ sprintf(scrub_ctx->name, "%s%d", "scrub", instance);
+ group->name = scrub_ctx->name;
+ group->attrs = scrub_ctx->scrub_attrs;
+ group->is_visible = scrub_attr_visible;
+
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_scrub_get_desc - get EDAC scrub descriptors
+ * @scrub_dev: client device, with scrub support
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's scrub instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!scrub_dev || !attr_groups)
+ return -EINVAL;
+
+ return scrub_create_desc(scrub_dev, attr_groups, instance);
+}
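To see how a client plugs into this interface, here is a hedged sketch; the callback signature is inferred from the show/store macros above, and the my_* names are hypothetical:

	/* Hypothetical client callbacks. Ops left NULL are handled by
	 * scrub_attr_visible(): the matching sysfs attribute is hidden,
	 * or made read-only when only the read side is implemented. */
	static int my_read_addr(struct device *dev, void *priv, u64 *addr)
	{
		*addr = 0;	/* report the current scrub address */
		return 0;
	}

	static const struct edac_scrub_ops my_scrub_ops = {
		.read_addr = my_read_addr,	/* "addr" shows up as 0444 */
	};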
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index f7bd930e058f..fa5b442b1844 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -121,6 +121,35 @@ void skx_adxl_put(void)
}
EXPORT_SYMBOL_GPL(skx_adxl_put);
+static void skx_init_mc_mapping(struct skx_dev *d)
+{
+ /*
+ * By default, the BIOS presents all memory controllers within each
+ * socket to the EDAC driver. The physical indices are the same as
+ * the logical indices of the memory controllers enumerated by the
+ * EDAC driver.
+ */
+ for (int i = 0; i < NUM_IMC; i++)
+ d->mc_mapping[i] = i;
+}
+
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
+{
+ edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
+
+ d->mc_mapping[pmc] = lmc;
+}
+EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
+
+static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
+{
+ edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, d->mc_mapping[pmc]);
+
+ return d->mc_mapping[pmc];
+}
+
static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
{
struct skx_dev *d;
@@ -188,6 +217,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
return false;
}
+ res->imc = skx_get_mc_mapping(d, res->imc);
+
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
continue;
@@ -326,6 +357,8 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
+
+ skx_init_mc_mapping(d);
}
if (list)
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index b0845bdd4516..ca5408803f87 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -93,6 +93,16 @@ struct skx_dev {
struct pci_dev *uracu; /* for i10nm CPU */
struct pci_dev *pcu_cr3; /* for HBM memory detection */
u32 mcroute;
+	/*
+	 * Some server BIOS may hide certain memory controllers, and the
+	 * EDAC driver skips those hidden memory controllers. However, the
+	 * ADXL still decodes memory error addresses using physical memory
+	 * controller indices. The mapping table is used to convert the
+	 * physical indices (reported by ADXL) to the logical indices
+	 * (used by the EDAC driver) of present memory controllers during
+	 * the error handling process.
+	 */
+ u8 mc_mapping[NUM_IMC];
struct skx_imc {
struct mem_ctl_info *mci;
struct pci_dev *mdev; /* for i10nm CPU */
@@ -242,6 +252,7 @@ void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
void skx_set_res_cfg(struct res_config *cfg);
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
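A caller that detects hidden controllers would fill the table roughly as follows; the presence check is hypothetical and stands in for whatever per-controller probing the driver already performs:

	/* Hedged sketch: give consecutive logical indices to the memory
	 * controllers that are actually present; the identity mapping
	 * set up by skx_init_mc_mapping() covers the rest. */
	u8 pmc, lmc = 0;

	for (pmc = 0; pmc < NUM_IMC; pmc++) {
		if (!mc_is_present(d, pmc))	/* hypothetical check */
			continue;
		skx_set_mc_mapping(d, pmc, lmc++);
	}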
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 699c7d29d80c..9955396c9a52 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -1407,7 +1408,7 @@ static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
dev_err(edac_dev->dev, "Multiple XGIC write size error\n");
info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
- info & REQTYPE_MASK ? "read" : "write", ERRADDR_RD(info),
+ str_read_write(info & REQTYPE_MASK), ERRADDR_RD(info),
info);
writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
@@ -1489,19 +1490,19 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
if (reg & AGENT_OFFLINE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to offline agent error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & UNIMPL_RBPAGE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to unimplemented page error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & WORD_ALIGNED_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s word aligned access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & PAGE_ACCESS_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s to page out of range access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
return;
if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
@@ -1560,7 +1561,7 @@ rb_skip:
err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
- REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_F2_RD(err_addr_hi)),
ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
if (reg & WRERR_RESP_MASK)
dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
@@ -1611,7 +1612,7 @@ chk_iob_axi0:
dev_err(edac_dev->dev,
"%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
@@ -1625,7 +1626,7 @@ chk_iob_axi1:
dev_err(edac_dev->dev,
"%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
}
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 5a732018be36..fd80b2f3233a 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -75,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size,
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
+ /* Avoid address 0x0, as it can be mistaken for NULL */
+ if (alloc_min == 0)
+ alloc_min = align;
+
size = round_up(size, EFI_ALLOC_ALIGN);
/* count the suitable slots in each memory map entry */
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 1dd4362ef9a3..8c28e25ddc8a 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev)
return ret;
sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+ of_node_put(args.np);
num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
for (i = 0; i < num_channel; i++) {
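The added of_node_put() plugs a device-node reference leak: of_parse_phandle_with_args() returns args.np with its refcount elevated, and the caller must drop that reference once done with the node. A hedged sketch of the pattern (the property names follow this driver's mailbox binding, everything else is illustrative):

	struct of_phandle_args args;
	bool fast_ipc;
	int ret;

	ret = of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		return ret;

	fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
	of_node_put(args.np);	/* balance the reference taken above */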
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 447246bd04be..98a463e9774b 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -814,15 +814,6 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev);
- auxiliary_set_drvdata(aux_dev, qcuefi);
- status = qcuefi_set_reference(qcuefi);
- if (status)
- return status;
-
- status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
- if (status)
- qcuefi_set_reference(NULL);
-
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = SZ_4K;
pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
@@ -833,6 +824,15 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
if (IS_ERR(qcuefi->mempool))
return PTR_ERR(qcuefi->mempool);
+ auxiliary_set_drvdata(aux_dev, qcuefi);
+ status = qcuefi_set_reference(qcuefi);
+ if (status)
+ return status;
+
+ status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
+ if (status)
+ qcuefi_set_reference(NULL);
+
return status;
}
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index f0569bb9411f..fc4d67e4c4a6 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -2301,8 +2301,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
if (IS_ERR(__scm->mempool)) {
- dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
+ ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
goto err;
}
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index f3319be20b36..5767aed25cdc 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -6,8 +6,11 @@
#include <linux/bitmap.h>
#include <linux/cache.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/string.h>
+#include <uapi/linux/psci.h>
+
#include <asm/hypervisor.h>
static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
@@ -51,3 +54,66 @@ bool kvm_arm_hyp_service_available(u32 func_id)
return test_bit(func_id, __kvm_arm_hyp_services);
}
EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+#ifdef CONFIG_ARM64
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+ int i;
+ u32 ver;
+ u64 max_cpus;
+ struct arm_smccc_res res;
+ struct target_impl_cpu *target;
+
+ if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+ !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+ return;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+ 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return;
+
+	/* Version info is in lower 32 bits and is in SMCCC_VERSION format */
+ ver = lower_32_bits(res.a1);
+ if (PSCI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+ return;
+ }
+
+ if (!res.a2) {
+ pr_warn("No target implementation CPUs specified\n");
+ return;
+ }
+
+ max_cpus = res.a2;
+ target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target));
+ if (!target) {
+ pr_warn("Not enough memory for struct target_impl_cpu\n");
+ return;
+ }
+
+ for (i = 0; i < max_cpus; i++) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+ i, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_warn("Discovering target implementation CPUs failed\n");
+ goto mem_free;
+ }
+ target[i].midr = res.a1;
+ target[i].revidr = res.a2;
+ target[i].aidr = res.a3;
+	}
+
+ if (!cpu_errata_set_target_impl(max_cpus, target)) {
+ pr_warn("Failed to set target implementation CPUs\n");
+ goto mem_free;
+ }
+
+	pr_info("Number of target implementation CPUs is %llu\n", max_cpus);
+ return;
+
+mem_free:
+ memblock_free(target, sizeof(*target) * max_cpus);
+}
+#endif
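kvm_arm_target_impl_cpu_init() is __init code that presumably runs before the regular page and slab allocators are up, hence memblock_alloc()/memblock_free() rather than kmalloc(). A minimal sketch of that early-boot allocation pattern (error handling trimmed):

	struct target_impl_cpu *t;

	t = memblock_alloc(sizeof(*t) * max_cpus, __alignof__(*t));
	if (!t)
		return;
	/* ... populate t[0..max_cpus-1]; on a later failure, hand it back: */
	memblock_free(t, sizeof(*t) * max_cpus);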
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 40f76a90fd7d..107d75558b5a 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2729,8 +2729,9 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
- ret = atomic_notifier_chain_register(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ ret = raw_notifier_chain_register(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
if (ret)
goto out_free_bitmap;
@@ -2754,8 +2755,9 @@ out_unregister_device_notifier:
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
out_unregister_line_notifier:
- atomic_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
out_free_bitmap:
gpio_device_put(gdev);
bitmap_free(cdev->watched_lines);
@@ -2779,8 +2781,9 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
- atomic_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
bitmap_free(cdev->watched_lines);
gpio_device_put(gdev);
kfree(cdev);
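scoped_guard() is the lock-scope helper from linux/cleanup.h: it acquires the named guard class for exactly the statement (or block) that follows and releases it when that scope exits, early returns included. Open-coded, the registration above is roughly:

	unsigned long flags;

	write_lock_irqsave(&gdev->line_state_lock, flags);
	ret = raw_notifier_chain_register(&gdev->line_state_notifier,
					  &cdev->lineinfo_changed_nb);
	write_unlock_irqrestore(&gdev->line_state_lock, flags);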
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 8741600af7ef..0c00ed2ab431 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1025,7 +1025,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
}
}
- ATOMIC_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
+ rwlock_init(&gdev->line_state_lock);
+ RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
@@ -1056,24 +1057,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
desc->gdev = gdev;
- if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
- ret = gc->get_direction(gc, desc_index);
- if (ret < 0)
- /*
- * FIXME: Bail-out here once all GPIO drivers
- * are updated to not return errors in
- * situations that can be considered normal
- * operation.
- */
- dev_warn(&gdev->dev,
- "%s: get_direction failed: %d\n",
- __func__, ret);
-
- assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
- } else {
+ /*
+		 * We would typically want to check the return value of
+		 * get_direction() here, but we must not bail out on errors as
+		 * pin controllers can have pins configured to alternate
+		 * functions and return -EINVAL. Also: there's no need to take
+		 * the SRCU lock here.
+ */
+ if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index))
+ assign_bit(FLAG_IS_OUT, &desc->flags,
+ !gc->get_direction(gc, desc_index));
+ else
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
- }
}
ret = of_gpiochip_add(gc);
@@ -4193,8 +4189,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action)
{
- atomic_notifier_call_chain(&desc->gdev->line_state_notifier,
- action, desc);
+ guard(read_lock_irqsave)(&desc->gdev->line_state_lock);
+
+ raw_notifier_call_chain(&desc->gdev->line_state_notifier, action, desc);
}
/**
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 147156ec502b..c129a03e2040 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,6 +16,7 @@
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>
@@ -45,6 +46,7 @@
* @list: links gpio_device:s together for traversal
* @line_state_notifier: used to notify subscribers about lines being
* requested, released or reconfigured
+ * @line_state_lock: RW-spinlock protecting the line state notifier
* @line_state_wq: used to emit line state events from a separate thread in
* process context
* @device_notifier: used to notify character device wait queues about the GPIO
@@ -72,7 +74,8 @@ struct gpio_device {
const char *label;
void *data;
struct list_head list;
- struct atomic_notifier_head line_state_notifier;
+ struct raw_notifier_head line_state_notifier;
+ rwlock_t line_state_lock;
struct workqueue_struct *line_state_wq;
struct blocking_notifier_head device_notifier;
struct srcu_struct srcu;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 95a05b03f799..c0ddbe7d6f0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2555,7 +2555,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
int r;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_s4 = false;
if (r)
return r;
@@ -2567,8 +2566,13 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_resume(drm_dev, true);
+ r = amdgpu_device_resume(drm_dev, true);
+ adev->in_s4 = false;
+
+ return r;
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2581,6 +2585,9 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 03308261f894..7507d9443028 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -188,8 +188,8 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
- hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;
+ hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 2523221a2519..48ff00427882 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -2437,7 +2437,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
(void **)&adev->gfx.me.me_fw_data_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
- gfx_v12_0_pfp_fini(adev);
+ gfx_v12_0_me_fini(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index b749f1c3f6a9..c3c144a4f45e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -501,9 +501,6 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
uint64_t *flags)
{
struct amdgpu_bo *bo = mapping->bo_va->base.bo;
- struct amdgpu_device *bo_adev;
- bool coherent, is_system;
-
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@@ -519,25 +516,11 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (!bo)
- return;
-
- if (bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
- AMDGPU_GEM_CREATE_UNCACHED))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
-
- bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
- coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
- is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) ||
- (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
-
if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
*flags |= AMDGPU_PTE_DCC;
- /* WA for HW bug */
- if (is_system || ((bo_adev != adev) && coherent))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
-
+ if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
}
static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 47db483c3516..95c609317a8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -78,12 +78,12 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = {
/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -104,10 +104,10 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = {
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -115,10 +115,10 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a59b4c36cad7..e98fb3fa36a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -103,12 +103,11 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -120,12 +119,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
@@ -138,10 +137,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c633b7ff2943..09fd6ef99b3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -284,7 +284,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
return 0;
}
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
if (!ip_block)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 06615f160331..0c9c4d8b7b71 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -167,16 +167,16 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -188,9 +188,9 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
};
@@ -206,16 +206,16 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -227,9 +227,9 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
{
@@ -239,13 +239,6 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
};
static const struct amdgpu_video_codecs cz_video_codecs_decode =
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 651660958e5b..0320163b6e74 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -3644,7 +3644,7 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
};
static const uint32_t cwsr_trap_gfx12_hex[] = {
- 0xbfa00001, 0xbfa0024b,
+ 0xbfa00001, 0xbfa002a2,
0xb0804009, 0xb8f8f804,
0x9178ff78, 0x00008c00,
0xb8fbf811, 0x8b6eff78,
@@ -3718,7 +3718,15 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00011677, 0xd7610000,
0x00011a79, 0xd7610000,
0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
+ 0x00011e7f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x00003fff, 0xbeff0080,
0xee0a407a, 0x000c0000,
0x00004000, 0xd760007a,
@@ -3755,38 +3763,46 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00000200, 0xbef600ff,
0x01000000, 0x7e000280,
0x7e020280, 0x7e040280,
- 0xbefd0080, 0xbe804ec2,
- 0xbf94fffe, 0xb8faf804,
- 0x8b7a847a, 0x91788478,
- 0x8c787a78, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xd7610002, 0x0000fa6c,
- 0x807d817d, 0x917aff6d,
- 0x80000000, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa6e,
- 0x807d817d, 0xd7610002,
- 0x0000fa6f, 0x807d817d,
- 0xd7610002, 0x0000fa78,
- 0x807d817d, 0xb8faf811,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa7b, 0x807d817d,
- 0xb8f1f801, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f814, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f815, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f812, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f813, 0xd7610002,
- 0x0000fa71, 0x807d817d,
+ 0xbe804ec2, 0xbf94fffe,
+ 0xb8faf804, 0x8b7a847a,
+ 0x91788478, 0x8c787a78,
+ 0x917aff6d, 0x80000000,
+ 0xd7610002, 0x00010071,
+ 0xd7610002, 0x0001026c,
+ 0xd7610002, 0x0001047a,
+ 0xd7610002, 0x0001066e,
+ 0xd7610002, 0x0001086f,
+ 0xd7610002, 0x00010a78,
+ 0xd7610002, 0x00010e7b,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xb8faf811, 0xd7610002,
+ 0x00010c7a, 0xb8faf801,
+ 0xd7610002, 0x0001107a,
+ 0xb8faf814, 0xd7610002,
+ 0x0001127a, 0xb8faf815,
+ 0xd7610002, 0x0001147a,
+ 0xb8faf812, 0xd7610002,
+ 0x0001167a, 0xb8faf813,
+ 0xd7610002, 0x0001187a,
0xb8faf802, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xbefa50c1, 0xbfc70000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xbefe00ff,
+ 0x00011a7a, 0xbefa50c1,
+ 0xbfc70000, 0xd7610002,
+ 0x00011c7a, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x0000ffff, 0xbeff0080,
0xc4068070, 0x008ce802,
0x00000000, 0xbefe00c1,
@@ -3801,331 +3817,358 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbe824102, 0xbe844104,
0xbe864106, 0xbe884108,
0xbe8a410a, 0xbe8c410c,
- 0xbe8e410e, 0xd7610002,
- 0x0000f200, 0x80798179,
- 0xd7610002, 0x0000f201,
- 0x80798179, 0xd7610002,
- 0x0000f202, 0x80798179,
- 0xd7610002, 0x0000f203,
- 0x80798179, 0xd7610002,
- 0x0000f204, 0x80798179,
- 0xd7610002, 0x0000f205,
- 0x80798179, 0xd7610002,
- 0x0000f206, 0x80798179,
- 0xd7610002, 0x0000f207,
- 0x80798179, 0xd7610002,
- 0x0000f208, 0x80798179,
- 0xd7610002, 0x0000f209,
- 0x80798179, 0xd7610002,
- 0x0000f20a, 0x80798179,
- 0xd7610002, 0x0000f20b,
- 0x80798179, 0xd7610002,
- 0x0000f20c, 0x80798179,
- 0xd7610002, 0x0000f20d,
- 0x80798179, 0xd7610002,
- 0x0000f20e, 0x80798179,
- 0xd7610002, 0x0000f20f,
- 0x80798179, 0xbf06a079,
- 0xbfa10007, 0xc4068070,
+ 0xbe8e410e, 0xbf068079,
+ 0xbfa10032, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd7610002,
+ 0x0001180c, 0xd7610002,
+ 0x00011a0d, 0xd7610002,
+ 0x00011c0e, 0xd7610002,
+ 0x00011e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xbfa00038, 0xd7610002,
+ 0x00012000, 0xd7610002,
+ 0x00012201, 0xd7610002,
+ 0x00012402, 0xd7610002,
+ 0x00012603, 0xd7610002,
+ 0x00012804, 0xd7610002,
+ 0x00012a05, 0xd7610002,
+ 0x00012c06, 0xd7610002,
+ 0x00012e07, 0xd7610002,
+ 0x00013008, 0xd7610002,
+ 0x00013209, 0xd7610002,
+ 0x0001340a, 0xd7610002,
+ 0x0001360b, 0xd7610002,
+ 0x0001380c, 0xd7610002,
+ 0x00013a0d, 0xd7610002,
+ 0x00013c0e, 0xd7610002,
+ 0x00013e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xc4068070, 0x008ce802,
+ 0x00000000, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ff88, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xc4068070,
0x008ce802, 0x00000000,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8fb4306, 0x8b7bc17b,
+ 0xbfa10044, 0x8b7aff6d,
+ 0x80000000, 0xbfa10041,
+ 0x847b897b, 0xbef6007b,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbb,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
- 0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
- 0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
- 0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
- 0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xc4068070, 0x008ce802,
- 0x00000000, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8fb4306,
- 0x8b7bc17b, 0xbfa10044,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10041, 0x847b897b,
- 0xbef6007b, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0x8070ff70,
- 0x00000200, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xd71f0000,
- 0x000100c1, 0xd7200000,
- 0x000200c1, 0x16000084,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa20013, 0xbe8300ff,
- 0x00000080, 0xbf800000,
- 0xbf800000, 0xbf800000,
- 0xd8d80000, 0x01000000,
- 0xbf8a0000, 0xc4068070,
- 0x008ce801, 0x00000000,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff3, 0xbfa00012,
- 0xbe8300ff, 0x00000100,
+ 0xbef600ff, 0x01000000,
+ 0xd71f0000, 0x000100c1,
+ 0xd7200000, 0x000200c1,
+ 0x16000084, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa20013,
+ 0xbe8300ff, 0x00000080,
0xbf800000, 0xbf800000,
0xbf800000, 0xd8d80000,
0x01000000, 0xbf8a0000,
0xc4068070, 0x008ce801,
0x00000000, 0x807d037d,
0x80700370, 0xd5250000,
- 0x0001ff00, 0x00000100,
+ 0x0001ff00, 0x00000080,
0xbf0a7b7d, 0xbfa2fff3,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20004, 0xbef000ff,
- 0x00000200, 0xbeff0080,
- 0xbfa00003, 0xbef000ff,
- 0x00000400, 0xbeff00c1,
- 0xb8fb3b05, 0x807b817b,
- 0x847b827b, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa2001b, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10040,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
- 0xc4068070, 0x008ce800,
- 0x00000000, 0xc4068070,
- 0x008ce801, 0x00008000,
- 0xc4068070, 0x008ce802,
- 0x00010000, 0xc4068070,
- 0x008ce803, 0x00018000,
- 0x807d847d, 0x8070ff70,
- 0x00000200, 0xbf0a7b7d,
- 0xbfa2ffeb, 0xbfa0002a,
+ 0xbfa00012, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8a0000, 0xc4068070,
+ 0x008ce801, 0x00000000,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a7b7d,
+ 0xbfa2fff3, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20004,
+ 0xbef000ff, 0x00000200,
+ 0xbeff0080, 0xbfa00003,
+ 0xbef000ff, 0x00000400,
+ 0xbeff00c1, 0xb8fb3b05,
+ 0x807b817b, 0x847b827b,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa2001b,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10015, 0x7e008700,
+ 0xbfa10040, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xc4068070,
0x008ce800, 0x00000000,
0xc4068070, 0x008ce801,
- 0x00010000, 0xc4068070,
- 0x008ce802, 0x00020000,
+ 0x00008000, 0xc4068070,
+ 0x008ce802, 0x00010000,
0xc4068070, 0x008ce803,
- 0x00030000, 0x807d847d,
- 0x8070ff70, 0x00000400,
+ 0x00018000, 0x807d847d,
+ 0x8070ff70, 0x00000200,
0xbf0a7b7d, 0xbfa2ffeb,
- 0xb8fb1e06, 0x8b7bc17b,
- 0xbfa1000d, 0x847b837b,
- 0x807b7d7b, 0xbefe00c1,
- 0xbeff0080, 0x7e008700,
+ 0xbfa0002a, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10015,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
0xc4068070, 0x008ce800,
- 0x00000000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff7,
- 0xbfa0016e, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xbef1007f,
- 0xb8f20742, 0x84729972,
- 0x8b6eff7f, 0x04000000,
- 0xbfa1003b, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef4306,
- 0x8b6fc16f, 0xbfa10030,
- 0x846f896f, 0xbef6006f,
+ 0x00000000, 0xc4068070,
+ 0x008ce801, 0x00010000,
+ 0xc4068070, 0x008ce802,
+ 0x00020000, 0xc4068070,
+ 0x008ce803, 0x00030000,
+ 0x807d847d, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7d,
+ 0xbfa2ffeb, 0xb8fb1e06,
+ 0x8b7bc17b, 0xbfa1000d,
+ 0x847b837b, 0x807b7d7b,
+ 0xbefe00c1, 0xbeff0080,
+ 0x7e008700, 0xc4068070,
+ 0x008ce800, 0x00000000,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff7, 0xbfa0016e,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xbef1007f, 0xb8f20742,
+ 0x84729972, 0x8b6eff7f,
+ 0x04000000, 0xbfa1003b,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef4306, 0x8b6fc16f,
+ 0xbfa10030, 0x846f896f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000d,
+ 0xc4050078, 0x0080e800,
+ 0x00000000, 0xbf8a0000,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff4,
+ 0xbfa0000c, 0xc4050078,
+ 0x0080e800, 0x00000000,
+ 0xbf8a0000, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff4, 0xbef80080,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa2002c, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000200,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10061, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00008000, 0xc4050078,
+ 0x008ce802, 0x00010000,
+ 0xc4050078, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00008000, 0xc405006e,
+ 0x008ce802, 0x00010000,
+ 0xc405006e, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0xbfa0003d, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10016, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00010000, 0xc4050078,
+ 0x008ce802, 0x00020000,
+ 0xc4050078, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000f,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
+ 0xc4050078, 0x008ce800,
+ 0x00000000, 0xbf8a0000,
+ 0x7e008500, 0x807d817d,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff6,
+ 0xbeff00c1, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00010000, 0xc405006e,
+ 0x008ce802, 0x00020000,
+ 0xc405006e, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000d, 0xc4050078,
- 0x0080e800, 0x00000000,
- 0xbf8a0000, 0xdac00000,
- 0x00000000, 0x807dff7d,
- 0x00000080, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff4, 0xbfa0000c,
- 0xc4050078, 0x0080e800,
- 0x00000000, 0xbf8a0000,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff4,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa2002c,
+ 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10061,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00008000,
- 0xc4050078, 0x008ce802,
- 0x00010000, 0xc4050078,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
+ 0xbefd00ff, 0x0000006c,
+ 0x80f89078, 0xf462403a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd847d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0x80f8a078, 0xf462603a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd887d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0x80f8c078, 0xf462803a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd907d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0xbe884308, 0xbe8a430a,
+ 0xbe8c430c, 0xbe8e430e,
+ 0xbf06807d, 0xbfa1fff0,
+ 0xb980f801, 0x00000000,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00008000,
- 0xc405006e, 0x008ce802,
- 0x00010000, 0xc405006e,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0xbfa0003d,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10016,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00010000,
- 0xc4050078, 0x008ce802,
- 0x00020000, 0xc4050078,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000f, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xc4050078,
- 0x008ce800, 0x00000000,
- 0xbf8a0000, 0x7e008500,
- 0x807d817d, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff6, 0xbeff00c1,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00010000,
- 0xc405006e, 0x008ce802,
- 0x00020000, 0xc405006e,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef600ff,
- 0x01000000, 0xbefd00ff,
- 0x0000006c, 0x80f89078,
- 0xf462403a, 0xf0000000,
- 0xbf8a0000, 0x80fd847d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0x80f8a078,
- 0xf462603a, 0xf0000000,
- 0xbf8a0000, 0x80fd887d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0x80f8c078,
- 0xf462803a, 0xf0000000,
- 0xbf8a0000, 0x80fd907d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0xbe884308,
- 0xbe8a430a, 0xbe8c430c,
- 0xbe8e430e, 0xbf06807d,
- 0xbfa1fff0, 0xb980f801,
- 0x00000000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xbeff0071,
- 0xf4621bfa, 0xf0000000,
- 0x80788478, 0xf4621b3a,
+ 0xbeff0071, 0xf4621bfa,
0xf0000000, 0x80788478,
- 0xf4621b7a, 0xf0000000,
- 0x80788478, 0xf4621c3a,
+ 0xf4621b3a, 0xf0000000,
+ 0x80788478, 0xf4621b7a,
0xf0000000, 0x80788478,
- 0xf4621c7a, 0xf0000000,
- 0x80788478, 0xf4621eba,
+ 0xf4621c3a, 0xf0000000,
+ 0x80788478, 0xf4621c7a,
0xf0000000, 0x80788478,
- 0xf4621efa, 0xf0000000,
- 0x80788478, 0xf4621e7a,
+ 0xf4621eba, 0xf0000000,
+ 0x80788478, 0xf4621efa,
0xf0000000, 0x80788478,
- 0xf4621cfa, 0xf0000000,
- 0x80788478, 0xf4621bba,
+ 0xf4621e7a, 0xf0000000,
+ 0x80788478, 0xf4621cfa,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef814,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef815, 0xf4621bba,
+ 0xb96ef814, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef812,
+ 0xbf8a0000, 0xb96ef815,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef813, 0x8b6eff7f,
- 0x04000000, 0xbfa1000d,
- 0x80788478, 0xf4621bba,
+ 0xb96ef812, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xbf0d806e,
- 0xbfa10006, 0x856e906e,
- 0x8b6e6e6e, 0xbfa10003,
- 0xbe804ec1, 0x816ec16e,
- 0xbfa0fffb, 0xbefd006f,
- 0xbefe0070, 0xbeff0071,
- 0xb97b2011, 0x857b867b,
- 0xb97b0191, 0x857b827b,
- 0xb97bba11, 0xb973f801,
- 0xb8ee3b05, 0x806e816e,
- 0xbf0d9972, 0xbfa20002,
- 0x846e896e, 0xbfa00001,
- 0x846e8a6e, 0xb8ef1e06,
- 0x846f8a6f, 0x806e6f6e,
- 0x806eff6e, 0x00000200,
- 0x806e746e, 0x826f8075,
- 0x8b6fff6f, 0x0000ffff,
- 0xf4605c37, 0xf8000050,
- 0xf4605d37, 0xf8000060,
- 0xf4601e77, 0xf8000074,
- 0xbf8a0000, 0x8b6dff6d,
- 0x0000ffff, 0x8bfe7e7e,
- 0x8bea6a6a, 0xb97af804,
+ 0xbf8a0000, 0xb96ef813,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1000d, 0x80788478,
+ 0xf4621bba, 0xf0000000,
+ 0x80788478, 0xbf8a0000,
+ 0xbf0d806e, 0xbfa10006,
+ 0x856e906e, 0x8b6e6e6e,
+ 0xbfa10003, 0xbe804ec1,
+ 0x816ec16e, 0xbfa0fffb,
+ 0xbefd006f, 0xbefe0070,
+ 0xbeff0071, 0xb97b2011,
+ 0x857b867b, 0xb97b0191,
+ 0x857b827b, 0xb97bba11,
+ 0xb973f801, 0xb8ee3b05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbfa20002, 0x846e896e,
+ 0xbfa00001, 0x846e8a6e,
+ 0xb8ef1e06, 0x846f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x8b6fff6f,
+ 0x0000ffff, 0xf4605c37,
+ 0xf8000050, 0xf4605d37,
+ 0xf8000060, 0xf4601e77,
+ 0xf8000074, 0xbf8a0000,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb97af804, 0xbe804ec2,
+ 0xbf94fffe, 0xbe804a6c,
0xbe804ec2, 0xbf94fffe,
- 0xbe804a6c, 0xbe804ec2,
- 0xbf94fffe, 0xbfb10000,
+ 0xbfb10000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
- 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
index 7b9d36e5fa43..5a1a1b1f897f 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
@@ -30,6 +30,7 @@
#define CHIP_GFX12 37
#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
+#define HAVE_VALU_SGPR_HAZARD (ASIC_FAMILY == CHIP_GFX12)
var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
@@ -351,6 +352,7 @@ L_HAVE_VGPRS:
v_writelane_b32 v0, ttmp13, 0xD
v_writelane_b32 v0, exec_lo, 0xE
v_writelane_b32 v0, exec_hi, 0xF
+ valu_sgpr_hazard()
s_mov_b32 exec_lo, 0x3FFF
s_mov_b32 exec_hi, 0x0
@@ -417,7 +419,6 @@ L_SAVE_HWREG:
v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource
v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource
v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store
- s_mov_b32 m0, 0x0 //Next lane of v2 to write to
// Ensure no further changes to barrier or LDS state.
// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
@@ -430,40 +431,41 @@ L_SAVE_HWREG:
s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp
- write_hwreg_to_v2(s_save_m0)
- write_hwreg_to_v2(s_save_pc_lo)
s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
- write_hwreg_to_v2(s_save_tmp)
- write_hwreg_to_v2(s_save_exec_lo)
- write_hwreg_to_v2(s_save_exec_hi)
- write_hwreg_to_v2(s_save_state_priv)
+ v_writelane_b32 v2, s_save_m0, 0x0
+ v_writelane_b32 v2, s_save_pc_lo, 0x1
+ v_writelane_b32 v2, s_save_tmp, 0x2
+ v_writelane_b32 v2, s_save_exec_lo, 0x3
+ v_writelane_b32 v2, s_save_exec_hi, 0x4
+ v_writelane_b32 v2, s_save_state_priv, 0x5
+ v_writelane_b32 v2, s_save_xnack_mask, 0x7
+ valu_sgpr_hazard()
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0x6
- write_hwreg_to_v2(s_save_xnack_mask)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_MODE)
+ v_writelane_b32 v2, s_save_tmp, 0x8
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_MODE)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
+ v_writelane_b32 v2, s_save_tmp, 0x9
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
+ v_writelane_b32 v2, s_save_tmp, 0xA
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ v_writelane_b32 v2, s_save_tmp, 0xB
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- write_hwreg_to_v2(s_save_m0)
-
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ v_writelane_b32 v2, s_save_tmp, 0xC
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0xD
s_get_barrier_state s_save_tmp, -1
s_wait_kmcnt (0)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0xE
+ valu_sgpr_hazard()
// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
s_mov_b32 exec_lo, 0xFFFF
@@ -497,10 +499,12 @@ L_SAVE_SGPR_LOOP:
s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
- write_16sgpr_to_v2(s0)
-
- s_cmp_eq_u32 ttmp13, 0x20 //have 32 VGPR lanes filled?
- s_cbranch_scc0 L_SAVE_SGPR_SKIP_TCP_STORE
+ s_cmp_eq_u32 ttmp13, 0x0
+ s_cbranch_scc0 L_WRITE_V2_SECOND_HALF
+ write_16sgpr_to_v2(s0, 0x0)
+ s_branch L_SAVE_SGPR_SKIP_TCP_STORE
+L_WRITE_V2_SECOND_HALF:
+ write_16sgpr_to_v2(s0, 0x10)
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80
@@ -1056,27 +1060,21 @@ L_END_PGM:
s_endpgm_saved
end
-function write_hwreg_to_v2(s)
- // Copy into VGPR for later TCP store.
- v_writelane_b32 v2, s, m0
- s_add_u32 m0, m0, 0x1
-end
-
-
-function write_16sgpr_to_v2(s)
+function write_16sgpr_to_v2(s, lane_offset)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
- v_writelane_b32 v2, s[sgpr_idx], ttmp13
- s_add_u32 ttmp13, ttmp13, 0x1
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx + lane_offset
end
+ valu_sgpr_hazard()
+ s_add_u32 ttmp13, ttmp13, 0x10
end
function write_12sgpr_to_v2(s)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
- v_writelane_b32 v2, s[sgpr_idx], ttmp13
- s_add_u32 ttmp13, ttmp13, 0x1
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx
end
+ valu_sgpr_hazard()
end
function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
@@ -1128,3 +1126,11 @@ function get_wave_size2(s_reg)
s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
end
+
+function valu_sgpr_hazard
+#if HAVE_VALU_SGPR_HAZARD
+ for var rep = 0; rep < 8; rep ++
+ ds_nop
+ end
+#endif
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d4593374e7a1..34c2c42c0f95 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1230,11 +1230,13 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
decrement_queue_count(dqm, qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
- retval = remove_queue_mes(dqm, q, qpd);
- if (retval) {
+ int err;
+
+ err = remove_queue_mes(dqm, q, qpd);
+ if (err) {
dev_err(dev, "Failed to evict queue %d\n",
q->properties.queue_id);
- goto out;
+ retval = err;
}
}
}
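
Editor's note: the hunk above switches evict_process_queues_cpsch() from abort-on-first-failure to record-and-continue, so one failed remove_queue_mes() call no longer leaves the remaining queues unevicted. A minimal sketch of the pattern, with hypothetical struct item / remove_one() names:

struct item;
int remove_one(struct item *it);

static int remove_all(struct item **items, int n)
{
	int retval = 0;
	int i;

	for (i = 0; i < n; i++) {
		int err = remove_one(items[i]);

		if (err)
			retval = err;	/* remember the failure, keep going */
	}
	return retval;			/* last error seen, or 0 */
}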
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 24396a2c77bd..4afff7094caf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -233,6 +233,7 @@ void kfd_queue_buffer_put(struct amdgpu_bo **bo)
int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
{
struct kfd_topology_device *topo_dev;
+ u64 expected_queue_size;
struct amdgpu_vm *vm;
u32 total_cwsr_size;
int err;
@@ -241,6 +242,15 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
if (!topo_dev)
return -EINVAL;
+ /* AQL queues on GFX7 and GFX8 appear to be twice their actual size */
+ if (properties->type == KFD_QUEUE_TYPE_COMPUTE &&
+ properties->format == KFD_QUEUE_FORMAT_AQL &&
+ topo_dev->node_props.gfx_target_version >= 70000 &&
+ topo_dev->node_props.gfx_target_version < 90000)
+ expected_queue_size = properties->queue_size / 2;
+ else
+ expected_queue_size = properties->queue_size;
+
vm = drm_priv_to_vm(pdd->drm_priv);
err = amdgpu_bo_reserve(vm->root.bo, false);
if (err)
@@ -255,7 +265,7 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
- &properties->ring_bo, properties->queue_size);
+ &properties->ring_bo, expected_queue_size);
if (err)
goto out_err_unreserve;
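
Editor's note: per the comment in this hunk, GFX7/GFX8 AQL compute queues report twice their real ring size, so the ring BO is validated against queue_size / 2. A hedged sketch of the adjustment as a stand-alone helper (the helper name is hypothetical; the patch open-codes this, and also restricts it to compute queues in AQL format):

static u64 aql_expected_ring_size(u64 queue_size, u32 gfx_target_version)
{
	/* e.g. a 64 KiB ring is reported with queue_size == 128 KiB */
	if (gfx_target_version >= 70000 && gfx_target_version < 90000)
		return queue_size / 2;
	return queue_size;
}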
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index bd3e20d981e0..9477a4adcd36 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1286,13 +1286,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
- if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_node != node)
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- } else {
- mapping_flags |= coherent ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- }
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
break;
default:
mapping_flags |= coherent ?
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9d9645a2d18e..39df45f652b3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -245,6 +245,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -1741,7 +1745,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_
}
if (quirk_entries.support_edp0_on_dp1) {
init_data->flags.support_edp0_on_dp1 = true;
- drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
}
}
@@ -3371,8 +3375,19 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
mutex_unlock(&dm->dc_lock);
+ /* set the backlight after a reset */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i])
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
return 0;
}
+
+ /* leave display off for S4 sequence */
+ if (adev->in_s4)
+ return 0;
+
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
dm_state->context = dc_state_create(dm->dc, NULL);
@@ -4906,6 +4921,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->backlight_dev[aconnector->bl_idx] =
backlight_device_register(bl_name, aconnector->base.kdev, dm,
&amdgpu_dm_backlight_ops, &props);
+ dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
DRM_ERROR("DM: Backlight registration failed!\n");
@@ -4973,7 +4989,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
aconnector->bl_idx = bl_idx;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
dm->backlight_link[bl_idx] = link;
dm->num_of_edps++;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index e339c7a8d541..c0dc23244049 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -455,6 +455,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
for (i = 0; i < hdcp_work->max_link; i++) {
cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
}
sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index c4a7fd453e5f..a215234151ac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -894,8 +894,16 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
int i;
+ /* First, clear all hpd and hpdrx interrupts */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
+ if (!dc_interrupt_set(adev->dm.dc, i, false))
+ drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
+ i);
+ }
+
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector;
@@ -908,10 +916,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
+ /*
+ * Get a base driver irq reference for hpd ints for the lifetime
+ * of dm. Note that only hpd interrupt types are registered with
+ * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
+ * hpd_rx isn't available. DM currently controls hpd_rx
+ * explicitly with dc_interrupt_set().
+ */
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- true);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+ /*
+ * TODO: There's a mismatch between mode_info.num_hpd
+ * and what bios reports as the # of connectors with hpd
+ * sources. Since the # of hpd source types registered
+ * with base driver == mode_info.num_hpd, we have to
+ * fallback to dc_interrupt_set for the remaining types.
+ */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -921,12 +950,6 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
-
- /* Update reference counts for HPDs */
- for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
- if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
- drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
- }
}
/**
@@ -942,7 +965,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- int i;
+ int irq_type;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -956,9 +979,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- false);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+
+ /* TODO: See same TODO in amdgpu_dm_hpd_init() */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ false);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -968,10 +1000,4 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
-
- /* Update reference counts for HPDs */
- for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
- if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
- drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
- }
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 774cc3f4f3fd..92472109f84a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -277,8 +277,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
if (!dcc->enable)
return 0;
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
- !dc->cap_funcs.get_dcc_compression_cap)
+ if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
+ format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return -EINVAL;
+
+ if (!dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL;
input.format = format;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a45037cb4cc0..298668e9729c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3389,10 +3389,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
break;
case COLOR_DEPTH_121212:
normalized_pix_clk = (pix_clk * 36) / 24;
- break;
+ break;
+ case COLOR_DEPTH_141414:
+ normalized_pix_clk = (pix_clk * 42) / 24;
+ break;
case COLOR_DEPTH_161616:
normalized_pix_clk = (pix_clk * 48) / 24;
- break;
+ break;
default:
ASSERT(0);
break;
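
Editor's note: the switch above normalizes the pixel clock by total bits per pixel relative to the 24-bit (8 bpc) baseline, i.e. factor = (bpc * 3) / 24, so the newly added 14 bpc case uses 42/24. A small sketch with a worked value:

/* 300000 kHz at 14 bpc: 300000 * 42 / 24 = 525000 kHz */
static int norm_pix_clk_sketch(int pix_clk, int bpc)
{
	return pix_clk * (bpc * 3) / 24;
}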
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..6e2fce329d73 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -69,5 +69,16 @@ bool should_use_dmub_lock(struct dc_link *link)
if (link->replay_settings.replay_feature_enabled)
return true;
+ /* only use HW lock for PSR1 on single eDP */
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(link->dc, edp_links, &edp_num);
+
+ if (edp_num == 1)
+ return true;
+ }
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index e5fb0e8333e4..e691a1cf3356 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = {
dce60_timing_generator_enable_advanced_request,
.configure_crc = dce60_configure_crc,
.get_crc = dce110_get_crc,
+ .is_two_pixels_per_container = dce110_is_two_pixels_per_container,
};
void dce60_timing_generator_construct(
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 15ea216e903d..6157886f4802 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -704,7 +704,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw6.region.base = DMUB_CW6_BASE;
cw6.region.top = cw6.region.base + fw_state_fb->size;
- dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->fw_state = (void *)((uintptr_t)(fw_state_fb->cpu_addr) + DMUB_DEBUG_FW_STATE_OFFSET);
region6.offset.quad_part = shared_state_fb->gpu_addr;
region6.region.base = DMUB_CW6_BASE;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e8ae7681bf0a..77b1f061bbf0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2421,6 +2421,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
*states = ATTR_STATE_SUPPORTED;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5cad09c5f2ff..3f1fcf8c4ee8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1193,16 +1193,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- PPTable_t *pptable = smu->smu_table.driver_pptable;
- const OverDriveLimits_t * const overdrive_upperlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMax;
- const OverDriveLimits_t * const overdrive_lowerlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMin;
-
size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
- size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
- overdrive_lowerlimits->GfxclkFoffset,
- overdrive_upperlimits->GfxclkFoffset);
+ size += sysfs_emit_at(buf, size, "%dMhz\n",
+ od_table->OverDriveTable.GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1337,12 +1330,8 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &min_value,
- NULL);
- smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
- NULL,
+ &min_value,
&max_value);
size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
@@ -1627,6 +1616,39 @@ out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}
+static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANPWM,
+ speed);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
+ return ret;
+ }
+
+ /* Convert the PMFW output, which is in percent, to the 0-255 PWM scale */
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ if (!speed)
+ return -EINVAL;
+
+ return smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
+}
+
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -2417,36 +2439,24 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -ENOTSUPP;
}
- for (i = 0; i < size; i += 2) {
- if (i + 2 > size) {
- dev_info(adev->dev, "invalid number of input parameters %d\n", size);
- return -EINVAL;
- }
-
- switch (input[i]) {
- case 1:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMAX,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
+ if (size != 1) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
- default:
- dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
- dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
- return -EINVAL;
- }
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
+ minimum, maximum);
+ return -EINVAL;
}
+ od_table->OverDriveTable.GfxclkFoffset = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
@@ -2804,6 +2814,8 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.set_performance_level = smu_v14_0_set_performance_level,
.gfx_off_control = smu_v14_0_gfx_off_control,
.get_unique_id = smu_v14_0_2_get_unique_id,
+ .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
+ .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
.get_power_limit = smu_v14_0_2_get_power_limit,
.set_power_limit = smu_v14_0_2_set_power_limit,
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
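
Editor's note: the new smu_v14_0_2_get_fan_speed_pwm() maps the firmware's percentage reading onto the 0-255 PWM scale that hwmon expects. A sketch of the conversion with a worked value (the helper name is hypothetical):

/* 50 % -> 50 * 255 / 100 = 127; the clamp guards against readings above 100 % */
static u32 fan_percent_to_pwm(u32 percent)
{
	return min(percent * 255 / 100, 255u);
}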
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 06c91c5b7f7c..6d09bef671da 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -4025,6 +4025,22 @@ out:
return 0;
}
+static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
+{
+ bool probing_done = false;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
+ probing_done = mgr->mst_primary->link_address_sent;
+ drm_dp_mst_topology_put_mstb(mgr->mst_primary);
+ }
+
+ mutex_unlock(&mgr->lock);
+
+ return probing_done;
+}
+
static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_pending_up_req *up_req)
@@ -4055,8 +4071,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
- dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
- hotplug = true;
+ if (!primary_mstb_probing_is_done(mgr)) {
+ drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
+ } else {
+ dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
}
drm_dp_mst_topology_put_mstb(mstb);
@@ -4138,10 +4158,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
+ drm_dp_mst_topology_put_mstb(mst_primary);
+
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
- bool handle_csn;
drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
conn_stat->port_number,
@@ -4150,16 +4171,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->message_capability_status,
conn_stat->input_port,
conn_stat->peer_device_type);
-
- mutex_lock(&mgr->probe_lock);
- handle_csn = mst_primary->link_address_sent;
- mutex_unlock(&mgr->probe_lock);
-
- if (!handle_csn) {
- drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
- kfree(up_req);
- goto out_put_primary;
- }
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
@@ -4174,9 +4185,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
-
-out_put_primary:
- drm_dp_mst_topology_put_mstb(mst_primary);
out_clear_reply:
reset_msg_rx_state(&mgr->up_req_recv);
return ret;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 370dc676e3aa..fd36b8fd54e9 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -956,6 +956,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
+
+ if (connector->dpms == mode)
+ goto out;
+
connector->dpms = mode;
crtc = connector->state->crtc;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 5f24d6b41cc6..48b08c9611a7 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1427,6 +1427,10 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* callback. For atomic drivers the remapping to the "ACTIVE" property is
* implemented in the DRM core.
*
+ * On atomic drivers any DPMS setproperty ioctl where the value does not
+ * change is completely skipped; otherwise a full atomic commit occurs.
+ * On legacy drivers the exact behavior is driver specific.
+ *
* Note that this property cannot be set through the MODE_ATOMIC ioctl,
* userspace must use "ACTIVE" on the CRTC instead.
*
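
Editor's note: for context, userspace still drives this path through the legacy property-setting ioctl. A hedged libdrm sketch, assuming libdrm's drmModeConnectorSetProperty(); the connector and property IDs are placeholders you would discover via drmModeGetConnector()/drmModeGetProperty():

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* With the change above, repeating the connector's current DPMS value
 * is a no-op on atomic drivers instead of triggering a full commit. */
static int set_dpms_off(int fd, uint32_t connector_id, uint32_t dpms_prop_id)
{
	return drmModeConnectorSetProperty(fd, connector_id, dpms_prop_id,
					   DRM_MODE_DPMS_OFF);
}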
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index bcf248f69252..6903e2010cb9 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -545,7 +545,7 @@ impl EncodedMsg<'_> {
}
self.push(&mut offset, (MODE_STOP, 4));
- let pad_offset = (offset + 7) / 8;
+ let pad_offset = offset.div_ceil(8);
for i in pad_offset..self.version.max_data() {
self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)];
}
@@ -659,7 +659,7 @@ struct QrImage<'a> {
impl QrImage<'_> {
fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> {
let width = em.version.width();
- let stride = (width + 7) / 8;
+ let stride = width.div_ceil(8);
let data = qrdata;
let mut qr_image = QrImage {
@@ -911,16 +911,16 @@ impl QrImage<'_> {
///
/// * `url`: The base URL of the QR code. It will be encoded as Binary segment.
/// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it
-/// will be encoded as binary segment, otherwise it will be encoded
-/// efficiently as a numeric segment, and appended to the URL.
+/// will be encoded as binary segment, otherwise it will be encoded
+/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data, that needs to be encoded, must be less
-/// than data_size.
+/// than data_size.
/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
-/// a V40 QR code. It will then be overwritten with the QR code image.
+/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
-/// segments and ECC.
+/// segments and ECC.
/// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes
-/// long for V40.
+/// long for V40.
///
/// # Safety
///
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 7e76790c6a81..cba97d7db131 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -279,6 +279,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
0, PCI_DEVFN(2, 0));
int ret = -1;
+ if (pci_gfx_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
/* Get the address of the platform config vbt */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index f59abfa7622a..0d49f168a919 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -154,6 +154,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
return 0;
err_free_mmio:
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
@@ -172,6 +173,7 @@ static void hyperv_vmbus_remove(struct hv_device *hdev)
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
}
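
Editor's note: both hyperv_drm hunks restore the usual reverse-order unwind: the VRAM mapping is torn down before the MMIO region it lives in is released, on the error path and in remove alike. A sketch of the idiom with entirely hypothetical names:

struct ctx { void __iomem *vram; };
int claim_mmio(struct ctx *c);
void release_mmio(struct ctx *c);
void __iomem *map_vram(struct ctx *c);
void unmap_vram(struct ctx *c);
int register_device(struct ctx *c);

static int probe_sketch(struct ctx *c)
{
	int ret;

	ret = claim_mmio(c);
	if (ret)
		return ret;
	c->vram = map_vram(c);
	if (!c->vram) {
		ret = -ENOMEM;
		goto err_free_mmio;
	}
	ret = register_device(c);
	if (ret)
		goto err_unmap;
	return 0;

err_unmap:
	unmap_vram(c);		/* the step the fix adds */
err_free_mmio:
	release_mmio(c);
	return ret;
}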
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 41128469f12a..c9dcf2bbd4c7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -7830,9 +7830,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_program_dpkgc_latency(state);
- if (state->modeset)
- intel_set_cdclk_post_plane_update(state);
-
intel_wait_for_vblank_workers(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -7906,6 +7903,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
+ if (state->modeset)
+ intel_set_cdclk_post_plane_update(state);
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 21274aa9bddd..c3dabb857960 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -164,6 +164,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 4 - Support multiple fault handlers per object depending on object's
* backing storage (a.k.a. MMAP_OFFSET).
*
+ * 5 - Support multiple partial mmaps (mmap part of a BO, then unmap an offset,
+ * multiple times with different sizes and offsets).
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -191,7 +194,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 4;
+ return 5;
}
static inline struct i915_gtt_view
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index b3cbf85c00cb..00d00c480cc5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -231,8 +231,8 @@ static void delayed_huc_load_init(struct intel_huc *huc)
sw_fence_dummy_notify);
i915_sw_fence_commit(&huc->delayed_load.fence);
- hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
+ hrtimer_setup(&huc->delayed_load.timer, huc_delayed_load_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static void delayed_huc_load_fini(struct intel_huc *huc)
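
Editor's note: the same two-step hrtimer_init() plus ->function assignment is converted to a single hrtimer_setup() call here and again in the gvt, i915_perf, i915_pmu, i915_request, intel_uncore, msm, vkms, vmwgfx and xe_oa hunks below. A one-time sketch of the pattern:

#include <linux/hrtimer.h>

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void init_timer_sketch(struct hrtimer *timer)
{
	/*
	 * Before:
	 *	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *	timer->function = my_timer_fn;
	 * After: the callback is supplied up front, so there is no window
	 * with an initialized timer whose ->function is still unset.
	 */
	hrtimer_setup(timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}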
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 95570cabdf27..f668cd9487f1 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -581,8 +581,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
vgpu->display.port_num = port_num;
/* Init hrtimer based on default refresh rate */
- hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- vblank_timer->timer.function = vblank_timer_fn;
+ hrtimer_setup(&vblank_timer->timer, vblank_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vblank_timer->vrefresh_k = port->vrefresh_k;
vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c077fb4674f0..9f97f743aa71 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -286,8 +286,7 @@ static int tbs_sched_init(struct intel_gvt *gvt)
return -ENOMEM;
INIT_LIST_HEAD(&data->lru_runq_head);
- hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- data->timer.function = tbs_timer_fn;
+ hrtimer_setup(&data->timer, tbs_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 5384d1bb4923..279e266b4b06 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3359,9 +3359,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
"opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
- hrtimer_init(&stream->poll_check_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- stream->poll_check_timer.function = oa_poll_check_timer_cb;
+ hrtimer_setup(&stream->poll_check_timer, oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
mutex_init(&stream->lock);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index e55db036be1b..0ce87f188d11 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1264,8 +1264,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
int ret = -ENOMEM;
spin_lock_init(&pmu->lock);
- hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pmu->timer.function = i915_sample;
+ hrtimer_setup(&pmu->timer, i915_sample, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->cpuhp.cpu = -1;
init_rc6(pmu);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8f62cfa23fb7..ea0b8e7e4828 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -293,8 +293,7 @@ static void __rq_init_watchdog(struct i915_request *rq)
{
struct i915_request_watchdog *wdg = &rq->watchdog;
- hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- wdg->timer.function = __rq_watchdog_expired;
+ hrtimer_setup(&wdg->timer, __rq_watchdog_expired, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
static void __rq_arm_watchdog(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index eed4937c3ff3..bdcfcae83b52 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2103,8 +2103,7 @@ static int __fw_domain_init(struct intel_uncore *uncore,
d->mask = BIT(domain_id);
- hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- d->timer.function = intel_uncore_fw_release_timer;
+ hrtimer_setup(&d->timer, intel_uncore_fw_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
uncore->fw_domains |= BIT(domain_id);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 1a5d4f1c8b42..d41e5a6bbee0 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -65,8 +65,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
fctx->completed_fence = fctx->last_fence;
*fctx->fenceptr = fctx->last_fence;
- hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fctx->deadline_timer.function = deadline_timer;
+ hrtimer_setup(&fctx->deadline_timer, deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
kthread_init_work(&fctx->deadline_work, deadline_work);
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
index afedd61c3e28..a6efe1eac271 100644
--- a/drivers/gpu/drm/msm/msm_io_utils.c
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -135,8 +135,7 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
clockid_t clock_id,
enum hrtimer_mode mode)
{
- hrtimer_init(&work->timer, clock_id, mode);
- work->timer.function = msm_hrtimer_worktimer;
+ hrtimer_setup(&work->timer, msm_hrtimer_worktimer, clock_id, mode);
work->worker = worker;
kthread_init_work(&work->work, fn);
}
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index d1871af967d4..2355a78e1b69 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
int session_idx = -1;
bool destroyed = false, created = false, allocated = false;
- uint32_t tmp, handle = 0;
+ uint32_t tmp = 0, handle = 0;
uint32_t *size = &tmp;
int i, r = 0;
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 69bcf0e99d57..da00572d7d42 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -259,9 +259,16 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
dma_fence_get(&s_fence->finished);
- if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb))
+ if (!prev ||
+ dma_fence_add_callback(prev, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb)) {
+ /*
+ * Adding callback above failed.
+ * dma_fence_put() checks for NULL.
+ */
+ dma_fence_put(prev);
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ }
prev = &s_fence->finished;
}
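
Editor's note: dma_fence_add_callback() returns -ENOENT when the fence has already signaled, so the branch above must both run the callback by hand and drop the reference it would have consumed; dma_fence_put() accepts NULL, as the new comment states. A reduced sketch (my_cb is a hypothetical dma_fence_func_t):

#include <linux/dma-fence.h>

static void my_cb(struct dma_fence *f, struct dma_fence_cb *cb);

static void chain_or_run(struct dma_fence *prev, struct dma_fence_cb *cb)
{
	if (!prev || dma_fence_add_callback(prev, cb, my_cb)) {
		dma_fence_put(prev);	/* NULL-safe */
		my_cb(NULL, cb);	/* fence already signaled (or absent) */
	}
}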
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index da08ddb01d21..05608c894ed9 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -226,8 +226,12 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
unsigned long irqflags;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ spin_lock_irqsave(&v3d->job_lock, irqflags);
+ v3d->bin_job = NULL;
+ spin_unlock_irqrestore(&v3d->job_lock, irqflags);
return NULL;
+ }
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
@@ -281,8 +285,10 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->render_job = NULL;
return NULL;
+ }
v3d->render_job = job;
@@ -327,11 +333,17 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->tfu_job = NULL;
+ return NULL;
+ }
+
+ v3d->tfu_job = job;
+
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
return NULL;
- v3d->tfu_job = job;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
@@ -369,6 +381,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
int i, csd_cfg0_reg;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->csd_job = NULL;
+ return NULL;
+ }
+
v3d->csd_job = job;
v3d_invalidate_caches(v3d);
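
Editor's note: each v3d ->run_job() callback above now clears its queue's cached active-job pointer before bailing out on a fence that already carries an error, so later timeout and reset handling cannot dereference a stale job. A generic sketch (the struct names and create_hw_fence() are hypothetical):

struct my_job { struct drm_sched_job base; };
struct my_dev { struct my_job *active_job; };
struct dma_fence *create_hw_fence(struct my_dev *dev);

static struct dma_fence *run_job_sketch(struct my_dev *dev, struct my_job *job)
{
	if (unlikely(job->base.s_fence->finished.error)) {
		dev->active_job = NULL;	/* don't leave a stale pointer */
		return NULL;
	}
	dev->active_job = job;
	return create_hw_fence(dev);
}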
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 28a57ae109fc..ae4e36bc337c 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -64,8 +64,8 @@ static int vkms_enable_vblank(struct drm_crtc *crtc)
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- out->vblank_hrtimer.function = &vkms_vblank_simulate;
+ hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
out->period_ns = ktime_set(0, vblank->framedur_ns);
hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
index 8651b788e98b..aec774fa4d7b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -290,8 +290,8 @@ vmw_vkms_enable_vblank(struct drm_crtc *crtc)
drm_calc_timestamping_constants(crtc, &crtc->mode);
- hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- du->vkms.timer.function = &vmw_vkms_vblank_simulate;
+ hrtimer_setup(&du->vkms.timer, &vmw_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index d9386ab03140..43bf6f140d40 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -341,7 +341,6 @@ static inline unsigned int xe_sg_segment_size(struct device *dev)
return round_down(max / 2, PAGE_SIZE);
}
-#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/**
* xe_bo_is_mem_type - Whether the bo currently resides in the given
* TTM memory type
@@ -356,4 +355,3 @@ static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
return bo->ttm.resource->mem_type == mem_type;
}
#endif
-#endif
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index c5b95470fa32..f67803e15a0e 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -58,7 +58,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
* 1) Avoid pinning in a placement not accessible to some importers.
* 2) Pinning in VRAM requires PIN accounting which is a to-do.
*/
- if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
+ if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index df7f130fb663..b995d1d51aed 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -6,6 +6,7 @@
#include "xe_guc_pc.h"
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>
@@ -19,6 +20,7 @@
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
+#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
@@ -49,6 +51,9 @@
#define LNL_MERT_FREQ_CAP 800
#define BMG_MERT_FREQ_CAP 2133
+#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
+#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
+
/**
* DOC: GuC Power Conservation (PC)
*
@@ -113,9 +118,10 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
static int wait_for_pc_state(struct xe_guc_pc *pc,
- enum slpc_global_state state)
+ enum slpc_global_state state,
+ int timeout_ms)
{
- int timeout_us = 5000; /* rought 5ms, but no need for precision */
+ int timeout_us = 1000 * timeout_ms;
int slept, wait = 10;
xe_device_assert_mem_access(pc_to_xe(pc));
@@ -164,7 +170,8 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
/* Blocking here to ensure the results are ready before reading them */
@@ -187,7 +194,8 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -208,7 +216,8 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -440,6 +449,15 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
return freq;
}
+static u32 get_cur_freq(struct xe_gt *gt)
+{
+ u32 freq;
+
+ freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
+ freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
+ return decode_freq(freq);
+}
+
/**
* xe_guc_pc_get_cur_freq - Get Current requested frequency
* @pc: The GuC PC
@@ -463,10 +481,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
return -ETIMEDOUT;
}
- *freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
-
- *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
- *freq = decode_freq(*freq);
+ *freq = get_cur_freq(gt);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
@@ -1002,6 +1017,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
unsigned int fw_ref;
+ ktime_t earlier;
int ret;
xe_gt_assert(gt, xe_device_uc_enabled(xe));
@@ -1026,14 +1042,25 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
memset(pc->bo->vmap.vaddr, 0, size);
slpc_shared_data_write(pc, header.size, size);
+ earlier = ktime_get();
ret = pc_action_reset(pc);
if (ret)
goto out;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
- xe_gt_err(gt, "GuC PC Start failed\n");
- ret = -EIO;
- goto out;
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS)) {
+ xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
+ xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
+ xe_gt_throttle_get_limit_reasons(gt));
+
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
+ xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
+ goto out;
+ }
+
+ xe_gt_warn(gt, "GuC PC excessive start time: %lldms",
+ ktime_ms_delta(ktime_get(), earlier));
}
ret = pc_init_freqs(pc);
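
Editor's note: pc_start() now waits in two stages: a short SLPC_RESET_TIMEOUT_MS pass that only warns (with frequency and throttle diagnostics), then an extended SLPC_RESET_EXTENDED_TIMEOUT_MS pass, with ktime_ms_delta() reporting how long the slow path took. Note the patch logs the extended-timeout failure and bails out without returning an error, leaving frequency control disabled rather than failing start. A sketch of the staged-timeout idiom (wait_ready() is a hypothetical stand-in for wait_for_pc_state(), returning nonzero on timeout):

#include <linux/ktime.h>
#include <linux/printk.h>

int wait_ready(int timeout_ms);

static void start_staged_sketch(void)
{
	ktime_t earlier = ktime_get();

	if (!wait_ready(5))		/* short pass: the common case */
		return;

	pr_warn("start taking longer than normal\n");
	if (wait_ready(1000)) {		/* extended pass before giving up */
		pr_err("start failed\n");
		return;
	}
	pr_warn("excessive start time: %lldms\n",
		ktime_ms_delta(ktime_get(), earlier));
}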
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index b6a2dd742ebd..1a5fe4822a62 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1246,11 +1246,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
+ release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
- release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 392102515f3d..c3cc0fa105e8 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -138,13 +138,17 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
i += size;
if (unlikely(j == st->nents - 1)) {
+ xe_assert(xe, i >= npages);
if (i > npages)
size -= (i - npages);
+
sg_mark_end(sgl);
+ } else {
+ xe_assert(xe, i < npages);
}
+
sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
}
- xe_assert(xe, i == npages);
return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index eb6cd91e1e22..1fa46a04425e 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1766,8 +1766,8 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
WRITE_ONCE(u->exclusive_stream, stream);
- hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- stream->poll_check_timer.function = xe_oa_poll_check_timer_cb;
+ hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index c9cc0c091dfd..89fd2c043136 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -267,6 +267,15 @@ int xe_pm_init_early(struct xe_device *xe)
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
+static u32 vram_threshold_value(struct xe_device *xe)
+{
+ /* FIXME: D3Cold temporarily disabled by default on BMG */
+ if (xe->info.platform == XE_BATTLEMAGE)
+ return 0;
+
+ return DEFAULT_VRAM_THRESHOLD;
+}
+
/**
* xe_pm_init - Initialize Xe Power Management
* @xe: xe device instance
@@ -277,6 +286,7 @@ ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
*/
int xe_pm_init(struct xe_device *xe)
{
+ u32 vram_threshold;
int err;
/* For now suspend/resume is only allowed with GuC */
@@ -290,7 +300,8 @@ int xe_pm_init(struct xe_device *xe)
if (err)
return err;
- err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ vram_threshold = vram_threshold_value(xe);
+ err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
return err;
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ec6ec18ab3fa..5956631c0d40 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1809,9 +1809,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->extensions))
- return -EINVAL;
-
if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
flags |= XE_VM_FLAG_SCRATCH_PAGE;
if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 46cae925b095..1f93e5e276c0 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -361,6 +361,10 @@ static bool host1x_wants_iommu(struct host1x *host1x)
return true;
}
+/*
+ * Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a
+ * valid paging domain.
+ */
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
@@ -385,6 +389,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
* Similarly, if host1x is already attached to an IOMMU (via the DMA
* API), don't try to attach again.
*/
+ if (domain && domain->type == IOMMU_DOMAIN_IDENTITY)
+ domain = NULL;
if (!host1x_wants_iommu(host) || domain)
return domain;
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 862c47b191af..6c1416167bd2 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -55,4 +55,21 @@ config HYPERV_BALLOON
help
Select this option to enable Hyper-V Balloon driver.
+config MSHV_ROOT
+ tristate "Microsoft Hyper-V root partition support"
+ depends on HYPERV && (X86_64 || ARM64)
+ depends on !HYPERV_VTL_MODE
+ # The hypervisor interface operates on 4k pages. Enforcing it here
+ # simplifies many assumptions in the root partition code.
+ # e.g. When withdrawing memory, the hypervisor gives back 4k pages in
+ # no particular order, making it impossible to reassemble larger pages.
+ depends on PAGE_SIZE_4KB
+ select EVENTFD
+ default n
+ help
+ Select this option to enable support for booting and running as root
+ partition on Microsoft Hyper-V.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index b992c0ed182b..976189c725dc 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
+obj-$(CONFIG_MSHV_ROOT) += mshv_root.o
CFLAGS_hv_trace.o = -I$(src)
CFLAGS_hv_balloon.o = -I$(src)
@@ -11,6 +12,9 @@ hv_vmbus-y := vmbus_drv.o \
channel_mgmt.o ring_buffer.o hv_trace.o
hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_utils_transport.o
+mshv_root-y := mshv_root_main.o mshv_synic.o mshv_eventfd.o mshv_irq.o \
+ mshv_root_hv_call.o mshv_portid_table.o
# Code that must be built-in
obj-$(subst m,y,$(CONFIG_HYPERV)) += hv_common.o
+obj-$(subst m,y,$(CONFIG_MSHV_ROOT)) += hv_proc.o mshv_common.o
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 36d9ba097ff5..308c8f279df8 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -144,7 +144,7 @@ int hv_synic_alloc(void)
* Synic message and event pages are allocated by paravisor.
* Skip these pages allocation here.
*/
- if (!ms_hyperv.paravisor_present && !hv_root_partition) {
+ if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (!hv_cpu->synic_message_page) {
@@ -272,7 +272,7 @@ void hv_synic_enable_regs(unsigned int cpu)
simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
simp.simp_enabled = 1;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -291,7 +291,7 @@ void hv_synic_enable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 1;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -313,17 +313,7 @@ void hv_synic_enable_regs(unsigned int cpu)
shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
-
- /*
- * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
- * it doesn't provide a recommendation flag and AEOI must be disabled.
- */
-#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
- shared_sint.auto_eoi =
- !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
-#else
- shared_sint.auto_eoi = 0;
-#endif
+ shared_sint.auto_eoi = hv_recommend_using_aeoi();
hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
@@ -367,7 +357,7 @@ void hv_synic_disable_regs(unsigned int cpu)
* addresses.
*/
simp.simp_enabled = 0;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
iounmap(hv_cpu->synic_message_page);
hv_cpu->synic_message_page = NULL;
} else {
@@ -379,7 +369,7 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 0;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
iounmap(hv_cpu->synic_event_page);
hv_cpu->synic_event_page = NULL;
} else {
@@ -433,13 +423,47 @@ retry:
return pending;
}
+static int hv_pick_new_cpu(struct vmbus_channel *channel)
+{
+ int ret = -EBUSY;
+ int start;
+ int cpu;
+
+ lockdep_assert_cpus_held();
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+ /*
+ * We can't assume that the relevant interrupts will be sent before
+ * the CPU is offlined on older versions of Hyper-V.
+ */
+ if (vmbus_proto_version < VERSION_WIN10_V5_3)
+ return -EBUSY;
+
+ start = get_random_u32_below(nr_cpu_ids);
+
+ for_each_cpu_wrap(cpu, cpu_online_mask, start) {
+ if (channel->target_cpu == cpu ||
+ channel->target_cpu == VMBUS_CONNECT_CPU)
+ continue;
+
+ ret = vmbus_channel_set_cpu(channel, cpu);
+ if (!ret)
+ break;
+ }
+
+ if (ret)
+ ret = vmbus_channel_set_cpu(channel, VMBUS_CONNECT_CPU);
+
+ return ret;
+}
+
/*
* hv_synic_cleanup - Cleanup routine for hv_synic_init().
*/
int hv_synic_cleanup(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
- bool channel_found = false;
+ int ret = 0;
if (vmbus_connection.conn_state != CONNECTED)
goto always_cleanup;
@@ -456,38 +480,34 @@ int hv_synic_cleanup(unsigned int cpu)
/*
* Search for channels which are bound to the CPU we're about to
- * cleanup. In case we find one and vmbus is still connected, we
- * fail; this will effectively prevent CPU offlining.
- *
- * TODO: Re-bind the channels to different CPUs.
+ * cleanup.
*/
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (channel->target_cpu == cpu) {
- channel_found = true;
- break;
+ ret = hv_pick_new_cpu(channel);
+ if (ret) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return ret;
+ }
}
list_for_each_entry(sc, &channel->sc_list, sc_list) {
if (sc->target_cpu == cpu) {
- channel_found = true;
- break;
+ ret = hv_pick_new_cpu(sc);
+ if (ret) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return ret;
+ }
}
}
- if (channel_found)
- break;
}
mutex_unlock(&vmbus_connection.channel_mutex);
- if (channel_found)
- return -EBUSY;
-
/*
- * channel_found == false means that any channels that were previously
- * assigned to the CPU have been reassigned elsewhere with a call of
- * vmbus_send_modifychannel(). Scan the event flags page looking for
- * bits that are set and waiting with a timeout for vmbus_chan_sched()
- * to process such bits. If bits are still set after this operation
- * and VMBus is connected, fail the CPU offlining operation.
+ * Scan the event flags page looking for bits that are set and waiting
+ * with a timeout for vmbus_chan_sched() to process such bits. If bits
+ * are still set after this operation and VMBus is connected, fail the
+ * CPU offlining operation.
*/
if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
return -EBUSY;
@@ -497,5 +517,5 @@ always_cleanup:
hv_synic_disable_regs(cpu);
- return 0;
+ return ret;
}
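
Editor's note: hv_pick_new_cpu() above rebinds a channel away from the CPU being offlined, starting the search at a random online CPU and wrapping, so rebound channels spread out instead of piling onto the first CPU. A sketch of the iteration idiom (try_bind() is a hypothetical stand-in for vmbus_channel_set_cpu()):

#include <linux/cpumask.h>
#include <linux/random.h>

int try_bind(int cpu);

static int pick_cpu_sketch(void)
{
	u32 start = get_random_u32_below(nr_cpu_ids);
	int cpu;

	for_each_cpu_wrap(cpu, cpu_online_mask, start) {
		if (!try_bind(cpu))
			return cpu;	/* bound successfully */
	}
	return -EBUSY;
}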
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index f2e6f55d6ca6..b3b11be11650 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -31,8 +31,14 @@
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
+u64 hv_current_partition_id = HV_PARTITION_ID_SELF;
+EXPORT_SYMBOL_GPL(hv_current_partition_id);
+
+enum hv_partition_type hv_curr_partition_type;
+EXPORT_SYMBOL_GPL(hv_curr_partition_type);
+
/*
- * hv_root_partition, ms_hyperv and hv_nested are defined here with other
+ * ms_hyperv and hv_nested are defined here with other
* Hyper-V specific globals so they are shared across all architectures and are
* built only when CONFIG_HYPERV is defined. But on x86,
* ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
@@ -40,9 +46,6 @@
* here, allowing for an overriding definition in the module containing
* ms_hyperv_init_platform().
*/
-bool __weak hv_root_partition;
-EXPORT_SYMBOL_GPL(hv_root_partition);
-
bool __weak hv_nested;
EXPORT_SYMBOL_GPL(hv_nested);
@@ -66,6 +69,16 @@ static void hv_kmsg_dump_unregister(void);
static struct ctl_table_header *hv_ctl_table_hdr;
/*
+ * Per-cpu array holding the tail pointer for the SynIC event ring buffer
+ * for each SINT.
+ *
+ * We cannot maintain this in the mshv driver because the tail pointer should
+ * persist even if the mshv driver is unloaded.
+ */
+u8 * __percpu *hv_synic_eventring_tail;
+EXPORT_SYMBOL_GPL(hv_synic_eventring_tail);
+
+/*
* Hyper-V specific initialization and shutdown code that is
* common across all architectures. Called from architecture
* specific initialization functions.
@@ -87,6 +100,9 @@ void __init hv_common_free(void)
free_percpu(hyperv_pcpu_input_arg);
hyperv_pcpu_input_arg = NULL;
+
+ free_percpu(hv_synic_eventring_tail);
+ hv_synic_eventring_tail = NULL;
}
/*
@@ -280,7 +296,26 @@ static void hv_kmsg_dump_register(void)
static inline bool hv_output_page_exists(void)
{
- return hv_root_partition || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+ return hv_root_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+}
+
+void __init hv_get_partition_id(void)
+{
+ struct hv_output_get_partition_id *output;
+ unsigned long flags;
+ u64 status, pt_id;
+
+ local_irq_save(flags);
+ output = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, &output);
+ pt_id = output->partition_id;
+ local_irq_restore(flags);
+
+ if (hv_result_success(status))
+ hv_current_partition_id = pt_id;
+ else
+ pr_err("Hyper-V: failed to get partition ID: %#x\n",
+ hv_result(status));
}
int __init hv_common_init(void)
@@ -350,6 +385,11 @@ int __init hv_common_init(void)
BUG_ON(!hyperv_pcpu_output_arg);
}
+ if (hv_root_partition()) {
+ hv_synic_eventring_tail = alloc_percpu(u8 *);
+ BUG_ON(!hv_synic_eventring_tail);
+ }
+
hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
GFP_KERNEL);
if (!hv_vp_index) {
@@ -438,11 +478,12 @@ error:
int hv_common_cpu_init(unsigned int cpu)
{
void **inputarg, **outputarg;
+ u8 **synic_eventring_tail;
u64 msr_vp_index;
gfp_t flags;
const int pgcount = hv_output_page_exists() ? 2 : 1;
void *mem;
- int ret;
+ int ret = 0;
/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
@@ -450,8 +491,8 @@ int hv_common_cpu_init(unsigned int cpu)
inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
/*
- * hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory is already
- * allocated if this CPU was previously online and then taken offline
+ * The per-cpu memory is already allocated if this CPU was previously
+ * online and then taken offline.
*/
if (!*inputarg) {
mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
@@ -498,11 +539,21 @@ int hv_common_cpu_init(unsigned int cpu)
if (msr_vp_index > hv_max_vp_index)
hv_max_vp_index = msr_vp_index;
- return 0;
+ if (hv_root_partition()) {
+ synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
+ *synic_eventring_tail = kcalloc(HV_SYNIC_SINT_COUNT,
+ sizeof(u8), flags);
+ /* No need to unwind any of the above on failure here */
+ if (unlikely(!*synic_eventring_tail))
+ ret = -ENOMEM;
+ }
+
+ return ret;
}
int hv_common_cpu_die(unsigned int cpu)
{
+ u8 **synic_eventring_tail;
/*
* The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory
* is not freed when the CPU goes offline as the hyperv_pcpu_input_arg
@@ -515,6 +566,10 @@ int hv_common_cpu_die(unsigned int cpu)
* originally allocated memory is reused in hv_common_cpu_init().
*/
+ synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
+ kfree(*synic_eventring_tail);
+ *synic_eventring_tail = NULL;
+
return 0;
}
@@ -572,7 +627,7 @@ EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
bool hv_is_hibernation_supported(void)
{
- return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
+ return !hv_root_partition() && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
@@ -625,6 +680,11 @@ void __weak hv_remove_vmbus_handler(void)
}
EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);
+void __weak hv_setup_mshv_handler(void (*handler)(void))
+{
+}
+EXPORT_SYMBOL_GPL(hv_setup_mshv_handler);
+
void __weak hv_setup_kexec_handler(void (*handler)(void))
{
}
@@ -661,3 +721,121 @@ u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_tdx_hypercall);
+
+void hv_identify_partition_type(void)
+{
+ /* Assume guest role */
+ hv_curr_partition_type = HV_PARTITION_TYPE_GUEST;
+ /*
+ * Check partition creation and cpu management privileges
+ *
+ * Hyper-V should never specify running as root and as a Confidential
+ * VM. But to protect against a compromised/malicious Hyper-V trying
+ * to exploit root behavior to expose Confidential VM memory, ignore
+ * the root partition setting if also a Confidential VM.
+ */
+ if ((ms_hyperv.priv_high & HV_CREATE_PARTITIONS) &&
+ (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
+ !(ms_hyperv.priv_high & HV_ISOLATION)) {
+ pr_info("Hyper-V: running as root partition\n");
+ if (IS_ENABLED(CONFIG_MSHV_ROOT))
+ hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
+ else
+ pr_crit("Hyper-V: CONFIG_MSHV_ROOT not enabled!\n");
+ }
+}
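Elsewhere in this series the old hv_root_partition boolean is replaced by an accessor over hv_curr_partition_type. A minimal sketch of how such a predicate is presumably defined in the shared mshyperv.h header (the exact location is an assumption):

/* Sketch: predicate over the partition type set above (assumed header helper) */
static inline bool hv_root_partition(void)
{
	return hv_curr_partition_type == HV_PARTITION_TYPE_ROOT;
}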
+
+struct hv_status_info {
+ char *string;
+ int errno;
+ u16 code;
+};
+
+/*
+ * Note on the errno mappings:
+ * A failed hypercall is usually only recoverable (or loggable) near
+ * the call site where the HV_STATUS_* code is known. So the errno
+ * it gets converted to is not very useful further up the stack.
+ * Provide a few mappings that could be useful, and fall back to
+ * -EIO for everything else.
+ */
+static const struct hv_status_info hv_status_infos[] = {
+#define _STATUS_INFO(status, errno) { #status, (errno), (status) }
+ _STATUS_INFO(HV_STATUS_SUCCESS, 0),
+ _STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_CODE, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_INPUT, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_ALIGNMENT, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_PARAMETER, -EINVAL),
+ _STATUS_INFO(HV_STATUS_ACCESS_DENIED, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_PARTITION_STATE, -EIO),
+ _STATUS_INFO(HV_STATUS_OPERATION_DENIED, -EIO),
+ _STATUS_INFO(HV_STATUS_UNKNOWN_PROPERTY, -EIO),
+ _STATUS_INFO(HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE, -EIO),
+ _STATUS_INFO(HV_STATUS_INSUFFICIENT_MEMORY, -ENOMEM),
+ _STATUS_INFO(HV_STATUS_INVALID_PARTITION_ID, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_VP_INDEX, -EINVAL),
+ _STATUS_INFO(HV_STATUS_NOT_FOUND, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_PORT_ID, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_CONNECTION_ID, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INSUFFICIENT_BUFFERS, -EIO),
+ _STATUS_INFO(HV_STATUS_NOT_ACKNOWLEDGED, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_VP_STATE, -EIO),
+ _STATUS_INFO(HV_STATUS_NO_RESOURCES, -EIO),
+ _STATUS_INFO(HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_LP_INDEX, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_REGISTER_VALUE, -EINVAL),
+ _STATUS_INFO(HV_STATUS_OPERATION_FAILED, -EIO),
+ _STATUS_INFO(HV_STATUS_TIME_OUT, -EIO),
+ _STATUS_INFO(HV_STATUS_CALL_PENDING, -EIO),
+ _STATUS_INFO(HV_STATUS_VTL_ALREADY_ENABLED, -EIO),
+#undef _STATUS_INFO
+};
+
+static inline const struct hv_status_info *find_hv_status_info(u64 hv_status)
+{
+ int i;
+ u16 code = hv_result(hv_status);
+
+ for (i = 0; i < ARRAY_SIZE(hv_status_infos); ++i) {
+ const struct hv_status_info *info = &hv_status_infos[i];
+
+ if (info->code == code)
+ return info;
+ }
+
+ return NULL;
+}
+
+/* Convert a hypercall result into a linux-friendly error code. */
+int hv_result_to_errno(u64 status)
+{
+ const struct hv_status_info *info;
+
+ /* hv_do_hypercall() may return U64_MAX if hypercalls aren't possible */
+ if (unlikely(status == U64_MAX))
+ return -EOPNOTSUPP;
+
+ info = find_hv_status_info(status);
+ if (info)
+ return info->errno;
+
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(hv_result_to_errno);
+
+const char *hv_result_to_string(u64 status)
+{
+ const struct hv_status_info *info;
+
+ if (unlikely(status == U64_MAX))
+ return "Hypercall page missing!";
+
+ info = find_hv_status_info(status);
+ if (info)
+ return info->string;
+
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(hv_result_to_string);
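A hedged sketch of how a hypercall site can combine these two helpers; the hypercall code and the surrounding function are illustrative, not part of this patch:

/* Illustrative only: log a failure by name and return a mapped errno. */
static int example_do_hypercall(void *input)
{
	u64 status = hv_do_hypercall(HVCALL_EXAMPLE /* hypothetical */, input, NULL);

	if (!hv_result_success(status))
		pr_err("hypercall failed: %s (%#llx)\n",
		       hv_result_to_string(status), status);

	return hv_result_to_errno(status);
}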
diff --git a/drivers/hv/hv_proc.c b/drivers/hv/hv_proc.c
new file mode 100644
index 000000000000..7d7ecb6f6137
--- /dev/null
+++ b/drivers/hv/hv_proc.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+#include <linux/cpuhotplug.h>
+#include <linux/minmax.h>
+#include <asm/mshyperv.h>
+
+/*
+ * See struct hv_deposit_memory. The first u64 is partition ID, the rest
+ * are GPAs.
+ */
+#define HV_DEPOSIT_MAX (HV_HYP_PAGE_SIZE / sizeof(u64) - 1)
+
+/* Deposits the exact number of pages. Must be called with interrupts enabled. */
+int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
+{
+ struct page **pages, *page;
+ int *counts;
+ int num_allocations;
+ int i, j, page_count;
+ int order;
+ u64 status;
+ int ret;
+ u64 base_pfn;
+ struct hv_deposit_memory *input_page;
+ unsigned long flags;
+
+ if (num_pages > HV_DEPOSIT_MAX)
+ return -E2BIG;
+ if (!num_pages)
+ return 0;
+
+ /* One buffer for page pointers and counts */
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ pages = page_address(page);
+
+ counts = kcalloc(HV_DEPOSIT_MAX, sizeof(int), GFP_KERNEL);
+ if (!counts) {
+ free_page((unsigned long)pages);
+ return -ENOMEM;
+ }
+
+ /* Allocate all the pages before disabling interrupts */
+ i = 0;
+
+ while (num_pages) {
+ /* Find highest order we can actually allocate */
+ order = 31 - __builtin_clz(num_pages);
+
+ while (1) {
+ pages[i] = alloc_pages_node(node, GFP_KERNEL, order);
+ if (pages[i])
+ break;
+ if (!order) {
+ ret = -ENOMEM;
+ num_allocations = i;
+ goto err_free_allocations;
+ }
+ --order;
+ }
+
+ split_page(pages[i], order);
+ counts[i] = 1 << order;
+ num_pages -= counts[i];
+ i++;
+ }
+ num_allocations = i;
+
+ local_irq_save(flags);
+
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->partition_id = partition_id;
+
+ /* Populate gpa_page_list - these will fit on the input page */
+ for (i = 0, page_count = 0; i < num_allocations; ++i) {
+ base_pfn = page_to_pfn(pages[i]);
+ for (j = 0; j < counts[i]; ++j, ++page_count)
+ input_page->gpa_page_list[page_count] = base_pfn + j;
+ }
+ status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY,
+ page_count, 0, input_page, NULL);
+ local_irq_restore(flags);
+ if (!hv_result_success(status)) {
+ hv_status_err(status, "\n");
+ ret = hv_result_to_errno(status);
+ goto err_free_allocations;
+ }
+
+ ret = 0;
+ goto free_buf;
+
+err_free_allocations:
+ for (i = 0; i < num_allocations; ++i) {
+ base_pfn = page_to_pfn(pages[i]);
+ for (j = 0; j < counts[i]; ++j)
+ __free_page(pfn_to_page(base_pfn + j));
+ }
+
+free_buf:
+ free_page((unsigned long)pages);
+ kfree(counts);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_call_deposit_pages);
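The allocation loop above is a greedy power-of-two splitter: `31 - __builtin_clz(n)` is floor(log2(n)), so each pass grabs the largest order that still fits the remaining count. A standalone userspace sketch of the same arithmetic:

/* Greedy order selection: 27 pages -> 16 + 8 + 2 + 1 (illustrative). */
#include <stdio.h>

int main(void)
{
	unsigned int n = 27;

	while (n) {
		int order = 31 - __builtin_clz(n);	/* floor(log2(n)) */

		printf("allocate 2^%d = %u pages\n", order, 1u << order);
		n -= 1u << order;
	}
	return 0;
}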
+
+int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
+{
+ struct hv_input_add_logical_processor *input;
+ struct hv_output_add_logical_processor *output;
+ u64 status;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * When adding a logical processor, the hypervisor may return
+ * HV_STATUS_INSUFFICIENT_MEMORY. When that happens, we deposit more
+ * pages and retry.
+ */
+ do {
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ /* We don't do anything with the output right now */
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input->lp_index = lp_index;
+ input->apic_id = apic_id;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
+ input, output);
+ local_irq_restore(flags);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (!hv_result_success(status)) {
+ hv_status_err(status, "cpu %u apic ID: %u\n",
+ lp_index, apic_id);
+ ret = hv_result_to_errno(status);
+ }
+ break;
+ }
+ ret = hv_call_deposit_pages(node, hv_current_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
+{
+ struct hv_create_vp *input;
+ u64 status;
+ unsigned long irq_flags;
+ int ret = 0;
+
+ /* Root VPs don't seem to need pages deposited */
+ if (partition_id != hv_current_partition_id) {
+ /* The value 90 is empirically determined. It may change. */
+ ret = hv_call_deposit_pages(node, partition_id, 90);
+ if (ret)
+ return ret;
+ }
+
+ do {
+ local_irq_save(irq_flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->flags = flags;
+ input->subnode_type = HV_SUBNODE_ANY;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
+ local_irq_restore(irq_flags);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (!hv_result_success(status)) {
+ hv_status_err(status, "vcpu: %u, lp: %u\n",
+ vp_index, flags);
+ ret = hv_result_to_errno(status);
+ }
+ break;
+ }
+ ret = hv_call_deposit_pages(node, partition_id, 1);
+
+ } while (!ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_call_create_vp);
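A hedged usage sketch from a hypothetical caller in the mshv_root ioctl path; the flags value and error handling are illustrative:

/* Illustrative: create VP 0 of a child partition on the current node. */
static int example_create_first_vp(u64 partition_id)
{
	int ret = hv_call_create_vp(numa_node_id(), partition_id,
				    0 /* vp_index */, 0 /* flags */);

	if (ret)
		pr_err("CREATE_VP failed for partition %llu: %d\n",
		       partition_id, ret);
	return ret;
}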
diff --git a/drivers/hv/mshv.h b/drivers/hv/mshv.h
new file mode 100644
index 000000000000..0340a67acd0a
--- /dev/null
+++ b/drivers/hv/mshv.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ */
+
+#ifndef _MSHV_H_
+#define _MSHV_H_
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <hyperv/hvhdk.h>
+
+#define mshv_field_nonzero(STRUCT, MEMBER) \
+ memchr_inv(&((STRUCT).MEMBER), \
+ 0, sizeof_field(typeof(STRUCT), MEMBER))
+
+int hv_call_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers);
+
+int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers);
+
+int hv_call_get_partition_property(u64 partition_id, u64 property_code,
+ u64 *property_value);
+
+int mshv_do_pre_guest_mode_work(ulong th_flags);
+
+#endif /* _MSHV_H_ */
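mshv_field_nonzero() wraps memchr_inv() so ioctl handlers can reject arguments whose reserved fields are not zeroed, keeping those bits available for future extensions. A hedged sketch with a hypothetical payload struct:

struct example_user_args {	/* hypothetical ioctl payload */
	__u32 flags;
	__u8 rsvd[12];
};

static int example_validate(const struct example_user_args *args)
{
	/* Reserved bytes must be zero so they can be given meaning later. */
	if (mshv_field_nonzero(*args, rsvd))
		return -EINVAL;
	return 0;
}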
diff --git a/drivers/hv/mshv_common.c b/drivers/hv/mshv_common.c
new file mode 100644
index 000000000000..2575e6d7a71f
--- /dev/null
+++ b/drivers/hv/mshv_common.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation.
+ *
+ * This file contains functions that will be called from one or more modules.
+ * If any of these modules are configured to build, this file is built and just
+ * statically linked in.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/mshyperv.h>
+#include <linux/resume_user_mode.h>
+
+#include "mshv.h"
+
+#define HV_GET_REGISTER_BATCH_SIZE \
+ (HV_HYP_PAGE_SIZE / sizeof(union hv_register_value))
+#define HV_SET_REGISTER_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_registers)) \
+ / sizeof(struct hv_register_assoc))
+
+int hv_call_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers)
+{
+ struct hv_input_get_vp_registers *input_page;
+ union hv_register_value *output_page;
+ u16 completed = 0;
+ unsigned long remaining = count;
+ int rep_count, i;
+ u64 status = HV_STATUS_SUCCESS;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input_page->partition_id = partition_id;
+ input_page->vp_index = vp_index;
+ input_page->input_vtl.as_uint8 = input_vtl.as_uint8;
+ input_page->rsvd_z8 = 0;
+ input_page->rsvd_z16 = 0;
+
+ while (remaining) {
+ rep_count = min(remaining, HV_GET_REGISTER_BATCH_SIZE);
+ for (i = 0; i < rep_count; ++i)
+ input_page->names[i] = registers[i].name;
+
+ status = hv_do_rep_hypercall(HVCALL_GET_VP_REGISTERS, rep_count,
+ 0, input_page, output_page);
+ if (!hv_result_success(status))
+ break;
+
+ completed = hv_repcomp(status);
+ for (i = 0; i < completed; ++i)
+ registers[i].value = output_page[i];
+
+ registers += completed;
+ remaining -= completed;
+ }
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+EXPORT_SYMBOL_GPL(hv_call_get_vp_registers);
+
+int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers)
+{
+ struct hv_input_set_vp_registers *input_page;
+ u16 completed = 0;
+ unsigned long remaining = count;
+ int rep_count;
+ u64 status = HV_STATUS_SUCCESS;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->partition_id = partition_id;
+ input_page->vp_index = vp_index;
+ input_page->input_vtl.as_uint8 = input_vtl.as_uint8;
+ input_page->rsvd_z8 = 0;
+ input_page->rsvd_z16 = 0;
+
+ while (remaining) {
+ rep_count = min(remaining, HV_SET_REGISTER_BATCH_SIZE);
+ memcpy(input_page->elements, registers,
+ sizeof(struct hv_register_assoc) * rep_count);
+
+ status = hv_do_rep_hypercall(HVCALL_SET_VP_REGISTERS, rep_count,
+ 0, input_page, NULL);
+ if (!hv_result_success(status))
+ break;
+
+ completed = hv_repcomp(status);
+ registers += completed;
+ remaining -= completed;
+ }
+
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+EXPORT_SYMBOL_GPL(hv_call_set_vp_registers);
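Both register helpers follow the standard rep-hypercall loop: issue at most a page-sized batch, read how many elements the hypervisor completed via hv_repcomp(), and advance by that amount. A condensed sketch of the pattern (BATCH, CODE, and the cursors are illustrative names):

/* Condensed rep-hypercall loop (illustrative names). */
while (remaining) {
	unsigned long n = min(remaining, BATCH);

	status = hv_do_rep_hypercall(CODE, n, 0, input, output);
	if (!hv_result_success(status))
		break;				/* stop on first failure */

	completed = hv_repcomp(status);		/* elements finished */
	cursor += completed;
	remaining -= completed;
}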
+
+int hv_call_get_partition_property(u64 partition_id,
+ u64 property_code,
+ u64 *property_value)
+{
+ u64 status;
+ unsigned long flags;
+ struct hv_input_get_partition_property *input;
+ struct hv_output_get_partition_property *output;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+ memset(input, 0, sizeof(*input));
+ input->partition_id = partition_id;
+ input->property_code = property_code;
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_PROPERTY, input, output);
+
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ return hv_result_to_errno(status);
+ }
+ *property_value = output->property_value;
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hv_call_get_partition_property);
+
+/*
+ * Handle any pre-processing before entering guest mode on this cpu; most
+ * notably, call schedule(). Must be invoked with both preemption and
+ * interrupts enabled.
+ *
+ * Returns: 0 on success, -errno on error.
+ */
+int mshv_do_pre_guest_mode_work(ulong th_flags)
+{
+ if (th_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ return -EINTR;
+
+ if (th_flags & _TIF_NEED_RESCHED)
+ schedule();
+
+ if (th_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mshv_do_pre_guest_mode_work);
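The intended call site is the VP run loop, just before dispatching the vCPU back into the guest. A hedged fragment (read_thread_flags() is the standard API for sampling the current thread's flags):

/* Illustrative run-loop fragment. */
static int example_run_vp(struct mshv_vp *vp)
{
	for (;;) {
		int ret = mshv_do_pre_guest_mode_work(read_thread_flags());

		if (ret)
			return ret;	/* e.g. -EINTR on a pending signal */

		/* ... dispatch this VP into guest mode here ... */
	}
}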
diff --git a/drivers/hv/mshv_eventfd.c b/drivers/hv/mshv_eventfd.c
new file mode 100644
index 000000000000..8dd22be2ca0b
--- /dev/null
+++ b/drivers/hv/mshv_eventfd.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * eventfd support for mshv
+ *
+ * Heavily inspired by the KVM implementation of irqfd/ioeventfd; the basic
+ * framework code is taken from KVM.
+ *
+ * All credit to the KVM developers.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/eventfd.h>
+
+#if IS_ENABLED(CONFIG_X86_64)
+#include <asm/apic.h>
+#endif
+#include <asm/mshyperv.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+#include "mshv_root.h"
+
+static struct workqueue_struct *irqfd_cleanup_wq;
+
+void mshv_register_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian)
+{
+ mutex_lock(&partition->pt_irq_lock);
+ hlist_add_head_rcu(&mian->link, &partition->irq_ack_notifier_list);
+ mutex_unlock(&partition->pt_irq_lock);
+}
+
+void mshv_unregister_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian)
+{
+ mutex_lock(&partition->pt_irq_lock);
+ hlist_del_init_rcu(&mian->link);
+ mutex_unlock(&partition->pt_irq_lock);
+ synchronize_rcu();
+}
+
+bool mshv_notify_acked_gsi(struct mshv_partition *partition, int gsi)
+{
+ struct mshv_irq_ack_notifier *mian;
+ bool acked = false;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mian, &partition->irq_ack_notifier_list,
+ link) {
+ if (mian->irq_ack_gsi == gsi) {
+ mian->irq_acked(mian);
+ acked = true;
+ }
+ }
+ rcu_read_unlock();
+
+ return acked;
+}
+
+#if IS_ENABLED(CONFIG_ARM64)
+static inline bool hv_should_clear_interrupt(enum hv_interrupt_type type)
+{
+ return false;
+}
+#elif IS_ENABLED(CONFIG_X86_64)
+static inline bool hv_should_clear_interrupt(enum hv_interrupt_type type)
+{
+ return type == HV_X64_INTERRUPT_TYPE_EXTINT;
+}
+#endif
+
+static void mshv_irqfd_resampler_ack(struct mshv_irq_ack_notifier *mian)
+{
+ struct mshv_irqfd_resampler *resampler;
+ struct mshv_partition *partition;
+ struct mshv_irqfd *irqfd;
+ int idx;
+
+ resampler = container_of(mian, struct mshv_irqfd_resampler,
+ rsmplr_notifier);
+ partition = resampler->rsmplr_partn;
+
+ idx = srcu_read_lock(&partition->pt_irq_srcu);
+
+ hlist_for_each_entry_rcu(irqfd, &resampler->rsmplr_irqfd_list,
+ irqfd_resampler_hnode) {
+ if (hv_should_clear_interrupt(irqfd->irqfd_lapic_irq.lapic_control.interrupt_type))
+ hv_call_clear_virtual_interrupt(partition->pt_id);
+
+ eventfd_signal(irqfd->irqfd_resamplefd);
+ }
+
+ srcu_read_unlock(&partition->pt_irq_srcu, idx);
+}
+
+#if IS_ENABLED(CONFIG_X86_64)
+static bool
+mshv_vp_irq_vector_injected(union hv_vp_register_page_interrupt_vectors iv,
+ u32 vector)
+{
+ int i;
+
+ for (i = 0; i < iv.vector_count; i++) {
+ if (iv.vector[i] == vector)
+ return true;
+ }
+
+ return false;
+}
+
+static int mshv_vp_irq_try_set_vector(struct mshv_vp *vp, u32 vector)
+{
+ union hv_vp_register_page_interrupt_vectors iv, new_iv;
+
+ iv = vp->vp_register_page->interrupt_vectors;
+ new_iv = iv;
+
+ if (mshv_vp_irq_vector_injected(iv, vector))
+ return 0;
+
+ if (iv.vector_count >= HV_VP_REGISTER_PAGE_MAX_VECTOR_COUNT)
+ return -ENOSPC;
+
+ new_iv.vector[new_iv.vector_count++] = vector;
+
+ if (cmpxchg(&vp->vp_register_page->interrupt_vectors.as_uint64,
+ iv.as_uint64, new_iv.as_uint64) != iv.as_uint64)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int mshv_vp_irq_set_vector(struct mshv_vp *vp, u32 vector)
+{
+ int ret;
+
+ do {
+ ret = mshv_vp_irq_try_set_vector(vp, vector);
+ } while (ret == -EAGAIN && !need_resched());
+
+ return ret;
+}
+
+/*
+ * Try to raise an irq for the guest via the shared vector array; the
+ * hypervisor does the actual injection of the interrupt.
+ */
+static int mshv_try_assert_irq_fast(struct mshv_irqfd *irqfd)
+{
+ struct mshv_partition *partition = irqfd->irqfd_partn;
+ struct mshv_lapic_irq *irq = &irqfd->irqfd_lapic_irq;
+ struct mshv_vp *vp;
+
+ if (!(ms_hyperv.ext_features &
+ HV_VP_DISPATCH_INTERRUPT_INJECTION_AVAILABLE))
+ return -EOPNOTSUPP;
+
+ if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
+ return -EOPNOTSUPP;
+
+ if (irq->lapic_control.logical_dest_mode)
+ return -EOPNOTSUPP;
+
+ vp = partition->pt_vp_array[irq->lapic_apic_id];
+
+ if (!vp->vp_register_page)
+ return -EOPNOTSUPP;
+
+ if (mshv_vp_irq_set_vector(vp, irq->lapic_vector))
+ return -EINVAL;
+
+ if (vp->run.flags.root_sched_dispatched &&
+ vp->vp_register_page->interrupt_vectors.as_uint64)
+ return -EBUSY;
+
+ wake_up(&vp->run.vp_suspend_queue);
+
+ return 0;
+}
+#else /* CONFIG_X86_64 */
+static int mshv_try_assert_irq_fast(struct mshv_irqfd *irqfd)
+{
+ return -EOPNOTSUPP;
+}
+#endif
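mshv_vp_irq_try_set_vector() publishes the whole vector list with one 64-bit cmpxchg, so concurrent injectors either see their vector already present or retry; the wrapper above is the usual compare-and-swap loop. A generic sketch of that pattern (not mshv API):

/* Generic lock-free publish via cmpxchg (illustrative). */
static void publish_u64(u64 *slot, u64 (*transform)(u64))
{
	u64 old, new;

	do {
		old = READ_ONCE(*slot);
		new = transform(old);	/* compute the updated value */
	} while (cmpxchg(slot, old, new) != old);
}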
+
+static void mshv_assert_irq_slow(struct mshv_irqfd *irqfd)
+{
+ struct mshv_partition *partition = irqfd->irqfd_partn;
+ struct mshv_lapic_irq *irq = &irqfd->irqfd_lapic_irq;
+ unsigned int seq;
+ int idx;
+
+ WARN_ON(irqfd->irqfd_resampler &&
+ !irq->lapic_control.level_triggered);
+
+ idx = srcu_read_lock(&partition->pt_irq_srcu);
+ if (irqfd->irqfd_girq_ent.guest_irq_num) {
+ if (!irqfd->irqfd_girq_ent.girq_entry_valid) {
+ srcu_read_unlock(&partition->pt_irq_srcu, idx);
+ return;
+ }
+
+ do {
+ seq = read_seqcount_begin(&irqfd->irqfd_irqe_sc);
+ } while (read_seqcount_retry(&irqfd->irqfd_irqe_sc, seq));
+ }
+
+ hv_call_assert_virtual_interrupt(irqfd->irqfd_partn->pt_id,
+ irq->lapic_vector, irq->lapic_apic_id,
+ irq->lapic_control);
+ srcu_read_unlock(&partition->pt_irq_srcu, idx);
+}
+
+static void mshv_irqfd_resampler_shutdown(struct mshv_irqfd *irqfd)
+{
+ struct mshv_irqfd_resampler *rp = irqfd->irqfd_resampler;
+ struct mshv_partition *pt = rp->rsmplr_partn;
+
+ mutex_lock(&pt->irqfds_resampler_lock);
+
+ hlist_del_rcu(&irqfd->irqfd_resampler_hnode);
+ synchronize_srcu(&pt->pt_irq_srcu);
+
+ if (hlist_empty(&rp->rsmplr_irqfd_list)) {
+ hlist_del(&rp->rsmplr_hnode);
+ mshv_unregister_irq_ack_notifier(pt, &rp->rsmplr_notifier);
+ kfree(rp);
+ }
+
+ mutex_unlock(&pt->irqfds_resampler_lock);
+}
+
+/*
+ * Race-free decouple logic (ordering is critical)
+ */
+static void mshv_irqfd_shutdown(struct work_struct *work)
+{
+ struct mshv_irqfd *irqfd =
+ container_of(work, struct mshv_irqfd, irqfd_shutdown);
+
+ /*
+ * Synchronize with the wait-queue and unhook ourselves to prevent
+ * further events.
+ */
+ remove_wait_queue(irqfd->irqfd_wqh, &irqfd->irqfd_wait);
+
+ if (irqfd->irqfd_resampler) {
+ mshv_irqfd_resampler_shutdown(irqfd);
+ eventfd_ctx_put(irqfd->irqfd_resamplefd);
+ }
+
+ /*
+ * It is now safe to release the object's resources
+ */
+ eventfd_ctx_put(irqfd->irqfd_eventfd_ctx);
+ kfree(irqfd);
+}
+
+/* assumes partition->pt_irqfds_lock is held */
+static bool mshv_irqfd_is_active(struct mshv_irqfd *irqfd)
+{
+ return !hlist_unhashed(&irqfd->irqfd_hnode);
+}
+
+/*
+ * Mark the irqfd as inactive and schedule it for removal
+ *
+ * assumes partition->pt_irqfds_lock is held
+ */
+static void mshv_irqfd_deactivate(struct mshv_irqfd *irqfd)
+{
+ if (!mshv_irqfd_is_active(irqfd))
+ return;
+
+ hlist_del(&irqfd->irqfd_hnode);
+
+ queue_work(irqfd_cleanup_wq, &irqfd->irqfd_shutdown);
+}
+
+/*
+ * Called with wqh->lock held and interrupts disabled
+ */
+static int mshv_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
+ int sync, void *key)
+{
+ struct mshv_irqfd *irqfd = container_of(wait, struct mshv_irqfd,
+ irqfd_wait);
+ unsigned long flags = (unsigned long)key;
+ int idx;
+ unsigned int seq;
+ struct mshv_partition *pt = irqfd->irqfd_partn;
+ int ret = 0;
+
+ if (flags & POLLIN) {
+ u64 cnt;
+
+ eventfd_ctx_do_read(irqfd->irqfd_eventfd_ctx, &cnt);
+ idx = srcu_read_lock(&pt->pt_irq_srcu);
+ do {
+ seq = read_seqcount_begin(&irqfd->irqfd_irqe_sc);
+ } while (read_seqcount_retry(&irqfd->irqfd_irqe_sc, seq));
+
+ /* An event has been signaled, raise an interrupt */
+ ret = mshv_try_assert_irq_fast(irqfd);
+ if (ret)
+ mshv_assert_irq_slow(irqfd);
+
+ srcu_read_unlock(&pt->pt_irq_srcu, idx);
+
+ ret = 1;
+ }
+
+ if (flags & POLLHUP) {
+ /* The eventfd is closing, detach from the partition */
+ unsigned long flags;
+
+ spin_lock_irqsave(&pt->pt_irqfds_lock, flags);
+
+ /*
+ * We must check if someone deactivated the irqfd before
+ * we could acquire the pt_irqfds_lock since the item is
+ * deactivated from the mshv side before it is unhooked from
+ * the wait-queue. If it is already deactivated, we can
+ * simply return, knowing the other side will clean up for us.
+ * We cannot race against the irqfd going away since the
+ * other side is required to acquire wqh->lock, which we hold.
+ */
+ if (mshv_irqfd_is_active(irqfd))
+ mshv_irqfd_deactivate(irqfd);
+
+ spin_unlock_irqrestore(&pt->pt_irqfds_lock, flags);
+ }
+
+ return ret;
+}
+
+/* Must be called under pt_irqfds_lock */
+static void mshv_irqfd_update(struct mshv_partition *pt,
+ struct mshv_irqfd *irqfd)
+{
+ write_seqcount_begin(&irqfd->irqfd_irqe_sc);
+ irqfd->irqfd_girq_ent = mshv_ret_girq_entry(pt,
+ irqfd->irqfd_irqnum);
+ mshv_copy_girq_info(&irqfd->irqfd_girq_ent, &irqfd->irqfd_lapic_irq);
+ write_seqcount_end(&irqfd->irqfd_irqe_sc);
+}
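The seqcount protects the routing snapshot: mshv_irqfd_update() is the writer (under pt_irqfds_lock), while the wakeup and assert paths are lock-free readers that retry if a writer ran concurrently. The two halves of the pattern, condensed for illustration:

/* Writer, under pt_irqfds_lock: */
write_seqcount_begin(&irqfd->irqfd_irqe_sc);
/* ... update irqfd_girq_ent and irqfd_lapic_irq ... */
write_seqcount_end(&irqfd->irqfd_irqe_sc);

/* Reader, lock-free: loop until a stable snapshot is observed. */
do {
	seq = read_seqcount_begin(&irqfd->irqfd_irqe_sc);
	/* ... read the routing fields ... */
} while (read_seqcount_retry(&irqfd->irqfd_irqe_sc, seq));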
+
+void mshv_irqfd_routing_update(struct mshv_partition *pt)
+{
+ struct mshv_irqfd *irqfd;
+
+ spin_lock_irq(&pt->pt_irqfds_lock);
+ hlist_for_each_entry(irqfd, &pt->pt_irqfds_list, irqfd_hnode)
+ mshv_irqfd_update(pt, irqfd);
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+}
+
+static void mshv_irqfd_queue_proc(struct file *file, wait_queue_head_t *wqh,
+ poll_table *polltbl)
+{
+ struct mshv_irqfd *irqfd =
+ container_of(polltbl, struct mshv_irqfd, irqfd_polltbl);
+
+ irqfd->irqfd_wqh = wqh;
+ add_wait_queue_priority(wqh, &irqfd->irqfd_wait);
+}
+
+static int mshv_irqfd_assign(struct mshv_partition *pt,
+ struct mshv_user_irqfd *args)
+{
+ struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
+ struct mshv_irqfd *irqfd, *tmp;
+ unsigned int events;
+ struct fd f;
+ int ret;
+ int idx;
+
+ irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
+ if (!irqfd)
+ return -ENOMEM;
+
+ irqfd->irqfd_partn = pt;
+ irqfd->irqfd_irqnum = args->gsi;
+ INIT_WORK(&irqfd->irqfd_shutdown, mshv_irqfd_shutdown);
+ seqcount_spinlock_init(&irqfd->irqfd_irqe_sc, &pt->pt_irqfds_lock);
+
+ f = fdget(args->fd);
+ if (!fd_file(f)) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ eventfd = eventfd_ctx_fileget(fd_file(f));
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto fail;
+ }
+
+ irqfd->irqfd_eventfd_ctx = eventfd;
+
+ if (args->flags & BIT(MSHV_IRQFD_BIT_RESAMPLE)) {
+ struct mshv_irqfd_resampler *rp;
+
+ resamplefd = eventfd_ctx_fdget(args->resamplefd);
+ if (IS_ERR(resamplefd)) {
+ ret = PTR_ERR(resamplefd);
+ goto fail;
+ }
+
+ irqfd->irqfd_resamplefd = resamplefd;
+
+ mutex_lock(&pt->irqfds_resampler_lock);
+
+ hlist_for_each_entry(rp, &pt->irqfds_resampler_list,
+ rsmplr_hnode) {
+ if (rp->rsmplr_notifier.irq_ack_gsi ==
+ irqfd->irqfd_irqnum) {
+ irqfd->irqfd_resampler = rp;
+ break;
+ }
+ }
+
+ if (!irqfd->irqfd_resampler) {
+ rp = kzalloc(sizeof(*rp), GFP_KERNEL_ACCOUNT);
+ if (!rp) {
+ ret = -ENOMEM;
+ mutex_unlock(&pt->irqfds_resampler_lock);
+ goto fail;
+ }
+
+ rp->rsmplr_partn = pt;
+ INIT_HLIST_HEAD(&rp->rsmplr_irqfd_list);
+ rp->rsmplr_notifier.irq_ack_gsi = irqfd->irqfd_irqnum;
+ rp->rsmplr_notifier.irq_acked =
+ mshv_irqfd_resampler_ack;
+
+ hlist_add_head(&rp->rsmplr_hnode,
+ &pt->irqfds_resampler_list);
+ mshv_register_irq_ack_notifier(pt,
+ &rp->rsmplr_notifier);
+ irqfd->irqfd_resampler = rp;
+ }
+
+ hlist_add_head_rcu(&irqfd->irqfd_resampler_hnode,
+ &irqfd->irqfd_resampler->rsmplr_irqfd_list);
+
+ mutex_unlock(&pt->irqfds_resampler_lock);
+ }
+
+ /*
+ * Install our own custom wake-up handling so we are notified via
+ * a callback whenever someone signals the underlying eventfd
+ */
+ init_waitqueue_func_entry(&irqfd->irqfd_wait, mshv_irqfd_wakeup);
+ init_poll_funcptr(&irqfd->irqfd_polltbl, mshv_irqfd_queue_proc);
+
+ spin_lock_irq(&pt->pt_irqfds_lock);
+ if (args->flags & BIT(MSHV_IRQFD_BIT_RESAMPLE) &&
+ !irqfd->irqfd_lapic_irq.lapic_control.level_triggered) {
+ /*
+ * A resample fd is only valid for a level-triggered interrupt;
+ * otherwise, fail.
+ */
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+ hlist_for_each_entry(tmp, &pt->pt_irqfds_list, irqfd_hnode) {
+ if (irqfd->irqfd_eventfd_ctx != tmp->irqfd_eventfd_ctx)
+ continue;
+ /* This fd is used for another irq already. */
+ ret = -EBUSY;
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+ goto fail;
+ }
+
+ idx = srcu_read_lock(&pt->pt_irq_srcu);
+ mshv_irqfd_update(pt, irqfd);
+ hlist_add_head(&irqfd->irqfd_hnode, &pt->pt_irqfds_list);
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+
+ /*
+ * Check if there was an event already pending on the eventfd
+ * before we registered, and trigger it as if we didn't miss it.
+ */
+ events = vfs_poll(fd_file(f), &irqfd->irqfd_polltbl);
+
+ if (events & POLLIN)
+ mshv_assert_irq_slow(irqfd);
+
+ srcu_read_unlock(&pt->pt_irq_srcu, idx);
+ /*
+ * do not drop the file until the irqfd is fully initialized, otherwise
+ * we might race against the POLLHUP
+ */
+ fdput(f);
+
+ return 0;
+
+fail:
+ if (irqfd->irqfd_resampler)
+ mshv_irqfd_resampler_shutdown(irqfd);
+
+ if (resamplefd && !IS_ERR(resamplefd))
+ eventfd_ctx_put(resamplefd);
+
+ if (eventfd && !IS_ERR(eventfd))
+ eventfd_ctx_put(eventfd);
+
+ fdput(f);
+
+out:
+ kfree(irqfd);
+ return ret;
+}
+
+/*
+ * shutdown any irqfd's that match fd+gsi
+ */
+static int mshv_irqfd_deassign(struct mshv_partition *pt,
+ struct mshv_user_irqfd *args)
+{
+ struct mshv_irqfd *irqfd;
+ struct hlist_node *n;
+ struct eventfd_ctx *eventfd;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ hlist_for_each_entry_safe(irqfd, n, &pt->pt_irqfds_list,
+ irqfd_hnode) {
+ if (irqfd->irqfd_eventfd_ctx == eventfd &&
+ irqfd->irqfd_irqnum == args->gsi)
+
+ mshv_irqfd_deactivate(irqfd);
+ }
+
+ eventfd_ctx_put(eventfd);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed
+ * so that we guarantee there will not be any more interrupts on this
+ * gsi once this deassign function returns.
+ */
+ flush_workqueue(irqfd_cleanup_wq);
+
+ return 0;
+}
+
+int mshv_set_unset_irqfd(struct mshv_partition *pt,
+ struct mshv_user_irqfd *args)
+{
+ if (args->flags & ~MSHV_IRQFD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (args->flags & BIT(MSHV_IRQFD_BIT_DEASSIGN))
+ return mshv_irqfd_deassign(pt, args);
+
+ return mshv_irqfd_assign(pt, args);
+}
+
+/*
+ * This function is called as the mshv VM fd is being released.
+ * Shutdown all irqfds that still remain open
+ */
+static void mshv_irqfd_release(struct mshv_partition *pt)
+{
+ struct mshv_irqfd *irqfd;
+ struct hlist_node *n;
+
+ spin_lock_irq(&pt->pt_irqfds_lock);
+
+ hlist_for_each_entry_safe(irqfd, n, &pt->pt_irqfds_list, irqfd_hnode)
+ mshv_irqfd_deactivate(irqfd);
+
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed
+ * since we do not take a mshv_partition* reference.
+ */
+ flush_workqueue(irqfd_cleanup_wq);
+}
+
+int mshv_irqfd_wq_init(void)
+{
+ irqfd_cleanup_wq = alloc_workqueue("mshv-irqfd-cleanup", 0, 0);
+ if (!irqfd_cleanup_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mshv_irqfd_wq_cleanup(void)
+{
+ destroy_workqueue(irqfd_cleanup_wq);
+}
+
+/*
+ * --------------------------------------------------------------------
+ * ioeventfd: translate a MMIO memory write to an eventfd signal.
+ *
+ * Userspace can register an MMIO address with an eventfd to receive a
+ * notification when that memory is written.
+ * --------------------------------------------------------------------
+ */
+
+static void ioeventfd_release(struct mshv_ioeventfd *p, u64 partition_id)
+{
+ if (p->iovntfd_doorbell_id > 0)
+ mshv_unregister_doorbell(partition_id, p->iovntfd_doorbell_id);
+ eventfd_ctx_put(p->iovntfd_eventfd);
+ kfree(p);
+}
+
+/* MMIO writes trigger an event if the addr/val match */
+static void ioeventfd_mmio_write(int doorbell_id, void *data)
+{
+ struct mshv_partition *partition = (struct mshv_partition *)data;
+ struct mshv_ioeventfd *p;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(p, &partition->ioeventfds_list, iovntfd_hnode)
+ if (p->iovntfd_doorbell_id == doorbell_id) {
+ eventfd_signal(p->iovntfd_eventfd);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static bool ioeventfd_check_collision(struct mshv_partition *pt,
+ struct mshv_ioeventfd *p)
+ __must_hold(&pt->pt_mutex)
+{
+ struct mshv_ioeventfd *_p;
+
+ hlist_for_each_entry(_p, &pt->ioeventfds_list, iovntfd_hnode)
+ if (_p->iovntfd_addr == p->iovntfd_addr &&
+ _p->iovntfd_length == p->iovntfd_length &&
+ (_p->iovntfd_wildcard || p->iovntfd_wildcard ||
+ _p->iovntfd_datamatch == p->iovntfd_datamatch))
+ return true;
+
+ return false;
+}
+
+static int mshv_assign_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args)
+ __must_hold(&pt->pt_mutex)
+{
+ struct mshv_ioeventfd *p;
+ struct eventfd_ctx *eventfd;
+ u64 doorbell_flags = 0;
+ int ret;
+
+ /* This mutex currently protects the ioeventfds_list */
+ WARN_ON_ONCE(!mutex_is_locked(&pt->pt_mutex));
+
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_PIO))
+ return -EOPNOTSUPP;
+
+ /* length must be 0 (any size) or a power-of-two access size up to 8 */
+ switch (args->len) {
+ case 0:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_ANY;
+ break;
+ case 1:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_BYTE;
+ break;
+ case 2:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_WORD;
+ break;
+ case 4:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_DWORD;
+ break;
+ case 8:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_QWORD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check for range overflow */
+ if (args->addr + args->len < args->addr)
+ return -EINVAL;
+
+ /* check for extra flags that we don't understand */
+ if (args->flags & ~MSHV_IOEVENTFD_FLAGS_MASK)
+ return -EINVAL;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ p->iovntfd_addr = args->addr;
+ p->iovntfd_length = args->len;
+ p->iovntfd_eventfd = eventfd;
+
+ /* The datamatch feature is optional, otherwise this is a wildcard */
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_DATAMATCH)) {
+ p->iovntfd_datamatch = args->datamatch;
+ } else {
+ p->iovntfd_wildcard = true;
+ doorbell_flags |= HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE;
+ }
+
+ if (ioeventfd_check_collision(pt, p)) {
+ ret = -EEXIST;
+ goto unlock_fail;
+ }
+
+ ret = mshv_register_doorbell(pt->pt_id, ioeventfd_mmio_write,
+ (void *)pt, p->iovntfd_addr,
+ p->iovntfd_datamatch, doorbell_flags);
+ if (ret < 0)
+ goto unlock_fail;
+
+ p->iovntfd_doorbell_id = ret;
+
+ hlist_add_head_rcu(&p->iovntfd_hnode, &pt->ioeventfds_list);
+
+ return 0;
+
+unlock_fail:
+ kfree(p);
+
+fail:
+ eventfd_ctx_put(eventfd);
+
+ return ret;
+}
+
+static int mshv_deassign_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args)
+ __must_hold(&pt->pt_mutex)
+{
+ struct mshv_ioeventfd *p;
+ struct eventfd_ctx *eventfd;
+ struct hlist_node *n;
+ int ret = -ENOENT;
+
+ /* This mutex currently protects the ioeventfds_list */
+ WARN_ON_ONCE(!mutex_is_locked(&pt->pt_mutex));
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ hlist_for_each_entry_safe(p, n, &pt->ioeventfds_list, iovntfd_hnode) {
+ bool wildcard = !(args->flags & BIT(MSHV_IOEVENTFD_BIT_DATAMATCH));
+
+ if (p->iovntfd_eventfd != eventfd ||
+ p->iovntfd_addr != args->addr ||
+ p->iovntfd_length != args->len ||
+ p->iovntfd_wildcard != wildcard)
+ continue;
+
+ if (!p->iovntfd_wildcard &&
+ p->iovntfd_datamatch != args->datamatch)
+ continue;
+
+ hlist_del_rcu(&p->iovntfd_hnode);
+ synchronize_rcu();
+ ioeventfd_release(p, pt->pt_id);
+ ret = 0;
+ break;
+ }
+
+ eventfd_ctx_put(eventfd);
+
+ return ret;
+}
+
+int mshv_set_unset_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args)
+ __must_hold(&pt->pt_mutex)
+{
+ if ((args->flags & ~MSHV_IOEVENTFD_FLAGS_MASK) ||
+ mshv_field_nonzero(*args, rsvd))
+ return -EINVAL;
+
+ /* PIO not yet implemented */
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_PIO))
+ return -EOPNOTSUPP;
+
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_DEASSIGN))
+ return mshv_deassign_ioeventfd(pt, args);
+
+ return mshv_assign_ioeventfd(pt, args);
+}
+
+void mshv_eventfd_init(struct mshv_partition *pt)
+{
+ spin_lock_init(&pt->pt_irqfds_lock);
+ INIT_HLIST_HEAD(&pt->pt_irqfds_list);
+
+ INIT_HLIST_HEAD(&pt->irqfds_resampler_list);
+ mutex_init(&pt->irqfds_resampler_lock);
+
+ INIT_HLIST_HEAD(&pt->ioeventfds_list);
+}
+
+void mshv_eventfd_release(struct mshv_partition *pt)
+{
+ struct hlist_head items;
+ struct hlist_node *n;
+ struct mshv_ioeventfd *p;
+
+ hlist_move_list(&pt->ioeventfds_list, &items);
+ synchronize_rcu();
+
+ hlist_for_each_entry_safe(p, n, &items, iovntfd_hnode) {
+ hlist_del(&p->iovntfd_hnode);
+ ioeventfd_release(p, pt->pt_id);
+ }
+
+ mshv_irqfd_release(pt);
+}
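From userspace, registering a wildcard ioeventfd might look roughly like this; the ioctl number and the exact field layout of struct mshv_user_ioeventfd are assumptions based on the uapi header used above:

/* Userspace sketch: 4-byte wildcard ioeventfd at a guest MMIO address. */
static int example_register_ioeventfd(int partition_fd, int efd)
{
	struct mshv_user_ioeventfd args = {
		.fd = efd,		/* from eventfd(0, EFD_CLOEXEC) */
		.addr = 0xfe001000,	/* guest physical MMIO address */
		.len = 4,
		.flags = 0,		/* no DATAMATCH bit -> wildcard */
	};

	return ioctl(partition_fd, MSHV_IOEVENTFD /* assumed ioctl */, &args);
}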
diff --git a/drivers/hv/mshv_eventfd.h b/drivers/hv/mshv_eventfd.h
new file mode 100644
index 000000000000..332e7670a344
--- /dev/null
+++ b/drivers/hv/mshv_eventfd.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * irqfd: Allows an fd to be used to inject an interrupt into the guest.
+ * ioeventfd: Allows an fd to be used to receive a signal from the guest.
+ * All credit goes to the KVM developers.
+ */
+
+#ifndef __LINUX_MSHV_EVENTFD_H
+#define __LINUX_MSHV_EVENTFD_H
+
+#include <linux/poll.h>
+
+#include "mshv.h"
+#include "mshv_root.h"
+
+/*
+ * Struct containing the list of irqfds sharing an irq. Updates are
+ * protected by partition->irqfds_resampler_lock.
+ */
+struct mshv_irqfd_resampler {
+ struct mshv_partition *rsmplr_partn;
+ struct hlist_head rsmplr_irqfd_list;
+ struct mshv_irq_ack_notifier rsmplr_notifier;
+ struct hlist_node rsmplr_hnode;
+};
+
+struct mshv_irqfd {
+ struct mshv_partition *irqfd_partn;
+ struct eventfd_ctx *irqfd_eventfd_ctx;
+ struct mshv_guest_irq_ent irqfd_girq_ent;
+ seqcount_spinlock_t irqfd_irqe_sc;
+ u32 irqfd_irqnum;
+ struct mshv_lapic_irq irqfd_lapic_irq;
+ struct hlist_node irqfd_hnode;
+ poll_table irqfd_polltbl;
+ wait_queue_head_t *irqfd_wqh;
+ wait_queue_entry_t irqfd_wait;
+ struct work_struct irqfd_shutdown;
+ struct mshv_irqfd_resampler *irqfd_resampler;
+ struct eventfd_ctx *irqfd_resamplefd;
+ struct hlist_node irqfd_resampler_hnode;
+};
+
+void mshv_eventfd_init(struct mshv_partition *partition);
+void mshv_eventfd_release(struct mshv_partition *partition);
+
+void mshv_register_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian);
+void mshv_unregister_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian);
+bool mshv_notify_acked_gsi(struct mshv_partition *partition, int gsi);
+
+int mshv_set_unset_irqfd(struct mshv_partition *partition,
+ struct mshv_user_irqfd *args);
+
+int mshv_irqfd_wq_init(void);
+void mshv_irqfd_wq_cleanup(void);
+
+struct mshv_ioeventfd {
+ struct hlist_node iovntfd_hnode;
+ u64 iovntfd_addr;
+ int iovntfd_length;
+ struct eventfd_ctx *iovntfd_eventfd;
+ u64 iovntfd_datamatch;
+ int iovntfd_doorbell_id;
+ bool iovntfd_wildcard;
+};
+
+int mshv_set_unset_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args);
+
+#endif /* __LINUX_MSHV_EVENTFD_H */
diff --git a/drivers/hv/mshv_irq.c b/drivers/hv/mshv_irq.c
new file mode 100644
index 000000000000..d0fb9ef734f4
--- /dev/null
+++ b/drivers/hv/mshv_irq.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/mshyperv.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+#include "mshv_root.h"
+
+/* called from the ioctl code, user wants to update the guest irq table */
+int mshv_update_routing_table(struct mshv_partition *partition,
+ const struct mshv_user_irq_entry *ue,
+ unsigned int numents)
+{
+ struct mshv_girq_routing_table *new = NULL, *old;
+ u32 i, nr_rt_entries = 0;
+ int r = 0;
+
+ if (numents == 0)
+ goto swap_routes;
+
+ for (i = 0; i < numents; i++) {
+ if (ue[i].gsi >= MSHV_MAX_GUEST_IRQS)
+ return -EINVAL;
+
+ if (ue[i].address_hi)
+ return -EINVAL;
+
+ nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
+ }
+ nr_rt_entries += 1;
+
+ new = kzalloc(struct_size(new, mshv_girq_info_tbl, nr_rt_entries),
+ GFP_KERNEL_ACCOUNT);
+ if (!new)
+ return -ENOMEM;
+
+ new->num_rt_entries = nr_rt_entries;
+ for (i = 0; i < numents; i++) {
+ struct mshv_guest_irq_ent *girq;
+
+ girq = &new->mshv_girq_info_tbl[ue[i].gsi];
+
+ /*
+ * Allow only a one-to-one mapping between GSI and MSI routing.
+ */
+ if (girq->guest_irq_num != 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ girq->guest_irq_num = ue[i].gsi;
+ girq->girq_addr_lo = ue[i].address_lo;
+ girq->girq_addr_hi = ue[i].address_hi;
+ girq->girq_irq_data = ue[i].data;
+ girq->girq_entry_valid = true;
+ }
+
+swap_routes:
+ mutex_lock(&partition->pt_irq_lock);
+ old = rcu_dereference_protected(partition->pt_girq_tbl, 1);
+ rcu_assign_pointer(partition->pt_girq_tbl, new);
+ mshv_irqfd_routing_update(partition);
+ mutex_unlock(&partition->pt_irq_lock);
+
+ synchronize_srcu_expedited(&partition->pt_irq_srcu);
+ new = old;
+
+out:
+ kfree(new);
+
+ return r;
+}
+
+/* vm is going away, kfree the irq routing table */
+void mshv_free_routing_table(struct mshv_partition *partition)
+{
+ struct mshv_girq_routing_table *rt =
+ rcu_access_pointer(partition->pt_girq_tbl);
+
+ kfree(rt);
+}
+
+struct mshv_guest_irq_ent
+mshv_ret_girq_entry(struct mshv_partition *partition, u32 irqnum)
+{
+ struct mshv_guest_irq_ent entry = { 0 };
+ struct mshv_girq_routing_table *girq_tbl;
+
+ girq_tbl = srcu_dereference_check(partition->pt_girq_tbl,
+ &partition->pt_irq_srcu,
+ lockdep_is_held(&partition->pt_irq_lock));
+ if (!girq_tbl || irqnum >= girq_tbl->num_rt_entries) {
+ /*
+ * Premature irqfd registration: girq_entry_valid stays 0,
+ * so this entry is ignored anyway.
+ */
+ entry.guest_irq_num = irqnum;
+ return entry;
+ }
+
+ return girq_tbl->mshv_girq_info_tbl[irqnum];
+}
+
+void mshv_copy_girq_info(struct mshv_guest_irq_ent *ent,
+ struct mshv_lapic_irq *lirq)
+{
+ memset(lirq, 0, sizeof(*lirq));
+ if (!ent || !ent->girq_entry_valid)
+ return;
+
+ lirq->lapic_vector = ent->girq_irq_data & 0xFF;
+ lirq->lapic_apic_id = (ent->girq_addr_lo >> 12) & 0xFF;
+ lirq->lapic_control.interrupt_type = (ent->girq_irq_data & 0x700) >> 8;
+ lirq->lapic_control.level_triggered = (ent->girq_irq_data >> 15) & 0x1;
+ lirq->lapic_control.logical_dest_mode = (ent->girq_addr_lo >> 2) & 0x1;
+}
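The extraction above follows the x86 MSI encoding: destination APIC ID in address bits 19:12, destination mode in address bit 2, vector in data bits 7:0, delivery type in data bits 10:8, and trigger mode in data bit 15. A worked example with illustrative values:

u32 addr_lo = 0xfee01004;	/* APIC ID 0x01, logical dest mode bit set */
u32 data    = 0x00008031;	/* vector 0x31, fixed delivery, level */

/*
 * lapic_apic_id     = (addr_lo >> 12) & 0xff = 0x01
 * logical_dest_mode = (addr_lo >> 2) & 0x1   = 1
 * lapic_vector      = data & 0xff            = 0x31
 * interrupt_type    = (data & 0x700) >> 8    = 0 (fixed delivery)
 * level_triggered   = (data >> 15) & 0x1     = 1
 */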
diff --git a/drivers/hv/mshv_portid_table.c b/drivers/hv/mshv_portid_table.c
new file mode 100644
index 000000000000..c349af1f0aaa
--- /dev/null
+++ b/drivers/hv/mshv_portid_table.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <asm/mshyperv.h>
+
+#include "mshv.h"
+#include "mshv_root.h"
+
+/*
+ * Ports and connections are hypervisor structures used for inter-partition
+ * communication. A port represents the source and a connection represents
+ * the destination. Partitions are responsible for managing the port and
+ * connection ids.
+ */
+
+#define PORTID_MIN 1
+#define PORTID_MAX INT_MAX
+
+static DEFINE_IDR(port_table_idr);
+
+void
+mshv_port_table_fini(void)
+{
+ struct port_table_info *port_info;
+ unsigned long i, tmp;
+
+ idr_lock(&port_table_idr);
+ if (!idr_is_empty(&port_table_idr)) {
+ idr_for_each_entry_ul(&port_table_idr, port_info, tmp, i) {
+ port_info = idr_remove(&port_table_idr, i);
+ kfree_rcu(port_info, portbl_rcu);
+ }
+ }
+ idr_unlock(&port_table_idr);
+}
+
+int
+mshv_portid_alloc(struct port_table_info *info)
+{
+ int ret = 0;
+
+ idr_lock(&port_table_idr);
+ ret = idr_alloc(&port_table_idr, info, PORTID_MIN,
+ PORTID_MAX, GFP_KERNEL);
+ idr_unlock(&port_table_idr);
+
+ return ret;
+}
+
+void
+mshv_portid_free(int port_id)
+{
+ struct port_table_info *info;
+
+ idr_lock(&port_table_idr);
+ info = idr_remove(&port_table_idr, port_id);
+ WARN_ON(!info);
+ idr_unlock(&port_table_idr);
+
+ synchronize_rcu();
+ kfree(info);
+}
+
+int
+mshv_portid_lookup(int port_id, struct port_table_info *info)
+{
+ struct port_table_info *_info;
+ int ret = -ENOENT;
+
+ rcu_read_lock();
+ _info = idr_find(&port_table_idr, port_id);
+ rcu_read_unlock();
+
+ if (_info) {
+ *info = *_info;
+ ret = 0;
+ }
+
+ return ret;
+}
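A hedged sketch of the allocate/lookup/free lifecycle; the port type constant is an assumption from the hvhdk definitions:

/* Illustrative port-table lifecycle (port type value is assumed). */
static int example_port_lifecycle(void)
{
	struct port_table_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
	int port_id;

	if (!info)
		return -ENOMEM;
	info->hv_port_type = HV_PORT_TYPE_DOORBELL;	/* assumed value */

	port_id = mshv_portid_alloc(info);	/* table now owns info */
	if (port_id < 0) {
		kfree(info);
		return port_id;
	}
	/* ... hand port_id to the hypervisor ... */
	mshv_portid_free(port_id);		/* removes and frees info */
	return 0;
}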
diff --git a/drivers/hv/mshv_root.h b/drivers/hv/mshv_root.h
new file mode 100644
index 000000000000..e3931b0f1269
--- /dev/null
+++ b/drivers/hv/mshv_root.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ */
+
+#ifndef _MSHV_ROOT_H_
+#define _MSHV_ROOT_H_
+
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <linux/hashtable.h>
+#include <linux/dev_printk.h>
+#include <linux/build_bug.h>
+#include <uapi/linux/mshv.h>
+
+/*
+ * The hypervisor version must be between these numbers (inclusive)
+ * to guarantee compatibility.
+ */
+#define MSHV_HV_MIN_VERSION (27744)
+#define MSHV_HV_MAX_VERSION (27751)
+
+static_assert(HV_HYP_PAGE_SIZE == MSHV_HV_PAGE_SIZE);
+
+#define MSHV_MAX_VPS 256
+
+#define MSHV_PARTITIONS_HASH_BITS 9
+
+#define MSHV_PIN_PAGES_BATCH_SIZE (0x10000000ULL / HV_HYP_PAGE_SIZE)
+
+struct mshv_vp {
+ u32 vp_index;
+ struct mshv_partition *vp_partition;
+ struct mutex vp_mutex;
+ struct hv_vp_register_page *vp_register_page;
+ struct hv_message *vp_intercept_msg_page;
+ void *vp_ghcb_page;
+ struct hv_stats_page *vp_stats_pages[2];
+ struct {
+ atomic64_t vp_signaled_count;
+ struct {
+ u64 intercept_suspend: 1;
+ u64 root_sched_blocked: 1; /* root scheduler only */
+ u64 root_sched_dispatched: 1; /* root scheduler only */
+ u64 reserved: 61;
+ } flags;
+ unsigned int kicked_by_hv;
+ wait_queue_head_t vp_suspend_queue;
+ } run;
+};
+
+#define vp_fmt(fmt) "p%lluvp%u: " fmt
+#define vp_devprintk(level, v, fmt, ...) \
+do { \
+ const struct mshv_vp *__vp = (v); \
+ const struct mshv_partition *__pt = __vp->vp_partition; \
+ dev_##level(__pt->pt_module_dev, vp_fmt(fmt), __pt->pt_id, \
+ __vp->vp_index, ##__VA_ARGS__); \
+} while (0)
+#define vp_emerg(v, fmt, ...) vp_devprintk(emerg, v, fmt, ##__VA_ARGS__)
+#define vp_crit(v, fmt, ...) vp_devprintk(crit, v, fmt, ##__VA_ARGS__)
+#define vp_alert(v, fmt, ...) vp_devprintk(alert, v, fmt, ##__VA_ARGS__)
+#define vp_err(v, fmt, ...) vp_devprintk(err, v, fmt, ##__VA_ARGS__)
+#define vp_warn(v, fmt, ...) vp_devprintk(warn, v, fmt, ##__VA_ARGS__)
+#define vp_notice(v, fmt, ...) vp_devprintk(notice, v, fmt, ##__VA_ARGS__)
+#define vp_info(v, fmt, ...) vp_devprintk(info, v, fmt, ##__VA_ARGS__)
+#define vp_dbg(v, fmt, ...) vp_devprintk(dbg, v, fmt, ##__VA_ARGS__)
+
+struct mshv_mem_region {
+ struct hlist_node hnode;
+ u64 nr_pages;
+ u64 start_gfn;
+ u64 start_uaddr;
+ u32 hv_map_flags;
+ struct {
+ u64 large_pages: 1; /* 2MiB */
+ u64 range_pinned: 1;
+ u64 reserved: 62;
+ } flags;
+ struct mshv_partition *partition;
+ struct page *pages[];
+};
+
+struct mshv_irq_ack_notifier {
+ struct hlist_node link;
+ unsigned int irq_ack_gsi;
+ void (*irq_acked)(struct mshv_irq_ack_notifier *mian);
+};
+
+struct mshv_partition {
+ struct device *pt_module_dev;
+
+ struct hlist_node pt_hnode;
+ u64 pt_id;
+ refcount_t pt_ref_count;
+ struct mutex pt_mutex;
+ struct hlist_head pt_mem_regions; // not ordered
+
+ u32 pt_vp_count;
+ struct mshv_vp *pt_vp_array[MSHV_MAX_VPS];
+
+ struct mutex pt_irq_lock;
+ struct srcu_struct pt_irq_srcu;
+ struct hlist_head irq_ack_notifier_list;
+
+ struct hlist_head pt_devices;
+
+ /*
+ * MSHV does not support more than one async hypercall in flight
+ * for a single partition. Thus, it is okay to define per partition
+ * async hypercall status.
+ */
+ struct completion async_hypercall;
+ u64 async_hypercall_status;
+
+ spinlock_t pt_irqfds_lock;
+ struct hlist_head pt_irqfds_list;
+ struct mutex irqfds_resampler_lock;
+ struct hlist_head irqfds_resampler_list;
+
+ struct hlist_head ioeventfds_list;
+
+ struct mshv_girq_routing_table __rcu *pt_girq_tbl;
+ u64 isolation_type;
+ bool import_completed;
+ bool pt_initialized;
+};
+
+#define pt_fmt(fmt) "p%llu: " fmt
+#define pt_devprintk(level, p, fmt, ...) \
+do { \
+ const struct mshv_partition *__pt = (p); \
+ dev_##level(__pt->pt_module_dev, pt_fmt(fmt), __pt->pt_id, \
+ ##__VA_ARGS__); \
+} while (0)
+#define pt_emerg(p, fmt, ...) pt_devprintk(emerg, p, fmt, ##__VA_ARGS__)
+#define pt_crit(p, fmt, ...) pt_devprintk(crit, p, fmt, ##__VA_ARGS__)
+#define pt_alert(p, fmt, ...) pt_devprintk(alert, p, fmt, ##__VA_ARGS__)
+#define pt_err(p, fmt, ...) pt_devprintk(err, p, fmt, ##__VA_ARGS__)
+#define pt_warn(p, fmt, ...) pt_devprintk(warn, p, fmt, ##__VA_ARGS__)
+#define pt_notice(p, fmt, ...) pt_devprintk(notice, p, fmt, ##__VA_ARGS__)
+#define pt_info(p, fmt, ...) pt_devprintk(info, p, fmt, ##__VA_ARGS__)
+#define pt_dbg(p, fmt, ...) pt_devprintk(dbg, p, fmt, ##__VA_ARGS__)
+
+struct mshv_lapic_irq {
+ u32 lapic_vector;
+ u64 lapic_apic_id;
+ union hv_interrupt_control lapic_control;
+};
+
+#define MSHV_MAX_GUEST_IRQS 4096
+
+/* representation of one guest irq entry, either msi or legacy */
+struct mshv_guest_irq_ent {
+ u32 girq_entry_valid; /* vfio looks at this */
+ u32 guest_irq_num; /* a unique number for each irq */
+ u32 girq_addr_lo; /* guest irq msi address info */
+ u32 girq_addr_hi;
+ u32 girq_irq_data; /* idt vector in some cases */
+};
+
+struct mshv_girq_routing_table {
+ u32 num_rt_entries;
+ struct mshv_guest_irq_ent mshv_girq_info_tbl[];
+};
+
+struct hv_synic_pages {
+ struct hv_message_page *synic_message_page;
+ struct hv_synic_event_flags_page *synic_event_flags_page;
+ struct hv_synic_event_ring_page *synic_event_ring_page;
+};
+
+struct mshv_root {
+ struct hv_synic_pages __percpu *synic_pages;
+ spinlock_t pt_ht_lock;
+ DECLARE_HASHTABLE(pt_htable, MSHV_PARTITIONS_HASH_BITS);
+};
+
+/*
+ * Callback for doorbell events.
+ * NOTE: This is called in interrupt context. The callback
+ * must defer any slow or sleeping work.
+ */
+typedef void (*doorbell_cb_t) (int doorbell_id, void *);
+
+/*
+ * port table information
+ */
+struct port_table_info {
+ struct rcu_head portbl_rcu;
+ enum hv_port_type hv_port_type;
+ union {
+ struct {
+ u64 reserved[2];
+ } hv_port_message;
+ struct {
+ u64 reserved[2];
+ } hv_port_event;
+ struct {
+ u64 reserved[2];
+ } hv_port_monitor;
+ struct {
+ doorbell_cb_t doorbell_cb;
+ void *data;
+ } hv_port_doorbell;
+ };
+};
+
+int mshv_update_routing_table(struct mshv_partition *partition,
+ const struct mshv_user_irq_entry *entries,
+ unsigned int numents);
+void mshv_free_routing_table(struct mshv_partition *partition);
+
+struct mshv_guest_irq_ent mshv_ret_girq_entry(struct mshv_partition *partition,
+ u32 irq_num);
+
+void mshv_copy_girq_info(struct mshv_guest_irq_ent *src_irq,
+ struct mshv_lapic_irq *dest_irq);
+
+void mshv_irqfd_routing_update(struct mshv_partition *partition);
+
+void mshv_port_table_fini(void);
+int mshv_portid_alloc(struct port_table_info *info);
+int mshv_portid_lookup(int port_id, struct port_table_info *info);
+void mshv_portid_free(int port_id);
+
+int mshv_register_doorbell(u64 partition_id, doorbell_cb_t doorbell_cb,
+ void *data, u64 gpa, u64 val, u64 flags);
+void mshv_unregister_doorbell(u64 partition_id, int doorbell_portid);
+
+void mshv_isr(void);
+int mshv_synic_init(unsigned int cpu);
+int mshv_synic_cleanup(unsigned int cpu);
+
+static inline bool mshv_partition_encrypted(struct mshv_partition *partition)
+{
+ return partition->isolation_type == HV_PARTITION_ISOLATION_TYPE_SNP;
+}
+
+struct mshv_partition *mshv_partition_get(struct mshv_partition *partition);
+void mshv_partition_put(struct mshv_partition *partition);
+struct mshv_partition *mshv_partition_find(u64 partition_id) __must_hold(RCU);
+
+/* hypercalls */
+
+int hv_call_withdraw_memory(u64 count, int node, u64 partition_id);
+int hv_call_create_partition(u64 flags,
+ struct hv_partition_creation_properties creation_properties,
+ union hv_partition_isolation_properties isolation_properties,
+ u64 *partition_id);
+int hv_call_initialize_partition(u64 partition_id);
+int hv_call_finalize_partition(u64 partition_id);
+int hv_call_delete_partition(u64 partition_id);
+int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs);
+int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
+ u32 flags, struct page **pages);
+int hv_call_unmap_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
+ u32 flags);
+int hv_call_delete_vp(u64 partition_id, u32 vp_index);
+int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
+ u64 dest_addr,
+ union hv_interrupt_control control);
+int hv_call_clear_virtual_interrupt(u64 partition_id);
+int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
+ union hv_gpa_page_access_state_flags state_flags,
+ int *written_total,
+ union hv_gpa_page_access_state *states);
+int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
+ struct hv_vp_state_data state_data,
+ /* Choose between pages and ret_output */
+ u64 page_count, struct page **pages,
+ union hv_output_get_vp_state *ret_output);
+int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
+ /* Choose between pages and bytes */
+ struct hv_vp_state_data state_data, u64 page_count,
+ struct page **pages, u32 num_bytes, u8 *bytes);
+int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl,
+ struct page **state_page);
+int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl);
+int hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id, struct hv_port_info *port_info,
+ u8 port_vtl, u8 min_connection_vtl, int node);
+int hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id);
+int hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id,
+ union hv_connection_id connection_id,
+ struct hv_connection_info *connection_info,
+ u8 connection_vtl, int node);
+int hv_call_disconnect_port(u64 connection_partition_id,
+ union hv_connection_id connection_id);
+int hv_call_notify_port_ring_empty(u32 sint_index);
+int hv_call_map_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ void **addr);
+int hv_call_unmap_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity);
+int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
+ u64 page_struct_count, u32 host_access,
+ u32 flags, u8 acquire);
+
+extern struct mshv_root mshv_root;
+extern enum hv_scheduler_type hv_scheduler_type;
+extern u8 * __percpu *hv_synic_eventring_tail;
+
+#endif /* _MSHV_ROOT_H_ */
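Per the note above, doorbell callbacks run in interrupt context, so a typical implementation only signals and defers everything else. A hedged sketch pairing a callback with mshv_register_doorbell() (the gpa and flags values are illustrative):

/* Illustrative doorbell callback: signal an eventfd, defer real work. */
static void example_doorbell_cb(int doorbell_id, void *data)
{
	struct eventfd_ctx *ctx = data;

	eventfd_signal(ctx);	/* safe from interrupt context */
}

/* Registration; returns a doorbell id or a negative errno. */
static int example_register(struct mshv_partition *pt, struct eventfd_ctx *ctx)
{
	return mshv_register_doorbell(pt->pt_id, example_doorbell_cb, ctx,
				      0xfe000000 /* gpa */, 0 /* value */,
				      HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE);
}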
diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
new file mode 100644
index 000000000000..a222a16107f6
--- /dev/null
+++ b/drivers/hv/mshv_root_hv_call.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * Hypercall helper functions used by the mshv_root module.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/mshyperv.h>
+
+#include "mshv_root.h"
+
+/* Determined empirically */
+#define HV_INIT_PARTITION_DEPOSIT_PAGES 208
+#define HV_MAP_GPA_DEPOSIT_PAGES 256
+#define HV_UMAP_GPA_PAGES 512
+
+#define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))
+
+#define HV_WITHDRAW_BATCH_SIZE (HV_HYP_PAGE_SIZE / sizeof(u64))
+#define HV_MAP_GPA_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_map_gpa_pages)) \
+ / sizeof(u64))
+#define HV_GET_VP_STATE_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_vp_state)) \
+ / sizeof(u64))
+#define HV_SET_VP_STATE_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_state)) \
+ / sizeof(u64))
+#define HV_GET_GPA_ACCESS_STATES_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(union hv_gpa_page_access_state)) \
+ / sizeof(union hv_gpa_page_access_state))
+#define HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT \
+ ((HV_HYP_PAGE_SIZE - \
+ sizeof(struct hv_input_modify_sparse_spa_page_host_access)) / \
+ sizeof(u64))
+
+int hv_call_withdraw_memory(u64 count, int node, u64 partition_id)
+{
+ struct hv_input_withdraw_memory *input_page;
+ struct hv_output_withdraw_memory *output_page;
+ struct page *page;
+ u16 completed;
+ unsigned long remaining = count;
+ u64 status;
+ int i;
+ unsigned long flags;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ output_page = page_address(page);
+
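+ /*
+ * Each iteration asks the hypervisor to return a batch of previously
+ * deposited pages; the returned PFNs are handed straight back to the
+ * kernel page allocator.
+ */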
+ while (remaining) {
+ local_irq_save(flags);
+
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input_page, 0, sizeof(*input_page));
+ input_page->partition_id = partition_id;
+ status = hv_do_rep_hypercall(HVCALL_WITHDRAW_MEMORY,
+ min(remaining, HV_WITHDRAW_BATCH_SIZE),
+ 0, input_page, output_page);
+
+ local_irq_restore(flags);
+
+ completed = hv_repcomp(status);
+
+ for (i = 0; i < completed; i++)
+ __free_page(pfn_to_page(output_page->gpa_page_list[i]));
+
+ if (!hv_result_success(status)) {
+ if (hv_result(status) == HV_STATUS_NO_RESOURCES)
+ status = HV_STATUS_SUCCESS;
+ break;
+ }
+
+ remaining -= completed;
+ }
+ free_page((unsigned long)output_page);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_create_partition(u64 flags,
+ struct hv_partition_creation_properties creation_properties,
+ union hv_partition_isolation_properties isolation_properties,
+ u64 *partition_id)
+{
+ struct hv_input_create_partition *input;
+ struct hv_output_create_partition *output;
+ u64 status;
+ int ret;
+ unsigned long irq_flags;
+
+ do {
+ local_irq_save(irq_flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->flags = flags;
+ input->compatibility_version = HV_COMPATIBILITY_21_H2;
+
+ memcpy(&input->partition_creation_properties, &creation_properties,
+ sizeof(creation_properties));
+
+ memcpy(&input->isolation_properties, &isolation_properties,
+ sizeof(isolation_properties));
+
+ status = hv_do_hypercall(HVCALL_CREATE_PARTITION,
+ input, output);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (hv_result_success(status))
+ *partition_id = output->partition_id;
+ local_irq_restore(irq_flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ local_irq_restore(irq_flags);
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ hv_current_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_initialize_partition(u64 partition_id)
+{
+ struct hv_input_initialize_partition input;
+ u64 status;
+ int ret;
+
+ input.partition_id = partition_id;
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
+ HV_INIT_PARTITION_DEPOSIT_PAGES);
+ if (ret)
+ return ret;
+
+ do {
+ status = hv_do_fast_hypercall8(HVCALL_INITIALIZE_PARTITION,
+ *(u64 *)&input);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_finalize_partition(u64 partition_id)
+{
+ struct hv_input_finalize_partition input;
+ u64 status;
+
+ input.partition_id = partition_id;
+ status = hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION,
+ *(u64 *)&input);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_delete_partition(u64 partition_id)
+{
+ struct hv_input_delete_partition input;
+ u64 status;
+
+ input.partition_id = partition_id;
+ status = hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION, *(u64 *)&input);
+
+ return hv_result_to_errno(status);
+}
+
+/* Ask the hypervisor to map guest ram pages or the guest mmio space */
+static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
+ u32 flags, struct page **pages, u64 mmio_spa)
+{
+ struct hv_input_map_gpa_pages *input_page;
+ u64 status, *pfnlist;
+ unsigned long irq_flags, large_shift = 0;
+ int ret = 0, done = 0;
+ u64 page_count = page_struct_count;
+
+ if (page_count == 0 || (pages && mmio_spa))
+ return -EINVAL;
+
+ if (flags & HV_MAP_GPA_LARGE_PAGE) {
+ if (mmio_spa)
+ return -EINVAL;
+
+ if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
+ return -EINVAL;
+
+ large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
+ page_count >>= large_shift;
+ }
+
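+ /*
+ * From here on, page_count and done count large pages when
+ * HV_MAP_GPA_LARGE_PAGE is set; indices into the 4K page-struct array
+ * are recovered by shifting left by large_shift.
+ */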
+ while (done < page_count) {
+ ulong i, completed, remain = page_count - done;
+ int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);
+
+ local_irq_save(irq_flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->target_partition_id = partition_id;
+ input_page->target_gpa_base = gfn + (done << large_shift);
+ input_page->map_flags = flags;
+ pfnlist = input_page->source_gpa_page_list;
+
+ for (i = 0; i < rep_count; i++)
+ if (flags & HV_MAP_GPA_NO_ACCESS) {
+ pfnlist[i] = 0;
+ } else if (pages) {
+ u64 index = (done + i) << large_shift;
+
+ if (index >= page_struct_count) {
+ ret = -EINVAL;
+ break;
+ }
+ pfnlist[i] = page_to_pfn(pages[index]);
+ } else {
+ pfnlist[i] = mmio_spa + done + i;
+ }
+ if (ret)
+ break;
+
+ status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
+ input_page, NULL);
+ local_irq_restore(irq_flags);
+
+ completed = hv_repcomp(status);
+
+ if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
+ HV_MAP_GPA_DEPOSIT_PAGES);
+ if (ret)
+ break;
+
+ } else if (!hv_result_success(status)) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+
+ done += completed;
+ }
+
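+ /* Undo any partial mapping so the GPA space isn't left inconsistent */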
+ if (ret && done) {
+ u32 unmap_flags = 0;
+
+ if (flags & HV_MAP_GPA_LARGE_PAGE)
+ unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
+ hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
+ }
+
+ return ret;
+}
+
+/* Ask the hypervisor to map guest ram pages */
+int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
+ u32 flags, struct page **pages)
+{
+ return hv_do_map_gpa_hcall(partition_id, gpa_target, page_count,
+ flags, pages, 0);
+}
+
+/* Ask the hypervisor to map guest mmio space */
+int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs)
+{
+ int i;
+ u32 flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE |
+ HV_MAP_GPA_NOT_CACHED;
+
+ for (i = 0; i < numpgs; i++)
+ if (page_is_ram(mmio_spa + i))
+ return -EINVAL;
+
+ return hv_do_map_gpa_hcall(partition_id, gfn, numpgs, flags, NULL,
+ mmio_spa);
+}
+
+int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
+ u32 flags)
+{
+ struct hv_input_unmap_gpa_pages *input_page;
+ u64 status, page_count = page_count_4k;
+ unsigned long irq_flags, large_shift = 0;
+ int ret = 0, done = 0;
+
+ if (page_count == 0)
+ return -EINVAL;
+
+ if (flags & HV_UNMAP_GPA_LARGE_PAGE) {
+ if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
+ return -EINVAL;
+
+ large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
+ page_count >>= large_shift;
+ }
+
+ while (done < page_count) {
+ ulong completed, remain = page_count - done;
+ int rep_count = min(remain, HV_UMAP_GPA_PAGES);
+
+ local_irq_save(irq_flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->target_partition_id = partition_id;
+ input_page->target_gpa_base = gfn + (done << large_shift);
+ input_page->unmap_flags = flags;
+ status = hv_do_rep_hypercall(HVCALL_UNMAP_GPA_PAGES, rep_count,
+ 0, input_page, NULL);
+ local_irq_restore(irq_flags);
+
+ completed = hv_repcomp(status);
+ if (!hv_result_success(status)) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+
+ done += completed;
+ }
+
+ return ret;
+}
+
+int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
+ union hv_gpa_page_access_state_flags state_flags,
+ int *written_total,
+ union hv_gpa_page_access_state *states)
+{
+ struct hv_input_get_gpa_pages_access_state *input_page;
+ union hv_gpa_page_access_state *output_page;
+ int completed = 0;
+ unsigned long remaining = count;
+ int rep_count, i;
+ u64 status = 0;
+ unsigned long flags;
+
+ *written_total = 0;
+ while (remaining) {
+ local_irq_save(flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input_page->partition_id = partition_id;
+ input_page->hv_gpa_page_number = gpa_base_pfn + *written_total;
+ input_page->flags = state_flags;
+ rep_count = min(remaining, HV_GET_GPA_ACCESS_STATES_BATCH_SIZE);
+
+ status = hv_do_rep_hypercall(HVCALL_GET_GPA_PAGES_ACCESS_STATES, rep_count,
+ 0, input_page, output_page);
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ break;
+ }
+ completed = hv_repcomp(status);
+ for (i = 0; i < completed; ++i)
+ states[i].as_uint8 = output_page[i].as_uint8;
+
+ local_irq_restore(flags);
+ states += completed;
+ *written_total += completed;
+ remaining -= completed;
+ }
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
+ u64 dest_addr,
+ union hv_interrupt_control control)
+{
+ struct hv_input_assert_virtual_interrupt *input;
+ unsigned long flags;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+ input->partition_id = partition_id;
+ input->vector = vector;
+ input->dest_addr = dest_addr;
+ input->control = control;
+ status = hv_do_hypercall(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input, NULL);
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_delete_vp(u64 partition_id, u32 vp_index)
+{
+ union hv_input_delete_vp input = {};
+ u64 status;
+
+ input.partition_id = partition_id;
+ input.vp_index = vp_index;
+
+ status = hv_do_fast_hypercall16(HVCALL_DELETE_VP,
+ input.as_uint64[0], input.as_uint64[1]);
+
+ return hv_result_to_errno(status);
+}
+EXPORT_SYMBOL_GPL(hv_call_delete_vp);
+
+int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
+ struct hv_vp_state_data state_data,
+ /* Choose between pages and ret_output */
+ u64 page_count, struct page **pages,
+ union hv_output_get_vp_state *ret_output)
+{
+ struct hv_input_get_vp_state *input;
+ union hv_output_get_vp_state *output;
+ u64 status;
+ int i;
+ u64 control;
+ unsigned long flags;
+ int ret = 0;
+
+ if (page_count > HV_GET_VP_STATE_BATCH_SIZE)
+ return -EINVAL;
+
+ if (!page_count && !ret_output)
+ return -EINVAL;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+ memset(input, 0, sizeof(*input));
+ memset(output, 0, sizeof(*output));
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->state_data = state_data;
+ for (i = 0; i < page_count; i++)
+ input->output_data_pfns[i] = page_to_pfn(pages[i]);
+
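+ /*
+ * The PFN list forms the hypercall's variable-sized header; its
+ * length is encoded in the control word via the varhead field.
+ */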
+ control = (HVCALL_GET_VP_STATE) |
+ (page_count << HV_HYPERCALL_VARHEAD_OFFSET);
+
+ status = hv_do_hypercall(control, input, output);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (hv_result_success(status) && ret_output)
+ memcpy(ret_output, output, sizeof(*output));
+
+ local_irq_restore(flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ local_irq_restore(flags);
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
+ /* Choose between pages and bytes */
+ struct hv_vp_state_data state_data, u64 page_count,
+ struct page **pages, u32 num_bytes, u8 *bytes)
+{
+ struct hv_input_set_vp_state *input;
+ u64 status;
+ int i;
+ u64 control;
+ unsigned long flags;
+ int ret = 0;
+ u16 varhead_sz;
+
+ if (page_count > HV_SET_VP_STATE_BATCH_SIZE)
+ return -EINVAL;
+ if (sizeof(*input) + num_bytes > HV_HYP_PAGE_SIZE)
+ return -EINVAL;
+
+ if (num_bytes)
+ /* round up to a multiple of 8 and convert bytes to a u64 count */
+ varhead_sz = (num_bytes + 7) >> 3;
+ else if (page_count)
+ varhead_sz = page_count;
+ else
+ return -EINVAL;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->state_data = state_data;
+ if (num_bytes) {
+ memcpy((u8 *)input->data, bytes, num_bytes);
+ } else {
+ for (i = 0; i < page_count; i++)
+ input->data[i].pfns = page_to_pfn(pages[i]);
+ }
+
+ control = (HVCALL_SET_VP_STATE) |
+ (varhead_sz << HV_HYPERCALL_VARHEAD_OFFSET);
+
+ status = hv_do_hypercall(control, input, NULL);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ local_irq_restore(flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ local_irq_restore(flags);
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl,
+ struct page **state_page)
+{
+ struct hv_input_map_vp_state_page *input;
+ struct hv_output_map_vp_state_page *output;
+ u64 status;
+ int ret;
+ unsigned long flags;
+
+ do {
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->type = type;
+ input->input_vtl = input_vtl;
+
+ status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input, output);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (hv_result_success(status))
+ *state_page = pfn_to_page(output->map_location);
+ local_irq_restore(flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+
+ local_irq_restore(flags);
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl)
+{
+ unsigned long flags;
+ u64 status;
+ struct hv_input_unmap_vp_state_page *input;
+
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input, 0, sizeof(*input));
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->type = type;
+ input->input_vtl = input_vtl;
+
+ status = hv_do_hypercall(HVCALL_UNMAP_VP_STATE_PAGE, input, NULL);
+
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_clear_virtual_interrupt(u64 partition_id)
+{
+ int status;
+
+ status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
+ partition_id);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id,
+ struct hv_port_info *port_info,
+ u8 port_vtl, u8 min_connection_vtl, int node)
+{
+ struct hv_input_create_port *input;
+ unsigned long flags;
+ int ret = 0;
+ int status;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+
+ input->port_partition_id = port_partition_id;
+ input->port_id = port_id;
+ input->connection_partition_id = connection_partition_id;
+ input->port_info = *port_info;
+ input->port_vtl = port_vtl;
+ input->min_connection_vtl = min_connection_vtl;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_CREATE_PORT, input, NULL);
+ local_irq_restore(flags);
+ if (hv_result_success(status))
+ break;
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, port_partition_id, 1);
+
+ } while (!ret);
+
+ return ret;
+}
+
+int
+hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
+{
+ union hv_input_delete_port input = { 0 };
+ int status;
+
+ input.port_partition_id = port_partition_id;
+ input.port_id = port_id;
+ status = hv_do_fast_hypercall16(HVCALL_DELETE_PORT,
+ input.as_uint64[0],
+ input.as_uint64[1]);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id,
+ union hv_connection_id connection_id,
+ struct hv_connection_info *connection_info,
+ u8 connection_vtl, int node)
+{
+ struct hv_input_connect_port *input;
+ unsigned long flags;
+ int ret = 0, status;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+ input->port_partition_id = port_partition_id;
+ input->port_id = port_id;
+ input->connection_partition_id = connection_partition_id;
+ input->connection_id = connection_id;
+ input->connection_info = *connection_info;
+ input->connection_vtl = connection_vtl;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_CONNECT_PORT, input, NULL);
+
+ local_irq_restore(flags);
+ if (hv_result_success(status))
+ break;
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ connection_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int
+hv_call_disconnect_port(u64 connection_partition_id,
+ union hv_connection_id connection_id)
+{
+ union hv_input_disconnect_port input = { 0 };
+ int status;
+
+ input.connection_partition_id = connection_partition_id;
+ input.connection_id = connection_id;
+ input.is_doorbell = 1;
+ status = hv_do_fast_hypercall16(HVCALL_DISCONNECT_PORT,
+ input.as_uint64[0],
+ input.as_uint64[1]);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_notify_port_ring_empty(u32 sint_index)
+{
+ union hv_input_notify_port_ring_empty input = { 0 };
+ int status;
+
+ input.sint_index = sint_index;
+ status = hv_do_fast_hypercall8(HVCALL_NOTIFY_PORT_RING_EMPTY,
+ input.as_uint64);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_map_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ void **addr)
+{
+ unsigned long flags;
+ struct hv_input_map_stats_page *input;
+ struct hv_output_map_stats_page *output;
+ u64 status, pfn;
+ int ret = 0;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->type = type;
+ input->identity = *identity;
+
+ status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
+ pfn = output->map_location;
+
+ local_irq_restore(flags);
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ if (hv_result_success(status))
+ break;
+ return ret;
+ }
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ hv_current_partition_id, 1);
+ if (ret)
+ return ret;
+ } while (!ret);
+
+ *addr = page_address(pfn_to_page(pfn));
+
+ return ret;
+}
+
+int hv_call_unmap_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity)
+{
+ unsigned long flags;
+ struct hv_input_unmap_stats_page *input;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->type = type;
+ input->identity = *identity;
+
+ status = hv_do_hypercall(HVCALL_UNMAP_STATS_PAGE, input, NULL);
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
+ u64 page_struct_count, u32 host_access,
+ u32 flags, u8 acquire)
+{
+ struct hv_input_modify_sparse_spa_page_host_access *input_page;
+ u64 status;
+ int done = 0;
+ unsigned long irq_flags, large_shift = 0;
+ u64 page_count = page_struct_count;
+ u16 code = acquire ? HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS :
+ HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS;
+
+ if (page_count == 0)
+ return -EINVAL;
+
+ if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE) {
+ if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
+ return -EINVAL;
+ large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
+ page_count >>= large_shift;
+ }
+
+ while (done < page_count) {
+ ulong i, completed, remain = page_count - done;
+ int rep_count = min(remain,
+ HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT);
+
+ local_irq_save(irq_flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input_page, 0, sizeof(*input_page));
+ /*
+ * Only set the partition id if you are making the pages
+ * exclusive.
+ */
+ if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
+ input_page->partition_id = partition_id;
+ input_page->flags = flags;
+ input_page->host_access = host_access;
+
+ for (i = 0; i < rep_count; i++) {
+ u64 index = (done + i) << large_shift;
+
+ if (index >= page_struct_count)
+ return -EINVAL;
+
+ input_page->spa_page_list[i] =
+ page_to_pfn(pages[index]);
+ }
+
+ status = hv_do_rep_hypercall(code, rep_count, 0, input_page,
+ NULL);
+ local_irq_restore(irq_flags);
+
+ completed = hv_repcomp(status);
+
+ if (!hv_result_success(status))
+ return hv_result_to_errno(status);
+
+ done += completed;
+ }
+
+ return 0;
+}
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
new file mode 100644
index 000000000000..72df774e410a
--- /dev/null
+++ b/drivers/hv/mshv_root_main.c
@@ -0,0 +1,2307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation.
+ *
+ * The main part of the mshv_root module, providing APIs to create
+ * and manage guest partitions.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/cpuhotplug.h>
+#include <linux/random.h>
+#include <asm/mshyperv.h>
+#include <linux/hyperv.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/page-flags.h>
+#include <linux/crash_dump.h>
+#include <linux/panic_notifier.h>
+#include <linux/vmalloc.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+#include "mshv_root.h"
+
+MODULE_AUTHOR("Microsoft");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V root partition VMM interface /dev/mshv");
+
+/* TODO move this to mshyperv.h when needed outside driver */
+static inline bool hv_parent_partition(void)
+{
+ return hv_root_partition();
+}
+
+/* TODO move this to another file when debugfs code is added */
+enum hv_stats_vp_counters { /* HV_THREAD_COUNTER */
+#if defined(CONFIG_X86)
+ VpRootDispatchThreadBlocked = 201,
+#elif defined(CONFIG_ARM64)
+ VpRootDispatchThreadBlocked = 94,
+#endif
+ VpStatsMaxCounter
+};
+
+struct hv_stats_page {
+ union {
+ u64 vp_cntrs[VpStatsMaxCounter]; /* VP counters */
+ u8 data[HV_HYP_PAGE_SIZE];
+ };
+} __packed;
+
+struct mshv_root mshv_root;
+
+enum hv_scheduler_type hv_scheduler_type;
+
+/* Once we implement the fast extended hypercall ABI they can go away. */
+static void * __percpu *root_scheduler_input;
+static void * __percpu *root_scheduler_output;
+
+static long mshv_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+static int mshv_dev_open(struct inode *inode, struct file *filp);
+static int mshv_dev_release(struct inode *inode, struct file *filp);
+static int mshv_vp_release(struct inode *inode, struct file *filp);
+static long mshv_vp_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+static int mshv_partition_release(struct inode *inode, struct file *filp);
+static long mshv_partition_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+static int mshv_vp_mmap(struct file *file, struct vm_area_struct *vma);
+static vm_fault_t mshv_vp_fault(struct vm_fault *vmf);
+static int mshv_init_async_handler(struct mshv_partition *partition);
+static void mshv_async_hvcall_handler(void *data, u64 *status);
+
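+/* Pre-initialized hypercall input VTL values: the default (VTL0) and the normal VTL */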
+static const union hv_input_vtl input_vtl_zero;
+static const union hv_input_vtl input_vtl_normal = {
+ .target_vtl = HV_NORMAL_VTL,
+ .use_target_vtl = 1,
+};
+
+static const struct vm_operations_struct mshv_vp_vm_ops = {
+ .fault = mshv_vp_fault,
+};
+
+static const struct file_operations mshv_vp_fops = {
+ .owner = THIS_MODULE,
+ .release = mshv_vp_release,
+ .unlocked_ioctl = mshv_vp_ioctl,
+ .llseek = noop_llseek,
+ .mmap = mshv_vp_mmap,
+};
+
+static const struct file_operations mshv_partition_fops = {
+ .owner = THIS_MODULE,
+ .release = mshv_partition_release,
+ .unlocked_ioctl = mshv_partition_ioctl,
+ .llseek = noop_llseek,
+};
+
+static const struct file_operations mshv_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = mshv_dev_open,
+ .release = mshv_dev_release,
+ .unlocked_ioctl = mshv_dev_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice mshv_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mshv",
+ .fops = &mshv_dev_fops,
+ .mode = 0600,
+};
+
+/*
+ * Only allow hypercalls that have a u64 partition id as the first member of
+ * the input structure.
+ * These are sorted by value.
+ */
+static u16 mshv_passthru_hvcalls[] = {
+ HVCALL_GET_PARTITION_PROPERTY,
+ HVCALL_SET_PARTITION_PROPERTY,
+ HVCALL_INSTALL_INTERCEPT,
+ HVCALL_GET_VP_REGISTERS,
+ HVCALL_SET_VP_REGISTERS,
+ HVCALL_TRANSLATE_VIRTUAL_ADDRESS,
+ HVCALL_CLEAR_VIRTUAL_INTERRUPT,
+ HVCALL_REGISTER_INTERCEPT_RESULT,
+ HVCALL_ASSERT_VIRTUAL_INTERRUPT,
+ HVCALL_GET_GPA_PAGES_ACCESS_STATES,
+ HVCALL_SIGNAL_EVENT_DIRECT,
+ HVCALL_POST_MESSAGE_DIRECT,
+ HVCALL_GET_VP_CPUID_VALUES,
+};
+
+static bool mshv_hvcall_is_async(u16 code)
+{
+ switch (code) {
+ case HVCALL_SET_PARTITION_PROPERTY:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static int mshv_ioctl_passthru_hvcall(struct mshv_partition *partition,
+ bool partition_locked,
+ void __user *user_args)
+{
+ u64 status;
+ int ret = 0, i;
+ bool is_async;
+ struct mshv_root_hvcall args;
+ struct page *page;
+ unsigned int pages_order;
+ void *input_pg = NULL;
+ void *output_pg = NULL;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.status || !args.in_ptr || args.in_sz < sizeof(u64) ||
+ mshv_field_nonzero(args, rsvd) || args.in_sz > HV_HYP_PAGE_SIZE)
+ return -EINVAL;
+
+ if (args.out_ptr && (!args.out_sz || args.out_sz > HV_HYP_PAGE_SIZE))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mshv_passthru_hvcalls); ++i)
+ if (args.code == mshv_passthru_hvcalls[i])
+ break;
+
+ if (i >= ARRAY_SIZE(mshv_passthru_hvcalls))
+ return -EINVAL;
+
+ is_async = mshv_hvcall_is_async(args.code);
+ if (is_async) {
+ /* async hypercalls can only be called from partition fd */
+ if (!partition_locked)
+ return -EINVAL;
+ ret = mshv_init_async_handler(partition);
+ if (ret)
+ return ret;
+ }
+
+ pages_order = args.out_ptr ? 1 : 0;
+ page = alloc_pages(GFP_KERNEL, pages_order);
+ if (!page)
+ return -ENOMEM;
+ input_pg = page_address(page);
+
+ if (args.out_ptr)
+ output_pg = (char *)input_pg + PAGE_SIZE;
+ else
+ output_pg = NULL;
+
+ if (copy_from_user(input_pg, (void __user *)args.in_ptr,
+ args.in_sz)) {
+ ret = -EFAULT;
+ goto free_pages_out;
+ }
+
+ /*
+ * NOTE: This only works because all the allowed hypercalls' input
+ * structs begin with a u64 partition_id field.
+ */
+ *(u64 *)input_pg = partition->pt_id;
+
+ if (args.reps)
+ status = hv_do_rep_hypercall(args.code, args.reps, 0,
+ input_pg, output_pg);
+ else
+ status = hv_do_hypercall(args.code, input_pg, output_pg);
+
+ if (hv_result(status) == HV_STATUS_CALL_PENDING) {
+ if (is_async) {
+ mshv_async_hvcall_handler(partition, &status);
+ } else { /* Paranoia check. This shouldn't happen! */
+ ret = -EBADFD;
+ goto free_pages_out;
+ }
+ }
+
+ if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition->pt_id, 1);
+ if (!ret)
+ ret = -EAGAIN;
+ } else if (!hv_result_success(status)) {
+ ret = hv_result_to_errno(status);
+ }
+
+ /*
+ * Always return the status and output data regardless of result.
+ * The VMM may need it to determine how to proceed. E.g. the status may
+ * contain the number of reps completed if a rep hypercall partially
+ * succeeded.
+ */
+ args.status = hv_result(status);
+ args.reps = args.reps ? hv_repcomp(status) : 0;
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ ret = -EFAULT;
+
+ if (output_pg &&
+ copy_to_user((void __user *)args.out_ptr, output_pg, args.out_sz))
+ ret = -EFAULT;
+
+free_pages_out:
+ free_pages((unsigned long)input_pg, pages_order);
+
+ return ret;
+}
+
+static inline bool is_ghcb_mapping_available(void)
+{
+#if IS_ENABLED(CONFIG_X86_64)
+ return ms_hyperv.ext_features & HV_VP_GHCB_ROOT_MAPPING_AVAILABLE;
+#else
+ return false;
+#endif
+}
+
+static int mshv_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ struct hv_register_assoc *registers)
+{
+ return hv_call_get_vp_registers(vp_index, partition_id,
+ count, input_vtl_zero, registers);
+}
+
+static int mshv_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ struct hv_register_assoc *registers)
+{
+ return hv_call_set_vp_registers(vp_index, partition_id,
+ count, input_vtl_zero, registers);
+}
+
+/*
+ * Explicit guest vCPU suspend is asynchronous by nature (as it is requested
+ * by a dom0 vCPU on behalf of a guest vCPU) and thus it can race with the
+ * "intercept" suspend done by the hypervisor.
+ * "Intercept" suspend leads to an asynchronous message delivery to dom0,
+ * which should be awaited to keep the VP loop consistent (i.e. no message
+ * pending upon VP resume).
+ * VP intercept suspend can't be done when the VP is already explicitly
+ * suspended, so there are only two possible race scenarios:
+ *   1. implicit suspend bit set -> explicit suspend bit set -> message sent
+ *   2. implicit suspend bit set -> message sent -> explicit suspend bit set
+ * Checking the implicit suspend bit after the explicit suspend request has
+ * succeeded covers either case, and allows us to reliably determine whether
+ * there is a message to receive and deliver to the VMM.
+ */
+static int
+mshv_suspend_vp(const struct mshv_vp *vp, bool *message_in_flight)
+{
+ struct hv_register_assoc explicit_suspend = {
+ .name = HV_REGISTER_EXPLICIT_SUSPEND
+ };
+ struct hv_register_assoc intercept_suspend = {
+ .name = HV_REGISTER_INTERCEPT_SUSPEND
+ };
+ union hv_explicit_suspend_register *es =
+ &explicit_suspend.value.explicit_suspend;
+ union hv_intercept_suspend_register *is =
+ &intercept_suspend.value.intercept_suspend;
+ int ret;
+
+ es->suspended = 1;
+
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &explicit_suspend);
+ if (ret) {
+ vp_err(vp, "Failed to explicitly suspend vCPU\n");
+ return ret;
+ }
+
+ ret = mshv_get_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &intercept_suspend);
+ if (ret) {
+ vp_err(vp, "Failed to get intercept suspend state\n");
+ return ret;
+ }
+
+ *message_in_flight = is->suspended;
+
+ return 0;
+}
+
+/*
+ * This function is used when VPs are scheduled by the hypervisor's
+ * scheduler.
+ *
+ * The caller has to make sure the registers array contains cleared
+ * HV_REGISTER_INTERCEPT_SUSPEND and HV_REGISTER_EXPLICIT_SUSPEND registers
+ * exactly in this order (the hypervisor clears them sequentially). With the
+ * opposite order, a newly arrived HV_REGISTER_INTERCEPT_SUSPEND could be
+ * invalidly cleared after the VP is released from
+ * HV_REGISTER_EXPLICIT_SUSPEND.
+ */
+static long mshv_run_vp_with_hyp_scheduler(struct mshv_vp *vp)
+{
+ long ret;
+ struct hv_register_assoc suspend_regs[2] = {
+ { .name = HV_REGISTER_INTERCEPT_SUSPEND },
+ { .name = HV_REGISTER_EXPLICIT_SUSPEND }
+ };
+ size_t count = ARRAY_SIZE(suspend_regs);
+
+ /* Resume VP execution */
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ count, suspend_regs);
+ if (ret) {
+ vp_err(vp, "Failed to resume vp execution. %lx\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible(vp->run.vp_suspend_queue,
+ vp->run.kicked_by_hv == 1);
+ if (ret) {
+ bool message_in_flight;
+
+ /*
+ * Otherwise the waiting was interrupted by a signal: suspend
+ * the vCPU explicitly and copy message in flight (if any).
+ */
+ ret = mshv_suspend_vp(vp, &message_in_flight);
+ if (ret)
+ return ret;
+
+ /* Return if no message in flight */
+ if (!message_in_flight)
+ return -EINTR;
+
+ /* Wait for the message in flight. */
+ wait_event(vp->run.vp_suspend_queue, vp->run.kicked_by_hv == 1);
+ }
+
+ /*
+ * Reset the flag to make the wait_event call above work
+ * next time.
+ */
+ vp->run.kicked_by_hv = 0;
+
+ return 0;
+}
+
+static int
+mshv_vp_dispatch(struct mshv_vp *vp, u32 flags,
+ struct hv_output_dispatch_vp *res)
+{
+ struct hv_input_dispatch_vp *input;
+ struct hv_output_dispatch_vp *output;
+ u64 status;
+
+ preempt_disable();
+ input = *this_cpu_ptr(root_scheduler_input);
+ output = *this_cpu_ptr(root_scheduler_output);
+
+ memset(input, 0, sizeof(*input));
+ memset(output, 0, sizeof(*output));
+
+ input->partition_id = vp->vp_partition->pt_id;
+ input->vp_index = vp->vp_index;
+ input->time_slice = 0; /* Run forever until something happens */
+ input->spec_ctrl = 0; /* TODO: set sensible flags */
+ input->flags = flags;
+
+ vp->run.flags.root_sched_dispatched = 1;
+ status = hv_do_hypercall(HVCALL_DISPATCH_VP, input, output);
+ vp->run.flags.root_sched_dispatched = 0;
+
+ *res = *output;
+ preempt_enable();
+
+ if (!hv_result_success(status))
+ vp_err(vp, "%s: status %s\n", __func__,
+ hv_result_to_string(status));
+
+ return hv_result_to_errno(status);
+}
+
+static int
+mshv_vp_clear_explicit_suspend(struct mshv_vp *vp)
+{
+ struct hv_register_assoc explicit_suspend = {
+ .name = HV_REGISTER_EXPLICIT_SUSPEND,
+ .value.explicit_suspend.suspended = 0,
+ };
+ int ret;
+
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &explicit_suspend);
+
+ if (ret)
+ vp_err(vp, "Failed to unsuspend\n");
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_X86_64)
+static u64 mshv_vp_interrupt_pending(struct mshv_vp *vp)
+{
+ if (!vp->vp_register_page)
+ return 0;
+ return vp->vp_register_page->interrupt_vectors.as_uint64;
+}
+#else
+static u64 mshv_vp_interrupt_pending(struct mshv_vp *vp)
+{
+ return 0;
+}
+#endif
+
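+/*
+ * The hypervisor may account this counter in either the self or the parent
+ * stats area, so consult both.
+ */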
+static bool mshv_vp_dispatch_thread_blocked(struct mshv_vp *vp)
+{
+ struct hv_stats_page **stats = vp->vp_stats_pages;
+ u64 *self_vp_cntrs = stats[HV_STATS_AREA_SELF]->vp_cntrs;
+ u64 *parent_vp_cntrs = stats[HV_STATS_AREA_PARENT]->vp_cntrs;
+
+ if (self_vp_cntrs[VpRootDispatchThreadBlocked])
+ return self_vp_cntrs[VpRootDispatchThreadBlocked];
+ return parent_vp_cntrs[VpRootDispatchThreadBlocked];
+}
+
+static int
+mshv_vp_wait_for_hv_kick(struct mshv_vp *vp)
+{
+ int ret;
+
+ ret = wait_event_interruptible(vp->run.vp_suspend_queue,
+ (vp->run.kicked_by_hv == 1 &&
+ !mshv_vp_dispatch_thread_blocked(vp)) ||
+ mshv_vp_interrupt_pending(vp));
+ if (ret)
+ return -EINTR;
+
+ vp->run.flags.root_sched_blocked = 0;
+ vp->run.kicked_by_hv = 0;
+
+ return 0;
+}
+
+static int mshv_pre_guest_mode_work(struct mshv_vp *vp)
+{
+ const ulong work_flags = _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING |
+ _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME;
+ ulong th_flags;
+
+ th_flags = read_thread_flags();
+ while (th_flags & work_flags) {
+ int ret;
+
+ /* nb: following will call schedule */
+ ret = mshv_do_pre_guest_mode_work(th_flags);
+
+ if (ret)
+ return ret;
+
+ th_flags = read_thread_flags();
+ }
+
+ return 0;
+}
+
+/* Must be called with interrupts enabled */
+static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
+{
+ long ret;
+
+ if (vp->run.flags.root_sched_blocked) {
+ /*
+ * Dispatch state of this VP is blocked. Need to wait
+ * for the hypervisor to clear the blocked state before
+ * dispatching it.
+ */
+ ret = mshv_vp_wait_for_hv_kick(vp);
+ if (ret)
+ return ret;
+ }
+
+ do {
+ u32 flags = 0;
+ struct hv_output_dispatch_vp output;
+
+ ret = mshv_pre_guest_mode_work(vp);
+ if (ret)
+ break;
+
+ if (vp->run.flags.intercept_suspend)
+ flags |= HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND;
+
+ if (mshv_vp_interrupt_pending(vp))
+ flags |= HV_DISPATCH_VP_FLAG_SCAN_INTERRUPT_INJECTION;
+
+ ret = mshv_vp_dispatch(vp, flags, &output);
+ if (ret)
+ break;
+
+ vp->run.flags.intercept_suspend = 0;
+
+ if (output.dispatch_state == HV_VP_DISPATCH_STATE_BLOCKED) {
+ if (output.dispatch_event ==
+ HV_VP_DISPATCH_EVENT_SUSPEND) {
+ /*
+ * TODO: remove the warning once VP canceling
+ * is supported
+ */
+ WARN_ONCE(atomic64_read(&vp->run.vp_signaled_count),
+ "%s: vp#%d: unexpected explicit suspend\n",
+ __func__, vp->vp_index);
+ /*
+ * Need to clear explicit suspend before
+ * dispatching.
+ * Explicit suspend is either:
+ * - set right after the first VP dispatch or
+ * - set explicitly via hypercall
+ * Since the latter case is not yet supported,
+ * simply clear it here.
+ */
+ ret = mshv_vp_clear_explicit_suspend(vp);
+ if (ret)
+ break;
+
+ ret = mshv_vp_wait_for_hv_kick(vp);
+ if (ret)
+ break;
+ } else {
+ vp->run.flags.root_sched_blocked = 1;
+ ret = mshv_vp_wait_for_hv_kick(vp);
+ if (ret)
+ break;
+ }
+ } else {
+ /* HV_VP_DISPATCH_STATE_READY */
+ if (output.dispatch_event ==
+ HV_VP_DISPATCH_EVENT_INTERCEPT)
+ vp->run.flags.intercept_suspend = 1;
+ }
+ } while (!vp->run.flags.intercept_suspend);
+
+ return ret;
+}
+
+static_assert(sizeof(struct hv_message) <= MSHV_RUN_VP_BUF_SZ,
+ "sizeof(struct hv_message) must not exceed MSHV_RUN_VP_BUF_SZ");
+
+static long mshv_vp_ioctl_run_vp(struct mshv_vp *vp, void __user *ret_msg)
+{
+ long rc;
+
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
+ rc = mshv_run_vp_with_root_scheduler(vp);
+ else
+ rc = mshv_run_vp_with_hyp_scheduler(vp);
+
+ if (rc)
+ return rc;
+
+ if (copy_to_user(ret_msg, vp->vp_intercept_msg_page,
+ sizeof(struct hv_message)))
+ rc = -EFAULT;
+
+ return rc;
+}
+
+static int
+mshv_vp_ioctl_get_set_state_pfn(struct mshv_vp *vp,
+ struct hv_vp_state_data state_data,
+ unsigned long user_pfn, size_t page_count,
+ bool is_set)
+{
+ int completed, ret = 0;
+ unsigned long check;
+ struct page **pages;
+
+ if (page_count > INT_MAX)
+ return -EINVAL;
+ /*
+ * Check the arithmetic for wraparound/overflow.
+ * The last page address in the buffer is:
+ * (user_pfn + (page_count - 1)) * PAGE_SIZE
+ */
+ if (check_add_overflow(user_pfn, (page_count - 1), &check))
+ return -EOVERFLOW;
+ if (check_mul_overflow(check, PAGE_SIZE, &check))
+ return -EOVERFLOW;
+
+ /* Pin user pages so hypervisor can copy directly to them */
+ pages = kcalloc(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (completed = 0; completed < page_count; completed += ret) {
+ unsigned long user_addr = (user_pfn + completed) * PAGE_SIZE;
+ int remaining = page_count - completed;
+
+ ret = pin_user_pages_fast(user_addr, remaining, FOLL_WRITE,
+ &pages[completed]);
+ if (ret < 0) {
+ vp_err(vp, "%s: Failed to pin user pages error %i\n",
+ __func__, ret);
+ goto unpin_pages;
+ }
+ }
+
+ if (is_set)
+ ret = hv_call_set_vp_state(vp->vp_index,
+ vp->vp_partition->pt_id,
+ state_data, page_count, pages,
+ 0, NULL);
+ else
+ ret = hv_call_get_vp_state(vp->vp_index,
+ vp->vp_partition->pt_id,
+ state_data, page_count, pages,
+ NULL);
+
+unpin_pages:
+ unpin_user_pages(pages, completed);
+ kfree(pages);
+ return ret;
+}
+
+static long
+mshv_vp_ioctl_get_set_state(struct mshv_vp *vp,
+ struct mshv_get_set_vp_state __user *user_args,
+ bool is_set)
+{
+ struct mshv_get_set_vp_state args;
+ long ret = 0;
+ union hv_output_get_vp_state vp_state;
+ u32 data_sz;
+ struct hv_vp_state_data state_data = {};
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.type >= MSHV_VP_STATE_COUNT || mshv_field_nonzero(args, rsvd) ||
+ !args.buf_sz || !PAGE_ALIGNED(args.buf_sz) ||
+ !PAGE_ALIGNED(args.buf_ptr))
+ return -EINVAL;
+
+ if (!access_ok((void __user *)args.buf_ptr, args.buf_sz))
+ return -EFAULT;
+
+ switch (args.type) {
+ case MSHV_VP_STATE_LAPIC:
+ state_data.type = HV_GET_SET_VP_STATE_LAPIC_STATE;
+ data_sz = HV_HYP_PAGE_SIZE;
+ break;
+ case MSHV_VP_STATE_XSAVE:
+ {
+ u64 data_sz_64;
+
+ ret = hv_call_get_partition_property(vp->vp_partition->pt_id,
+ HV_PARTITION_PROPERTY_XSAVE_STATES,
+ &state_data.xsave.states.as_uint64);
+ if (ret)
+ return ret;
+
+ ret = hv_call_get_partition_property(vp->vp_partition->pt_id,
+ HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE,
+ &data_sz_64);
+ if (ret)
+ return ret;
+
+ data_sz = (u32)data_sz_64;
+ state_data.xsave.flags = 0;
+ /* Always request legacy states */
+ state_data.xsave.states.legacy_x87 = 1;
+ state_data.xsave.states.legacy_sse = 1;
+ state_data.type = HV_GET_SET_VP_STATE_XSAVE;
+ break;
+ }
+ case MSHV_VP_STATE_SIMP:
+ state_data.type = HV_GET_SET_VP_STATE_SIM_PAGE;
+ data_sz = HV_HYP_PAGE_SIZE;
+ break;
+ case MSHV_VP_STATE_SIEFP:
+ state_data.type = HV_GET_SET_VP_STATE_SIEF_PAGE;
+ data_sz = HV_HYP_PAGE_SIZE;
+ break;
+ case MSHV_VP_STATE_SYNTHETIC_TIMERS:
+ state_data.type = HV_GET_SET_VP_STATE_SYNTHETIC_TIMERS;
+ data_sz = sizeof(vp_state.synthetic_timers_state);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (copy_to_user(&user_args->buf_sz, &data_sz, sizeof(user_args->buf_sz)))
+ return -EFAULT;
+
+ if (data_sz > args.buf_sz)
+ return -EINVAL;
+
+ /* If the data is transmitted via pfns, delegate to helper */
+ if (state_data.type & HV_GET_SET_VP_STATE_TYPE_PFN) {
+ unsigned long user_pfn = PFN_DOWN(args.buf_ptr);
+ size_t page_count = PFN_DOWN(args.buf_sz);
+
+ return mshv_vp_ioctl_get_set_state_pfn(vp, state_data, user_pfn,
+ page_count, is_set);
+ }
+
+ /* Paranoia check - this shouldn't happen! */
+ if (data_sz > sizeof(vp_state)) {
+ vp_err(vp, "Invalid vp state data size!\n");
+ return -EINVAL;
+ }
+
+ if (is_set) {
+ if (copy_from_user(&vp_state, (__user void *)args.buf_ptr, data_sz))
+ return -EFAULT;
+
+ return hv_call_set_vp_state(vp->vp_index,
+ vp->vp_partition->pt_id,
+ state_data, 0, NULL,
+ sizeof(vp_state), (u8 *)&vp_state);
+ }
+
+ ret = hv_call_get_vp_state(vp->vp_index, vp->vp_partition->pt_id,
+ state_data, 0, NULL, &vp_state);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)args.buf_ptr, &vp_state, data_sz))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+mshv_vp_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct mshv_vp *vp = filp->private_data;
+ long r = -ENOTTY;
+
+ if (mutex_lock_killable(&vp->vp_mutex))
+ return -EINTR;
+
+ switch (ioctl) {
+ case MSHV_RUN_VP:
+ r = mshv_vp_ioctl_run_vp(vp, (void __user *)arg);
+ break;
+ case MSHV_GET_VP_STATE:
+ r = mshv_vp_ioctl_get_set_state(vp, (void __user *)arg, false);
+ break;
+ case MSHV_SET_VP_STATE:
+ r = mshv_vp_ioctl_get_set_state(vp, (void __user *)arg, true);
+ break;
+ case MSHV_ROOT_HVCALL:
+ r = mshv_ioctl_passthru_hvcall(vp->vp_partition, false,
+ (void __user *)arg);
+ break;
+ default:
+ vp_warn(vp, "Invalid ioctl: %#x\n", ioctl);
+ break;
+ }
+ mutex_unlock(&vp->vp_mutex);
+
+ return r;
+}
+
+static vm_fault_t mshv_vp_fault(struct vm_fault *vmf)
+{
+ struct mshv_vp *vp = vmf->vma->vm_file->private_data;
+
+ switch (vmf->vma->vm_pgoff) {
+ case MSHV_VP_MMAP_OFFSET_REGISTERS:
+ vmf->page = virt_to_page(vp->vp_register_page);
+ break;
+ case MSHV_VP_MMAP_OFFSET_INTERCEPT_MESSAGE:
+ vmf->page = virt_to_page(vp->vp_intercept_msg_page);
+ break;
+ case MSHV_VP_MMAP_OFFSET_GHCB:
+ vmf->page = virt_to_page(vp->vp_ghcb_page);
+ break;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ get_page(vmf->page);
+
+ return 0;
+}
+
+static int mshv_vp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct mshv_vp *vp = file->private_data;
+
+ switch (vma->vm_pgoff) {
+ case MSHV_VP_MMAP_OFFSET_REGISTERS:
+ if (!vp->vp_register_page)
+ return -ENODEV;
+ break;
+ case MSHV_VP_MMAP_OFFSET_INTERCEPT_MESSAGE:
+ if (!vp->vp_intercept_msg_page)
+ return -ENODEV;
+ break;
+ case MSHV_VP_MMAP_OFFSET_GHCB:
+ if (!vp->vp_ghcb_page)
+ return -ENODEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vma->vm_ops = &mshv_vp_vm_ops;
+ return 0;
+}
+
+static int
+mshv_vp_release(struct inode *inode, struct file *filp)
+{
+ struct mshv_vp *vp = filp->private_data;
+
+ /* Rest of VP cleanup happens in destroy_partition() */
+ mshv_partition_put(vp->vp_partition);
+ return 0;
+}
+
+static void mshv_vp_stats_unmap(u64 partition_id, u32 vp_index)
+{
+ union hv_stats_object_identity identity = {
+ .vp.partition_id = partition_id,
+ .vp.vp_index = vp_index,
+ };
+
+ identity.vp.stats_area_type = HV_STATS_AREA_SELF;
+ hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+
+ identity.vp.stats_area_type = HV_STATS_AREA_PARENT;
+ hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+}
+
+static int mshv_vp_stats_map(u64 partition_id, u32 vp_index,
+ void *stats_pages[])
+{
+ union hv_stats_object_identity identity = {
+ .vp.partition_id = partition_id,
+ .vp.vp_index = vp_index,
+ };
+ int err;
+
+ identity.vp.stats_area_type = HV_STATS_AREA_SELF;
+ err = hv_call_map_stat_page(HV_STATS_OBJECT_VP, &identity,
+ &stats_pages[HV_STATS_AREA_SELF]);
+ if (err)
+ return err;
+
+ identity.vp.stats_area_type = HV_STATS_AREA_PARENT;
+ err = hv_call_map_stat_page(HV_STATS_OBJECT_VP, &identity,
+ &stats_pages[HV_STATS_AREA_PARENT]);
+ if (err)
+ goto unmap_self;
+
+ return 0;
+
+unmap_self:
+ identity.vp.stats_area_type = HV_STATS_AREA_SELF;
+ hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+ return err;
+}
+
+static long
+mshv_partition_ioctl_create_vp(struct mshv_partition *partition,
+ void __user *arg)
+{
+ struct mshv_create_vp args;
+ struct mshv_vp *vp;
+ struct page *intercept_message_page, *register_page, *ghcb_page;
+ void *stats_pages[2];
+ long ret;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ if (args.vp_index >= MSHV_MAX_VPS)
+ return -EINVAL;
+
+ if (partition->pt_vp_array[args.vp_index])
+ return -EEXIST;
+
+ ret = hv_call_create_vp(NUMA_NO_NODE, partition->pt_id, args.vp_index,
+ 0 /* Only valid for root partition VPs */);
+ if (ret)
+ return ret;
+
+ ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero,
+ &intercept_message_page);
+ if (ret)
+ goto destroy_vp;
+
+ if (!mshv_partition_encrypted(partition)) {
+ ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero,
+ &register_page);
+ if (ret)
+ goto unmap_intercept_message_page;
+ }
+
+ if (mshv_partition_encrypted(partition) &&
+ is_ghcb_mapping_available()) {
+ ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal,
+ &ghcb_page);
+ if (ret)
+ goto unmap_register_page;
+ }
+
+ if (hv_parent_partition()) {
+ ret = mshv_vp_stats_map(partition->pt_id, args.vp_index,
+ stats_pages);
+ if (ret)
+ goto unmap_ghcb_page;
+ }
+
+ vp = kzalloc(sizeof(*vp), GFP_KERNEL);
+ if (!vp) {
+ ret = -ENOMEM;
+ goto unmap_stats_pages;
+ }
+
+ vp->vp_partition = mshv_partition_get(partition);
+ if (!vp->vp_partition) {
+ ret = -EBADF;
+ goto free_vp;
+ }
+
+ mutex_init(&vp->vp_mutex);
+ init_waitqueue_head(&vp->run.vp_suspend_queue);
+ atomic64_set(&vp->run.vp_signaled_count, 0);
+
+ vp->vp_index = args.vp_index;
+ vp->vp_intercept_msg_page = page_to_virt(intercept_message_page);
+ if (!mshv_partition_encrypted(partition))
+ vp->vp_register_page = page_to_virt(register_page);
+
+ if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available())
+ vp->vp_ghcb_page = page_to_virt(ghcb_page);
+
+ if (hv_parent_partition())
+ memcpy(vp->vp_stats_pages, stats_pages, sizeof(stats_pages));
+
+ /*
+ * Keep anon_inode_getfd last: it installs the fd in the fd table and
+ * thus makes the state accessible from user space.
+ */
+ ret = anon_inode_getfd("mshv_vp", &mshv_vp_fops, vp,
+ O_RDWR | O_CLOEXEC);
+ if (ret < 0)
+ goto put_partition;
+
+ /* Already serialized by the partition mutex, held for all ioctls */
+ partition->pt_vp_count++;
+ partition->pt_vp_array[args.vp_index] = vp;
+
+ return ret;
+
+put_partition:
+ mshv_partition_put(partition);
+free_vp:
+ kfree(vp);
+unmap_stats_pages:
+ if (hv_parent_partition())
+ mshv_vp_stats_unmap(partition->pt_id, args.vp_index);
+unmap_ghcb_page:
+ if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available()) {
+ hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal);
+ }
+unmap_register_page:
+ if (!mshv_partition_encrypted(partition)) {
+ hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero);
+ }
+unmap_intercept_message_page:
+ hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero);
+destroy_vp:
+ hv_call_delete_vp(partition->pt_id, args.vp_index);
+ return ret;
+}
+
+static int mshv_init_async_handler(struct mshv_partition *partition)
+{
+ if (completion_done(&partition->async_hypercall)) {
+ pt_err(partition,
+ "Cannot issue async hypercall while another one in progress!\n");
+ return -EPERM;
+ }
+
+ reinit_completion(&partition->async_hypercall);
+ return 0;
+}
+
+static void mshv_async_hvcall_handler(void *data, u64 *status)
+{
+ struct mshv_partition *partition = data;
+
+ wait_for_completion(&partition->async_hypercall);
+ pt_dbg(partition, "Async hypercall completed!\n");
+
+ *status = partition->async_hypercall_status;
+}
+
+static int
+mshv_partition_region_share(struct mshv_mem_region *region)
+{
+ u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_SHARED;
+
+ if (region->flags.large_pages)
+ flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
+
+ return hv_call_modify_spa_host_access(region->partition->pt_id,
+ region->pages, region->nr_pages,
+ HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE,
+ flags, true);
+}
+
+static int
+mshv_partition_region_unshare(struct mshv_mem_region *region)
+{
+ u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE;
+
+ if (region->flags.large_pages)
+ flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
+
+ return hv_call_modify_spa_host_access(region->partition->pt_id,
+ region->pages, region->nr_pages,
+ 0,
+ flags, false);
+}
+
+static int
+mshv_region_remap_pages(struct mshv_mem_region *region, u32 map_flags,
+ u64 page_offset, u64 page_count)
+{
+ if (page_offset + page_count > region->nr_pages)
+ return -EINVAL;
+
+ if (region->flags.large_pages)
+ map_flags |= HV_MAP_GPA_LARGE_PAGE;
+
+ /* ask the hypervisor to map guest ram */
+ return hv_call_map_gpa_pages(region->partition->pt_id,
+ region->start_gfn + page_offset,
+ page_count, map_flags,
+ region->pages + page_offset);
+}
+
+static int
+mshv_region_map(struct mshv_mem_region *region)
+{
+ u32 map_flags = region->hv_map_flags;
+
+ return mshv_region_remap_pages(region, map_flags,
+ 0, region->nr_pages);
+}
+
+static void
+mshv_region_evict_pages(struct mshv_mem_region *region,
+ u64 page_offset, u64 page_count)
+{
+ if (region->flags.range_pinned)
+ unpin_user_pages(region->pages + page_offset, page_count);
+
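+ /* Clear the stale page pointers so the slots read as unpopulated */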
+ memset(region->pages + page_offset, 0,
+ page_count * sizeof(struct page *));
+}
+
+static void
+mshv_region_evict(struct mshv_mem_region *region)
+{
+ mshv_region_evict_pages(region, 0, region->nr_pages);
+}
+
+static int
+mshv_region_populate_pages(struct mshv_mem_region *region,
+ u64 page_offset, u64 page_count)
+{
+ u64 done_count, nr_pages;
+ struct page **pages;
+ __u64 userspace_addr;
+ int ret;
+
+ if (page_offset + page_count > region->nr_pages)
+ return -EINVAL;
+
+ for (done_count = 0; done_count < page_count; done_count += ret) {
+ pages = region->pages + page_offset + done_count;
+ userspace_addr = region->start_uaddr +
+ (page_offset + done_count) *
+ HV_HYP_PAGE_SIZE;
+ nr_pages = min(page_count - done_count,
+ MSHV_PIN_PAGES_BATCH_SIZE);
+
+ /*
+ * Pinning assuming 4k pages works for large pages too.
+ * All page structs within the large page are returned.
+ *
+ * Pin requests are batched because pin_user_pages_fast
+ * with the FOLL_LONGTERM flag does a large temporary
+ * allocation of contiguous memory.
+ */
+ if (region->flags.range_pinned)
+ ret = pin_user_pages_fast(userspace_addr,
+ nr_pages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ pages);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (ret < 0)
+ goto release_pages;
+ }
+
+ if (PageHuge(region->pages[page_offset]))
+ region->flags.large_pages = true;
+
+ return 0;
+
+release_pages:
+ mshv_region_evict_pages(region, page_offset, done_count);
+ return ret;
+}
+
+static int
+mshv_region_populate(struct mshv_mem_region *region)
+{
+ return mshv_region_populate_pages(region, 0, region->nr_pages);
+}
+
+static struct mshv_mem_region *
+mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn)
+{
+ struct mshv_mem_region *region;
+
+ hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) {
+ if (gfn >= region->start_gfn &&
+ gfn < region->start_gfn + region->nr_pages)
+ return region;
+ }
+
+ return NULL;
+}
+
+static struct mshv_mem_region *
+mshv_partition_region_by_uaddr(struct mshv_partition *partition, u64 uaddr)
+{
+ struct mshv_mem_region *region;
+
+ hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) {
+ if (uaddr >= region->start_uaddr &&
+ uaddr < region->start_uaddr +
+ (region->nr_pages << HV_HYP_PAGE_SHIFT))
+ return region;
+ }
+
+ return NULL;
+}
+
+/*
+ * NB: caller checks and makes sure mem->size is page aligned
+ * Returns: 0 with regionpp updated on success, or -errno
+ */
+static int mshv_partition_create_region(struct mshv_partition *partition,
+ struct mshv_user_mem_region *mem,
+ struct mshv_mem_region **regionpp,
+ bool is_mmio)
+{
+ struct mshv_mem_region *region;
+ u64 nr_pages = HVPFN_DOWN(mem->size);
+
+ /* Reject overlapping regions */
+ if (mshv_partition_region_by_gfn(partition, mem->guest_pfn) ||
+ mshv_partition_region_by_gfn(partition, mem->guest_pfn + nr_pages - 1) ||
+ mshv_partition_region_by_uaddr(partition, mem->userspace_addr) ||
+ mshv_partition_region_by_uaddr(partition, mem->userspace_addr + mem->size - 1))
+ return -EEXIST;
+
+ region = vzalloc(sizeof(*region) + sizeof(struct page *) * nr_pages);
+ if (!region)
+ return -ENOMEM;
+
+ region->nr_pages = nr_pages;
+ region->start_gfn = mem->guest_pfn;
+ region->start_uaddr = mem->userspace_addr;
+ region->hv_map_flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_ADJUSTABLE;
+ if (mem->flags & BIT(MSHV_SET_MEM_BIT_WRITABLE))
+ region->hv_map_flags |= HV_MAP_GPA_WRITABLE;
+ if (mem->flags & BIT(MSHV_SET_MEM_BIT_EXECUTABLE))
+ region->hv_map_flags |= HV_MAP_GPA_EXECUTABLE;
+
+ /* Note: large_pages flag populated when we pin the pages */
+ if (!is_mmio)
+ region->flags.range_pinned = true;
+
+ region->partition = partition;
+
+ *regionpp = region;
+
+ return 0;
+}
+
+/*
+ * Map guest RAM. For SNP partitions, host access to the pages is released
+ * first.
+ * Side effects: in case of failure, pages are unpinned when feasible.
+ */
+static int
+mshv_partition_mem_region_map(struct mshv_mem_region *region)
+{
+ struct mshv_partition *partition = region->partition;
+ int ret;
+
+ ret = mshv_region_populate(region);
+ if (ret) {
+ pt_err(partition, "Failed to populate memory region: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /*
+ * For an SNP partition, host access must be released for every memory
+ * region before it is mapped into the partition. This is done with an
+ * additional hypercall that updates the SLAT to release host access to
+ * the guest memory regions.
+ */
+ if (mshv_partition_encrypted(partition)) {
+ ret = mshv_partition_region_unshare(region);
+ if (ret) {
+ pt_err(partition,
+ "Failed to unshare memory region (guest_pfn: %llu): %d\n",
+ region->start_gfn, ret);
+ goto evict_region;
+ }
+ }
+
+ ret = mshv_region_map(region);
+ if (ret && mshv_partition_encrypted(partition)) {
+ int shrc;
+
+ shrc = mshv_partition_region_share(region);
+ if (!shrc)
+ goto evict_region;
+
+ pt_err(partition,
+ "Failed to share memory region (guest_pfn: %llu): %d\n",
+ region->start_gfn, shrc);
+ /*
+ * Don't unpin if marking shared failed, because the pages are no
+ * longer mapped in the host (i.e. the root partition).
+ */
+ goto err_out;
+ }
+
+ return 0;
+
+evict_region:
+ mshv_region_evict(region);
+err_out:
+ return ret;
+}
+
+/*
+ * This maps two things: guest RAM and, for PCI passthrough, MMIO space.
+ *
+ * mmio:
+ * - vfio overloads vm_pgoff to store the mmio start pfn/spa.
+ * - Two things need to happen for mapping mmio range:
+ * 1. mapped in the uaddr so VMM can access it.
+ * 2. mapped in the hwpt (gfn <-> mmio phys addr) so guest can access it.
+ *
+ * This function takes care of the second. The first one is managed by vfio,
+ * and hence is taken care of via vfio_pci_mmap_fault().
+ */
+static long
+mshv_map_user_memory(struct mshv_partition *partition,
+ struct mshv_user_mem_region mem)
+{
+ struct mshv_mem_region *region;
+ struct vm_area_struct *vma;
+ bool is_mmio;
+ ulong mmio_pfn;
+ long ret;
+
+ if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP) ||
+ !access_ok((const void *)mem.userspace_addr, mem.size))
+ return -EINVAL;
+
+ mmap_read_lock(current->mm);
+ vma = vma_lookup(current->mm, mem.userspace_addr);
+ is_mmio = vma ? !!(vma->vm_flags & (VM_IO | VM_PFNMAP)) : 0;
+ mmio_pfn = is_mmio ? vma->vm_pgoff : 0;
+ mmap_read_unlock(current->mm);
+
+ if (!vma)
+ return -EINVAL;
+
+ ret = mshv_partition_create_region(partition, &mem, &region,
+ is_mmio);
+ if (ret)
+ return ret;
+
+ if (is_mmio)
+ ret = hv_call_map_mmio_pages(partition->pt_id, mem.guest_pfn,
+ mmio_pfn, HVPFN_DOWN(mem.size));
+ else
+ ret = mshv_partition_mem_region_map(region);
+
+ if (ret)
+ goto errout;
+
+ /* Install the new region */
+ hlist_add_head(&region->hnode, &partition->pt_mem_regions);
+
+ return 0;
+
+errout:
+ vfree(region);
+ return ret;
+}
+
+/* Called for unmapping both the guest ram and the mmio space */
+static long
+mshv_unmap_user_memory(struct mshv_partition *partition,
+ struct mshv_user_mem_region mem)
+{
+ struct mshv_mem_region *region;
+ u32 unmap_flags = 0;
+
+ if (!(mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP)))
+ return -EINVAL;
+
+ region = mshv_partition_region_by_gfn(partition, mem.guest_pfn);
+ if (!region)
+ return -EINVAL;
+
+ /* Paranoia check */
+ if (region->start_uaddr != mem.userspace_addr ||
+ region->start_gfn != mem.guest_pfn ||
+ region->nr_pages != HVPFN_DOWN(mem.size))
+ return -EINVAL;
+
+ hlist_del(&region->hnode);
+
+ if (region->flags.large_pages)
+ unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
+
+ /* Ignore unmap failures and continue, as the process may be exiting */
+ hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn,
+ region->nr_pages, unmap_flags);
+
+ mshv_region_evict(region);
+
+ vfree(region);
+ return 0;
+}
+
+static long
+mshv_partition_ioctl_set_memory(struct mshv_partition *partition,
+ struct mshv_user_mem_region __user *user_mem)
+{
+ struct mshv_user_mem_region mem;
+
+ if (copy_from_user(&mem, user_mem, sizeof(mem)))
+ return -EFAULT;
+
+ if (!mem.size ||
+ !PAGE_ALIGNED(mem.size) ||
+ !PAGE_ALIGNED(mem.userspace_addr) ||
+ (mem.flags & ~MSHV_SET_MEM_FLAGS_MASK) ||
+ mshv_field_nonzero(mem, rsvd))
+ return -EINVAL;
+
+ if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP))
+ return mshv_unmap_user_memory(partition, mem);
+
+ return mshv_map_user_memory(partition, mem);
+}
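+
+/*
+ * Userspace usage sketch (illustrative values; assumes buf is a page-aligned
+ * mapping of len bytes and partition_fd is an open partition fd):
+ *
+ *	struct mshv_user_mem_region r = {
+ *		.size = len,
+ *		.guest_pfn = gfn,
+ *		.userspace_addr = (__u64)buf,
+ *		.flags = BIT(MSHV_SET_MEM_BIT_WRITABLE),
+ *	};
+ *	ioctl(partition_fd, MSHV_SET_GUEST_MEMORY, &r);
+ */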
+
+static long
+mshv_partition_ioctl_ioeventfd(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_user_ioeventfd args;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ return mshv_set_unset_ioeventfd(partition, &args);
+}
+
+static long
+mshv_partition_ioctl_irqfd(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_user_irqfd args;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ return mshv_set_unset_irqfd(partition, &args);
+}
+
+static long
+mshv_partition_ioctl_get_gpap_access_bitmap(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_gpap_access_bitmap args;
+ union hv_gpa_page_access_state *states;
+ long ret, i;
+ union hv_gpa_page_access_state_flags hv_flags = {};
+ u8 hv_type_mask;
+ ulong bitmap_buf_sz, states_buf_sz;
+ int written = 0;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.access_type >= MSHV_GPAP_ACCESS_TYPE_COUNT ||
+ args.access_op >= MSHV_GPAP_ACCESS_OP_COUNT ||
+ mshv_field_nonzero(args, rsvd) || !args.page_count ||
+ !args.bitmap_ptr)
+ return -EINVAL;
+
+ if (check_mul_overflow(args.page_count, sizeof(*states), &states_buf_sz))
+ return -E2BIG;
+
+ /* Num bytes needed to store bitmap; one bit per page rounded up */
+ bitmap_buf_sz = DIV_ROUND_UP(args.page_count, 8);
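+ /* e.g. page_count == 20 -> bitmap_buf_sz == 3 (24 bits, last 4 unused) */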
+
+ /* Sanity check */
+ if (bitmap_buf_sz > states_buf_sz)
+ return -EBADFD;
+
+ switch (args.access_type) {
+ case MSHV_GPAP_ACCESS_TYPE_ACCESSED:
+ hv_type_mask = 1;
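+ /* mask bit 0 (the accessed bit) of hv_gpa_page_access_state */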
+ if (args.access_op == MSHV_GPAP_ACCESS_OP_CLEAR) {
+ hv_flags.clear_accessed = 1;
+ /* not accessed implies not dirty */
+ hv_flags.clear_dirty = 1;
+ } else { /* MSHV_GPAP_ACCESS_OP_SET */
+ hv_flags.set_accessed = 1;
+ }
+ break;
+ case MSHV_GPAP_ACCESS_TYPE_DIRTY:
+ hv_type_mask = 2;
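+ /* mask bit 1 (the dirty bit) of hv_gpa_page_access_state */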
+ if (args.access_op == MSHV_GPAP_ACCESS_OP_CLEAR) {
+ hv_flags.clear_dirty = 1;
+ } else { /* MSHV_GPAP_ACCESS_OP_SET */
+ hv_flags.set_dirty = 1;
+ /* dirty implies accessed */
+ hv_flags.set_accessed = 1;
+ }
+ break;
+ }
+
+ states = vzalloc(states_buf_sz);
+ if (!states)
+ return -ENOMEM;
+
+ ret = hv_call_get_gpa_access_states(partition->pt_id, args.page_count,
+ args.gpap_base, hv_flags, &written,
+ states);
+ if (ret)
+ goto free_return;
+
+ /*
+ * Overwrite states buffer with bitmap - the bits in hv_type_mask
+ * correspond to bitfields in hv_gpa_page_access_state
+ */
+ for (i = 0; i < written; ++i)
+ __assign_bit(i, (ulong *)states,
+ states[i].as_uint8 & hv_type_mask);
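+ /*
+ * e.g. for MSHV_GPAP_ACCESS_TYPE_ACCESSED (mask 1): a page whose state
+ * byte is 0b11 (accessed and dirty) sets its bit in the bitmap, while
+ * 0b00 clears it.
+ */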
+
+ /* zero the unused bits in the last byte(s) of the returned bitmap */
+ for (i = written; i < bitmap_buf_sz * 8; ++i)
+ __clear_bit(i, (ulong *)states);
+
+ if (copy_to_user((void __user *)args.bitmap_ptr, states, bitmap_buf_sz))
+ ret = -EFAULT;
+
+free_return:
+ vfree(states);
+ return ret;
+}
+
+static long
+mshv_partition_ioctl_set_msi_routing(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_user_irq_entry *entries = NULL;
+ struct mshv_user_irq_table args;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.nr > MSHV_MAX_GUEST_IRQS ||
+ mshv_field_nonzero(args, rsvd))
+ return -EINVAL;
+
+ if (args.nr) {
+ struct mshv_user_irq_table __user *urouting = user_args;
+
+ entries = vmemdup_user(urouting->entries,
+ array_size(sizeof(*entries),
+ args.nr));
+ if (IS_ERR(entries))
+ return PTR_ERR(entries);
+ }
+ ret = mshv_update_routing_table(partition, entries, args.nr);
+ kvfree(entries);
+
+ return ret;
+}
+
+static long
+mshv_partition_ioctl_initialize(struct mshv_partition *partition)
+{
+ long ret;
+
+ if (partition->pt_initialized)
+ return 0;
+
+ ret = hv_call_initialize_partition(partition->pt_id);
+ if (ret)
+ goto withdraw_mem;
+
+ partition->pt_initialized = true;
+
+ return 0;
+
+withdraw_mem:
+ hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id);
+
+ return ret;
+}
+
+static long
+mshv_partition_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct mshv_partition *partition = filp->private_data;
+ long ret;
+ void __user *uarg = (void __user *)arg;
+
+ if (mutex_lock_killable(&partition->pt_mutex))
+ return -EINTR;
+
+ switch (ioctl) {
+ case MSHV_INITIALIZE_PARTITION:
+ ret = mshv_partition_ioctl_initialize(partition);
+ break;
+ case MSHV_SET_GUEST_MEMORY:
+ ret = mshv_partition_ioctl_set_memory(partition, uarg);
+ break;
+ case MSHV_CREATE_VP:
+ ret = mshv_partition_ioctl_create_vp(partition, uarg);
+ break;
+ case MSHV_IRQFD:
+ ret = mshv_partition_ioctl_irqfd(partition, uarg);
+ break;
+ case MSHV_IOEVENTFD:
+ ret = mshv_partition_ioctl_ioeventfd(partition, uarg);
+ break;
+ case MSHV_SET_MSI_ROUTING:
+ ret = mshv_partition_ioctl_set_msi_routing(partition, uarg);
+ break;
+ case MSHV_GET_GPAP_ACCESS_BITMAP:
+ ret = mshv_partition_ioctl_get_gpap_access_bitmap(partition,
+ uarg);
+ break;
+ case MSHV_ROOT_HVCALL:
+ ret = mshv_ioctl_passthru_hvcall(partition, true, uarg);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ mutex_unlock(&partition->pt_mutex);
+ return ret;
+}
+
+static int
+disable_vp_dispatch(struct mshv_vp *vp)
+{
+ int ret;
+ struct hv_register_assoc dispatch_suspend = {
+ .name = HV_REGISTER_DISPATCH_SUSPEND,
+ .value.dispatch_suspend.suspended = 1,
+ };
+
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &dispatch_suspend);
+ if (ret)
+ vp_err(vp, "failed to suspend\n");
+
+ return ret;
+}
+
+static int
+get_vp_signaled_count(struct mshv_vp *vp, u64 *count)
+{
+ int ret;
+ struct hv_register_assoc root_signal_count = {
+ .name = HV_REGISTER_VP_ROOT_SIGNAL_COUNT,
+ };
+
+ ret = mshv_get_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &root_signal_count);
+
+ if (ret) {
+ vp_err(vp, "Failed to get root signal count");
+ *count = 0;
+ return ret;
+ }
+
+ *count = root_signal_count.value.reg64;
+
+ return ret;
+}
+
+static void
+drain_vp_signals(struct mshv_vp *vp)
+{
+ u64 hv_signal_count;
+ u64 vp_signal_count;
+
+ get_vp_signaled_count(vp, &hv_signal_count);
+
+ vp_signal_count = atomic64_read(&vp->run.vp_signaled_count);
+
+ /*
+ * There should be at most 1 outstanding notification, but be extra
+ * careful anyway.
+ */
+ while (hv_signal_count != vp_signal_count) {
+ WARN_ON(hv_signal_count - vp_signal_count != 1);
+
+ if (wait_event_interruptible(vp->run.vp_suspend_queue,
+ vp->run.kicked_by_hv == 1))
+ break;
+ vp->run.kicked_by_hv = 0;
+ vp_signal_count = atomic64_read(&vp->run.vp_signaled_count);
+ }
+}
+
+static void drain_all_vps(const struct mshv_partition *partition)
+{
+ int i;
+ struct mshv_vp *vp;
+
+ /*
+ * VPs are reachable from the ISR. It is safe not to take the partition
+ * lock because nobody else can enter this function and drop the
+ * partition from the list.
+ */
+ for (i = 0; i < MSHV_MAX_VPS; i++) {
+ vp = partition->pt_vp_array[i];
+ if (!vp)
+ continue;
+ /*
+ * Disable dispatching of the VP in the hypervisor. After this
+ * the hypervisor guarantees it won't generate any signals for
+ * the VP and the hypervisor's VP signal count won't change.
+ */
+ disable_vp_dispatch(vp);
+ drain_vp_signals(vp);
+ }
+}
+
+static void
+remove_partition(struct mshv_partition *partition)
+{
+ spin_lock(&mshv_root.pt_ht_lock);
+ hlist_del_rcu(&partition->pt_hnode);
+ spin_unlock(&mshv_root.pt_ht_lock);
+
+ synchronize_rcu();
+}
+
+/*
+ * Tear down a partition and remove it from the list.
+ * Partition's refcount must be 0
+ */
+static void destroy_partition(struct mshv_partition *partition)
+{
+ struct mshv_vp *vp;
+ struct mshv_mem_region *region;
+ int i, ret;
+ struct hlist_node *n;
+
+ if (refcount_read(&partition->pt_ref_count)) {
+ pt_err(partition,
+ "Attempt to destroy partition but refcount > 0\n");
+ return;
+ }
+
+ if (partition->pt_initialized) {
+ /*
+ * We only need to drain signals for the root scheduler. This should be
+ * done before removing the partition from the partition list.
+ */
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
+ drain_all_vps(partition);
+
+ /* Remove vps */
+ for (i = 0; i < MSHV_MAX_VPS; ++i) {
+ vp = partition->pt_vp_array[i];
+ if (!vp)
+ continue;
+
+ if (hv_parent_partition())
+ mshv_vp_stats_unmap(partition->pt_id, vp->vp_index);
+
+ if (vp->vp_register_page) {
+ (void)hv_call_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero);
+ vp->vp_register_page = NULL;
+ }
+
+ (void)hv_call_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero);
+ vp->vp_intercept_msg_page = NULL;
+
+ if (vp->vp_ghcb_page) {
+ (void)hv_call_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal);
+ vp->vp_ghcb_page = NULL;
+ }
+
+ kfree(vp);
+
+ partition->pt_vp_array[i] = NULL;
+ }
+
+ /* Deallocate and unmap everything, including vcpus, GPA mappings, etc. */
+ hv_call_finalize_partition(partition->pt_id);
+
+ partition->pt_initialized = false;
+ }
+
+ remove_partition(partition);
+
+ /* Remove regions, regain access to the memory and unpin the pages */
+ hlist_for_each_entry_safe(region, n, &partition->pt_mem_regions,
+ hnode) {
+ hlist_del(&region->hnode);
+
+ if (mshv_partition_encrypted(partition)) {
+ ret = mshv_partition_region_share(region);
+ if (ret) {
+ pt_err(partition,
+ "Failed to regain access to memory, unpinning user pages will fail and crash the host error: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ mshv_region_evict(region);
+
+ vfree(region);
+ }
+
+ /* Withdraw and free all pages we deposited */
+ hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id);
+ hv_call_delete_partition(partition->pt_id);
+
+ mshv_free_routing_table(partition);
+ kfree(partition);
+}
+
+struct
+mshv_partition *mshv_partition_get(struct mshv_partition *partition)
+{
+ if (refcount_inc_not_zero(&partition->pt_ref_count))
+ return partition;
+ return NULL;
+}
+
+struct
+mshv_partition *mshv_partition_find(u64 partition_id)
+ __must_hold(RCU)
+{
+ struct mshv_partition *p;
+
+ hash_for_each_possible_rcu(mshv_root.pt_htable, p, pt_hnode,
+ partition_id)
+ if (p->pt_id == partition_id)
+ return p;
+
+ return NULL;
+}
+
+void
+mshv_partition_put(struct mshv_partition *partition)
+{
+ if (refcount_dec_and_test(&partition->pt_ref_count))
+ destroy_partition(partition);
+}
+
+static int
+mshv_partition_release(struct inode *inode, struct file *filp)
+{
+ struct mshv_partition *partition = filp->private_data;
+
+ mshv_eventfd_release(partition);
+
+ cleanup_srcu_struct(&partition->pt_irq_srcu);
+
+ mshv_partition_put(partition);
+
+ return 0;
+}
+
+static int
+add_partition(struct mshv_partition *partition)
+{
+ spin_lock(&mshv_root.pt_ht_lock);
+
+ hash_add_rcu(mshv_root.pt_htable, &partition->pt_hnode,
+ partition->pt_id);
+
+ spin_unlock(&mshv_root.pt_ht_lock);
+
+ return 0;
+}
+
+static long
+mshv_ioctl_create_partition(void __user *user_arg, struct device *module_dev)
+{
+ struct mshv_create_partition args;
+ u64 creation_flags;
+ struct hv_partition_creation_properties creation_properties = {};
+ union hv_partition_isolation_properties isolation_properties = {};
+ struct mshv_partition *partition;
+ struct file *file;
+ int fd;
+ long ret;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ if ((args.pt_flags & ~MSHV_PT_FLAGS_MASK) ||
+ args.pt_isolation >= MSHV_PT_ISOLATION_COUNT)
+ return -EINVAL;
+
+ /* Only support EXO partitions */
+ creation_flags = HV_PARTITION_CREATION_FLAG_EXO_PARTITION |
+ HV_PARTITION_CREATION_FLAG_INTERCEPT_MESSAGE_PAGE_ENABLED;
+
+ if (args.pt_flags & BIT(MSHV_PT_BIT_LAPIC))
+ creation_flags |= HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED;
+ if (args.pt_flags & BIT(MSHV_PT_BIT_X2APIC))
+ creation_flags |= HV_PARTITION_CREATION_FLAG_X2APIC_CAPABLE;
+ if (args.pt_flags & BIT(MSHV_PT_BIT_GPA_SUPER_PAGES))
+ creation_flags |= HV_PARTITION_CREATION_FLAG_GPA_SUPER_PAGES_ENABLED;
+
+ switch (args.pt_isolation) {
+ case MSHV_PT_ISOLATION_NONE:
+ isolation_properties.isolation_type =
+ HV_PARTITION_ISOLATION_TYPE_NONE;
+ break;
+ }
+
+ partition = kzalloc(sizeof(*partition), GFP_KERNEL);
+ if (!partition)
+ return -ENOMEM;
+
+ partition->pt_module_dev = module_dev;
+ partition->isolation_type = isolation_properties.isolation_type;
+
+ refcount_set(&partition->pt_ref_count, 1);
+
+ mutex_init(&partition->pt_mutex);
+
+ mutex_init(&partition->pt_irq_lock);
+
+ init_completion(&partition->async_hypercall);
+
+ INIT_HLIST_HEAD(&partition->irq_ack_notifier_list);
+
+ INIT_HLIST_HEAD(&partition->pt_devices);
+
+ INIT_HLIST_HEAD(&partition->pt_mem_regions);
+
+ mshv_eventfd_init(partition);
+
+ ret = init_srcu_struct(&partition->pt_irq_srcu);
+ if (ret)
+ goto free_partition;
+
+ ret = hv_call_create_partition(creation_flags,
+ creation_properties,
+ isolation_properties,
+ &partition->pt_id);
+ if (ret)
+ goto cleanup_irq_srcu;
+
+ ret = add_partition(partition);
+ if (ret)
+ goto delete_partition;
+
+ ret = mshv_init_async_handler(partition);
+ if (ret)
+ goto remove_partition;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto remove_partition;
+ }
+
+ file = anon_inode_getfile("mshv_partition", &mshv_partition_fops,
+ partition, O_RDWR);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto put_fd;
+ }
+
+ fd_install(fd, file);
+
+ return fd;
+
+put_fd:
+ put_unused_fd(fd);
+remove_partition:
+ remove_partition(partition);
+delete_partition:
+ hv_call_delete_partition(partition->pt_id);
+cleanup_irq_srcu:
+ cleanup_srcu_struct(&partition->pt_irq_srcu);
+free_partition:
+ kfree(partition);
+
+ return ret;
+}
+
+static long mshv_dev_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct miscdevice *misc = filp->private_data;
+
+ switch (ioctl) {
+ case MSHV_CREATE_PARTITION:
+ return mshv_ioctl_create_partition((void __user *)arg,
+ misc->this_device);
+ }
+
+ return -ENOTTY;
+}
+
+static int
+mshv_dev_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int
+mshv_dev_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int mshv_cpuhp_online;
+static int mshv_root_sched_online;
+
+static const char *scheduler_type_to_string(enum hv_scheduler_type type)
+{
+ switch (type) {
+ case HV_SCHEDULER_TYPE_LP:
+ return "classic scheduler without SMT";
+ case HV_SCHEDULER_TYPE_LP_SMT:
+ return "classic scheduler with SMT";
+ case HV_SCHEDULER_TYPE_CORE_SMT:
+ return "core scheduler";
+ case HV_SCHEDULER_TYPE_ROOT:
+ return "root scheduler";
+ default:
+ return "unknown scheduler";
+ }
+}
+
+/* TODO move this to hv_common.c when needed outside */
+static int __init hv_retrieve_scheduler_type(enum hv_scheduler_type *out)
+{
+ struct hv_input_get_system_property *input;
+ struct hv_output_get_system_property *output;
+ unsigned long flags;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ memset(output, 0, sizeof(*output));
+ input->property_id = HV_SYSTEM_PROPERTY_SCHEDULER_TYPE;
+
+ status = hv_do_hypercall(HVCALL_GET_SYSTEM_PROPERTY, input, output);
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ pr_err("%s: %s\n", __func__, hv_result_to_string(status));
+ return hv_result_to_errno(status);
+ }
+
+ *out = output->scheduler_type;
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/* Retrieve and stash the supported scheduler type */
+static int __init mshv_retrieve_scheduler_type(struct device *dev)
+{
+ int ret;
+
+ ret = hv_retrieve_scheduler_type(&hv_scheduler_type);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Hypervisor using %s\n",
+ scheduler_type_to_string(hv_scheduler_type));
+
+ switch (hv_scheduler_type) {
+ case HV_SCHEDULER_TYPE_CORE_SMT:
+ case HV_SCHEDULER_TYPE_LP_SMT:
+ case HV_SCHEDULER_TYPE_ROOT:
+ case HV_SCHEDULER_TYPE_LP:
+ /* Supported scheduler, nothing to do */
+ break;
+ default:
+ dev_err(dev, "unsupported scheduler 0x%x, bailing.\n",
+ hv_scheduler_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mshv_root_scheduler_init(unsigned int cpu)
+{
+ void **inputarg, **outputarg, *p;
+
+ inputarg = (void **)this_cpu_ptr(root_scheduler_input);
+ outputarg = (void **)this_cpu_ptr(root_scheduler_output);
+
+ /* Allocate two consecutive pages. One for input, one for output. */
+ p = kmalloc(2 * HV_HYP_PAGE_SIZE, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ *inputarg = p;
+ *outputarg = (char *)p + HV_HYP_PAGE_SIZE;
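+ /* first hyp page holds hypercall input, the second holds output */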
+
+ return 0;
+}
+
+static int mshv_root_scheduler_cleanup(unsigned int cpu)
+{
+ void *p, **inputarg, **outputarg;
+
+ inputarg = (void **)this_cpu_ptr(root_scheduler_input);
+ outputarg = (void **)this_cpu_ptr(root_scheduler_output);
+
+ p = *inputarg;
+
+ *inputarg = NULL;
+ *outputarg = NULL;
+
+ kfree(p);
+
+ return 0;
+}
+
+/* Must be called after retrieving the scheduler type */
+static int
+root_scheduler_init(struct device *dev)
+{
+ int ret;
+
+ if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
+ return 0;
+
+ root_scheduler_input = alloc_percpu(void *);
+ root_scheduler_output = alloc_percpu(void *);
+
+ if (!root_scheduler_input || !root_scheduler_output) {
+ dev_err(dev, "Failed to allocate root scheduler buffers\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_root_sched",
+ mshv_root_scheduler_init,
+ mshv_root_scheduler_cleanup);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to setup root scheduler state: %i\n", ret);
+ goto out;
+ }
+
+ mshv_root_sched_online = ret;
+
+ return 0;
+
+out:
+ free_percpu(root_scheduler_input);
+ free_percpu(root_scheduler_output);
+ return ret;
+}
+
+static void
+root_scheduler_deinit(void)
+{
+ if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
+ return;
+
+ cpuhp_remove_state(mshv_root_sched_online);
+ free_percpu(root_scheduler_input);
+ free_percpu(root_scheduler_output);
+}
+
+static int mshv_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ cpuhp_remove_state(mshv_cpuhp_online);
+ return 0;
+}
+
+struct notifier_block mshv_reboot_nb = {
+ .notifier_call = mshv_reboot_notify,
+};
+
+static void mshv_root_partition_exit(void)
+{
+ unregister_reboot_notifier(&mshv_reboot_nb);
+ root_scheduler_deinit();
+}
+
+static int __init mshv_root_partition_init(struct device *dev)
+{
+ int err;
+
+ if (mshv_retrieve_scheduler_type(dev))
+ return -ENODEV;
+
+ err = root_scheduler_init(dev);
+ if (err)
+ return err;
+
+ err = register_reboot_notifier(&mshv_reboot_nb);
+ if (err)
+ goto root_sched_deinit;
+
+ return 0;
+
+root_sched_deinit:
+ root_scheduler_deinit();
+ return err;
+}
+
+static int __init mshv_parent_partition_init(void)
+{
+ int ret;
+ struct device *dev;
+ union hv_hypervisor_version_info version_info;
+
+ if (!hv_root_partition() || is_kdump_kernel())
+ return -ENODEV;
+
+ if (hv_get_hypervisor_version(&version_info))
+ return -ENODEV;
+
+ ret = misc_register(&mshv_dev);
+ if (ret)
+ return ret;
+
+ dev = mshv_dev.this_device;
+
+ if (version_info.build_number < MSHV_HV_MIN_VERSION ||
+ version_info.build_number > MSHV_HV_MAX_VERSION) {
+ dev_err(dev, "Running on unvalidated Hyper-V version\n");
+ dev_err(dev, "Versions: current: %u min: %u max: %u\n",
+ version_info.build_number, MSHV_HV_MIN_VERSION,
+ MSHV_HV_MAX_VERSION);
+ }
+
+ mshv_root.synic_pages = alloc_percpu(struct hv_synic_pages);
+ if (!mshv_root.synic_pages) {
+ dev_err(dev, "Failed to allocate percpu synic page\n");
+ ret = -ENOMEM;
+ goto device_deregister;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_synic",
+ mshv_synic_init,
+ mshv_synic_cleanup);
+ if (ret < 0) {
+ dev_err(dev, "Failed to setup cpu hotplug state: %i\n", ret);
+ goto free_synic_pages;
+ }
+
+ mshv_cpuhp_online = ret;
+
+ ret = mshv_root_partition_init(dev);
+ if (ret)
+ goto remove_cpu_state;
+
+ ret = mshv_irqfd_wq_init();
+ if (ret)
+ goto exit_partition;
+
+ spin_lock_init(&mshv_root.pt_ht_lock);
+ hash_init(mshv_root.pt_htable);
+
+ hv_setup_mshv_handler(mshv_isr);
+
+ return 0;
+
+exit_partition:
+ if (hv_root_partition())
+ mshv_root_partition_exit();
+remove_cpu_state:
+ cpuhp_remove_state(mshv_cpuhp_online);
+free_synic_pages:
+ free_percpu(mshv_root.synic_pages);
+device_deregister:
+ misc_deregister(&mshv_dev);
+ return ret;
+}
+
+static void __exit mshv_parent_partition_exit(void)
+{
+ hv_setup_mshv_handler(NULL);
+ mshv_port_table_fini();
+ misc_deregister(&mshv_dev);
+ mshv_irqfd_wq_cleanup();
+ if (hv_root_partition())
+ mshv_root_partition_exit();
+ cpuhp_remove_state(mshv_cpuhp_online);
+ free_percpu(mshv_root.synic_pages);
+}
+
+module_init(mshv_parent_partition_init);
+module_exit(mshv_parent_partition_exit);
diff --git a/drivers/hv/mshv_synic.c b/drivers/hv/mshv_synic.c
new file mode 100644
index 000000000000..e6b6381b7c36
--- /dev/null
+++ b/drivers/hv/mshv_synic.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * mshv_root module's main interrupt handler and associated functionality.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/random.h>
+#include <asm/mshyperv.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+
+static u32 synic_event_ring_get_queued_port(u32 sint_index)
+{
+ struct hv_synic_event_ring_page **event_ring_page;
+ volatile struct hv_synic_event_ring *ring;
+ struct hv_synic_pages *spages;
+ u8 **synic_eventring_tail;
+ u32 message;
+ u8 tail;
+
+ spages = this_cpu_ptr(mshv_root.synic_pages);
+ event_ring_page = &spages->synic_event_ring_page;
+ synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
+
+ if (unlikely(!*synic_eventring_tail)) {
+ pr_debug("Missing synic event ring tail!\n");
+ return 0;
+ }
+ tail = (*synic_eventring_tail)[sint_index];
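+ /* tail indexes the next unread slot in this SINT's event ring */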
+
+ if (unlikely(!*event_ring_page)) {
+ pr_debug("Missing synic event ring page!\n");
+ return 0;
+ }
+
+ ring = &(*event_ring_page)->sint_event_ring[sint_index];
+
+ /*
+ * Get the message.
+ */
+ message = ring->data[tail];
+
+ if (!message) {
+ if (ring->ring_full) {
+ /*
+ * The ring is marked full, but we have already consumed
+ * all the messages. Notify the hypervisor that the ring
+ * is now empty and check again.
+ */
+ ring->ring_full = 0;
+ hv_call_notify_port_ring_empty(sint_index);
+ message = ring->data[tail];
+ }
+
+ if (!message) {
+ ring->signal_masked = 0;
+ /*
+ * Unmask the signal and sync with hypervisor
+ * before one last check for any message.
+ */
+ mb();
+ message = ring->data[tail];
+
+ /* Still no message; bail out. */
+ if (!message)
+ return 0;
+ }
+
+ ring->signal_masked = 1;
+ }
+
+ /*
+ * Clear the message in the ring buffer.
+ */
+ ring->data[tail] = 0;
+
+ if (++tail == HV_SYNIC_EVENT_RING_MESSAGE_COUNT)
+ tail = 0;
+
+ (*synic_eventring_tail)[sint_index] = tail;
+
+ return message;
+}
+
+static bool
+mshv_doorbell_isr(struct hv_message *msg)
+{
+ struct hv_notification_message_payload *notification;
+ u32 port;
+
+ if (msg->header.message_type != HVMSG_SYNIC_SINT_INTERCEPT)
+ return false;
+
+ notification = (struct hv_notification_message_payload *)msg->u.payload;
+ if (notification->sint_index != HV_SYNIC_DOORBELL_SINT_INDEX)
+ return false;
+
+ while ((port = synic_event_ring_get_queued_port(HV_SYNIC_DOORBELL_SINT_INDEX))) {
+ struct port_table_info ptinfo = { 0 };
+
+ if (mshv_portid_lookup(port, &ptinfo)) {
+ pr_debug("Failed to get port info from port_table!\n");
+ continue;
+ }
+
+ if (ptinfo.hv_port_type != HV_PORT_TYPE_DOORBELL) {
+ pr_debug("Not a doorbell port!, port: %d, port_type: %d\n",
+ port, ptinfo.hv_port_type);
+ continue;
+ }
+
+ /* Invoke the callback */
+ ptinfo.hv_port_doorbell.doorbell_cb(port,
+ ptinfo.hv_port_doorbell.data);
+ }
+
+ return true;
+}
+
+static bool mshv_async_call_completion_isr(struct hv_message *msg)
+{
+ bool handled = false;
+ struct hv_async_completion_message_payload *async_msg;
+ struct mshv_partition *partition;
+ u64 partition_id;
+
+ if (msg->header.message_type != HVMSG_ASYNC_CALL_COMPLETION)
+ goto out;
+
+ async_msg =
+ (struct hv_async_completion_message_payload *)msg->u.payload;
+
+ partition_id = async_msg->partition_id;
+
+ /*
+ * Hold this lock for the rest of the ISR, because the partition could
+ * be released at any time; e.g. the MSHV_RUN_VP thread could wake on
+ * another CPU and release the partition unless we hold this.
+ */
+ rcu_read_lock();
+
+ partition = mshv_partition_find(partition_id);
+
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n", partition_id);
+ goto unlock_out;
+ }
+
+ partition->async_hypercall_status = async_msg->status;
+ complete(&partition->async_hypercall);
+
+ handled = true;
+
+unlock_out:
+ rcu_read_unlock();
+out:
+ return handled;
+}
+
+static void kick_vp(struct mshv_vp *vp)
+{
+ atomic64_inc(&vp->run.vp_signaled_count);
+ vp->run.kicked_by_hv = 1;
+ wake_up(&vp->run.vp_suspend_queue);
+}
+
+static void
+handle_bitset_message(const struct hv_vp_signal_bitset_scheduler_message *msg)
+{
+ int bank_idx, vps_signaled = 0, bank_mask_size;
+ struct mshv_partition *partition;
+ const struct hv_vpset *vpset;
+ const u64 *bank_contents;
+ u64 partition_id = msg->partition_id;
+
+ if (msg->vp_bitset.bitset.format != HV_GENERIC_SET_SPARSE_4K) {
+ pr_debug("scheduler message format is not HV_GENERIC_SET_SPARSE_4K");
+ return;
+ }
+
+ if (msg->vp_count == 0) {
+ pr_debug("scheduler message with no VP specified");
+ return;
+ }
+
+ rcu_read_lock();
+
+ partition = mshv_partition_find(partition_id);
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n", partition_id);
+ goto unlock_out;
+ }
+
+ vpset = &msg->vp_bitset.bitset;
+
+ bank_idx = -1;
+ bank_contents = vpset->bank_contents;
+ bank_mask_size = sizeof(vpset->valid_bank_mask) * BITS_PER_BYTE;
+
+ while (true) {
+ int vp_bank_idx = -1;
+ int vp_bank_size = sizeof(*bank_contents) * BITS_PER_BYTE;
+ int vp_index;
+
+ bank_idx = find_next_bit((unsigned long *)&vpset->valid_bank_mask,
+ bank_mask_size, bank_idx + 1);
+ if (bank_idx == bank_mask_size)
+ break;
+
+ while (true) {
+ struct mshv_vp *vp;
+
+ vp_bank_idx = find_next_bit((unsigned long *)bank_contents,
+ vp_bank_size, vp_bank_idx + 1);
+ if (vp_bank_idx == vp_bank_size)
+ break;
+
+ vp_index = (bank_idx * vp_bank_size) + vp_bank_idx;
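+ /* e.g. with 64-bit banks: bank_idx 1, vp_bank_idx 3 -> vp_index 67 */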
+
+ /* This shouldn't happen, but just in case. */
+ if (unlikely(vp_index >= MSHV_MAX_VPS)) {
+ pr_debug("VP index %u out of bounds\n",
+ vp_index);
+ goto unlock_out;
+ }
+
+ vp = partition->pt_vp_array[vp_index];
+ if (unlikely(!vp)) {
+ pr_debug("failed to find VP %u\n", vp_index);
+ goto unlock_out;
+ }
+
+ kick_vp(vp);
+ vps_signaled++;
+ }
+
+ bank_contents++;
+ }
+
+unlock_out:
+ rcu_read_unlock();
+
+ if (vps_signaled != msg->vp_count)
+ pr_debug("asked to signal %u VPs but only did %u\n",
+ msg->vp_count, vps_signaled);
+}
+
+static void
+handle_pair_message(const struct hv_vp_signal_pair_scheduler_message *msg)
+{
+ struct mshv_partition *partition = NULL;
+ struct mshv_vp *vp;
+ int idx;
+
+ rcu_read_lock();
+
+ for (idx = 0; idx < msg->vp_count; idx++) {
+ u64 partition_id = msg->partition_ids[idx];
+ u32 vp_index = msg->vp_indexes[idx];
+
+ if (idx == 0 || partition->pt_id != partition_id) {
+ partition = mshv_partition_find(partition_id);
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n",
+ partition_id);
+ break;
+ }
+ }
+
+ /* This shouldn't happen, but just in case. */
+ if (unlikely(vp_index >= MSHV_MAX_VPS)) {
+ pr_debug("VP index %u out of bounds\n", vp_index);
+ break;
+ }
+
+ vp = partition->pt_vp_array[vp_index];
+ if (!vp) {
+ pr_debug("failed to find VP %u\n", vp_index);
+ break;
+ }
+
+ kick_vp(vp);
+ }
+
+ rcu_read_unlock();
+}
+
+static bool
+mshv_scheduler_isr(struct hv_message *msg)
+{
+ if (msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_BITSET &&
+ msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_PAIR)
+ return false;
+
+ if (msg->header.message_type == HVMSG_SCHEDULER_VP_SIGNAL_BITSET)
+ handle_bitset_message((struct hv_vp_signal_bitset_scheduler_message *)
+ msg->u.payload);
+ else
+ handle_pair_message((struct hv_vp_signal_pair_scheduler_message *)
+ msg->u.payload);
+
+ return true;
+}
+
+static bool
+mshv_intercept_isr(struct hv_message *msg)
+{
+ struct mshv_partition *partition;
+ bool handled = false;
+ struct mshv_vp *vp;
+ u64 partition_id;
+ u32 vp_index;
+
+ partition_id = msg->header.sender;
+
+ rcu_read_lock();
+
+ partition = mshv_partition_find(partition_id);
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n",
+ partition_id);
+ goto unlock_out;
+ }
+
+ if (msg->header.message_type == HVMSG_X64_APIC_EOI) {
+ /*
+ * Check if this gsi is registered in the ack_notifier list and
+ * invoke the callback if registered. If there is a notifier, the
+ * ack callback is supposed to handle the VMEXIT, so we need not
+ * pass this message to the vcpu thread.
+ */
+ struct hv_x64_apic_eoi_message *eoi_msg =
+ (struct hv_x64_apic_eoi_message *)&msg->u.payload[0];
+
+ if (mshv_notify_acked_gsi(partition, eoi_msg->interrupt_vector)) {
+ handled = true;
+ goto unlock_out;
+ }
+ }
+
+ /*
+ * We should get an opaque intercept message here for all intercept
+ * messages, since we're using the mapped VP intercept message page.
+ *
+ * The intercept message will have been placed in intercept message
+ * page at this point.
+ *
+ * Make sure the message type matches our expectation.
+ */
+ if (msg->header.message_type != HVMSG_OPAQUE_INTERCEPT) {
+ pr_debug("wrong message type %d", msg->header.message_type);
+ goto unlock_out;
+ }
+
+ /*
+ * Since we directly index the vp, and it has to exist for us to be here
+ * (because the vp is only deleted when the partition is), no additional
+ * locking is needed here.
+ */
+ vp_index =
+ ((struct hv_opaque_intercept_message *)msg->u.payload)->vp_index;
+ vp = partition->pt_vp_array[vp_index];
+ if (unlikely(!vp)) {
+ pr_debug("failed to find VP %u\n", vp_index);
+ goto unlock_out;
+ }
+
+ kick_vp(vp);
+
+ handled = true;
+
+unlock_out:
+ rcu_read_unlock();
+
+ return handled;
+}
+
+void mshv_isr(void)
+{
+ struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
+ struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_message *msg;
+ bool handled;
+
+ if (unlikely(!(*msg_page))) {
+ pr_debug("Missing synic page!\n");
+ return;
+ }
+
+ msg = &((*msg_page)->sint_message[HV_SYNIC_INTERCEPTION_SINT_INDEX]);
+
+ /*
+ * If the type isn't set, there isn't really a message;
+ * it may be some other Hyper-V interrupt.
+ */
+ if (msg->header.message_type == HVMSG_NONE)
+ return;
+
+ handled = mshv_doorbell_isr(msg);
+
+ if (!handled)
+ handled = mshv_scheduler_isr(msg);
+
+ if (!handled)
+ handled = mshv_async_call_completion_isr(msg);
+
+ if (!handled)
+ handled = mshv_intercept_isr(msg);
+
+ if (handled) {
+ /*
+ * Acknowledge message with hypervisor if another message is
+ * pending.
+ */
+ msg->header.message_type = HVMSG_NONE;
+ /*
+ * Ensure the write is complete so the hypervisor will deliver
+ * the next message if available.
+ */
+ mb();
+ if (msg->header.message_flags.msg_pending)
+ hv_set_non_nested_msr(HV_MSR_EOM, 0);
+
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR);
+#endif
+ } else {
+ pr_warn_once("%s: unknown message type 0x%x\n", __func__,
+ msg->header.message_type);
+ }
+}
+
+int mshv_synic_init(unsigned int cpu)
+{
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ union hv_synic_sirbp sirbp;
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ union hv_synic_sint sint;
+#endif
+ union hv_synic_scontrol sctrl;
+ struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
+ struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_synic_event_flags_page **event_flags_page =
+ &spages->synic_event_flags_page;
+ struct hv_synic_event_ring_page **event_ring_page =
+ &spages->synic_event_ring_page;
+
+ /* Setup the Synic's message page */
+ simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
+ simp.simp_enabled = true;
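+ /* base_simp_gpa is a page frame number; shift it to get the GPA to remap */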
+ *msg_page = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
+ HV_HYP_PAGE_SIZE,
+ MEMREMAP_WB);
+
+ if (!(*msg_page))
+ return -EFAULT;
+
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+
+ /* Setup the Synic's event flags page */
+ siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
+ siefp.siefp_enabled = true;
+ *event_flags_page = memremap(siefp.base_siefp_gpa << PAGE_SHIFT,
+ PAGE_SIZE, MEMREMAP_WB);
+
+ if (!(*event_flags_page))
+ goto cleanup;
+
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+
+ /* Setup the Synic's event ring page */
+ sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
+ sirbp.sirbp_enabled = true;
+ *event_ring_page = memremap(sirbp.base_sirbp_gpa << PAGE_SHIFT,
+ PAGE_SIZE, MEMREMAP_WB);
+
+ if (!(*event_ring_page))
+ goto cleanup;
+
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ /* Enable intercepts */
+ sint.as_uint64 = 0;
+ sint.vector = HYPERVISOR_CALLBACK_VECTOR;
+ sint.masked = false;
+ sint.auto_eoi = hv_recommend_using_aeoi();
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
+ sint.as_uint64);
+
+ /* Doorbell SINT */
+ sint.as_uint64 = 0;
+ sint.vector = HYPERVISOR_CALLBACK_VECTOR;
+ sint.masked = false;
+ sint.as_intercept = 1;
+ sint.auto_eoi = hv_recommend_using_aeoi();
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
+ sint.as_uint64);
+#endif
+
+ /* Enable global synic bit */
+ sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
+ sctrl.enable = 1;
+ hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+
+ return 0;
+
+cleanup:
+ if (*event_ring_page) {
+ sirbp.sirbp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+ memunmap(*event_ring_page);
+ }
+ if (*event_flags_page) {
+ siefp.siefp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ memunmap(*event_flags_page);
+ }
+ if (*msg_page) {
+ simp.simp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+ memunmap(*msg_page);
+ }
+
+ return -EFAULT;
+}
+
+int mshv_synic_cleanup(unsigned int cpu)
+{
+ union hv_synic_sint sint;
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ union hv_synic_sirbp sirbp;
+ union hv_synic_scontrol sctrl;
+ struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
+ struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_synic_event_flags_page **event_flags_page =
+ &spages->synic_event_flags_page;
+ struct hv_synic_event_ring_page **event_ring_page =
+ &spages->synic_event_ring_page;
+
+ /* Disable the interrupt */
+ sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX);
+ sint.masked = true;
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
+ sint.as_uint64);
+
+ /* Disable Doorbell SINT */
+ sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX);
+ sint.masked = true;
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
+ sint.as_uint64);
+
+ /* Disable Synic's event ring page */
+ sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
+ sirbp.sirbp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+ memunmap(*event_ring_page);
+
+ /* Disable Synic's event flags page */
+ siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
+ siefp.siefp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ memunmap(*event_flags_page);
+
+ /* Disable Synic's message page */
+ simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
+ simp.simp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+ memunmap(*msg_page);
+
+ /* Disable global synic bit */
+ sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
+ sctrl.enable = 0;
+ hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+
+ return 0;
+}
+
+int
+mshv_register_doorbell(u64 partition_id, doorbell_cb_t doorbell_cb, void *data,
+ u64 gpa, u64 val, u64 flags)
+{
+ struct hv_connection_info connection_info = { 0 };
+ union hv_connection_id connection_id = { 0 };
+ struct port_table_info *port_table_info;
+ struct hv_port_info port_info = { 0 };
+ union hv_port_id port_id = { 0 };
+ int ret;
+
+ port_table_info = kmalloc(sizeof(*port_table_info), GFP_KERNEL);
+ if (!port_table_info)
+ return -ENOMEM;
+
+ port_table_info->hv_port_type = HV_PORT_TYPE_DOORBELL;
+ port_table_info->hv_port_doorbell.doorbell_cb = doorbell_cb;
+ port_table_info->hv_port_doorbell.data = data;
+ ret = mshv_portid_alloc(port_table_info);
+ if (ret < 0) {
+ kfree(port_table_info);
+ return ret;
+ }
+
+ port_id.u.id = ret;
+ port_info.port_type = HV_PORT_TYPE_DOORBELL;
+ port_info.doorbell_port_info.target_sint = HV_SYNIC_DOORBELL_SINT_INDEX;
+ port_info.doorbell_port_info.target_vp = HV_ANY_VP;
+ ret = hv_call_create_port(hv_current_partition_id, port_id, partition_id,
+ &port_info,
+ 0, 0, NUMA_NO_NODE);
+
+ if (ret < 0) {
+ mshv_portid_free(port_id.u.id);
+ return ret;
+ }
+
+ connection_id.u.id = port_id.u.id;
+ connection_info.port_type = HV_PORT_TYPE_DOORBELL;
+ connection_info.doorbell_connection_info.gpa = gpa;
+ connection_info.doorbell_connection_info.trigger_value = val;
+ connection_info.doorbell_connection_info.flags = flags;
+
+ ret = hv_call_connect_port(hv_current_partition_id, port_id, partition_id,
+ connection_id, &connection_info, 0, NUMA_NO_NODE);
+ if (ret < 0) {
+ hv_call_delete_port(hv_current_partition_id, port_id);
+ mshv_portid_free(port_id.u.id);
+ return ret;
+ }
+
+ /* Use the port_id as the doorbell_id */
+ return port_id.u.id;
+}
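+
+/*
+ * Usage sketch (names illustrative, not from this patch): a caller binds a
+ * callback to a guest GPA and later tears it down, e.g.:
+ *
+ *	id = mshv_register_doorbell(pt_id, my_cb, my_data, gpa, val, flags);
+ *	if (id < 0)
+ *		return id;
+ *	...
+ *	mshv_unregister_doorbell(pt_id, id);
+ */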
+
+void
+mshv_unregister_doorbell(u64 partition_id, int doorbell_portid)
+{
+ union hv_port_id port_id = { 0 };
+ union hv_connection_id connection_id = { 0 };
+
+ connection_id.u.id = doorbell_portid;
+ hv_call_disconnect_port(partition_id, connection_id);
+
+ port_id.u.id = doorbell_portid;
+ hv_call_delete_port(hv_current_partition_id, port_id);
+
+ mshv_portid_free(doorbell_portid);
+}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0f6cd44fff29..8d3cff42bdbb 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1611,16 +1611,16 @@ static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
-static ssize_t target_cpu_store(struct vmbus_channel *channel,
- const char *buf, size_t count)
+
+int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu)
{
- u32 target_cpu, origin_cpu;
- ssize_t ret = count;
+ u32 origin_cpu;
+ int ret = 0;
- if (vmbus_proto_version < VERSION_WIN10_V4_1)
- return -EIO;
+ lockdep_assert_cpus_held();
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
- if (sscanf(buf, "%uu", &target_cpu) != 1)
+ if (vmbus_proto_version < VERSION_WIN10_V4_1)
return -EIO;
/* Validate target_cpu for the cpumask_test_cpu() operation below. */
@@ -1630,22 +1630,17 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
return -EINVAL;
- /* No CPUs should come up or down during this. */
- cpus_read_lock();
-
- if (!cpu_online(target_cpu)) {
- cpus_read_unlock();
+ if (!cpu_online(target_cpu))
return -EINVAL;
- }
/*
- * Synchronizes target_cpu_store() and channel closure:
+ * Synchronizes vmbus_channel_set_cpu() and channel closure:
*
* { Initially: state = CHANNEL_OPENED }
*
* CPU1 CPU2
*
- * [target_cpu_store()] [vmbus_disconnect_ring()]
+ * [vmbus_channel_set_cpu()] [vmbus_disconnect_ring()]
*
* LOCK channel_mutex LOCK channel_mutex
* LOAD r1 = state LOAD r2 = state
@@ -1660,7 +1655,6 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
* Note. The host processes the channel messages "sequentially", in
* the order in which they are received on a per-partition basis.
*/
- mutex_lock(&vmbus_connection.channel_mutex);
/*
* Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
@@ -1668,17 +1662,17 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
*/
if (channel->state != CHANNEL_OPENED_STATE) {
ret = -EIO;
- goto cpu_store_unlock;
+ goto end;
}
origin_cpu = channel->target_cpu;
if (target_cpu == origin_cpu)
- goto cpu_store_unlock;
+ goto end;
if (vmbus_send_modifychannel(channel,
hv_cpu_number_to_vp_number(target_cpu))) {
ret = -EIO;
- goto cpu_store_unlock;
+ goto end;
}
/*
@@ -1708,10 +1702,26 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
origin_cpu, target_cpu);
}
-cpu_store_unlock:
+end:
+ return ret;
+}
+
+static ssize_t target_cpu_store(struct vmbus_channel *channel,
+ const char *buf, size_t count)
+{
+ u32 target_cpu;
+ ssize_t ret;
+
+ if (sscanf(buf, "%uu", &target_cpu) != 1)
+ return -EIO;
+
+ cpus_read_lock();
+ mutex_lock(&vmbus_connection.channel_mutex);
+ ret = vmbus_channel_set_cpu(channel, target_cpu);
mutex_unlock(&vmbus_connection.channel_mutex);
cpus_read_unlock();
- return ret;
+
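+	/* sysfs store: return the error if any, otherwise the byte count */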
+ return ret ?: count;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
@@ -2262,12 +2272,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
struct resource *iter;
mutex_lock(&hyperv_mmio_lock);
+
+ /*
+ * If all bytes of the MMIO range to be released are within the
+ * special case fb_mmio shadow region, skip releasing the shadow
+ * region since no corresponding __request_region() was done
+ * in vmbus_allocate_mmio().
+ */
+ if (fb_mmio && start >= fb_mmio->start &&
+ (start + size - 1 <= fb_mmio->end))
+ goto skip_shadow_release;
+
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= start + size) || (iter->end <= start))
continue;
__release_region(iter, start, size);
}
+
+skip_shadow_release:
release_mem_region(start, size);
mutex_unlock(&hyperv_mmio_lock);
@@ -2646,7 +2669,7 @@ static int __init hv_acpi_init(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
- if (hv_root_partition && !hv_nested)
+ if (hv_root_partition() && !hv_nested)
return 0;
/*
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index fa3351351825..79bc67ffb998 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -273,8 +273,8 @@ static const s8 NCT6776_BEEP_BITS[NUM_BEEP_BITS] = {
static const u16 NCT6776_REG_TOLERANCE_H[] = {
0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c };
-static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 };
-static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = {
0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 2c1a60577728..3d98e3371fff 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -1216,7 +1216,7 @@ static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
* recorded value for 'drvdata->ccitmin' to workaround
* this problem.
*/
- if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) {
+ if (is_midr_in_range_list(etm_wrong_ccitmin_cpus)) {
if (drvdata->ccitmin == 256)
drvdata->ccitmin = 4;
}
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index e9496fe97baa..495eb1dc8ac5 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -81,10 +81,8 @@ static int stm_heartbeat_init(void)
stm_heartbeat[i].data.type = STM_USER;
stm_heartbeat[i].data.link = stm_heartbeat_link;
stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
- hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- stm_heartbeat[i].hrtimer.function =
- stm_heartbeat_hrtimer_handler;
+ hrtimer_setup(&stm_heartbeat[i].hrtimer, stm_heartbeat_hrtimer_handler,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
if (ret)
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 544c94e86b89..1eac35838040 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -485,6 +485,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids);
static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (ali1535_setup(dev)) {
dev_warn(&dev->dev,
"ALI1535 not detected, module not inserted.\n");
@@ -496,7 +498,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name),
"SMBus ALI1535 adapter at %04x", ali1535_offset);
- return i2c_add_adapter(&ali1535_adapter);
+ ret = i2c_add_adapter(&ali1535_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
+ return ret;
}
static void ali1535_remove(struct pci_dev *dev)
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 4761c7208102..418d11266671 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -472,6 +472,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids);
static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (ali15x3_setup(dev)) {
dev_err(&dev->dev,
"ALI15X3 not detected, module not inserted.\n");
@@ -483,7 +485,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name),
"SMBus ALI15X3 adapter at %04x", ali15x3_smba);
- return i2c_add_adapter(&ali15x3_adapter);
+ ret = i2c_add_adapter(&ali15x3_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
+ return ret;
}
static void ali15x3_remove(struct pci_dev *dev)
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index 143165300949..ef7370d3dbea 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -327,13 +327,11 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
amd_mp2_irq_isr, irq_flag, dev_name(&pci_dev->dev), privdata);
if (rc) {
pci_err(pci_dev, "Failure requesting irq %i: %d\n", privdata->dev_irq, rc);
- goto free_irq_vectors;
+ goto err_dma_mask;
}
return rc;
-free_irq_vectors:
- free_irq(privdata->dev_irq, privdata);
err_dma_mask:
pci_clear_master(pci_dev);
err_pci_enable:
@@ -376,7 +374,6 @@ static void amd_mp2_pci_remove(struct pci_dev *pci_dev)
pm_runtime_forbid(&pci_dev->dev);
pm_runtime_get_noresume(&pci_dev->dev);
- free_irq(privdata->dev_irq, privdata);
pci_clear_master(pci_dev);
amd_mp2_clear_reg(privdata);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index ee0d25b498cb..9e5d454d8318 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1723,8 +1723,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&i2c_imx->slave_lock);
- hrtimer_init(&i2c_imx->slave_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- i2c_imx->slave_timer.function = i2c_imx_slave_timeout;
+ hrtimer_setup(&i2c_imx->slave_timer, i2c_imx_slave_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
match = device_get_match_data(&pdev->dev);
if (match)
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 92faf03d64cf..f18c3e74b076 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1048,23 +1048,6 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes,
return 0;
}
-static irqreturn_t
-omap_i2c_isr(int irq, void *dev_id)
-{
- struct omap_i2c_dev *omap = dev_id;
- irqreturn_t ret = IRQ_HANDLED;
- u16 mask;
- u16 stat;
-
- stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
-
- if (stat & mask)
- ret = IRQ_WAKE_THREAD;
-
- return ret;
-}
-
static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
{
u16 bits;
@@ -1095,8 +1078,13 @@ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
}
if (stat & OMAP_I2C_STAT_NACK) {
- err |= OMAP_I2C_STAT_NACK;
+ omap->cmd_err |= OMAP_I2C_STAT_NACK;
omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
+
+ if (!(stat & ~OMAP_I2C_STAT_NACK)) {
+ err = -EAGAIN;
+ break;
+ }
}
if (stat & OMAP_I2C_STAT_AL) {
@@ -1472,7 +1460,7 @@ omap_i2c_probe(struct platform_device *pdev)
IRQF_NO_SUSPEND, pdev->name, omap);
else
r = devm_request_threaded_irq(&pdev->dev, omap->irq,
- omap_i2c_isr, omap_i2c_isr_thread,
+ NULL, omap_i2c_isr_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
pdev->name, omap);
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 3505cf29cedd..a19c3d251804 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids);
static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (sis630_setup(dev)) {
dev_err(&dev->dev,
"SIS630 compatible bus not detected, "
@@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
"SMBus SIS630 adapter at %04x", smbus_base + SMB_STS);
- return i2c_add_adapter(&sis630_adapter);
+ ret = i2c_add_adapter(&sis630_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
+ return ret;
}
static void sis630_remove(struct pci_dev *dev)
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 0a3c37510079..a34af1ba09bd 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,3 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
+# Branch profiling isn't noinstr-safe
+ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
+obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 0fdb1d1316c4..5687089e406a 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -58,6 +58,7 @@
#include <asm/spec-ctrl.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>
+#include <asm/smp.h>
#define INTEL_IDLE_VERSION "0.5.1"
@@ -229,6 +230,15 @@ static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
return 0;
}
+static void intel_idle_enter_dead(struct cpuidle_device *dev, int index)
+{
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+
+ mwait_play_dead(eax);
+}
+
/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
@@ -1804,6 +1814,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
mark_tsc_unstable("TSC halts in idle");
state->enter = intel_idle;
+ state->enter_dead = intel_idle_enter_dead;
state->enter_s2idle = intel_idle_s2idle;
}
}
@@ -2153,6 +2164,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
!cpuidle_state_table[cstate].enter_s2idle)
break;
+ if (!cpuidle_state_table[cstate].enter_dead)
+ cpuidle_state_table[cstate].enter_dead = intel_idle_enter_dead;
+
/* If marked as unusable, skip this state. */
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 7dde5713973f..49560059f4b7 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -812,9 +812,7 @@ static int tsc2046_adc_probe(struct spi_device *spi)
spin_lock_init(&priv->state_lock);
priv->state = TSC2046_STATE_SHUTDOWN;
- hrtimer_init(&priv->trig_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
- priv->trig_timer.function = tsc2046_adc_timer;
+ hrtimer_setup(&priv->trig_timer, tsc2046_adc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ret = devm_iio_trigger_register(dev, trig);
if (ret) {
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index 716c795d08fb..82c72baccb62 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -145,8 +145,8 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name)
trig_info->swt.trigger->ops = &iio_hrtimer_trigger_ops;
trig_info->swt.trigger->dev.groups = iio_hrtimer_attr_groups;
- hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
- trig_info->timer.function = iio_hrtimer_trig_handler;
+ hrtimer_setup(&trig_info->timer, iio_hrtimer_trig_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_HARD);
trig_info->sampling_frequency[0] = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency[0];
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 3721446c6ba4..502a79136d4d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -53,12 +53,6 @@
#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
-#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
-#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
/* Number of MRs to reserve for PF, leaving remainder for VFs */
#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index a94c8c5387d9..4659a2f73364 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -2130,8 +2130,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
* memory for the function and all child VFs
*/
rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx,
- BNXT_RE_MAX_QPC_COUNT);
+ &rdev->qplib_ctx);
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate RCFW Channel: %#x\n", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 5336f74297f8..457eecb99f96 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1217,8 +1217,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
qp->path_mtu =
CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
}
- qp->modify_flags &=
- ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
/* Bono FW require the max_dest_rd_atomic to be >= 1 */
if (qp->max_dest_rd_atomic < 1)
qp->max_dest_rd_atomic = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 17e62f22683b..d23074383428 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -915,7 +915,6 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
@@ -924,8 +923,7 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz)
+ struct bnxt_qplib_ctx *ctx)
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
@@ -969,12 +967,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
if (!rcfw->crsqe_tbl)
goto fail;
- /* Allocate one extra to hold the QP1 entries */
- rcfw->qp_tbl_size = qp_tbl_sz + 1;
- rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
- GFP_KERNEL);
- if (!rcfw->qp_tbl)
- goto fail;
spin_lock_init(&rcfw->tbl_lock);
rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 88814cb3aa74..ff873c5f1b25 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -262,8 +262,7 @@ static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz);
+ struct bnxt_qplib_ctx *ctx);
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
@@ -285,9 +284,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
+
static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
{
/* Last index of the qp_tbl is for QP1, i.e. qp_tbl_size - 1 */
- return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
+ return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2));
}
#endif /* __BNXT_QPLIB_RCFW_H__ */
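The map_qp_id_to_tbl_indx() change just above is an operator-precedence fix: % binds tighter than -, so the old expression evaluated as (qid % qp_tbl_size) - 2 and underflowed to a huge u32 whenever the modulus came out below 2. The parenthesized form takes the modulus over the usable table size instead, keeping the index inside the table while the last slot stays reserved for QP1. Illustrative values (tbl_size is hypothetical):

	u32 tbl_size = 10;
	u32 old   = 10 % tbl_size - 2;		/* (10 % 10) - 2 wraps to 0xfffffffe */
	u32 fixed = 10 % (tbl_size - 2);	/* 10 % 8 == 2, always < tbl_size - 2 */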
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 02922a0987ad..6cd05207ffed 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -871,6 +871,7 @@ int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
+ kfree(res->rcfw->qp_tbl);
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
@@ -878,12 +879,20 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_dev_attr *dev_attr;
int rc;
res->netdev = netdev;
dev_attr = res->dattr;
+ /* Allocate one extra to hold the QP1 entries */
+ rcfw->qp_tbl_size = max_t(u32, BNXT_RE_MAX_QPC_COUNT + 1, dev_attr->max_qp);
+ rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
+ GFP_KERNEL);
+ if (!rcfw->qp_tbl)
+ return -ENOMEM;
+
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 711990232de1..6a13927674b4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -49,6 +49,13 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define CHIP_NUM_58818 0xd818
#define CHIP_NUM_57608 0x1760
+#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
+#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
@@ -600,4 +607,9 @@ static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
}
+static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
+{
+ return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED);
+}
+
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 4ccd4405355a..f231e886ad9d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -176,6 +176,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
+ if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
+ attr->max_srq += le16_to_cpu(sb->max_srq_ext);
+
bnxt_qplib_query_version(rcfw, attr->fw_ver);
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 0ee60fdc18b3..7eceb3e9f4ce 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2215,11 +2215,12 @@ struct creq_query_func_resp_sb {
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ #define CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED 0x40UL
#define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
__le16 max_xp_qp_size;
__le16 create_qp_batch_size;
__le16 destroy_qp_batch_size;
- __le16 reserved16;
+ __le16 max_srq_ext;
__le64 reserved64;
};
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index cbac4a442d9e..d6fbd9c2b8b4 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -635,12 +635,11 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
spin_lock_init(&ppd->cca_timer_lock);
for (i = 0; i < OPA_MAX_SLS; i++) {
- hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
ppd->cca_timer[i].ppd = ppd;
ppd->cca_timer[i].sl = i;
ppd->cca_timer[i].ccti = 0;
- ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
+ hrtimer_setup(&ppd->cca_timer[i].hrtimer, cca_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 950c133d4220..6ee911f6885b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -175,8 +175,10 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
ida_destroy(&hr_dev->xrcd_ida.ida);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ida_destroy(&hr_dev->srq_table.srq_ida.ida);
+ xa_destroy(&hr_dev->srq_table.xa);
+ }
hns_roce_cleanup_qp_table(hr_dev);
hns_roce_cleanup_cq_table(hr_dev);
ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 4106423a1b39..3a5c93c9fb3e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -537,5 +537,6 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
ida_destroy(&hr_dev->cq_table.bank[i].ida);
+ xa_destroy(&hr_dev->cq_table.array);
mutex_destroy(&hr_dev->cq_table.bank_mutex);
}
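The xa_destroy() calls added here and in the other hns_roce cleanup paths make teardown symmetric with xa_init(): entries are expected to be erased by this point, but destroying the xarray releases whatever internal tree nodes it may still hold. Usage shape, not driver code:

	struct xarray xa;

	xa_init(&xa);
	/* ... xa_store()/xa_erase() over the object's lifetime ... */
	xa_destroy(&xa);	/* frees internal nodes, not the entries themselves */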
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 605562122ecc..ca0798224e56 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1361,6 +1361,11 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
return ret;
}
+/* Number of bottom-level BT pages for a 100G MR on a 4K-page OS,
+ * assuming the BT page size is not expanded by cal_best_bt_pg_sz().
+ */
+#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800
+
/* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
@@ -1369,6 +1374,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
{
const struct hns_roce_buf_region *r;
int ofs, end;
+ int loop;
int unit;
int ret;
int i;
@@ -1386,7 +1392,10 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
continue;
end = r->offset + r->count;
- for (ofs = r->offset; ofs < end; ofs += unit) {
+ for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+
ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
hem_list->mid_bt[i],
&hem_list->btm_bt);
@@ -1443,9 +1452,14 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct list_head *head = &hem_list->btm_bt;
struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
+ int loop = 1;
int nr = 0;
list_for_each_entry_safe(hem, temp_hem, head, sibling) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+ loop++;
+
if (hem_list_page_is_in_range(hem, offset)) {
nr = offset - hem->start;
cpu_base = hem->addr + nr * BA_BYTE_LEN;
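Both hns_roce_hem.c hunks add the same soft-lockup guard: a huge MR can keep these loops running in kernel context for a long time, so a cond_resched() every RESCHED_LOOP_CNT_THRESHOLD_ON_4K iterations lets the scheduler run other tasks. The generic shape of the idiom, with do_work() as a placeholder:

	for (i = 0, loop = 1; i < total; i++, loop++) {
		if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
			cond_resched();	/* yield at a bounded interval */
		do_work(i);
	}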
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index ae24c81c9812..cf89a8db4f64 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -183,7 +183,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_DEVICE_RC_RNR_NAK_GEN;
props->max_send_sge = hr_dev->caps.max_sq_sg;
props->max_recv_sge = hr_dev->caps.max_rq_sg;
- props->max_sge_rd = 1;
+ props->max_sge_rd = hr_dev->caps.max_sq_sg;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
props->max_mr = hr_dev->caps.num_mtpts;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9e2e76c59406..8901c142c1b6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -868,12 +868,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp *ucmd,
struct hns_roce_ib_create_qp_resp *resp)
{
+ bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext, ibucontext);
+ bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
- if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
+ if (has_sdb) {
ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
if (ret) {
ibdev_err(ibdev,
@@ -884,7 +886,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
}
- if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
+ if (has_rdb) {
ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
if (ret) {
ibdev_err(ibdev,
@@ -898,7 +900,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
return 0;
err_sdb:
- if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
+ if (has_sdb)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
return ret;
@@ -1119,24 +1121,23 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibucontext);
hr_qp->config = uctx->config;
ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
- if (ret)
+ if (ret) {
ibdev_err(ibdev,
"failed to set user SQ size, ret = %d.\n",
ret);
+ return ret;
+ }
ret = set_congest_param(hr_dev, hr_qp, ucmd);
- if (ret)
- return ret;
} else {
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
+ default_congest_type(hr_dev, hr_qp);
ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
if (ret)
ibdev_err(ibdev,
"failed to set kernel SQ size, ret = %d.\n",
ret);
-
- default_congest_type(hr_dev, hr_qp);
}
return ret;
@@ -1219,7 +1220,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
min(udata->outlen, sizeof(resp)));
if (ret) {
ibdev_err(ibdev, "copy qp resp failed!\n");
- goto err_store;
+ goto err_flow_ctrl;
}
}
@@ -1602,6 +1603,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
ida_destroy(&hr_dev->qp_table.bank[i].ida);
xa_destroy(&hr_dev->qp_table.dip_xa);
+ xa_destroy(&hr_dev->qp_table_xa);
mutex_destroy(&hr_dev->qp_table.bank_mutex);
mutex_destroy(&hr_dev->qp_table.scc_mutex);
}
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 99036afb3aef..531a57f9ee7e 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -50,11 +50,12 @@ static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
return sport;
}
-static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+static int create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
struct rdma_ah_init_attr *init_attr)
{
struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
enum ib_gid_type gid_type;
+ int rate_val;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
@@ -67,8 +68,10 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.tclass = grh->traffic_class;
}
- ah->av.stat_rate_sl =
- (mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);
+ rate_val = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr));
+ if (rate_val < 0)
+ return rate_val;
+ ah->av.stat_rate_sl = rate_val << 4;
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (init_attr->xmit_slave)
@@ -89,6 +92,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
}
+
+ return 0;
}
int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
@@ -121,8 +126,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
return err;
}
- create_ib_ah(dev, ah, init_attr);
- return 0;
+ return create_ib_ah(dev, ah, init_attr);
}
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e6203e26cc06..614009fb9632 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1107,9 +1107,8 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
}
/* initialize timers needed for rc qp */
timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
- hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- qp->s_rnr_timer.function = rvt_rc_rnr_retry;
+ hrtimer_setup(&qp->s_rnr_timer, rvt_rc_rnr_retry, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Driver needs to set up it's private QP structure and do any
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 1ba4a0c8726a..e27478fe9456 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -38,10 +38,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
}
/* initialize rxe device parameters */
-static void rxe_init_device_param(struct rxe_dev *rxe)
+static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
{
- struct net_device *ndev;
-
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
rxe->attr.vendor_id = RXE_VENDOR_ID;
@@ -74,15 +72,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
- ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
- if (!ndev)
- return;
-
addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
ndev->dev_addr);
- dev_put(ndev);
-
rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
@@ -115,18 +107,13 @@ static void rxe_init_port_param(struct rxe_port *port)
/* initialize port state, note IB convention that HCA ports are always
* numbered from 1
*/
-static void rxe_init_ports(struct rxe_dev *rxe)
+static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev)
{
struct rxe_port *port = &rxe->port;
- struct net_device *ndev;
rxe_init_port_param(port);
- ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
- if (!ndev)
- return;
addrconf_addr_eui48((unsigned char *)&port->port_guid,
ndev->dev_addr);
- dev_put(ndev);
spin_lock_init(&port->port_lock);
}
@@ -144,12 +131,12 @@ static void rxe_init_pools(struct rxe_dev *rxe)
}
/* initialize rxe device state */
-static void rxe_init(struct rxe_dev *rxe)
+static void rxe_init(struct rxe_dev *rxe, struct net_device *ndev)
{
/* init default device parameters */
- rxe_init_device_param(rxe);
+ rxe_init_device_param(rxe, ndev);
- rxe_init_ports(rxe);
+ rxe_init_ports(rxe, ndev);
rxe_init_pools(rxe);
/* init pending mmap list */
@@ -184,7 +171,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
struct net_device *ndev)
{
- rxe_init(rxe);
+ rxe_init(rxe, ndev);
rxe_set_mtu(rxe, mtu);
return rxe_register_device(rxe, ibdev_name, ndev);
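The rxe refactor is reference plumbing: rxe_add() already receives ndev from its caller, so passing it down lets each helper drop its own lookup. The deleted shape, reconstructed here for contrast, paid a get/put round-trip per helper and returned early on failure, silently leaving the GUID at zero:

	/* Old per-helper pattern (removed above): */
	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
	if (!ndev)
		return;		/* GUID silently left unset */
	addrconf_addr_eui48((unsigned char *)&guid, ndev->dev_addr);
	dev_put(ndev);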
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 8fe2a51df649..c33e6f33265b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -140,6 +140,7 @@ static const struct xpad_device {
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
@@ -177,6 +178,7 @@ static const struct xpad_device {
{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
{ 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX },
{ 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4503, "Mad Catz Racing Wheel", 0, XTYPE_XBOXONE },
{ 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
{ 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
@@ -238,6 +240,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
@@ -276,12 +279,15 @@ static const struct xpad_device {
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
+ { 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
@@ -306,7 +312,7 @@ static const struct xpad_device {
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
- { 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 },
+ { 0x1a86, 0xe310, "Legion Go S", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -343,6 +349,7 @@ static const struct xpad_device {
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
@@ -366,6 +373,7 @@ static const struct xpad_device {
{ 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x581a, "ThrustMaster XB1 Classic Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
@@ -374,10 +382,15 @@ static const struct xpad_device {
{ 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
{ 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
+ { 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller for Xbox", 0, XTYPE_XBOXONE },
{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
@@ -385,11 +398,16 @@ static const struct xpad_device {
{ 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0603, "Nacon Pro Compact controller for Xbox", 0, XTYPE_XBOXONE },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0614, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
{ 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
+ { 0x3285, 0x0662, "Nacon Revolution5 Pro", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
+ { 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
+ { 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -488,6 +506,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x044f), /* Thrustmaster Xbox One controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */
@@ -519,8 +538,9 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
- XPAD_XBOX360_VENDOR(0x1a86), /* QH Electronics */
+ XPAD_XBOX360_VENDOR(0x1a86), /* Nanjing Qinheng Microelectronics (WCH) */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
+ XPAD_XBOX360_VENDOR(0x1ee9), /* ZOTAC Technology Limited */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2345), /* Machenike Controllers */
@@ -528,17 +548,20 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
- XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
+ XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
+ XPAD_XBOX360_VENDOR(0x2993), /* TECNO Mobile */
XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */
- XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller */
- XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
- XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke Xbox One pad */
- XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
+ XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Controllers */
+ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Controllers */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e95), /* SCUF Gaming Controller */
XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
{ }
};
@@ -691,7 +714,9 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
+ XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
+ XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 22022d11470d..80b917944b51 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -100,11 +100,11 @@ enum iqs7222_reg_key_id {
enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_STAT,
- IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_CYCLE,
IQS7222_REG_GRP_GLBL,
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
+ IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_SLDR,
IQS7222_REG_GRP_TPAD,
IQS7222_REG_GRP_GPIO,
@@ -286,6 +286,7 @@ static const struct iqs7222_event_desc iqs7222_tp_events[] = {
struct iqs7222_reg_grp_desc {
u16 base;
+ u16 val_len;
int num_row;
int num_col;
};
@@ -342,6 +343,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAC00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -400,6 +402,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAC00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -454,6 +457,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xC400,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -496,6 +500,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xC400,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -543,6 +548,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAA00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -600,6 +606,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAA00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -656,6 +663,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -712,6 +720,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -768,6 +777,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -1604,7 +1614,7 @@ static int iqs7222_force_comms(struct iqs7222_private *iqs7222)
}
static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
- u16 reg, void *val, u16 num_val)
+ u16 reg, void *val, u16 val_len)
{
u8 reg_buf[sizeof(__be16)];
int ret, i;
@@ -1619,7 +1629,7 @@ static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
{
.addr = client->addr,
.flags = I2C_M_RD,
- .len = num_val * sizeof(__le16),
+ .len = val_len,
.buf = (u8 *)val,
},
};
@@ -1675,7 +1685,7 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
__le16 val_buf;
int error;
- error = iqs7222_read_burst(iqs7222, reg, &val_buf, 1);
+ error = iqs7222_read_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
if (error)
return error;
@@ -1685,10 +1695,9 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
}
static int iqs7222_write_burst(struct iqs7222_private *iqs7222,
- u16 reg, const void *val, u16 num_val)
+ u16 reg, const void *val, u16 val_len)
{
int reg_len = reg > U8_MAX ? sizeof(reg) : sizeof(u8);
- int val_len = num_val * sizeof(__le16);
int msg_len = reg_len + val_len;
int ret, i;
struct i2c_client *client = iqs7222->client;
@@ -1747,7 +1756,7 @@ static int iqs7222_write_word(struct iqs7222_private *iqs7222, u16 reg, u16 val)
{
__le16 val_buf = cpu_to_le16(val);
- return iqs7222_write_burst(iqs7222, reg, &val_buf, 1);
+ return iqs7222_write_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
}
static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
@@ -1831,30 +1840,14 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
/*
* Acknowledge reset before writing any registers in case the device
- * suffers a spurious reset during initialization. Because this step
- * may change the reserved fields of the second filter beta register,
- * its cache must be updated.
- *
- * Writing the second filter beta register, in turn, may clobber the
- * system status register. As such, the filter beta register pair is
- * written first to protect against this hazard.
+ * suffers a spurious reset during initialization.
*/
if (dir == WRITE) {
- u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1;
- u16 filt_setup;
-
error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
iqs7222->sys_setup[0] |
IQS7222_SYS_SETUP_ACK_RESET);
if (error)
return error;
-
- error = iqs7222_read_word(iqs7222, reg, &filt_setup);
- if (error)
- return error;
-
- iqs7222->filt_setup[1] &= GENMASK(7, 0);
- iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0));
}
/*
@@ -1883,6 +1876,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
int num_col = dev_desc->reg_grps[i].num_col;
u16 reg = dev_desc->reg_grps[i].base;
__le16 *val_buf;
+ u16 val_len = dev_desc->reg_grps[i].val_len ? : num_col * sizeof(*val_buf);
u16 *val;
if (!num_col)
@@ -1900,7 +1894,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
switch (dir) {
case READ:
error = iqs7222_read_burst(iqs7222, reg,
- val_buf, num_col);
+ val_buf, val_len);
for (k = 0; k < num_col; k++)
val[k] = le16_to_cpu(val_buf[k]);
break;
@@ -1909,7 +1903,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
for (k = 0; k < num_col; k++)
val_buf[k] = cpu_to_le16(val[k]);
error = iqs7222_write_burst(iqs7222, reg,
- val_buf, num_col);
+ val_buf, val_len);
break;
default:
@@ -1962,7 +1956,7 @@ static int iqs7222_dev_info(struct iqs7222_private *iqs7222)
int error, i;
error = iqs7222_read_burst(iqs7222, IQS7222_PROD_NUM, dev_id,
- ARRAY_SIZE(dev_id));
+ sizeof(dev_id));
if (error)
return error;
@@ -2915,7 +2909,7 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
__le16 status[IQS7222_MAX_COLS_STAT];
error = iqs7222_read_burst(iqs7222, IQS7222_SYS_STATUS, status,
- num_stat);
+ num_stat * sizeof(*status));
if (error)
return error;
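Two details in the iqs7222 hunks are easy to misread. First, the burst helpers now take a length in bytes rather than a count of 16-bit words, which is what allows the filter register groups to declare a 3-byte transfer (val_len = 3) even though they are cached as two words. Second, the fallback in the dev_init hunk uses GNU C's two-operand conditional, where x ? : y means x ? x : y without evaluating x twice:

	/* Use the per-group byte length when the descriptor specifies one,
	 * else fall back to the word-aligned default. */
	u16 val_len = dev_desc->reg_grps[i].val_len ? : num_col * sizeof(*val_buf);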
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 127cfdc8668a..6ed9fc34948c 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1080,16 +1080,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/* Mivvy M310 */
@@ -1159,9 +1157,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
/*
* A lot of modern Clevo barebones have touchpad and/or keyboard issues
- * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
- * none of them have an external PS/2 port so this can safely be set for
- * all of them.
+ * after suspend fixable with the forcenorestore quirk.
* Clevo barebones come with board_vendor and/or system_vendor set to
* either the very generic string "Notebook" and/or a different value
* for each individual reseller. The only somewhat universal way to
@@ -1171,29 +1167,25 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
@@ -1205,29 +1197,19 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
- /*
- * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
- * the keyboard very laggy for ~5 seconds after boot and
- * sometimes also after resume.
- * However both are required for the keyboard to not fail
- * completely sometimes after boot or resume.
- */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
/*
* At least one modern Clevo barebone has the touchpad connected both
@@ -1243,17 +1225,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
- SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
- SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX |
+ SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
- SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
- SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX |
+ SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
@@ -1265,8 +1245,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "P640RE"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1277,16 +1262,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1297,8 +1280,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1309,8 +1291,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1321,8 +1302,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1333,22 +1313,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PB51RF"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PB71RD"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PC70DR"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PCX0DX_GN20"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
/* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
{
@@ -1361,15 +1362,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 066dc04003fa..67264c5b49cb 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1021,7 +1021,7 @@ static int ads7846_setup_pendown(struct spi_device *spi,
if (pdata->get_pendown_state) {
ts->get_pendown_state = pdata->get_pendown_state;
} else {
- ts->gpio_pendown = gpiod_get(&spi->dev, "pendown", GPIOD_IN);
+ ts->gpio_pendown = devm_gpiod_get(&spi->dev, "pendown", GPIOD_IN);
if (IS_ERR(ts->gpio_pendown)) {
dev_err(&spi->dev, "failed to request pendown GPIO\n");
return PTR_ERR(ts->gpio_pendown);
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
index 3fc03cf0ca23..7f8cfdd106fa 100644
--- a/drivers/input/touchscreen/goodix_berlin_core.c
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -165,7 +165,7 @@ struct goodix_berlin_core {
struct device *dev;
struct regmap *regmap;
struct regulator *avdd;
- struct regulator *iovdd;
+ struct regulator *vddio;
struct gpio_desc *reset_gpio;
struct touchscreen_properties props;
struct goodix_berlin_fw_version fw_version;
@@ -248,22 +248,22 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
{
int error;
- error = regulator_enable(cd->iovdd);
+ error = regulator_enable(cd->vddio);
if (error) {
- dev_err(cd->dev, "Failed to enable iovdd: %d\n", error);
+ dev_err(cd->dev, "Failed to enable vddio: %d\n", error);
return error;
}
- /* Vendor waits 3ms for IOVDD to settle */
+ /* Vendor waits 3ms for VDDIO to settle */
usleep_range(3000, 3100);
error = regulator_enable(cd->avdd);
if (error) {
dev_err(cd->dev, "Failed to enable avdd: %d\n", error);
- goto err_iovdd_disable;
+ goto err_vddio_disable;
}
- /* Vendor waits 15ms for IOVDD to settle */
+ /* Vendor waits 15ms for AVDD to settle */
usleep_range(15000, 15100);
gpiod_set_value_cansleep(cd->reset_gpio, 0);
@@ -283,8 +283,8 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
err_dev_reset:
gpiod_set_value_cansleep(cd->reset_gpio, 1);
regulator_disable(cd->avdd);
-err_iovdd_disable:
- regulator_disable(cd->iovdd);
+err_vddio_disable:
+ regulator_disable(cd->vddio);
return error;
}
@@ -292,7 +292,7 @@ static void goodix_berlin_power_off(struct goodix_berlin_core *cd)
{
gpiod_set_value_cansleep(cd->reset_gpio, 1);
regulator_disable(cd->avdd);
- regulator_disable(cd->iovdd);
+ regulator_disable(cd->vddio);
}
static int goodix_berlin_read_version(struct goodix_berlin_core *cd)
@@ -744,10 +744,10 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
return dev_err_probe(dev, PTR_ERR(cd->avdd),
"Failed to request avdd regulator\n");
- cd->iovdd = devm_regulator_get(dev, "iovdd");
- if (IS_ERR(cd->iovdd))
- return dev_err_probe(dev, PTR_ERR(cd->iovdd),
- "Failed to request iovdd regulator\n");
+ cd->vddio = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(cd->vddio))
+ return dev_err_probe(dev, PTR_ERR(cd->vddio),
+ "Failed to request vddio regulator\n");
error = goodix_berlin_power_on(cd);
if (error) {
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
index abeae9102323..3c8bbe284b73 100644
--- a/drivers/input/touchscreen/imagis.c
+++ b/drivers/input/touchscreen/imagis.c
@@ -22,6 +22,7 @@
#define IST3032C_WHOAMI 0x32c
#define IST3038C_WHOAMI 0x38c
+#define IST3038H_WHOAMI 0x38d
#define IST3038B_REG_CHIPID 0x30
#define IST3038B_WHOAMI 0x30380b
@@ -428,11 +429,19 @@ static const struct imagis_properties imagis_3038c_data = {
.protocol_b = true,
};
+static const struct imagis_properties imagis_3038h_data = {
+ .interrupt_msg_cmd = IST3038C_REG_INTR_MESSAGE,
+ .touch_coord_cmd = IST3038C_REG_TOUCH_COORD,
+ .whoami_cmd = IST3038C_REG_CHIPID,
+ .whoami_val = IST3038H_WHOAMI,
+};
+
static const struct of_device_id imagis_of_match[] = {
{ .compatible = "imagis,ist3032c", .data = &imagis_3032c_data },
{ .compatible = "imagis,ist3038", .data = &imagis_3038_data },
{ .compatible = "imagis,ist3038b", .data = &imagis_3038b_data },
{ .compatible = "imagis,ist3038c", .data = &imagis_3038c_data },
+ { .compatible = "imagis,ist3038h", .data = &imagis_3038h_data },
{ },
};
MODULE_DEVICE_TABLE(of, imagis_of_match);
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 27941245e962..88d376090e6e 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -1153,11 +1153,13 @@ static const struct i2c_device_id wdt87xx_dev_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wdt87xx_dev_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id wdt87xx_acpi_id[] = {
{ "WDHT0001", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, wdt87xx_acpi_id);
+#endif
static struct i2c_driver wdt87xx_driver = {
.probe = wdt87xx_ts_probe,
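Guarding the ACPI ID table with #ifdef CONFIG_ACPI silences a defined-but-unused warning on !ACPI builds, and it only links because the reference in the driver struct compiles away as well. A sketch of the usual pairing, assuming (not shown in this hunk) that the driver registers the table via ACPI_PTR():

	static struct i2c_driver wdt87xx_driver = {
		.probe = wdt87xx_ts_probe,
		.driver = {
			.name = "wdt87xx",
			/* ACPI_PTR() evaluates to NULL when CONFIG_ACPI=n */
			.acpi_match_table = ACPI_PTR(wdt87xx_acpi_id),
		},
	};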
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 2a86aa5d54c6..761ab647f372 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -130,7 +130,7 @@ static int __init hyperv_prepare_irq_remapping(void)
x86_init.hyper.msi_ext_dest_id())
return -ENODEV;
- if (hv_root_partition) {
+ if (hv_root_partition()) {
name = "HYPERV-ROOT-IR";
ops = &hyperv_root_ir_domain_ops;
} else {
@@ -151,7 +151,7 @@ static int __init hyperv_prepare_irq_remapping(void)
return -ENOMEM;
}
- if (hv_root_partition)
+ if (hv_root_partition())
return 0; /* The rest is only relevant to guests */
/*
@@ -217,7 +217,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
status = hv_unmap_ioapic_interrupt(ioapic_id, &entry);
if (status != HV_STATUS_SUCCESS)
- pr_debug("%s: unexpected unmap status %lld\n", __func__, status);
+ hv_status_debug(status, "failed to unmap\n");
data->entry.ioapic_rte.as_uint64 = 0;
data->entry.source = 0; /* Invalid source */
@@ -228,7 +228,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
vector, &entry);
if (status != HV_STATUS_SUCCESS) {
- pr_err("%s: map hypercall failed, status %lld\n", __func__, status);
+ hv_status_err(status, "map failed\n");
return;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c11b9965c4ad..083fa9578bb7 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -590,13 +590,7 @@ config RISCV_IMSIC
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_MATRIX_ALLOCATOR
select GENERIC_MSI_IRQ
-
-config RISCV_IMSIC_PCI
- bool
- depends on RISCV_IMSIC
- depends on PCI
- depends on PCI_MSI
- default RISCV_IMSIC
+ select IRQ_MSI_LIB
config SIFIVE_PLIC
bool
@@ -752,6 +746,18 @@ config MCHP_EIC
help
Support for Microchip External Interrupt Controller.
+config SOPHGO_SG2042_MSI
+ bool "Sophgo SG2042 MSI Controller"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on PCI
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_MSI_LIB
+ select PCI_MSI
+ help
+ Support for the Sophgo SG2042 MSI Controller.
+ This on-chip interrupt controller enables MSI sources to be
+ routed to the primary PLIC controller on the SoC.
+
config SUNPLUS_SP7021_INTC
bool "Sunplus SP7021 interrupt controller" if COMPILE_TEST
default SOC_SP7021
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 25e9ad29b8c4..dd60e597491d 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -128,4 +128,5 @@ obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o
obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o
+obj-$(CONFIG_SOPHGO_SG2042_MSI) += irq-sg2042-msi.o
obj-$(CONFIG_SUNPLUS_SP7021_INTC) += irq-sp7021-intc.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 2b1684c60e3c..974dc088c853 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -409,15 +409,15 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
* in use, and be cleared when coming back from the handler.
*/
if (is_kernel_in_hyp_mode() &&
- (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
+ (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
generic_handle_domain_irq(aic_irqc->hw_domain,
AIC_FIQ_HWIRQ(AIC_VGIC_MI));
- if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
+ if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2))) {
pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
- sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+ sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
}
}
@@ -841,7 +841,7 @@ static int aic_init_cpu(unsigned int cpu)
VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);
/* vGIC maintenance IRQ */
- sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+ sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
/* PMC FIQ */
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index f4f8e9fadbbf..d7948c55f542 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -154,24 +153,20 @@ static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-static int __init
-davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
- struct device_node *node)
+static int __init davinci_cp_intc_do_init(struct resource *res, unsigned int num_irqs,
+ struct device_node *node)
{
- unsigned int num_regs = BITS_TO_LONGS(config->num_irqs);
+ unsigned int num_regs = BITS_TO_LONGS(num_irqs);
int offset, irq_base;
void __iomem *req;
- req = request_mem_region(config->reg.start,
- resource_size(&config->reg),
- "davinci-cp-intc");
+ req = request_mem_region(res->start, resource_size(res), "davinci-cp-intc");
if (!req) {
pr_err("%s: register range busy\n", __func__);
return -EBUSY;
}
- davinci_cp_intc_base = ioremap(config->reg.start,
- resource_size(&config->reg));
+ davinci_cp_intc_base = ioremap(res->start, resource_size(res));
if (!davinci_cp_intc_base) {
pr_err("%s: unable to ioremap register range\n", __func__);
return -EINVAL;
@@ -184,8 +179,7 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
/* Disable system interrupts */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(~0,
- DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
+ davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
/* Set to normal mode, no nesting, no priority hold */
davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL);
@@ -193,28 +187,25 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
/* Clear system interrupt status */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(~0,
- DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
+ davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
/* Enable nIRQ (what about nFIQ?) */
davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+ /* 4 channels per register */
+ num_regs = (num_irqs + 3) >> 2;
/* Default all priorities to channel 7. */
- num_regs = (config->num_irqs + 3) >> 2; /* 4 channels per register */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(0x07070707,
- DAVINCI_CP_INTC_CHAN_MAP(offset));
+ davinci_cp_intc_write(0x07070707, DAVINCI_CP_INTC_CHAN_MAP(offset));
- irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
if (irq_base < 0) {
- pr_err("%s: unable to allocate interrupt descriptors: %d\n",
- __func__, irq_base);
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n", __func__, irq_base);
return irq_base;
}
- davinci_cp_intc_irq_domain = irq_domain_add_legacy(
- node, config->num_irqs, irq_base, 0,
- &davinci_cp_intc_irq_domain_ops, NULL);
+ davinci_cp_intc_irq_domain = irq_domain_add_legacy(node, num_irqs, irq_base, 0,
+ &davinci_cp_intc_irq_domain_ops, NULL);
if (!davinci_cp_intc_irq_domain) {
pr_err("%s: unable to create an interrupt domain\n", __func__);
@@ -229,31 +220,25 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
return 0;
}
-int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config)
-{
- return davinci_cp_intc_do_init(config, NULL);
-}
-
static int __init davinci_cp_intc_of_init(struct device_node *node,
struct device_node *parent)
{
- struct davinci_cp_intc_config config = { };
+ unsigned int num_irqs;
+ struct resource res;
int ret;
- ret = of_address_to_resource(node, 0, &config.reg);
+ ret = of_address_to_resource(node, 0, &res);
if (ret) {
- pr_err("%s: unable to get the register range from device-tree\n",
- __func__);
+ pr_err("%s: unable to get the register range from device-tree\n", __func__);
return ret;
}
- ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs);
+ ret = of_property_read_u32(node, "ti,intc-size", &num_irqs);
if (ret) {
- pr_err("%s: unable to read the 'ti,intc-size' property\n",
- __func__);
+ pr_err("%s: unable to read the 'ti,intc-size' property\n", __func__);
return ret;
}
- return davinci_cp_intc_do_init(&config, node);
+ return davinci_cp_intc_do_init(&res, num_irqs, node);
}
IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index be35c5349986..1e3476c335ca 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -255,6 +255,7 @@ static void __init gicv2m_teardown(void)
static struct msi_parent_ops gicv2m_msi_parent_ops = {
.supported_flags = GICV2M_MSI_FLAGS_SUPPORTED,
.required_flags = GICV2M_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "GICv2m-",
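The same .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK line recurs in the ITS, MBI, MU-MSI and PCH hunks below: instead of each irqchip open-coding irq_eoi/irq_ack for the MSI domains it parents, the shared MSI library fills in the parent callbacks. A hedged reconstruction of how irq-msi-lib.c consumes the flags (shape only, see that file for the authoritative code):

	if (pops->chip_flags & MSI_CHIP_FLAG_SET_EOI)
		chip->irq_eoi = irq_chip_eoi_parent;
	if (pops->chip_flags & MSI_CHIP_FLAG_SET_ACK)
		chip->irq_ack = irq_chip_ack_parent;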
diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
index e150365fbe89..bdb04c808148 100644
--- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c
+++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
@@ -203,6 +203,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
.supported_flags = ITS_MSI_FLAGS_SUPPORTED,
.required_flags = ITS_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "ITS-",
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 8c3ec5734f1e..f30ed281882f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -205,13 +205,15 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static gfp_t gfp_flags_quirk;
+
static struct page *its_alloc_pages_node(int node, gfp_t gfp,
unsigned int order)
{
struct page *page;
int ret = 0;
- page = alloc_pages_node(node, gfp, order);
+ page = alloc_pages_node(node, gfp | gfp_flags_quirk, order);
if (!page)
return NULL;
@@ -4887,6 +4889,17 @@ static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
return true;
}
+static bool __maybe_unused its_enable_rk3568002(void *data)
+{
+ if (!of_machine_is_compatible("rockchip,rk3566") &&
+ !of_machine_is_compatible("rockchip,rk3568"))
+ return false;
+
+ gfp_flags_quirk |= GFP_DMA32;
+
+ return true;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -4954,6 +4967,14 @@ static const struct gic_quirk its_quirks[] = {
.property = "dma-noncoherent",
.init = its_set_non_coherent,
},
+#ifdef CONFIG_ROCKCHIP_ERRATUM_3568002
+ {
+ .desc = "ITS: Rockchip erratum RK3568002",
+ .iidr = 0x0201743b,
+ .mask = 0xffffffff,
+ .init = its_enable_rk3568002,
+ },
+#endif
{
}
};
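Note on the quirk above: the IIDR matched (0x0201743b) is the generic GIC600 ITS ID, which unaffected designs share as well; that is why its_enable_rk3568002() re-checks the machine compatible. A sketch of the quirk's net effect on allocations (illustrative, not part of the patch):

	/* With the quirk active on RK3566/RK3568, every ITS table
	 * allocation picks up GFP_DMA32:
	 *
	 *	page = alloc_pages_node(node, gfp | GFP_DMA32, order);
	 *
	 * keeping device/collection/LPI tables below 4 GiB, which is as
	 * far as the ITS on these SoCs can reliably address.
	 */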
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index 3fe870f8ee17..3e1d8a1cda5e 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -201,6 +201,7 @@ static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
.supported_flags = MBI_MSI_FLAGS_SUPPORTED,
.required_flags = MBI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "MBI-",
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index b0e9788c0045..afbfcce3b1e3 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -24,7 +24,7 @@
#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8)
-#define CHAN_MAX_OUTPUT_INT 0x8
+#define CHAN_MAX_OUTPUT_INT 0xF
struct irqsteer_data {
void __iomem *regs;
@@ -228,10 +228,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
for (i = 0; i < data->irq_count; i++) {
data->irq[i] = irq_of_parse_and_map(np, i);
- if (!data->irq[i]) {
- ret = -EINVAL;
- goto out;
- }
+ if (!data->irq[i])
+ break;
irq_set_chained_handler_and_data(data->irq[i],
imx_irqsteer_irq_handler,
@@ -254,9 +252,13 @@ static void imx_irqsteer_remove(struct platform_device *pdev)
struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < irqsteer_data->irq_count; i++)
+ for (i = 0; i < irqsteer_data->irq_count; i++) {
+ if (!irqsteer_data->irq[i])
+ break;
+
irq_set_chained_handler_and_data(irqsteer_data->irq[i],
NULL, NULL);
+ }
irq_domain_remove(irqsteer_data->domain);
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index 4342a21de1eb..69aacdfc8bef 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -214,6 +214,7 @@ static void imx_mu_msi_irq_handler(struct irq_desc *desc)
static const struct msi_parent_ops imx_mu_msi_parent_ops = {
.supported_flags = IMX_MU_MSI_FLAGS_SUPPORTED,
.required_flags = IMX_MU_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "MU-MSI-",
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index bd337ecddb40..9c62108b3ad5 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -146,6 +146,7 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = {
static struct msi_parent_ops pch_msi_parent_ops = {
.required_flags = PCH_MSI_FLAGS_REQUIRED,
.supported_flags = PCH_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PCI_MSI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.prefix = "PCH-",
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index d8e29fc0d406..51464c6257f3 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -28,6 +28,7 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct msi_domain_info *info)
{
const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+ struct irq_chip *chip = info->chip;
u32 required_flags;
/* Parent ops available? */
@@ -92,10 +93,10 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
info->flags |= required_flags;
/* Chip updates for all child bus types */
- if (!info->chip->irq_eoi)
- info->chip->irq_eoi = irq_chip_eoi_parent;
- if (!info->chip->irq_ack)
- info->chip->irq_ack = irq_chip_ack_parent;
+ if (!chip->irq_eoi && (pops->chip_flags & MSI_CHIP_FLAG_SET_EOI))
+ chip->irq_eoi = irq_chip_eoi_parent;
+ if (!chip->irq_ack && (pops->chip_flags & MSI_CHIP_FLAG_SET_ACK))
+ chip->irq_ack = irq_chip_ack_parent;
/*
* The device MSI domain can never have a set affinity callback. It
@@ -105,7 +106,7 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* device MSI domain aside of mask/unmask which is provided e.g. by
* PCI/MSI device domains.
*/
- info->chip->irq_set_affinity = msi_domain_set_affinity;
+ chip->irq_set_affinity = msi_domain_set_affinity;
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
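Taken together with the .chip_flags additions above, a parent MSI domain now has to opt in before msi-lib installs the default ack/eoi callbacks into child chips. A minimal sketch of a parent declaring both flags (all names illustrative, not from this series):

	static const struct msi_parent_ops example_msi_parent_ops = {
		.supported_flags	= MSI_GENERIC_FLAGS_MASK,
		.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
					  MSI_FLAG_USE_DEF_CHIP_OPS,
		/* without these two, child chips keep irq_ack/irq_eoi NULL */
		.chip_flags		= MSI_CHIP_FLAG_SET_EOI |
					  MSI_CHIP_FLAG_SET_ACK,
		.bus_select_token	= DOMAIN_BUS_NEXUS,
		.bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
		.prefix			= "EXAMPLE-",
		.init_dev_msi_info	= msi_lib_init_dev_msi_info,
	};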
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index 2b6183919ea4..d67f93f6d750 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -161,6 +161,7 @@ static const struct irq_domain_ops gicp_domain_ops = {
static const struct msi_parent_ops gicp_msi_parent_ops = {
.supported_flags = GICP_MSI_FLAGS_SUPPORTED,
.required_flags = GICP_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "GICP-",
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
index ff19bfd258dc..28f7e81df94f 100644
--- a/drivers/irqchip/irq-mvebu-odmi.c
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -157,6 +157,7 @@ static const struct irq_domain_ops odmi_domain_ops = {
static const struct msi_parent_ops odmi_msi_parent_ops = {
.supported_flags = ODMI_MSI_FLAGS_SUPPORTED,
.required_flags = ODMI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "ODMI-",
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index 065166ab5dbc..ebd4a9014e8d 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -356,6 +356,7 @@ static void mvebu_sei_reset(struct mvebu_sei *sei)
static const struct msi_parent_ops sei_msi_parent_ops = {
.supported_flags = SEI_MSI_FLAGS_SUPPORTED,
.required_flags = SEI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PLATFORM_MSI,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.prefix = "SEI-",
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 99e27e01b0b1..6a2e41f02446 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -541,43 +541,36 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
return -ENODEV;
parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- dev_err(&pdev->dev, "cannot find parent domain\n");
- return -ENODEV;
- }
+ if (!parent_domain)
+ return dev_err_probe(dev, -ENODEV, "cannot find parent domain\n");
- rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
+ rzg2l_irqc_data = devm_kzalloc(dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
if (!rzg2l_irqc_data)
return -ENOMEM;
rzg2l_irqc_data->irqchip = irq_chip;
- rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+ rzg2l_irqc_data->base = devm_of_iomap(dev, dev->of_node, 0, NULL);
if (IS_ERR(rzg2l_irqc_data->base))
return PTR_ERR(rzg2l_irqc_data->base);
ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
- if (ret) {
- dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- return ret;
- }
-
- resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(resetn))
- return PTR_ERR(resetn);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot parse interrupts: %d\n", ret);
- ret = reset_control_deassert(resetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- return ret;
+ resetn = devm_reset_control_get_exclusive_deasserted(dev, NULL);
+ if (IS_ERR(resetn)) {
+ return dev_err_probe(dev, PTR_ERR(resetn),
+ "failed to acquire deasserted reset: %d\n", ret);
}
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
- goto pm_disable;
- }
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "devm_pm_runtime_enable failed: %d\n", ret);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "pm_runtime_resume_and_get failed: %d\n", ret);
raw_spin_lock_init(&rzg2l_irqc_data->lock);
@@ -585,9 +578,8 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
node, &rzg2l_irqc_domain_ops,
rzg2l_irqc_data);
if (!irq_domain) {
- dev_err(&pdev->dev, "failed to add irq domain\n");
- ret = -ENOMEM;
- goto pm_put;
+ pm_runtime_put(dev);
+ return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n");
}
register_syscore_ops(&rzg2l_irqc_syscore_ops);
@@ -604,13 +596,6 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
dev = NULL;
return 0;
-
-pm_put:
- pm_runtime_put(&pdev->dev);
-pm_disable:
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(resetn);
- return ret;
}
static int __init rzg2l_irqc_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index fe2d29e91026..3d5b5fdf9bde 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -64,11 +64,18 @@
#define ICU_TINT_LEVEL_HIGH 2
#define ICU_TINT_LEVEL_LOW 3
-#define ICU_TSSR_K(tint_nr) ((tint_nr) / 4)
-#define ICU_TSSR_TSSEL_N(tint_nr) ((tint_nr) % 4)
-#define ICU_TSSR_TSSEL_PREP(tssel, n) ((tssel) << ((n) * 8))
-#define ICU_TSSR_TSSEL_MASK(n) ICU_TSSR_TSSEL_PREP(0x7F, n)
-#define ICU_TSSR_TIEN(n) (BIT(7) << ((n) * 8))
+#define ICU_TSSR_TSSEL_PREP(tssel, n, field_width) ((tssel) << ((n) * (field_width)))
+#define ICU_TSSR_TSSEL_MASK(n, field_width) \
+({\
+ typeof(field_width) (_field_width) = (field_width); \
+ ICU_TSSR_TSSEL_PREP((GENMASK(((_field_width) - 2), 0)), (n), _field_width); \
+})
+
+#define ICU_TSSR_TIEN(n, field_width) \
+({\
+ typeof(field_width) (_field_width) = (field_width); \
+ BIT((_field_width) - 1) << ((n) * (_field_width)); \
+})
#define ICU_TITSR_K(tint_nr) ((tint_nr) / 16)
#define ICU_TITSR_TITSEL_N(tint_nr) ((tint_nr) % 16)
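The field math above is easiest to see with concrete values (worked example, not part of the patch):

	/* 8-bit fields (RZ/V2H), field n = 1:
	 *	ICU_TSSR_TIEN(1, 8)        == BIT(7)  << 8  == 0x00008000
	 *	ICU_TSSR_TSSEL_MASK(1, 8)  == 0x7f    << 8  == 0x00007f00
	 * 16-bit fields (RZ/G3E), field n = 1:
	 *	ICU_TSSR_TIEN(1, 16)       == BIT(15) << 16 == 0x80000000
	 *	ICU_TSSR_TSSEL_MASK(1, 16) == 0x7fff  << 16 == 0x7fff0000
	 * i.e. one 32-bit TSSR holds four TINT fields on RZ/V2H but only
	 * two on RZ/G3E, which is what the "32 / field_width" computation
	 * in rzv2h_tint_irq_endisable() relies on.
	 */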
@@ -78,20 +85,36 @@
#define ICU_TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
#define ICU_TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
-#define ICU_PB5_TINT 0x55
+#define ICU_RZG3E_TINT_OFFSET 0x800
+#define ICU_RZG3E_TSSEL_MAX_VAL 0x8c
+#define ICU_RZV2H_TSSEL_MAX_VAL 0x55
+
+/**
+ * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure.
+ * @tssel_lut: TINT lookup table
+ * @t_offs: TINT offset
+ * @max_tssel: TSSEL max value
+ * @field_width: TSSR field width
+ */
+struct rzv2h_hw_info {
+ const u8 *tssel_lut;
+ u16 t_offs;
+ u8 max_tssel;
+ u8 field_width;
+};
/**
* struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
* @base: Controller's base address
- * @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
+ * @info: Pointer to struct rzv2h_hw_info
*/
struct rzv2h_icu_priv {
void __iomem *base;
- const struct irq_chip *irqchip;
struct irq_fwspec fwspec[ICU_NUM_IRQ];
raw_spinlock_t lock;
+ const struct rzv2h_hw_info *info;
};
static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
@@ -111,7 +134,7 @@ static void rzv2h_icu_eoi(struct irq_data *d)
tintirq_nr = hw_irq - ICU_TINT_START;
bit = BIT(tintirq_nr);
if (!irqd_is_level_type(d))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
} else if (hw_irq >= ICU_IRQ_START) {
tintirq_nr = hw_irq - ICU_IRQ_START;
bit = BIT(tintirq_nr);
@@ -130,21 +153,23 @@ static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
unsigned int hw_irq = irqd_to_hwirq(d);
u32 tint_nr, tssel_n, k, tssr;
+ u8 nr_tint;
if (hw_irq < ICU_TINT_START)
return;
tint_nr = hw_irq - ICU_TINT_START;
- k = ICU_TSSR_K(tint_nr);
- tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+ nr_tint = 32 / priv->info->field_width;
+ k = tint_nr / nr_tint;
+ tssel_n = tint_nr % nr_tint;
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(k));
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(k));
if (enable)
- tssr |= ICU_TSSR_TIEN(tssel_n);
+ tssr |= ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
else
- tssr &= ~ICU_TSSR_TIEN(tssel_n);
- writel_relaxed(tssr, priv->base + ICU_TSSR(k));
+ tssr &= ~ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k));
}
static void rzv2h_icu_irq_disable(struct irq_data *d)
@@ -247,8 +272,8 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
u32 bit = BIT(tint_nr);
int k = tint_nr / 16;
- tsctr = readl_relaxed(priv->base + ICU_TSCTR);
- titsr = readl_relaxed(priv->base + ICU_TITSR(k));
+ tsctr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSCTR);
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(k));
titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n);
/*
@@ -257,7 +282,7 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
*/
if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) ||
(titsel == ICU_TINT_EDGE_FALLING)))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
}
static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
@@ -268,6 +293,7 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
unsigned int hwirq;
u32 tint, sense;
int tint_nr;
+ u8 nr_tint;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_LOW:
@@ -290,39 +316,42 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
+ priv = irq_data_to_priv(d);
tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
- if (tint > ICU_PB5_TINT)
+ if (tint > priv->info->max_tssel)
return -EINVAL;
- priv = irq_data_to_priv(d);
- hwirq = irqd_to_hwirq(d);
+ if (priv->info->tssel_lut)
+ tint = priv->info->tssel_lut[tint];
+ hwirq = irqd_to_hwirq(d);
tint_nr = hwirq - ICU_TINT_START;
- tssr_k = ICU_TSSR_K(tint_nr);
- tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+ nr_tint = 32 / priv->info->field_width;
+ tssr_k = tint_nr / nr_tint;
+ tssel_n = tint_nr % nr_tint;
+ tien = ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
titsr_k = ICU_TITSR_K(tint_nr);
titsel_n = ICU_TITSR_TITSEL_N(tint_nr);
- tien = ICU_TSSR_TIEN(titsel_n);
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(tssr_k));
- tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n) | tien);
- tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n);
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
+ tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n, priv->info->field_width) | tien);
+ tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n, priv->info->field_width);
- writel_relaxed(tssr, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
- titsr = readl_relaxed(priv->base + ICU_TITSR(titsr_k));
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);
- writel_relaxed(titsr, priv->base + ICU_TITSR(titsr_k));
+ writel_relaxed(titsr, priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
rzv2h_clear_tint_int(priv, hwirq);
- writel_relaxed(tssr | tien, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr | tien, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
return 0;
}
@@ -390,7 +419,7 @@ static int rzv2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigne
if (hwirq > (ICU_NUM_IRQ - 1))
return -EINVAL;
- ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &rzv2h_icu_chip,
(void *)(uintptr_t)tint);
if (ret)
return ret;
@@ -421,7 +450,13 @@ static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device
return 0;
}
-static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+static void rzv2h_icu_put_device(void *data)
+{
+ put_device(data);
+}
+
+static int rzv2h_icu_init_common(struct device_node *node, struct device_node *parent,
+ const struct rzv2h_hw_info *hw_info)
{
struct irq_domain *irq_domain, *parent_domain;
struct rzv2h_icu_priv *rzv2h_icu_data;
@@ -433,50 +468,48 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
if (!pdev)
return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2h_icu_put_device,
+ &pdev->dev);
+ if (ret < 0)
+ return ret;
+
parent_domain = irq_find_host(parent);
if (!parent_domain) {
dev_err(&pdev->dev, "cannot find parent domain\n");
- ret = -ENODEV;
- goto put_dev;
+ return -ENODEV;
}
rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL);
- if (!rzv2h_icu_data) {
- ret = -ENOMEM;
- goto put_dev;
- }
-
- rzv2h_icu_data->irqchip = &rzv2h_icu_chip;
+ if (!rzv2h_icu_data)
+ return -ENOMEM;
rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
- if (IS_ERR(rzv2h_icu_data->base)) {
- ret = PTR_ERR(rzv2h_icu_data->base);
- goto put_dev;
- }
+ if (IS_ERR(rzv2h_icu_data->base))
+ return PTR_ERR(rzv2h_icu_data->base);
ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
if (ret) {
dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- goto put_dev;
+ return ret;
}
- resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ resetn = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
if (IS_ERR(resetn)) {
ret = PTR_ERR(resetn);
- goto put_dev;
+ dev_err(&pdev->dev, "failed to acquire deasserted reset: %d\n", ret);
+ return ret;
}
- ret = reset_control_deassert(resetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- goto put_dev;
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "devm_pm_runtime_enable failed, %d\n", ret);
+ return ret;
}
- pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
- goto pm_disable;
+ return ret;
}
raw_spin_lock_init(&rzv2h_icu_data->lock);
@@ -489,6 +522,8 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
goto pm_put;
}
+ rzv2h_icu_data->info = hw_info;
+
/*
* coccicheck complains about a missing put_device call before returning, but it's a false
* positive. We still need &pdev->dev after successfully returning from this function.
@@ -497,16 +532,61 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
pm_put:
pm_runtime_put(&pdev->dev);
-pm_disable:
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(resetn);
-put_dev:
- put_device(&pdev->dev);
return ret;
}
+/* Mapping based on port index on Table 4.2-6 and TSSEL bits on Table 4.6-4 */
+static const u8 rzg3e_tssel_lut[] = {
+ 81, 82, 83, 84, 85, 86, 87, 88, /* P00-P07 */
+ 89, 90, 91, 92, 93, 94, 95, 96, /* P10-P17 */
+ 111, 112, /* P20-P21 */
+ 97, 98, 99, 100, 101, 102, 103, 104, /* P30-P37 */
+ 105, 106, 107, 108, 109, 110, /* P40-P45 */
+ 113, 114, 115, 116, 117, 118, 119, /* P50-P56 */
+ 120, 121, 122, 123, 124, 125, 126, /* P60-P66 */
+ 127, 128, 129, 130, 131, 132, 133, 134, /* P70-P77 */
+ 135, 136, 137, 138, 139, 140, /* P80-P85 */
+ 43, 44, 45, 46, 47, 48, 49, 50, /* PA0-PA7 */
+ 51, 52, 53, 54, 55, 56, 57, 58, /* PB0-PB7 */
+ 59, 60, 61, /* PC0-PC2 */
+ 62, 63, 64, 65, 66, 67, 68, 69, /* PD0-PD7 */
+ 70, 71, 72, 73, 74, 75, 76, 77, /* PE0-PE7 */
+ 78, 79, 80, /* PF0-PF2 */
+ 25, 26, 27, 28, 29, 30, 31, 32, /* PG0-PG7 */
+ 33, 34, 35, 36, 37, 38, /* PH0-PH5 */
+ 4, 5, 6, 7, 8, /* PJ0-PJ4 */
+ 39, 40, 41, 42, /* PK0-PK3 */
+ 9, 10, 11, 12, 21, 22, 23, 24, /* PL0-PL7 */
+ 13, 14, 15, 16, 17, 18, 19, 20, /* PM0-PM7 */
+ 0, 1, 2, 3 /* PS0-PS3 */
+};
+
+static const struct rzv2h_hw_info rzg3e_hw_params = {
+ .tssel_lut = rzg3e_tssel_lut,
+ .t_offs = ICU_RZG3E_TINT_OFFSET,
+ .max_tssel = ICU_RZG3E_TSSEL_MAX_VAL,
+ .field_width = 16,
+};
+
+static const struct rzv2h_hw_info rzv2h_hw_params = {
+ .t_offs = 0,
+ .max_tssel = ICU_RZV2H_TSSEL_MAX_VAL,
+ .field_width = 8,
+};
+
+static int rzg3e_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzg3e_hw_params);
+}
+
+static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzv2h_hw_params);
+}
+
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
+IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_init)
IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
index 7cd6b646774b..205ad61d15e4 100644
--- a/drivers/irqchip/irq-riscv-aplic-direct.c
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -31,7 +31,7 @@ struct aplic_direct {
};
struct aplic_idc {
- unsigned int hart_index;
+ u32 hart_index;
void __iomem *regs;
struct aplic_direct *direct;
};
@@ -219,6 +219,20 @@ static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
return 0;
}
+static int aplic_direct_get_hart_index(struct device *dev, u32 logical_index,
+ u32 *hart_index)
+{
+ const char *prop_hart_index = "riscv,hart-indexes";
+ struct device_node *np = to_of_node(dev->fwnode);
+
+ if (!np || !of_property_present(np, prop_hart_index)) {
+ *hart_index = logical_index;
+ return 0;
+ }
+
+ return of_property_read_u32_index(np, prop_hart_index, logical_index, hart_index);
+}
+
int aplic_direct_setup(struct device *dev, void __iomem *regs)
{
int i, j, rc, cpu, current_cpu, setup_count = 0;
@@ -265,8 +279,12 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
cpumask_set_cpu(cpu, &direct->lmask);
idc = per_cpu_ptr(&aplic_idcs, cpu);
- idc->hart_index = i;
- idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
+ rc = aplic_direct_get_hart_index(dev, i, &idc->hart_index);
+ if (rc) {
+ dev_warn(dev, "hart index not found for IDC%d\n", i);
+ continue;
+ }
+ idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE;
idc->direct = direct;
aplic_idc_set_delivery(idc, true);
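The new fallback keeps existing device trees working: absent the property, hart_index stays equal to the logical index. An example of the fragment it parses (values hypothetical):

	/* aplic: interrupt-controller@c000000 {
	 *	compatible = "riscv,aplic";
	 *	riscv,hart-indexes = <0x10 0x11 0x12 0x13>;
	 *	...
	 * };
	 *
	 * With this, logical IDC 2 lands at APLIC_IDC_BASE +
	 * 0x12 * APLIC_IDC_SIZE instead of 2 * APLIC_IDC_SIZE.
	 */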
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index 275df5005705..d9ae87808651 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -73,10 +73,16 @@ static int __init imsic_ipi_domain_init(void) { return 0; }
static void imsic_handle_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- int err, cpu = smp_processor_id();
+ int cpu = smp_processor_id();
struct imsic_vector *vec;
unsigned long local_id;
+ /*
+ * Process pending local synchronization instead of waiting
+ * for the per-CPU local timer to expire.
+ */
+ imsic_local_sync_all(false);
+
chained_irq_enter(chip, desc);
while ((local_id = csr_swap(CSR_TOPEI, 0))) {
@@ -97,9 +103,7 @@ static void imsic_handle_irq(struct irq_desc *desc)
continue;
}
- err = generic_handle_domain_irq(imsic->base_domain, vec->hwirq);
- if (unlikely(err))
- pr_warn_ratelimited("hwirq 0x%x mapping not found\n", vec->hwirq);
+ generic_handle_irq(vec->irq);
}
chained_irq_exit(chip, desc);
@@ -120,7 +124,7 @@ static int imsic_starting_cpu(unsigned int cpu)
* Interrupts identities might have been enabled/disabled while
* this CPU was not running so sync-up local enable/disable state.
*/
- imsic_local_sync_all();
+ imsic_local_sync_all(true);
/* Enable local interrupt delivery */
imsic_local_delivery(true);
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index c708780e8760..b8ae67c25b37 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
+#include "irq-msi-lib.h"
#include "irq-riscv-imsic-state.h"
static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
@@ -63,6 +64,11 @@ static int imsic_irq_retrigger(struct irq_data *d)
return 0;
}
+static void imsic_irq_ack(struct irq_data *d)
+{
+ irq_move_irq(d);
+}
+
static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
{
phys_addr_t msi_addr;
@@ -96,9 +102,23 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
bool force)
{
struct imsic_vector *old_vec, *new_vec;
- struct irq_data *pd = d->parent_data;
+ struct imsic_vector tmp_vec;
+
+ /*
+ * Requirements for the downstream irqdomains (or devices):
+ *
+ * 1) Downstream irqdomains (or devices) with atomic MSI update can
+ * happily do imsic_irq_set_affinity() in the process-context on
+ * any CPU so the irqchip of such irqdomains must not set the
+ * IRQCHIP_MOVE_DEFERRED flag.
+ *
+ * 2) Downstream irqdomains (or devices) with non-atomic MSI update
+ * must use imsic_irq_set_affinity() in nterrupt-context upon
+ * the next device interrupt so the irqchip of such irqdomains
+ * must set the IRQCHIP_MOVE_DEFERRED flag.
+ */
- old_vec = irq_data_get_irq_chip_data(pd);
+ old_vec = irq_data_get_irq_chip_data(d);
if (WARN_ON(!old_vec))
return -ENOENT;
@@ -111,34 +131,95 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
return -EBUSY;
/* Get a new vector on the desired set of CPUs */
- new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
+ new_vec = imsic_vector_alloc(old_vec->irq, mask_val);
if (!new_vec)
return -ENOSPC;
+ /*
+ * A device with a non-atomic MSI update might see an intermediate
+ * state while the target IMSIC vector is being changed from one
+ * CPU to another.
+ *
+ * To avoid losing an interrupt to such an intermediate state, do
+ * the following (just like the x86 APIC):
+ *
+ * 1) First write a temporary IMSIC vector to the device whose
+ * MSI address is the same as the old IMSIC vector's but whose
+ * MSI data matches the new IMSIC vector's.
+ *
+ * 2) Next write the new IMSIC vector to the device.
+ *
+ * Based on the above, __imsic_local_sync() must check the pending
+ * status of both the old and the new MSI data on the old CPU.
+ */
+ if (!irq_can_move_in_process_context(d) &&
+ new_vec->local_id != old_vec->local_id) {
+ /* Setup temporary vector */
+ tmp_vec.cpu = old_vec->cpu;
+ tmp_vec.local_id = new_vec->local_id;
+
+ /* Point device to the temporary vector */
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), &tmp_vec);
+ }
+
/* Point device to the new vector */
- imsic_msi_update_msg(d, new_vec);
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
/* Update irq descriptors with the new vector */
- pd->chip_data = new_vec;
+ d->chip_data = new_vec;
- /* Update effective affinity of parent irq data */
- irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));
+ /* Update effective affinity */
+ irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));
/* Move state of the old vector to the new vector */
imsic_vector_move(old_vec, new_vec);
return IRQ_SET_MASK_OK_DONE;
}
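The two-step update described in the comment above is easier to follow with concrete values (illustrative): say the old vector is (cpu0, id 5) and the new one is (cpu1, id 9).

	/* step 1: MSI msg {addr: cpu0 doorbell, data: 9}  -- temporary
	 * step 2: MSI msg {addr: cpu1 doorbell, data: 9}  -- final
	 *
	 * A device that latches address and data non-atomically can only
	 * ever observe {cpu0,5}, {cpu0,9} or {cpu1,9}; __imsic_local_sync()
	 * on cpu0 checks both id 5 and id 9 for pending bits, so none of
	 * the intermediate states can lose an interrupt.
	 */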
+
+static void imsic_irq_force_complete_move(struct irq_data *d)
+{
+ struct imsic_vector *mvec, *vec = irq_data_get_irq_chip_data(d);
+ unsigned int cpu = smp_processor_id();
+
+ if (WARN_ON(!vec))
+ return;
+
+ /* Do nothing if there is no in-flight move */
+ mvec = imsic_vector_get_move(vec);
+ if (!mvec)
+ return;
+
+ /* Do nothing if the old IMSIC vector does not belong to current CPU */
+ if (mvec->cpu != cpu)
+ return;
+
+ /*
+ * The best we can do is force cleanup the old IMSIC vector.
+ *
+ * The challenges here are the same as for the x86 vector domain,
+ * so refer to the comments in the irq_force_complete_move()
+ * implementation in arch/x86/kernel/apic/vector.c.
+ */
+
+ /* Force cleanup in-flight move */
+ pr_info("IRQ fixup: irq %d move in progress, old vector cpu %d local_id %d\n",
+ d->irq, mvec->cpu, mvec->local_id);
+ imsic_vector_force_move_cleanup(vec);
+}
#endif
static struct irq_chip imsic_irq_base_chip = {
- .name = "IMSIC",
- .irq_mask = imsic_irq_mask,
- .irq_unmask = imsic_irq_unmask,
- .irq_retrigger = imsic_irq_retrigger,
- .irq_compose_msi_msg = imsic_irq_compose_msg,
- .flags = IRQCHIP_SKIP_SET_WAKE |
- IRQCHIP_MASK_ON_SUSPEND,
+ .name = "IMSIC",
+ .irq_mask = imsic_irq_mask,
+ .irq_unmask = imsic_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = imsic_irq_set_affinity,
+ .irq_force_complete_move = imsic_irq_force_complete_move,
+#endif
+ .irq_retrigger = imsic_irq_retrigger,
+ .irq_ack = imsic_irq_ack,
+ .irq_compose_msi_msg = imsic_irq_compose_msg,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};
static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -155,7 +236,7 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return -ENOSPC;
irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
- handle_simple_irq, NULL, NULL);
+ handle_edge_irq, NULL, NULL);
irq_set_noprobe(virq);
irq_set_affinity(virq, cpu_online_mask);
irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
@@ -172,22 +253,6 @@ static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token)
-{
- const struct msi_parent_ops *ops = domain->msi_parent_ops;
- u32 busmask = BIT(bus_token);
-
- if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
- return 0;
-
- /* Handle pure domain searches */
- if (bus_token == ops->bus_select_token)
- return 1;
-
- return !!(ops->bus_select_mask & busmask);
-}
-
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
struct irq_data *irqd, int ind)
@@ -204,107 +269,37 @@ static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
static const struct irq_domain_ops imsic_base_domain_ops = {
.alloc = imsic_irq_domain_alloc,
.free = imsic_irq_domain_free,
- .select = imsic_irq_domain_select,
+ .select = msi_lib_irq_domain_select,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
.debug_show = imsic_irq_debug_show,
#endif
};
-#ifdef CONFIG_RISCV_IMSIC_PCI
-
-static void imsic_pci_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void imsic_pci_unmask_irq(struct irq_data *d)
+static bool imsic_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
{
- irq_chip_unmask_parent(d);
- pci_msi_unmask_irq(d);
-}
-
-#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
-
-#else
-
-#define MATCH_PCI_MSI 0
-
-#endif
-
-static bool imsic_init_dev_msi_info(struct device *dev,
- struct irq_domain *domain,
- struct irq_domain *real_parent,
- struct msi_domain_info *info)
-{
- const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
-
- /* MSI parent domain specific settings */
- switch (real_parent->bus_token) {
- case DOMAIN_BUS_NEXUS:
- if (WARN_ON_ONCE(domain != real_parent))
- return false;
-#ifdef CONFIG_SMP
- info->chip->irq_set_affinity = imsic_irq_set_affinity;
-#endif
- break;
- default:
- WARN_ON_ONCE(1);
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
- }
- /* Is the target supported? */
switch (info->bus_token) {
-#ifdef CONFIG_RISCV_IMSIC_PCI
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
- info->chip->irq_mask = imsic_pci_mask_irq;
- info->chip->irq_unmask = imsic_pci_unmask_irq;
- break;
-#endif
- case DOMAIN_BUS_DEVICE_MSI:
- /*
- * Per-device MSI should never have any MSI feature bits
- * set. It's sole purpose is to create a dumb interrupt
- * chip which has a device specific irq_write_msi_msg()
- * callback.
- */
- if (WARN_ON_ONCE(info->flags))
- return false;
-
- /* Core managed MSI descriptors */
- info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
- MSI_FLAG_FREE_MSI_DESCS;
- break;
- case DOMAIN_BUS_WIRED_TO_MSI:
+ info->chip->flags |= IRQCHIP_MOVE_DEFERRED;
break;
default:
- WARN_ON_ONCE(1);
- return false;
+ break;
}
- /* Use hierarchial chip operations re-trigger */
- info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;
-
- /*
- * Mask out the domain specific MSI feature flags which are not
- * supported by the real parent.
- */
- info->flags &= pops->supported_flags;
-
- /* Enforce the required flags */
- info->flags |= pops->required_flags;
-
return true;
}
-#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
-
static const struct msi_parent_ops imsic_msi_parent_ops = {
.supported_flags = MSI_GENERIC_FLAGS_MASK |
MSI_FLAG_PCI_MSIX,
.required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
- MSI_FLAG_USE_DEF_CHIP_OPS,
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.init_dev_msi_info = imsic_init_dev_msi_info,
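The rework above removes the hand-rolled PCI mask/unmask wrappers: MSI_FLAG_PCI_MSI_MASK_PARENT makes the PCI/MSI core forward masking to the parent, and only PCI device domains get deferred moves. Condensed, the per-bus outcome (illustrative summary):

	/* DOMAIN_BUS_PCI_DEVICE_MSI / _MSIX:
	 *	chip->flags |= IRQCHIP_MOVE_DEFERRED;  // move on next irq
	 *	(PCI MSI updates are not atomic, see imsic_irq_set_affinity)
	 * other child buses (platform MSI, wired-to-MSI):
	 *	no IRQCHIP_MOVE_DEFERRED -> affinity may change in
	 *	process context on any CPU
	 */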
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index b97e6cd89ed7..bdf5cd2037f2 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -124,10 +124,11 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend,
}
}
-static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
{
- struct imsic_local_config *mlocal;
- struct imsic_vector *vec, *mvec;
+ struct imsic_local_config *tlocal, *mlocal;
+ struct imsic_vector *vec, *tvec, *mvec;
+ bool ret = true;
int i;
lockdep_assert_held(&lpriv->lock);
@@ -143,35 +144,97 @@ static void __imsic_local_sync(struct imsic_local_priv *lpriv)
__imsic_id_clear_enable(i);
/*
- * If the ID was being moved to a new ID on some other CPU
- * then we can get a MSI during the movement so check the
- * ID pending bit and re-trigger the new ID on other CPU
- * using MMIO write.
+ * Clear the previous vector pointer of the new vector only
+ * after the movement is complete on the old CPU.
*/
- mvec = READ_ONCE(vec->move);
- WRITE_ONCE(vec->move, NULL);
- if (mvec && mvec != vec) {
- if (__imsic_id_read_clear_pending(i)) {
+ mvec = READ_ONCE(vec->move_prev);
+ if (mvec) {
+ /*
+ * If the old vector has not been updated then
+ * try again in the next sync-up call.
+ */
+ if (READ_ONCE(mvec->move_next)) {
+ ret = false;
+ continue;
+ }
+
+ WRITE_ONCE(vec->move_prev, NULL);
+ }
+
+ /*
+ * If a vector was being moved to a new vector on some other
+ * CPU, we can get an MSI during the movement, so check the
+ * ID pending bit and re-trigger the new ID on the other CPU
+ * using an MMIO write.
+ */
+ mvec = READ_ONCE(vec->move_next);
+ if (mvec) {
+ /*
+ * Devices with non-atomic MSI update might see an
+ * intermediate state, so check both the old and the
+ * new ID for pending interrupts.
+ *
+ * For details, see imsic_irq_set_affinity().
+ */
+ tvec = vec->local_id == mvec->local_id ?
+ NULL : &lpriv->vectors[mvec->local_id];
+
+ if (tvec && !irq_can_move_in_process_context(irq_get_irq_data(vec->irq)) &&
+ __imsic_id_read_clear_pending(tvec->local_id)) {
+ /* Retrigger temporary vector if it was already in-use */
+ if (READ_ONCE(tvec->enable)) {
+ tlocal = per_cpu_ptr(imsic->global.local, tvec->cpu);
+ writel_relaxed(tvec->local_id, tlocal->msi_va);
+ }
+
+ mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
+ writel_relaxed(mvec->local_id, mlocal->msi_va);
+ }
+
+ if (__imsic_id_read_clear_pending(vec->local_id)) {
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
writel_relaxed(mvec->local_id, mlocal->msi_va);
}
- imsic_vector_free(&lpriv->vectors[i]);
+ WRITE_ONCE(vec->move_next, NULL);
+ imsic_vector_free(vec);
}
skip:
bitmap_clear(lpriv->dirty_bitmap, i, 1);
}
+
+ return ret;
}
-void imsic_local_sync_all(void)
+#ifdef CONFIG_SMP
+static void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+{
+ lockdep_assert_held(&lpriv->lock);
+
+ if (!timer_pending(&lpriv->timer)) {
+ lpriv->timer.expires = jiffies + 1;
+ add_timer_on(&lpriv->timer, smp_processor_id());
+ }
+}
+#else
+static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+{
+}
+#endif
+
+void imsic_local_sync_all(bool force_all)
{
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
unsigned long flags;
raw_spin_lock_irqsave(&lpriv->lock, flags);
- bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
- __imsic_local_sync(lpriv);
+
+ if (force_all)
+ bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+ if (!__imsic_local_sync(lpriv))
+ __imsic_local_timer_start(lpriv);
+
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}
@@ -190,12 +253,7 @@ void imsic_local_delivery(bool enable)
#ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer)
{
- struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&lpriv->lock, flags);
- __imsic_local_sync(lpriv);
- raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+ imsic_local_sync_all(false);
}
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
@@ -216,14 +274,11 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
*/
if (cpu_online(cpu)) {
if (cpu == smp_processor_id()) {
- __imsic_local_sync(lpriv);
- return;
+ if (__imsic_local_sync(lpriv))
+ return;
}
- if (!timer_pending(&lpriv->timer)) {
- lpriv->timer.expires = jiffies + 1;
- add_timer_on(&lpriv->timer, cpu);
- }
+ __imsic_local_timer_start(lpriv);
}
}
#else
@@ -278,8 +333,26 @@ void imsic_vector_unmask(struct imsic_vector *vec)
raw_spin_unlock(&lpriv->lock);
}
-static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
- bool new_enable, struct imsic_vector *new_move)
+void imsic_vector_force_move_cleanup(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *mvec;
+ unsigned long flags;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+
+ mvec = READ_ONCE(vec->move_prev);
+ WRITE_ONCE(vec->move_prev, NULL);
+ if (mvec)
+ imsic_vector_free(mvec);
+
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv,
+ struct imsic_vector *vec, bool is_old_vec,
+ bool new_enable, struct imsic_vector *move_vec)
{
unsigned long flags;
bool enabled;
@@ -289,7 +362,10 @@ static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsi
/* Update enable and move details */
enabled = READ_ONCE(vec->enable);
WRITE_ONCE(vec->enable, new_enable);
- WRITE_ONCE(vec->move, new_move);
+ if (is_old_vec)
+ WRITE_ONCE(vec->move_next, move_vec);
+ else
+ WRITE_ONCE(vec->move_prev, move_vec);
/* Mark the vector as dirty and synchronize */
bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
@@ -322,8 +398,8 @@ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_ve
* interrupt on the old vector while device was being moved
* to the new vector.
*/
- enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
- imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
+ enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec);
+ imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec);
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
@@ -368,7 +444,7 @@ struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int l
return &lpriv->vectors[local_id];
}
-struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
+struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask)
{
struct imsic_vector *vec = NULL;
struct imsic_local_priv *lpriv;
@@ -384,9 +460,10 @@ struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
vec = &lpriv->vectors[local_id];
- vec->hwirq = hwirq;
+ vec->irq = irq;
vec->enable = false;
- vec->move = NULL;
+ vec->move_next = NULL;
+ vec->move_prev = NULL;
return vec;
}
@@ -396,7 +473,7 @@ void imsic_vector_free(struct imsic_vector *vec)
unsigned long flags;
raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
- vec->hwirq = UINT_MAX;
+ vec->irq = 0;
irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}
@@ -455,7 +532,7 @@ static int __init imsic_local_init(void)
vec = &lpriv->vectors[i];
vec->cpu = cpu;
vec->local_id = i;
- vec->hwirq = UINT_MAX;
+ vec->irq = 0;
}
}
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
index 391e44280827..3202ffa4e849 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -20,10 +20,11 @@ struct imsic_vector {
unsigned int cpu;
unsigned int local_id;
/* Details saved by driver in the vector */
- unsigned int hwirq;
+ unsigned int irq;
/* Details accessed using local lock held */
bool enable;
- struct imsic_vector *move;
+ struct imsic_vector *move_next;
+ struct imsic_vector *move_prev;
};
struct imsic_local_priv {
@@ -74,7 +75,7 @@ static inline void __imsic_id_clear_enable(unsigned long id)
__imsic_eix_update(id, 1, false, false);
}
-void imsic_local_sync_all(void);
+void imsic_local_sync_all(bool force_all);
void imsic_local_delivery(bool enable);
void imsic_vector_mask(struct imsic_vector *vec);
@@ -87,14 +88,15 @@ static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
{
- return READ_ONCE(vec->move);
+ return READ_ONCE(vec->move_prev);
}
+void imsic_vector_force_move_cleanup(struct imsic_vector *vec);
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id);
-struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask);
+struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask);
void imsic_vector_free(struct imsic_vector *vector);
void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind);
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
new file mode 100644
index 000000000000..ee682e87eb8b
--- /dev/null
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SG2042 MSI Controller
+ *
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Chen Wang <unicorn_wang@outlook.com>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include "irq-msi-lib.h"
+
+#define SG2042_MAX_MSI_VECTOR 32
+
+struct sg2042_msi_chipdata {
+ void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR
+
+ phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET
+
+ u32 irq_first; // The first vector number used for MSIs
+ u32 num_irqs; // The number of vectors for MSIs
+
+ DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR);
+ struct mutex msi_map_lock; // lock for msi_map
+};
+
+static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req)
+{
+ int first;
+
+ guard(mutex)(&data->msi_map_lock);
+ first = bitmap_find_free_region(data->msi_map, data->num_irqs,
+ get_count_order(num_req));
+ return first >= 0 ? first : -ENOSPC;
+}
+
+static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req)
+{
+ guard(mutex)(&data->msi_map_lock);
+ bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req));
+}
+
+static void sg2042_msi_irq_ack(struct irq_data *d)
+{
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ int bit_off = d->hwirq;
+
+ writel(1 << bit_off, data->reg_clr);
+
+ irq_chip_ack_parent(d);
+}
+
+static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+ msg->address_hi = upper_32_bits(data->doorbell_addr);
+ msg->address_lo = lower_32_bits(data->doorbell_addr);
+ msg->data = 1 << d->hwirq;
+}
+
+static const struct irq_chip sg2042_msi_middle_irq_chip = {
+ .name = "SG2042 MSI",
+ .irq_ack = sg2042_msi_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg,
+};
+
+static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
+{
+ struct sg2042_msi_chipdata *data = domain->host_data;
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = data->irq_first + hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct sg2042_msi_chipdata *data = domain->host_data;
+ int hwirq, err, i;
+
+ hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs);
+ if (hwirq < 0)
+ return hwirq;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto err_hwirq;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &sg2042_msi_middle_irq_chip, data);
+ }
+
+ return 0;
+
+err_hwirq:
+ sg2042_msi_free_hwirq(data, hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, i);
+
+ return err;
+}
+
+static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops sg2042_msi_middle_domain_ops = {
+ .alloc = sg2042_msi_middle_domain_alloc,
+ .free = sg2042_msi_middle_domain_free,
+ .select = msi_lib_irq_domain_select,
+};
+
+#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+
+#define SG2042_MSI_FLAGS_SUPPORTED MSI_GENERIC_FLAGS_MASK
+
+static const struct msi_parent_ops sg2042_msi_parent_ops = {
+ .required_flags = SG2042_MSI_FLAGS_REQUIRED,
+ .supported_flags = SG2042_MSI_FLAGS_SUPPORTED,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .prefix = "SG2042-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
+ struct irq_domain *plic_domain, struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct irq_domain *middle_domain;
+
+ middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode,
+ &sg2042_msi_middle_domain_ops, data);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSI middle domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
+
+ middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ middle_domain->msi_parent_ops = &sg2042_msi_parent_ops;
+
+ return 0;
+}
+
+static int sg2042_msi_probe(struct platform_device *pdev)
+{
+ struct fwnode_reference_args args = { };
+ struct sg2042_msi_chipdata *data;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *plic_domain;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr");
+ if (IS_ERR(data->reg_clr)) {
+ dev_err(dev, "Failed to map clear register\n");
+ return PTR_ERR(data->reg_clr);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "doorbell");
+ if (!res) {
+ dev_err(dev, "Failed get resource from set\n");
+ return -EINVAL;
+ }
+ data->doorbell_addr = res->start;
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges",
+ "#interrupt-cells", 0, 0, &args);
+ if (ret) {
+ dev_err(dev, "Unable to parse MSI vec base\n");
+ return ret;
+ }
+ fwnode_handle_put(args.fwnode);
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges", NULL,
+ args.nargs + 1, 0, &args);
+ if (ret) {
+ dev_err(dev, "Unable to parse MSI vec number\n");
+ return ret;
+ }
+
+ plic_domain = irq_find_matching_fwnode(args.fwnode, DOMAIN_BUS_ANY);
+ fwnode_handle_put(args.fwnode);
+ if (!plic_domain) {
+ pr_err("Failed to find the PLIC domain\n");
+ return -ENXIO;
+ }
+
+ data->irq_first = (u32)args.args[0];
+ data->num_irqs = (u32)args.args[args.nargs - 1];
+
+ mutex_init(&data->msi_map_lock);
+
+ return sg2042_msi_init_domains(data, plic_domain, dev);
+}
+
+static const struct of_device_id sg2042_msi_of_match[] = {
+ { .compatible = "sophgo,sg2042-msi" },
+ { }
+};
+
+static struct platform_driver sg2042_msi_driver = {
+ .driver = {
+ .name = "sg2042-msi",
+ .of_match_table = sg2042_msi_of_match,
+ },
+ .probe = sg2042_msi_probe,
+};
+builtin_platform_driver(sg2042_msi_driver);
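For reference, what a composed doorbell message looks like for hwirq 3, assuming a doorbell at physical address 0x7030010300 (the address is hypothetical):

	/* msg.address_hi = 0x00000070;
	 * msg.address_lo = 0x30010300;
	 * msg.data       = BIT(3);	// one bit per vector, 32 vectors max
	 *
	 * The device posts msg.data to the doorbell (GP_INTR0_SET); the
	 * ack path then clears the source by writing the same bit to
	 * reg_clr (GP_INTR0_CLR) before acking the parent PLIC interrupt.
	 */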
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 0b4312152024..01b0d8321728 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -48,32 +48,41 @@ enum {
SUNXI_SRC_TYPE_EDGE_RISING,
};
-struct sunxi_sc_nmi_reg_offs {
- u32 ctrl;
- u32 pend;
- u32 enable;
+struct sunxi_sc_nmi_data {
+ struct {
+ u32 ctrl;
+ u32 pend;
+ u32 enable;
+ } reg_offs;
+ u32 enable_val;
};
-static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
- .ctrl = SUN6I_NMI_CTRL,
- .pend = SUN6I_NMI_PENDING,
- .enable = SUN6I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun6i_data __initconst = {
+ .reg_offs.ctrl = SUN6I_NMI_CTRL,
+ .reg_offs.pend = SUN6I_NMI_PENDING,
+ .reg_offs.enable = SUN6I_NMI_ENABLE,
};
-static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
- .ctrl = SUN7I_NMI_CTRL,
- .pend = SUN7I_NMI_PENDING,
- .enable = SUN7I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun7i_data __initconst = {
+ .reg_offs.ctrl = SUN7I_NMI_CTRL,
+ .reg_offs.pend = SUN7I_NMI_PENDING,
+ .reg_offs.enable = SUN7I_NMI_ENABLE,
};
-static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
- .ctrl = SUN9I_NMI_CTRL,
- .pend = SUN9I_NMI_PENDING,
- .enable = SUN9I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun9i_data __initconst = {
+ .reg_offs.ctrl = SUN9I_NMI_CTRL,
+ .reg_offs.pend = SUN9I_NMI_PENDING,
+ .reg_offs.enable = SUN9I_NMI_ENABLE,
};
-static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
- u32 val)
+static const struct sunxi_sc_nmi_data sun55i_a523_data __initconst = {
+ .reg_offs.ctrl = SUN9I_NMI_CTRL,
+ .reg_offs.pend = SUN9I_NMI_PENDING,
+ .reg_offs.enable = SUN9I_NMI_ENABLE,
+ .enable_val = BIT(31),
+};
+
+static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, u32 val)
{
irq_reg_writel(gc, val, off);
}
@@ -143,15 +152,13 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
}
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
- const struct sunxi_sc_nmi_reg_offs *reg_offs)
+ const struct sunxi_sc_nmi_data *data)
{
- struct irq_domain *domain;
+ unsigned int irq, clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct irq_chip_generic *gc;
- unsigned int irq;
- unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_domain *domain;
int ret;
-
domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("Could not register interrupt domain.\n");
@@ -186,27 +193,28 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED |
+ IRQCHIP_EOI_IF_HANDLED |
IRQCHIP_SKIP_SET_WAKE;
- gc->chip_types[0].regs.ack = reg_offs->pend;
- gc->chip_types[0].regs.mask = reg_offs->enable;
- gc->chip_types[0].regs.type = reg_offs->ctrl;
+ gc->chip_types[0].regs.ack = data->reg_offs.pend;
+ gc->chip_types[0].regs.mask = data->reg_offs.enable;
+ gc->chip_types[0].regs.type = data->reg_offs.ctrl;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[1].regs.ack = reg_offs->pend;
- gc->chip_types[1].regs.mask = reg_offs->enable;
- gc->chip_types[1].regs.type = reg_offs->ctrl;
+ gc->chip_types[1].regs.ack = data->reg_offs.pend;
+ gc->chip_types[1].regs.mask = data->reg_offs.enable;
+ gc->chip_types[1].regs.type = data->reg_offs.ctrl;
gc->chip_types[1].handler = handle_edge_irq;
/* Disable any active interrupts */
- sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
+ sunxi_sc_nmi_write(gc, data->reg_offs.enable, data->enable_val);
/* Clear any pending NMI interrupts */
- sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);
+ sunxi_sc_nmi_write(gc, data->reg_offs.pend, SUNXI_NMI_IRQ_BIT);
irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
@@ -221,20 +229,27 @@ fail_irqd_remove:
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun6i_data);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun7i_data);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
static int __init sun9i_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun9i_data);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
+
+static int __init sun55i_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun55i_a523_data);
+}
+IRQCHIP_DECLARE(sun55i_nmi, "allwinner,sun55i-a523-nmi", sun55i_nmi_irq_init);
diff --git a/drivers/leds/leds-st1202.c b/drivers/leds/leds-st1202.c
index b691c4886993..e894b3f9a0f4 100644
--- a/drivers/leds/leds-st1202.c
+++ b/drivers/leds/leds-st1202.c
@@ -261,8 +261,6 @@ static int st1202_dt_init(struct st1202_chip *chip)
int err, reg;
for_each_available_child_of_node_scoped(dev_of_node(dev), child) {
- struct led_init_data init_data = {};
-
err = of_property_read_u32(child, "reg", &reg);
if (err)
return dev_err_probe(dev, err, "Invalid register\n");
@@ -276,15 +274,6 @@ static int st1202_dt_init(struct st1202_chip *chip)
led->led_cdev.pattern_set = st1202_led_pattern_set;
led->led_cdev.pattern_clear = st1202_led_pattern_clear;
led->led_cdev.default_trigger = "pattern";
-
- init_data.fwnode = led->fwnode;
- init_data.devicename = "st1202";
- init_data.default_label = ":";
-
- err = devm_led_classdev_register_ext(dev, &led->led_cdev, &init_data);
- if (err < 0)
- return dev_err_probe(dev, err, "Failed to register LED class device\n");
-
led->led_cdev.brightness_set = st1202_brightness_set;
led->led_cdev.brightness_get = st1202_brightness_get;
}
@@ -368,6 +357,7 @@ static int st1202_probe(struct i2c_client *client)
return ret;
for (int i = 0; i < ST1202_MAX_LEDS; i++) {
+ struct led_init_data init_data = {};
led = &chip->leds[i];
led->chip = chip;
led->led_num = i;
@@ -384,6 +374,15 @@ static int st1202_probe(struct i2c_client *client)
if (ret < 0)
return dev_err_probe(&client->dev, ret,
"Failed to clear LED pattern\n");
+
+ init_data.fwnode = led->fwnode;
+ init_data.devicename = "st1202";
+ init_data.default_label = ":";
+
+ ret = devm_led_classdev_register_ext(&client->dev, &led->led_cdev, &init_data);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register LED class device\n");
}
return 0;
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index aad48c2540fc..a594bd5e2233 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -483,8 +483,8 @@ static int pattern_trig_activate(struct led_classdev *led_cdev)
data->led_cdev = led_cdev;
led_set_trigger_data(led_cdev, data);
timer_setup(&data->timer, pattern_trig_timer_function, 0);
- hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->hrtimer.function = pattern_trig_hrtimer_function;
+ hrtimer_setup(&data->hrtimer, pattern_trig_hrtimer_function, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
led_cdev->activated = true;
if (led_cdev->flags & LED_INIT_DEFAULT_TRIGGER) {
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index d3d26a2c9895..118beaf447aa 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -534,9 +534,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
return -EINVAL;
}
- hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- mbox->poll_hrt.function = txdone_hrtimer;
+ hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
spin_lock_init(&mbox->poll_hrt_lock);
}
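Several hunks in this series make the same mechanical conversion: the two-step hrtimer_init() followed by a manual .function assignment becomes a single hrtimer_setup() call that takes the callback up front, closing the window where a timer exists without a handler. The shape of the conversion, as a sketch of the kernel API rather than standalone code:

	static enum hrtimer_restart my_timer_fn(struct hrtimer *t);

	/* before: two steps, callback assigned after the timer exists */
	hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->timer.function = my_timer_fn;

	/* after: one call, exactly as used throughout these hunks */
	hrtimer_setup(&priv->timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);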
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 731467d4ed10..b690905ab89f 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -426,7 +426,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
if (!clone)
return NULL;
- bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf);
+ bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);
clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
clone->bi_private = bio;
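The one-character dm-flakey fix above repairs an aliasing bug: the clone's bvec table was initialized to point at the parent bio's inline vectors, so filling the clone would scribble over the parent. A self-contained illustration of the bug class, with hypothetical names standing in for the bio fields:

	#include <stdio.h>
	#include <string.h>

	struct buf {
		char inline_store[8];
		char *data;
	};

	static void buf_init(struct buf *b, char *store)
	{
		b->data = store;
	}

	int main(void)
	{
		struct buf parent, clone;

		buf_init(&parent, parent.inline_store);
		strcpy(parent.data, "parent");

		/* bug: like bio->bi_inline_vecs, the clone aliases the parent */
		buf_init(&clone, parent.inline_store);
		strcpy(clone.data, "clone");
		printf("after bug, parent reads: %s\n", parent.data);	/* "clone" */

		/* fix: like clone->bi_inline_vecs, the clone owns its storage */
		strcpy(parent.data, "parent");
		buf_init(&clone, clone.inline_store);
		strcpy(clone.data, "clone");
		printf("after fix, parent reads: %s\n", parent.data);	/* "parent" */

		return 0;
	}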
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index a70451d99ebc..bebaa40e0eb5 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -1346,9 +1346,8 @@ struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
if (pin == NULL)
return ERR_PTR(-ENOMEM);
pin->ops = pin_ops;
- hrtimer_init(&pin->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_set(&pin->work_pin_num_events, 0);
- pin->timer.function = cec_pin_timer;
+ hrtimer_setup(&pin->timer, cec_pin_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
init_waitqueue_head(&pin->kthread_waitq);
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 05254d8717db..0357624968f1 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1363,6 +1363,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
dev->vb_queue.ops = &rtl2832_sdr_vb2_ops;
dev->vb_queue.mem_ops = &vb2_vmalloc_memops;
dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ dev->vb_queue.lock = &dev->vb_queue_lock;
ret = vb2_queue_init(&dev->vb_queue);
if (ret) {
dev_err(&pdev->dev, "Could not initialize vb2 queue\n");
@@ -1421,7 +1422,6 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
/* Init video_device structure */
dev->vdev = rtl2832_sdr_template;
dev->vdev.queue = &dev->vb_queue;
- dev->vdev.queue->lock = &dev->vb_queue_lock;
video_set_drvdata(&dev->vdev, dev);
/* Register the v4l2_device structure */
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index a04a1d33fadb..b9f2c14d62b4 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -190,8 +190,7 @@ static int __cx88_ir_start(void *priv)
ir = core->ir;
if (ir->polling) {
- hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ir->timer.function = cx88_ir_work;
+ hrtimer_setup(&ir->timer, cx88_ir_work, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&ir->timer,
ktime_set(0, ir->polling * 1000000),
HRTIMER_MODE_REL);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
index d1320298a0f7..8479dc9c9a8f 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -269,8 +269,8 @@ static int wave5_vpu_probe(struct platform_device *pdev)
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0) {
dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
- hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- dev->hrtimer.function = &wave5_vpu_timer_callback;
+ hrtimer_setup(&dev->hrtimer, &wave5_vpu_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
dev->worker = kthread_run_worker(0, "vpu_irq_thread");
if (IS_ERR(dev->worker)) {
dev_err(&pdev->dev, "failed to create vpu irq worker\n");
diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
index fe368aebbc13..84533fdd61aa 100644
--- a/drivers/media/rc/pwm-ir-tx.c
+++ b/drivers/media/rc/pwm-ir-tx.c
@@ -172,8 +172,7 @@ static int pwm_ir_probe(struct platform_device *pdev)
rcdev->tx_ir = pwm_ir_tx_sleep;
} else {
init_completion(&pwm_ir->tx_done);
- hrtimer_init(&pwm_ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pwm_ir->timer.function = pwm_ir_timer;
+ hrtimer_setup(&pwm_ir->timer, pwm_ir_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rcdev->tx_ir = pwm_ir_tx_atomic;
}
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index e2a75a52563f..53f1888cc84f 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2226,26 +2226,6 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
goto err;
}
- if (of_node_name_eq(child, "nand")) {
- /* Warn about older DT blobs with no compatible property */
- if (!of_property_read_bool(child, "compatible")) {
- dev_warn(&pdev->dev,
- "Incompatible NAND node: missing compatible");
- ret = -EINVAL;
- goto err;
- }
- }
-
- if (of_node_name_eq(child, "onenand")) {
- /* Warn about older DT blobs with no compatible property */
- if (!of_property_read_bool(child, "compatible")) {
- dev_warn(&pdev->dev,
- "Incompatible OneNAND node: missing compatible");
- ret = -EINVAL;
- goto err;
- }
- }
-
if (of_match_node(omap_nand_ids, child)) {
/* NAND specific setup */
val = 8;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7e79da9684ed..185c08eab4ca 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2834,10 +2834,10 @@ struct rep_manu_reply{
u8 sas_format:1;
u8 reserved1:7;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
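__nonstring annotates fixed-width identity fields that are space-padded rather than NUL-terminated, so newer GCC stops warning when they are filled to capacity. A standalone sketch of the attribute as used above; the fallback macro guard is an assumption for building outside the kernel:

	#include <stdio.h>
	#include <string.h>

	#ifndef __nonstring
	#define __nonstring __attribute__((nonstring))	/* gcc 8+ */
	#endif

	struct expander_id {
		char vendor_id[8] __nonstring;	/* space-padded, no terminator */
	};

	int main(void)
	{
		struct expander_id id;

		/* Filling all 8 bytes drops the NUL; __nonstring tells the
		 * compiler this is intentional (no -Wstringop-truncation). */
		strncpy(id.vendor_id, "LINUXSAS", sizeof(id.vendor_id));
		printf("%.8s\n", id.vendor_id);	/* print with explicit width */
		return 0;
	}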
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 67d9391f1855..7575fee96cc6 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -3,7 +3,7 @@
config INTEL_MEI
tristate "Intel Management Engine Interface"
depends on X86 && PCI
- default GENERIC_CPU || MCORE2 || MATOM || X86_GENERIC
+ default X86_64 || MATOM
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for system containing Intel chipsets.
diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c
index f0b1fc87490e..26166357b255 100644
--- a/drivers/misc/vcpu_stall_detector.c
+++ b/drivers/misc/vcpu_stall_detector.c
@@ -111,8 +111,7 @@ static int start_stall_detector_cpu(unsigned int cpu)
ping_timeout_ms = vcpu_stall_config.stall_timeout_sec *
MSEC_PER_SEC / 2;
- hrtimer_init(vcpu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vcpu_hrtimer->function = vcpu_stall_detect_timer_fn;
+ hrtimer_setup(vcpu_hrtimer, vcpu_stall_detect_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu_stall_detector->is_initialized = true;
hrtimer_start(vcpu_hrtimer, ms_to_ktime(ping_timeout_ms),
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fc360902729d..24fffc702a94 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2499,8 +2499,10 @@ static int atmci_probe(struct platform_device *pdev)
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
ret = atmci_configure_dma(host);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ clk_disable_unprepare(host->mck);
goto err_dma_probe_defer;
+ }
if (ret == 0) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 3cbda98d08d2..31f40c04afda 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1875,8 +1875,7 @@ static void dw_mci_init_fault(struct dw_mci *host)
{
host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
- hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- host->fault_timer.function = dw_mci_fault_timer;
+ hrtimer_setup(&host->fault_timer, dw_mci_fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
#else
static void dw_mci_init_fault(struct dw_mci *host)
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 0ef4d578ade8..48cdcba0f39c 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -503,8 +503,15 @@ static int sdhci_brcmstb_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int ret;
clk_disable_unprepare(priv->base_clk);
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
return sdhci_pltfm_suspend(dev);
}
@@ -529,6 +536,9 @@ static int sdhci_brcmstb_resume(struct device *dev)
ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
}
+ if (host->mmc->caps2 & MMC_CAP2_CQE)
+ ret = cqhci_resume(host->mmc);
+
return ret;
}
#endif
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 327b6ecdc77e..d1b095af253b 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1242,10 +1242,28 @@ static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *sla
slave->dev->flags & IFF_MULTICAST;
}
+/**
+ * slave_set_ns_maddrs - add/del all NS mac addresses for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @add: add or remove all the NS mac addresses
+ *
+ * This function tries to add or delete all the NS mac addresses on the slave.
+ *
+ * Note: the IPv6 NS target address is the unicast address in the Neighbor
+ * Solicitation (NS) message. The destination address of an NS message should
+ * be the solicited-node multicast address of the target, and the destination
+ * MAC of the NS message is derived from that multicast address.
+ *
+ * This function is called when
+ * * arp_validate changes
+ * * a slave is enslaved or released
+ */
static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
{
struct in6_addr *targets = bond->params.ns_targets;
char slot_maddr[MAX_ADDR_LEN];
+ struct in6_addr mcaddr;
int i;
if (!slave_can_set_ns_maddr(bond, slave))
@@ -1255,7 +1273,8 @@ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool
if (ipv6_addr_any(&targets[i]))
break;
- if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) {
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) {
if (add)
dev_mc_add(slave->dev, slot_maddr);
else
@@ -1278,23 +1297,43 @@ void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
slave_set_ns_maddrs(bond, slave, false);
}
+/**
+ * slave_set_ns_maddr - set new NS mac address for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @target: the new IPv6 target
+ * @slot: the old IPv6 target in the slot
+ *
+ * This function tries to replace the old mac address with the new one on the slave.
+ *
+ * Note: the target/slot IPv6 address is the unicast address in the Neighbor
+ * Solicitation (NS) message. The destination address of an NS message should
+ * be the solicited-node multicast address of the target, and the destination
+ * MAC of the NS message is derived from that multicast address.
+ *
+ * This function is called when
+ * * an IPv6 NS target is added or removed
+ */
static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
struct in6_addr *target, struct in6_addr *slot)
{
- char target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN];
+ char mac_addr[MAX_ADDR_LEN];
+ struct in6_addr mcast_addr;
if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
return;
- /* remove the previous maddr from slave */
+ /* remove the previous mac addr from slave */
+ addrconf_addr_solict_mult(slot, &mcast_addr);
if (!ipv6_addr_any(slot) &&
- !ndisc_mc_map(slot, slot_maddr, slave->dev, 0))
- dev_mc_del(slave->dev, slot_maddr);
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_del(slave->dev, mac_addr);
- /* add new maddr on slave if target is set */
+ /* add new mac addr on slave if target is set */
+ addrconf_addr_solict_mult(target, &mcast_addr);
if (!ipv6_addr_any(target) &&
- !ndisc_mc_map(target, target_maddr, slave->dev, 0))
- dev_mc_add(slave->dev, target_maddr);
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_add(slave->dev, mac_addr);
}
static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
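The fix above stops feeding the unicast NS target straight to ndisc_mc_map(): per RFC 4291, an NS is sent to the target's solicited-node multicast address (ff02::1:ff00:0/104 plus the target's low 24 bits), and the multicast MAC comes from that group address. A runnable userspace sketch of the derivation that addrconf_addr_solict_mult() performs:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* solicited-node address = ff02::1:ff00:0/104 | low 24 bits of target */
	static void solicited_node(const uint8_t target[16], uint8_t snma[16])
	{
		memset(snma, 0, 16);
		snma[0] = 0xff;
		snma[1] = 0x02;
		snma[11] = 0x01;
		snma[12] = 0xff;
		memcpy(&snma[13], &target[13], 3);
	}

	int main(void)
	{
		uint8_t t[16] = { 0xfe, 0x80, [15] = 0x01 };	/* fe80::1 */
		uint8_t m[16];

		solicited_node(t, m);
		/* IPv6 multicast MAC = 33:33 + low 32 bits of the group */
		printf("33:33:%02x:%02x:%02x:%02x\n", m[12], m[13], m[14], m[15]);
		return 0;	/* prints 33:33:ff:00:00:01 */
	}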
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index ac1a860986df..b080740bcb10 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -2260,14 +2260,19 @@ static int __maybe_unused flexcan_suspend(struct device *device)
flexcan_chip_interrupts_disable(dev);
+ err = flexcan_transceiver_disable(priv);
+ if (err)
+ return err;
+
err = pinctrl_pm_select_sleep_state(device);
if (err)
return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
+
+ priv->can.state = CAN_STATE_SLEEPING;
}
- priv->can.state = CAN_STATE_SLEEPING;
return 0;
}
@@ -2278,7 +2283,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
@@ -2292,12 +2296,20 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
- err = flexcan_chip_start(dev);
+ err = flexcan_transceiver_enable(priv);
if (err)
return err;
+ err = flexcan_chip_start(dev);
+ if (err) {
+ flexcan_transceiver_disable(priv);
+ return err;
+ }
+
flexcan_chip_interrupts_enable(dev);
}
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
return 0;
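The reordered flexcan resume path follows the usual unwind discipline: enable the transceiver first, and if chip start then fails, disable it again before returning so a failed resume leaves nothing half-powered. The shape of it, with stand-in functions:

	#include <stdio.h>

	static int transceiver_enable(void) { return 0; }
	static void transceiver_disable(void) { }
	static int chip_start(void) { return -1; }	/* simulate failure */

	static int resume(void)
	{
		int err = transceiver_enable();
		if (err)
			return err;

		err = chip_start();
		if (err) {
			transceiver_disable();	/* undo in reverse order */
			return err;
		}
		return 0;
	}

	int main(void)
	{
		printf("resume: %d\n", resume());
		return 0;
	}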
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index d025d4163fd1..884a6352c42b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -2420,12 +2420,11 @@ int m_can_class_register(struct m_can_classdev *cdev)
if (!cdev->net->irq) {
dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- cdev->hrtimer.function = &hrtimer_callback;
+ hrtimer_setup(&cdev->hrtimer, &hrtimer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
} else {
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdev->hrtimer.function = m_can_coalescing_timer;
+ hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = m_can_dev_setup(cdev);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index df1a5d0b37b2..aa3df0d05b85 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -787,22 +787,14 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
}
static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
- u32 ch)
+ u32 ch, u32 rule_entry)
{
- u32 cfg;
- int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ u32 rule_entry_index = rule_entry % 16;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (ch == 0) {
- start = 0; /* Channel 0 always starts from 0th rule */
- } else {
- /* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
- start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
- }
-
/* Enable write access to entry */
- page = RCANFD_GAFL_PAGENUM(start);
+ page = RCANFD_GAFL_PAGENUM(rule_entry);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
(RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
@@ -818,13 +810,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
offset = RCANFD_C_GAFL_OFFSET;
/* Accept all IDs */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0);
/* IDE or RTR is not considered for matching */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0);
/* Any data length accepted */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index),
RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
@@ -1851,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
unsigned long channels_mask = 0;
int err, ch_irq, g_irq;
int g_err_irq, g_recc_irq;
+ u32 rule_entry = 0;
bool fdmode = true; /* CAN FD only mode - default */
char name[9] = "channelX";
int i;
@@ -2023,7 +2016,8 @@ static int rcar_canfd_probe(struct platform_device *pdev)
rcar_canfd_configure_tx(gpriv, ch);
/* Configure receive rules */
- rcar_canfd_configure_afl_rules(gpriv, ch);
+ rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
+ rule_entry += RCANFD_CHANNEL_NUMRULES;
}
/* Configure common interrupts */
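Rule entries now come from a global counter that advances by RCANFD_CHANNEL_NUMRULES per enabled channel, instead of being re-derived from channel 0's configured count, which broke with sparse channel masks. With 16 rules per AFL page (the `% 16` in the hunk), entry-to-register addressing is plain arithmetic; a sketch with an assumed per-channel rule count:

	#include <stdio.h>

	#define CHANNEL_NUMRULES 2	/* stand-in for RCANFD_CHANNEL_NUMRULES */
	#define RULES_PER_PAGE	16	/* matches the % 16 in the hunk */

	int main(void)
	{
		unsigned int enabled_channels[] = { 1, 4 };	/* sparse enable mask */
		unsigned int rule_entry = 0;

		for (unsigned int i = 0; i < 2; i++) {
			for (unsigned int r = 0; r < CHANNEL_NUMRULES; r++) {
				unsigned int e = rule_entry + r;

				/* page split presumably mirrors RCANFD_GAFL_PAGENUM() */
				printf("ch%u rule%u -> page %u, index %u\n",
				       enabled_channels[i], r,
				       e / RULES_PER_PAGE, e % RULES_PER_PAGE);
			}
			rule_entry += CHANNEL_NUMRULES;	/* as in the probe loop */
		}
		return 0;
	}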
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index 7209a831f0f2..c34f2067a989 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -541,11 +541,11 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
}
priv->rx_ring_num = i;
- hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
+ hrtimer_setup(&priv->rx_irq_timer, mcp251xfd_rx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
+ hrtimer_setup(&priv->tx_irq_timer, mcp251xfd_tx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return 0;
}
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 39a63b7313a4..07406daf7c88 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -186,7 +186,7 @@ union ucan_ctl_payload {
*/
struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
- u8 raw[128];
+ u8 fw_str[128];
} __packed;
enum {
@@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up,
UCAN_USB_CTL_PIPE_TIMEOUT);
}
-static int ucan_device_request_in(struct ucan_priv *up,
- u8 cmd, u16 subcmd, u16 datalen)
+static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size)
{
- return usb_control_msg(up->udev,
- usb_rcvctrlpipe(up->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- subcmd,
- 0,
- up->ctl_msg_buffer,
- datalen,
- UCAN_USB_CTL_PIPE_TIMEOUT);
+ int ret;
+
+ ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0),
+ UCAN_DEVICE_GET_FW_STRING,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, 0, fw_str, size - 1,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+ if (ret > 0)
+ fw_str[ret] = '\0';
+ else
+ strscpy(fw_str, "unknown", size);
}
/* Parse the device information structure reported by the device and
@@ -1314,7 +1316,6 @@ static int ucan_probe(struct usb_interface *intf,
u8 in_ep_addr;
u8 out_ep_addr;
union ucan_ctl_payload *ctl_msg_buffer;
- char firmware_str[sizeof(union ucan_ctl_payload) + 1];
udev = interface_to_usbdev(intf);
@@ -1527,17 +1528,6 @@ static int ucan_probe(struct usb_interface *intf,
*/
ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
- /* just print some device information - if available */
- ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
- sizeof(union ucan_ctl_payload));
- if (ret > 0) {
- /* copy string while ensuring zero termination */
- strscpy(firmware_str, up->ctl_msg_buffer->raw,
- sizeof(union ucan_ctl_payload) + 1);
- } else {
- strcpy(firmware_str, "unknown");
- }
-
/* device is compatible, reset it */
ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
if (ret < 0)
@@ -1555,7 +1545,10 @@ static int ucan_probe(struct usb_interface *intf,
/* initialisation complete, log device info */
netdev_info(up->netdev, "registered device\n");
- netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+ ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str,
+ sizeof(up->ctl_msg_buffer->fw_str));
+ netdev_info(up->netdev, "firmware string: %s\n",
+ up->ctl_msg_buffer->fw_str);
/* success */
return 0;
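The new ucan helper reserves one byte when reading the firmware string (size - 1 passed to the transfer) and NUL-terminates at the returned length, falling back to "unknown" on error. A self-contained model of the pattern, with a stand-in for usb_control_msg():

	#include <stdio.h>
	#include <string.h>

	static int fake_transfer(char *buf, size_t len)
	{
		const char fw[] = "ucan-1.2.3";
		size_t n = sizeof(fw) - 1 < len ? sizeof(fw) - 1 : len;

		memcpy(buf, fw, n);
		return (int)n;	/* bytes transferred, like usb_control_msg() */
	}

	static void get_fw_str(char *fw_str, size_t size)
	{
		int ret = fake_transfer(fw_str, size - 1);	/* keep room for NUL */

		if (ret > 0)
			fw_str[ret] = '\0';
		else
			snprintf(fw_str, size, "unknown");
	}

	int main(void)
	{
		char s[128];

		get_fw_str(s, sizeof(s));
		printf("firmware string: %s\n", s);
		return 0;
	}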
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 68d1e891752b..5db96ca52505 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2208,13 +2208,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return err;
}
-static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
- const unsigned char *addr, u16 vid,
- u8 state)
+static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid,
+ u16 *fid, struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_atu_entry entry;
struct mv88e6xxx_vtu_entry vlan;
- u16 fid;
int err;
/* Ports have two private address databases: one for when the port is
@@ -2225,7 +2223,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
* VLAN ID into the port's database used for VLAN-unaware bridging.
*/
if (vid == 0) {
- fid = MV88E6XXX_FID_BRIDGED;
+ *fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -2235,14 +2233,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid)
return -EOPNOTSUPP;
- fid = vlan.fid;
+ *fid = vlan.fid;
}
- entry.state = 0;
- ether_addr_copy(entry.mac, addr);
- eth_addr_dec(entry.mac);
+ entry->state = 0;
+ ether_addr_copy(entry->mac, addr);
+ eth_addr_dec(entry->mac);
+
+ return mv88e6xxx_g1_atu_getnext(chip, *fid, entry);
+}
+
+static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
- err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
+ if (err)
+ return false;
+
+ return entry.state && ether_addr_equal(entry.mac, addr);
+}
+
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
+
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
if (err)
return err;
@@ -2846,6 +2869,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, addr, vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -6614,6 +6644,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
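The new mv88e6xxx_port_db_find() exists because the ATU load op succeeds silently even when the table is full; reading the entry back is the only way to confirm it landed, hence -ENOSPC when it does not. The read-back relies on "get next" semantics: the walk must start one MAC address below the target, which is what eth_addr_dec() provides. A standalone model of that decrement:

	#include <stdio.h>
	#include <stdint.h>

	/* Subtract 1 from a 6-byte MAC, borrowing across bytes. */
	static void eth_addr_dec(uint8_t mac[6])
	{
		for (int i = 5; i >= 0; i--)
			if (mac[i]--)	/* stop once a byte didn't wrap */
				break;
	}

	int main(void)
	{
		uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x01, 0x00 };

		eth_addr_dec(mac);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;	/* prints 02:00:00:00:00:ff */
	}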
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7b8b5b39c7bb..55f553debd3b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2038,6 +2038,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct skb_shared_info *sinfo;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -2164,6 +2165,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
false);
if (!frag_len)
goto oom_next_rx;
+
}
xdp_active = true;
}
@@ -2173,6 +2175,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;
goto next_rx;
}
+ if (xdp_buff_has_frags(&xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ agg_bufs = sinfo->nr_frags;
+ } else {
+ agg_bufs = 0;
+ }
}
if (len <= bp->rx_copybreak) {
@@ -2210,7 +2218,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr->page_pool, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -15375,6 +15384,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
struct bnxt_cp_ring_info *cpr;
u64 *sw;
+ if (!bp->bnapi)
+ return;
+
cpr = &bp->bnapi[i]->cp_ring;
sw = cpr->stats.sw_stats;
@@ -15398,6 +15410,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
struct bnxt_napi *bnapi;
u64 *sw;
+ if (!bp->tx_ring)
+ return;
+
bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
sw = bnapi->cp_ring.stats.sw_stats;
@@ -15439,6 +15454,9 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
struct bnxt_ring_struct *ring;
int rc;
+ if (!bp->rx_ring)
+ return -ENETDOWN;
+
rxr = &bp->rx_ring[idx];
clone = qmem;
memcpy(clone, rxr, sizeof(*rxr));
@@ -15521,6 +15539,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
struct bnxt_ring_struct *ring;
bnxt_free_one_rx_ring_skbs(bp, rxr);
+ bnxt_free_one_tpa_info(bp, rxr);
xdp_rxq_info_unreg(&rxr->xdp_rxq);
@@ -15632,7 +15651,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
cpr = &rxr->bnapi->cp_ring;
cpr->sw_stats->rx.rx_resets++;
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i <= bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
@@ -15660,7 +15679,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
struct bnxt_vnic_info *vnic;
int i;
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i <= bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
vnic->mru = 0;
bnxt_hwrm_vnic_update(bp, vnic,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index e6c64e4bd66c..299822cacca4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -460,23 +460,16 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1)
+ struct page_pool *pool, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!skb)
return NULL;
- skb_checksum_none_assert(skb);
- if (RX_CMP_L4_CS_OK(rxcmp1)) {
- if (bp->dev->features & NETIF_F_RXCSUM) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = RX_CMP_ENCAP(rxcmp1);
- }
- }
+
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
+ BNXT_RX_PAGE_SIZE * num_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0122782400b8..220285e190fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
u8 num_frags, struct page_pool *pool,
- struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1);
+ struct xdp_buff *xdp);
#endif
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 991e3839858b..2b4bb74f21bf 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1833,9 +1833,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 44af1d13d931..67275aa4f65b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -416,8 +416,7 @@ static int ec_bhf_open(struct net_device *net_dev)
netif_start_queue(net_dev);
- hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_setup(&priv->hrtimer, ec_bhf_timer_fun, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7f6b57432071..fe4e7f99b6a3 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -739,8 +739,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
- fep->perout_timer.function = fec_ptp_pps_perout_handler;
+ hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index a376d4bdf281..18376bcc718a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -934,8 +934,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->chan = arg.args[1] * RX_DESC_NUM;
priv->group = arg.args[2];
- hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
/* BQL will try to keep the TX queue as short as possible, but it can't
* be faster than tx_coalesce_usecs, so we need a fast timeout here,
* but also long enough to gather up enough frames to ensure we don't
@@ -944,7 +942,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- priv->tx_coalesce_timer.function = tx_done;
+ hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->map = syscon_node_to_regmap(arg.np);
of_node_put(arg.np);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0676fc547b6f..f9ba79c1165b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
(*stragglers)--;
}
/* atomic write is safer than writing bit by bit directly */
- for (i = 0; i < stride; i++) {
- cpumask_set_cpu(*cpu, mask);
- *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
- nr_cpu_ids, false);
+ for_each_online_cpu_wrap(i, *cpu) {
+ if (!stride--) {
+ /* For the next queue, start from the first CPU
+ * left unused by this queue
+ */
+ *cpu = i;
+ break;
+ }
+ cpumask_set_cpu(i, mask);
}
+
/* set queue affinity mask */
cpumask_copy(queue->affinity_mask, mask);
rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
- unsigned int num_cpu, cpu;
+ unsigned int num_cpu, cpu = 0;
bool is_rx_queue;
int rc = 0;
@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
stride = max_t(int, num_cpu / total_queues, 1);
/* number of leftover cpu's */
stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
- /* next available cpu to assign irq to */
- cpu = cpumask_next(-1, cpu_online_mask);
for (i = 0; i < total_queues; i++) {
is_rx_queue = false;
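Replacing the hand-rolled cpumask_next_wrap() loop with for_each_online_cpu_wrap() makes the striping immune to holes in the online mask: each queue claims `stride` online CPUs starting where the previous queue stopped, wrapping at the end. A userspace model of the loop's bookkeeping, with an array standing in for cpu_online_mask:

	#include <stdio.h>

	#define NCPU 8

	int main(void)
	{
		int online[NCPU] = { 1, 1, 0, 1, 1, 0, 1, 1 };	/* CPUs 2,5 offline */
		int cpu = 0, stride = 2;

		for (int q = 0; q < 3; q++) {
			int left = stride;

			printf("queue %d:", q);
			for (int n = 0, i = cpu; n < NCPU; n++, i = (i + 1) % NCPU) {
				if (!online[i])
					continue;
				if (!left--) {	/* next queue starts here */
					cpu = i;
					break;
				}
				printf(" cpu%d", i);
			}
			printf("\n");
		}
		return 0;
	}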
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 7cee365cc7d1..405ddd17de1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -511,7 +511,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index d649c197cf67..ed21d7f55ac1 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -49,9 +49,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (vlan_ops->dis_rx_filtering(uplink_vsi))
goto err_vlan_filtering;
- if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_uplink;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
@@ -63,8 +60,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
err_up:
ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
err_vlan_filtering:
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
@@ -275,7 +270,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
ICE_FLTR_TX);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 1ccb572ce285..22371011c249 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -1001,6 +1001,28 @@ static void ice_lag_link(struct ice_lag *lag)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required for an uplink without switchdev mode configured.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_lag_unlink - handle unlink event
* @lag: LAG info struct
*/
@@ -1021,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag)
ice_lag_move_vf_nodes(lag, act_port, pri_port);
lag->primary = false;
lag->active_port = ICE_LAG_INVALID_PORT;
+
+ /* Configure the primary's eswitch back to normal operation. */
+ ice_lag_config_eswitch(lag, lag->netdev);
} else {
struct ice_lag *primary_lag;
@@ -1419,6 +1444,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
ice_lag_move_vf_nodes(lag, prim_port,
event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
return;
}
@@ -1428,6 +1454,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
/* new active port */
ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
} else {
/* port not set as currently active (e.g. new active port
* has already claimed the nodes and filters
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 38a1c8372180..d0faa087793d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3937,24 +3937,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
}
/**
- * ice_vsi_ctx_set_allow_override - allow destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
- * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
* ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
* @vsi: pointer to VSI structure
* @set: set or unset the bit
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index eabb35834a24..b4c9cb28a016 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -105,10 +105,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c3a0fb97c5ee..e13bd5a6cb6c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5065,16 +5065,16 @@ static int ice_init_devlink(struct ice_pf *pf)
return err;
ice_devlink_init_regions(pf);
- ice_health_init(pf);
ice_devlink_register(pf);
+ ice_health_init(pf);
return 0;
}
static void ice_deinit_devlink(struct ice_pf *pf)
{
- ice_devlink_unregister(pf);
ice_health_deinit(pf);
+ ice_devlink_unregister(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9c9ea4c1b93b..380ba1e8b3b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2424,7 +2424,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
+ if ((ice_is_switchdev_running(vsi->back) ||
+ ice_lag_is_switchdev_running(vsi->back)) &&
+ vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 84307bb7313e..733820a0c350 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7090,8 +7090,8 @@ static int igc_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->reset_task, igc_reset_task);
INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
- hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+ hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index dd76c1b7ed3a..3c7b43712d25 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6985,9 +6985,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
for (thread = 0; thread < priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->timer_scheduled = false;
port_pcpu->dev = dev;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index bcc96eed2481..66749b3649c1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -545,8 +545,7 @@ static int ptp_probe(struct pci_dev *pdev,
spin_lock_init(&ptp->ptp_lock);
if (cn10k_ptp_errata(ptp)) {
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
- hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ptp->hrtimer.function = ptp_reset_thresh;
+ hrtimer_setup(&ptp->hrtimer, ptp_reset_thresh, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
} else {
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 98d4306929f3..a2cf3e79693d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -46,6 +46,9 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 5d128c5b4529..0f5d7ea8956f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -48,15 +48,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
struct list_head *iter;
netdev_for_each_lower_dev(dev, lower, iter) {
- struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
-
if (!mlx5e_eswitch_rep(lower))
continue;
- priv = netdev_priv(lower);
- mdev = priv->mdev;
- if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
+ if (mlx5_esw_bridge_dev_same_esw(lower, esw))
return lower;
}
@@ -125,7 +120,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *
priv = netdev_priv(rep);
mdev = priv->mdev;
if (netif_is_lag_master(dev))
- return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
+ return mlx5_lag_is_master(mdev);
return true;
}
@@ -455,6 +450,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
if (!rep)
return NOTIFY_DONE;
+ if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
+ return NOTIFY_DONE;
+
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = container_of(info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a814b63ed97e..8fcaee381b0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5132,11 +5132,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mode, setting;
- int err;
- err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
- if (err)
- return err;
+ if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting))
+ return -EOPNOTSUPP;
mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
mode,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 2b229b6226c6..dfb079e59d85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -871,8 +871,8 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
+ struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 1477db7f5307..2691d88cdee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -175,7 +175,7 @@ unlock:
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq);
int cpu;
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index cea5aa314f6c..ed2ba272946b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -951,7 +951,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
{
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
@@ -1038,7 +1038,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
}
if (do_bond && !__mlx5_lag_is_active(ldev)) {
- bool shared_fdb = mlx5_shared_fdb_supported(ldev);
+ bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev);
roce_lag = mlx5_lag_is_roce_lag(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 01cf72366947..c2f256bb2bc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -92,6 +92,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index ffac0bd6c895..1770297a112e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -83,7 +83,8 @@ static int enable_mpesw(struct mlx5_lag *ldev)
if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
!MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
- !mlx5_lag_check_prereq(ldev))
+ !mlx5_lag_check_prereq(ldev) ||
+ !mlx5_lag_shared_fdb_supported(ldev))
return -EOPNOTSUPP;
err = mlx5_mpesw_metadata_set(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index a80ecb672f33..711d14dea248 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -196,6 +196,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
}
+ if (!ns) {
+ mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = chains->group_num;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 0881e961d8b1..586688da9940 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -10,12 +10,15 @@
struct mlx5_irq;
struct cpu_rmap;
+struct mlx5_irq_pool;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
@@ -38,7 +41,6 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
int mlx5_irq_get_irq(const struct mlx5_irq *irq);
-struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index d9362eabc6a1..2c5f850c31f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -378,6 +378,11 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
return irq->map.index;
}
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq)
+{
+ return irq->pool;
+}
+
/* irq_pool API */
/* requesting an irq from a given pool according to given index */
@@ -405,18 +410,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab
return irq_table->sf_ctrl_pool;
}
-static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
+static struct mlx5_irq_pool *
+sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table)
{
return irq_table->sf_comp_pool;
}
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = NULL;
if (mlx5_core_is_sf(dev))
- pool = sf_irq_pool_get(irq_table);
+ pool = sf_comp_irq_pool_get(irq_table);
/* In some configs, there won't be a pool of SFs IRQs. Hence, returning
* the PF IRQs pool in case the SF pool doesn't exist.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
index c4d377f8df30..cc064425fe16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -28,7 +28,6 @@ struct mlx5_irq_pool {
struct mlx5_core_dev *dev;
};
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
{
return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
@@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
int mlx5_irq_get_locked(struct mlx5_irq *irq);
int mlx5_irq_read_locked(struct mlx5_irq *irq);
int mlx5_irq_put(struct mlx5_irq *irq);
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq);
#endif /* __PCI_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index f9f569131dde..47f7ed141553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -24,8 +24,8 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ u32 priority;
u8 num_of_at;
- u16 priority;
u8 size_log;
atomic_t num_of_rules;
struct list_head *rules;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 5f409dc30aca..3d5afc832fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -210,6 +210,10 @@ struct mlx5dr_ste_ctx {
void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
u8 *scnd_d_action, u32 reformat_id,
int size);
+ void (*set_insert_hdr)(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+ void (*set_remove_hdr)(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
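Promoting set_insert_hdr/set_remove_hdr into the ops table is the standard function-pointer dispatch move: common action-building code calls through the context, and v3 (whose hardware counts in 2-byte words, per the hunk that follows) overrides just these two entries while v1 and v2 keep the shared implementation. A minimal model of the dispatch:

	#include <stdio.h>

	struct ste_ctx {
		void (*set_insert_hdr)(int size);
	};

	static void v1_set_insert_hdr(int size)
	{
		printf("v1 insert, %d bytes\n", size);
	}

	static void v3_set_insert_hdr(int size)
	{
		/* v3 hardware wants the size in 2-byte words */
		printf("v3 insert, %d words\n", size / 2);
	}

	static const struct ste_ctx ste_v1 = { .set_insert_hdr = v1_set_insert_hdr };
	static const struct ste_ctx ste_v3 = { .set_insert_hdr = v3_set_insert_hdr };

	int main(void)
	{
		const struct ste_ctx *ctx = &ste_v3;	/* chosen per STE version */

		ctx->set_insert_hdr(8);
		return 0;
	}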
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 7f83d77c43ef..6447efbae00d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -266,10 +266,10 @@ void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -286,9 +286,9 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -584,11 +584,11 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
@@ -597,10 +597,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -792,11 +792,11 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -808,10 +808,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
allow_modify_hdr = true;
allow_ctr = true;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -2200,6 +2200,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_pop_vlan = &dr_ste_v1_set_pop_vlan,
.set_rx_decap = &dr_ste_v1_set_rx_decap,
.set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
index a8d9e308d339..591c20c95a6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -156,6 +156,10 @@ void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
u32 reformat_id, int size);
void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
index 0882dba0f64b..d0ebaf820d42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -69,6 +69,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.set_pop_vlan = &dr_ste_v1_set_pop_vlan,
.set_rx_decap = &dr_ste_v1_set_rx_decap,
.set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
index cc60ce1d274e..e468a9ae44e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -79,6 +79,46 @@ static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v3_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, u8 anchor,
+ u8 offset, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_anchor, anchor);
+
+ /* The hardware expects the size and offset here in 2-byte words */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_offset, offset / 2);
+
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ attributes, DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset, int size)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, anchor);
+
+ /* The hardware expects the size and offset here in 2-byte words */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, size / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_offset, offset / 2);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
static int
dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
u8 *hw_action, u32 hw_action_sz,
@@ -211,6 +251,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v3 = {
.set_pop_vlan = &dr_ste_v3_set_pop_vlan,
.set_rx_decap = &dr_ste_v3_set_rx_decap,
.set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v3_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v3_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
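The ste_ctx hunks above all gain set_insert_hdr/set_remove_hdr slots, and dr_ste_v1_set_actions_tx/rx now dispatch through the context table instead of hardcoding the v1 helpers, so STE v3 can substitute formatters that use the *_v3 register layouts. A minimal sketch of the dispatch pattern, with hypothetical ste_ops/build_insert_action names standing in for the real mlx5 types:

#include <linux/types.h>

/* hypothetical, trimmed-down version of the per-version ops table */
struct ste_ops {
	void (*set_insert_hdr)(u8 *hw_ste, u8 *action, u32 reformat_id,
			       u8 anchor, u8 offset, int size);
	void (*set_remove_hdr)(u8 *hw_ste, u8 *action, u8 anchor,
			       u8 offset, int size);
};

static void build_insert_action(const struct ste_ops *ops, u8 *hw_ste,
				u8 *action, u32 reformat_id, int size)
{
	/* the context picks the formatter; callers stay version-agnostic */
	ops->set_insert_hdr(hw_ste, action, reformat_id, 0, 0, size);
}

Each generation then fills the table with its own implementations, exactly as ste_ctx_v1/v2 reuse the v1 helpers above while ste_ctx_v3 points at the v3 ones.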
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 138ac58fae51..f713656f1fae 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -375,6 +375,6 @@ irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
- hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->inj_timer.function = sparx5_injection_timeout;
+ hrtimer_setup(&port->inj_timer, sparx5_injection_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
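This sparx5 hunk and the many hrtimer hunks later in this series (rmnet, stmmac, am65-cpsw, icssg, at86rf230, cdc_ncm, mt76, rt2x00, hwsim, iosm, ntb_pingpong) apply the same mechanical conversion: the two-step hrtimer_init() plus open-coded .function assignment becomes one hrtimer_setup() call that takes the callback up front. A sketch of the before/after shape, assuming only the signatures visible in these hunks:

#include <linux/hrtimer.h>

static enum hrtimer_restart my_timeout(struct hrtimer *t)
{
	/* hypothetical callback; real drivers do their work here */
	return HRTIMER_NORESTART;
}

static void init_timer_old(struct hrtimer *t)
{
	/* old pattern: init first, poke the callback in afterwards */
	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = my_timeout;
}

static void init_timer_new(struct hrtimer *t)
{
	/* new pattern: the callback is part of the setup call, so the
	 * timer can never be armed with a NULL function pointer
	 */
	hrtimer_setup(t, my_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}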
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index be95336ce089..638ef64d639f 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -134,9 +134,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
- u32 i, max_num_devs;
+ int found_dev = 0;
u16 dev_type;
int err;
+ u32 i;
mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
@@ -148,12 +149,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
- for (i = 0; i < max_num_devs; i++) {
+ for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+ found_dev < resp.num_of_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
+ /* Skip empty devices */
+ if (dev.as_uint32 == 0)
+ continue;
+
+ found_dev++;
+
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
@@ -1547,6 +1553,7 @@ unmap_bar:
* adapter-MTU file and apc->mana_pci_debugfs folder.
*/
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1569,6 +1576,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
@@ -1622,6 +1631,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_disable_device(pdev);
}
@@ -1648,8 +1659,10 @@ static int __init mana_driver_init(void)
mana_debugfs_root = debugfs_create_dir("mana", NULL);
err = pci_register_driver(&mana_driver);
- if (err)
+ if (err) {
debugfs_remove(mana_debugfs_root);
+ mana_debugfs_root = NULL;
+ }
return err;
}
@@ -1659,6 +1672,8 @@ static void __exit mana_driver_exit(void)
pci_unregister_driver(&mana_driver);
debugfs_remove(mana_debugfs_root);
+
+ mana_debugfs_root = NULL;
}
module_init(mana_driver_init);
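Every debugfs change in this file (and in mana_en.c below) follows one rule: each debugfs_remove*() call is immediately followed by NULLing the cached dentry, so a later teardown path that walks the same state turns into a harmless no-op rather than a double remove. A minimal sketch of the idiom, using a hypothetical my_ctx type:

#include <linux/debugfs.h>

struct my_ctx {
	struct dentry *dbg_dir;
};

static void my_ctx_teardown(struct my_ctx *ctx)
{
	/* debugfs_remove_recursive() ignores NULL, so once the pointer
	 * is cleared this teardown is safe to run again
	 */
	debugfs_remove_recursive(ctx->dbg_dir);
	ctx->dbg_dir = NULL;
}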
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index aa1e47233fe5..ae76ecc7a5d3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -738,12 +738,11 @@ static const struct net_device_ops mana_devops = {
static void mana_cleanup_port_context(struct mana_port_context *apc)
{
/*
- * at this point all dir/files under the vport directory
- * are already cleaned up.
- * We are sure the apc->mana_port_debugfs remove will not
- * cause any freed memory access issues
+ * Make sure subsequent cleanup attempts can't end up removing a
+ * dentry that has already been freed
*/
debugfs_remove(apc->mana_port_debugfs);
+ apc->mana_port_debugfs = NULL;
kfree(apc->rxqs);
apc->rxqs = NULL;
}
@@ -1254,6 +1253,7 @@ static void mana_destroy_eq(struct mana_context *ac)
return;
debugfs_remove_recursive(ac->mana_eqs_debugfs);
+ ac->mana_eqs_debugfs = NULL;
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
@@ -1914,6 +1914,7 @@ static void mana_destroy_txq(struct mana_port_context *apc)
for (i = 0; i < apc->num_queues; i++) {
debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
+ apc->tx_qp[i].mana_tx_debugfs = NULL;
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
@@ -2099,6 +2100,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
return;
debugfs_remove_recursive(rxq->mana_rx_debugfs);
+ rxq->mana_rx_debugfs = NULL;
napi = &rxq->rx_cq.napi;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f9dd50152b1e..28d24d59efb8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
- if (!sriov->allowed_vlans)
+ if (!sriov->allowed_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
vlans = (u16 *)&cmd->rsp.arg[3];
for (i = 0; i < num_vlans; i++)
@@ -2167,8 +2169,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
- if (!vf->sriov_vlans)
+ if (!vf->sriov_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
}
return 0;
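Both qlcnic hunks plug the same leak class: a kcalloc() failure partway through the per-VF loop used to return -ENOMEM while the VFs already processed kept their allocations. Calling the driver's existing free helper on the error path releases whatever was set up so far. The shape of the fix, sketched with hypothetical adapter/vf_info types and a free_all() standing in for qlcnic_sriov_free_vlans():

#include <linux/slab.h>

struct vf_info { u16 *vlans; };
struct adapter { struct vf_info *vf; int num_vfs; int num_vlans; };

static void free_all(struct adapter *ad)
{
	int i;

	for (i = 0; i < ad->num_vfs; i++) {
		kfree(ad->vf[i].vlans);	/* kfree(NULL) is fine */
		ad->vf[i].vlans = NULL;
	}
}

static int alloc_per_vf(struct adapter *ad)
{
	int i;

	for (i = 0; i < ad->num_vfs; i++) {
		ad->vf[i].vlans = kcalloc(ad->num_vlans, sizeof(u16),
					  GFP_KERNEL);
		if (!ad->vf[i].vlans) {
			free_all(ad);	/* unwind the earlier VFs too */
			return -ENOMEM;
		}
	}
	return 0;
}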
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a5e3d1a88305..8b4640c5d61e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -686,8 +686,8 @@ void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
- hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ hrtimer_setup(&port->hrtimer, rmnet_map_flush_tx_packet_queue, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->agg_lock);
rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 3bd11cb56294..2aacc1996796 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1501,7 +1501,10 @@ static void rtase_wait_for_quiescence(const struct net_device *dev)
static void rtase_sw_reset(struct net_device *dev)
{
struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_ring *ring, *tmp;
+ struct rtase_int_vector *ivec;
int ret;
+ u32 i;
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1512,6 +1515,13 @@ static void rtase_sw_reset(struct net_device *dev)
rtase_tx_clear(tp);
rtase_rx_clear(tp);
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry)
+ list_del(&ring->ring_entry);
+ }
+
ret = rtase_init_ring(dev);
if (ret) {
netdev_err(dev, "unable to init ring\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index bd4eb187f8c6..b5a7e05ab7a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -46,7 +46,9 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
u32 a_index = 0;
if (!plat_dat->axi) {
- plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+ plat_dat->axi = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_axi),
+ GFP_KERNEL);
if (!plat_dat->axi)
return -ENOMEM;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c0ae7db96f46..554d2c0a8fde 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3199,8 +3199,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
for (chan = 0; chan < rx_channel_count; chan++)
@@ -6970,8 +6969,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
/* Enable the MAC Rx/Tx */
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2806238629f8..4a8e6b9413e3 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2306,13 +2306,17 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
int i, ret = 0;
for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ tx_chn = &common->tx_chns[i];
+
+ hrtimer_setup(&tx_chn->tx_hrtimer, &am65_cpsw_nuss_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
+ netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll);
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -2323,19 +2327,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
tx_chn->id, tx_chn->irq, ret);
goto err;
}
-
- netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
- am65_cpsw_nuss_tx_poll);
}
return 0;
err:
- for (--i ; i >= 0 ; i--) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
-
- netif_napi_del(&tx_chn->napi_tx);
+ netif_napi_del(&tx_chn->napi_tx);
+ for (--i; i >= 0; i--) {
+ tx_chn = &common->tx_chns[i];
devm_free_irq(dev, tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
}
return ret;
@@ -2565,9 +2566,11 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
snprintf(flow->name,
sizeof(flow->name), "%s-rx%d",
dev_name(dev), i);
- hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+ hrtimer_setup(&flow->rx_hrtimer, &am65_cpsw_nuss_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+
+ netif_napi_add(common->dma_ndev, &flow->napi_rx,
+ am65_cpsw_nuss_rx_poll);
ret = devm_request_irq(dev, flow->irq,
am65_cpsw_nuss_rx_irq,
@@ -2577,11 +2580,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "failure requesting rx %d irq %u, %d\n",
i, flow->irq, ret);
flow->irq = -EINVAL;
- goto err_flow;
+ goto err_request_irq;
}
-
- netif_napi_add(common->dma_ndev, &flow->napi_rx,
- am65_cpsw_nuss_rx_poll);
}
/* setup classifier to route priorities to flows */
@@ -2589,11 +2589,14 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
return 0;
+err_request_irq:
+ netif_napi_del(&flow->napi_rx);
+
err_flow:
- for (--i; i >= 0 ; i--) {
+ for (--i; i >= 0; i--) {
flow = &rx_chn->flows[i];
- netif_napi_del(&flow->napi_rx);
devm_free_irq(dev, flow->irq, flow);
+ netif_napi_del(&flow->napi_rx);
}
err:
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 74f0f200a89d..6c1b8ff563e0 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -249,9 +249,8 @@ int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
+ hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
ret = request_irq(tx_chn->irq, prueth_tx_irq,
IRQF_TRIGGER_HIGH, tx_chn->name,
tx_chn);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 00ed97860547..4d496c8f479d 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1169,9 +1169,8 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
- hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- emac->rx_hrtimer.function = &emac_rx_timer_callback;
+ hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
prueth->emac[mac] = emac;
return 0;
@@ -1679,6 +1678,7 @@ static int prueth_probe(struct platform_device *pdev)
}
spin_lock_init(&prueth->vtbl_lock);
+ spin_lock_init(&prueth->stats_lock);
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 329b46e9ee53..f41786b05741 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -305,6 +305,8 @@ struct prueth {
int default_vlan;
/** @vtbl_lock: Lock for vtbl in shared memory */
spinlock_t vtbl_lock;
+ /** @stats_lock: Lock for reading icssg stats */
+ spinlock_t stats_lock;
};
struct emac_tx_ts_response {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 8800bd3a8d07..6f0edae38ea2 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -26,6 +26,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
u32 val, reg;
int i;
+ spin_lock(&prueth->stats_lock);
+
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
regmap_read(prueth->miig_rt,
base + icssg_all_miig_stats[i].offset,
@@ -51,6 +53,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
emac->pa_stats[i] += val;
}
}
+
+ spin_unlock(&prueth->stats_lock);
}
void icssg_stats_work_handler(struct work_struct *work)
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index f632b0cfd5ae..fd91f8a45bce 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -776,8 +776,8 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp,
state->trx.tx_buf = state->buf;
state->trx.rx_buf = state->buf;
spi_message_add_tail(&state->trx, &state->msg);
- hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- state->timer.function = at86rf230_async_state_timer;
+ hrtimer_setup(&state->timer, at86rf230_async_state_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static irqreturn_t at86rf230_isr(int irq, void *data)
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index e3dcdeacc12c..d74d47dd6e04 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -583,6 +583,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
struct mctp_i2c_hdr *hdr;
struct mctp_hdr *mhdr;
u8 lldst, llsrc;
+ int rc;
if (len > MCTP_I2C_MAXMTU)
return -EMSGSIZE;
@@ -593,6 +594,10 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
lldst = *((u8 *)daddr);
llsrc = *((u8 *)saddr);
+ rc = skb_cow_head(skb, sizeof(struct mctp_i2c_hdr));
+ if (rc)
+ return rc;
+
skb_push(skb, sizeof(struct mctp_i2c_hdr));
skb_reset_mac_header(skb);
hdr = (void *)skb_mac_header(skb);
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index c1e72253063b..c678f79aa356 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -506,10 +506,15 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned int len)
{
struct mctp_i3c_internal_hdr *ihdr;
+ int rc;
if (!daddr || !saddr)
return -EINVAL;
+ rc = skb_cow_head(skb, sizeof(struct mctp_i3c_internal_hdr));
+ if (rc)
+ return rc;
+
skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
skb_reset_mac_header(skb);
ihdr = (void *)skb_mac_header(skb);
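Both MCTP hunks guard the skb_push() in the header_create path with skb_cow_head(): if the skb lacks writable headroom (or the header area is shared), the buffer is reallocated before the MAC header is written, instead of corrupting shared data. The idiom, sketched for a hypothetical three-byte header:

#include <linux/skbuff.h>

struct my_hdr { u8 dest, src, len; } __packed;

static int my_header_create(struct sk_buff *skb, u8 dest, u8 src)
{
	struct my_hdr *hdr;
	int rc;

	/* guarantee sizeof(*hdr) bytes of private, writable headroom */
	rc = skb_cow_head(skb, sizeof(*hdr));
	if (rc)
		return rc;

	hdr = skb_push(skb, sizeof(*hdr));
	skb_reset_mac_header(skb);
	hdr->dest = dest;
	hdr->src = src;
	hdr->len = 0;
	return 0;
}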
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 34231b5b9175..e9fc54517449 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -22,6 +22,11 @@
#define PHY_ID_TJA_1103 0x001BB010
#define PHY_ID_TJA_1120 0x001BB031
+#define VEND1_DEVICE_ID3 0x0004
+#define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12)
+#define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8)
+#define DEVICE_ID3_SAMPLE_TYPE_R 0x9
+
#define VEND1_DEVICE_CONTROL 0x0040
#define DEVICE_CONTROL_RESET BIT(15)
#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
@@ -109,6 +114,9 @@
#define MII_BASIC_CONFIG_RMII 0x5
#define MII_BASIC_CONFIG_MII 0x4
+#define VEND1_SGMII_BASIC_CONTROL 0xB000
+#define SGMII_LPM BIT(11)
+
#define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
#define EXTENDED_CNT_EN BIT(15)
#define VEND1_MONITOR_STATUS 0xAC80
@@ -1593,6 +1601,63 @@ static int nxp_c45_set_phy_mode(struct phy_device *phydev)
return 0;
}
+/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */
+static void nxp_c45_tja1120_errata(struct phy_device *phydev)
+{
+ bool macsec_ability, sgmii_ability;
+ int silicon_version, sample_type;
+ int phy_abilities;
+ int ret = 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
+ if (ret < 0)
+ return;
+
+ sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
+ if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
+ return;
+
+ silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);
+
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+ sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
+ if ((!macsec_ability && silicon_version == 2) ||
+ (macsec_ability && silicon_version == 1)) {
+ /* TJA1120/TJA1121 PHY configuration errata workaround.
+ * Apply the PHY write sequence before link up.
+ */
+ if (!macsec_ability) {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
+ } else {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
+ }
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);
+
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);
+
+ if (sgmii_ability) {
+ /* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
+ * Put the SGMII PCS into power-down mode and bring it back up.
+ */
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ }
+ }
+}
+
static int nxp_c45_config_init(struct phy_device *phydev)
{
int ret;
@@ -1609,6 +1674,9 @@ static int nxp_c45_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
+ if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
+ nxp_c45_tja1120_errata(phydev);
+
phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
PHY_CONFIG_AUTO);
diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c
index 4a5d73002a1a..0e9e987f37dd 100644
--- a/drivers/net/phy/phy_link_topology.c
+++ b/drivers/net/phy/phy_link_topology.c
@@ -73,7 +73,7 @@ int phy_link_topo_add_phy(struct net_device *dev,
xa_limit_32b, &topo->next_phy_index,
GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err;
return 0;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d5c47a2a62dc..34e82f1e37d9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -833,8 +833,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->dev = dev;
- hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
+ hrtimer_setup(&ctx->tx_timer, &cdc_ncm_tx_timer_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tasklet_setup(&ctx->bh, cdc_ncm_txpath_bh);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a91bf9c7e31d..137adf6d5b08 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -627,7 +627,7 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
@@ -658,7 +658,7 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7646ddd9bef7..9d7c37e968b5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3826,7 +3826,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
cpumask_var_t mask;
int stragglers;
int group_size;
- int i, j, cpu;
+ int i, start = 0, cpu;
int num_cpu;
int stride;
@@ -3840,16 +3840,18 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
- for (j = 0; j < group_size; j++) {
+ for_each_online_cpu_wrap(cpu, start) {
+ if (!group_size--) {
+ start = cpu;
+ break;
+ }
cpumask_set_cpu(cpu, mask);
- cpu = cpumask_next_wrap(cpu, cpu_online_mask,
- nr_cpu_ids, false);
}
+
virtqueue_set_affinity(vi->rq[i].vq, mask);
virtqueue_set_affinity(vi->sq[i].vq, mask);
__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
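The rewritten affinity loop drops the manual cpumask_next_wrap() advance in favour of a wrapping iterator: each queue pair consumes group_size online CPUs, and the CPU where the walk stops becomes the start point for the next queue. A sketch of the iteration shape, assuming for_each_online_cpu_wrap() behaves as used in the hunk above:

#include <linux/cpumask.h>

static void spread_queues(int nr_queues, int per_queue)
{
	unsigned int cpu, start = 0;
	int q, left;

	for (q = 0; q < nr_queues; q++) {
		left = per_queue;
		for_each_online_cpu_wrap(cpu, start) {
			if (!left--) {
				start = cpu;	/* next queue resumes here */
				break;
			}
			/* hypothetical: assign 'cpu' to queue 'q' */
		}
	}
}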
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 49c8507d1a6b..47854a36413e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -403,6 +403,8 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans)
iwl_trans_pcie_op_mode_leave(trans);
+ cancel_work_sync(&trans->restart.wk);
+
trans->op_mode = NULL;
trans->state = IWL_TRANS_NO_FW;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index df49dd2e2026..d10877856049 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -422,6 +422,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
/* if reached this point, Alive notification was received */
iwl_mei_alive_notif(true);
+ iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
&mvm->fw->ucode_capa);
if (ret) {
@@ -430,8 +432,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return ret;
}
- iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
-
/*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 7b6071a59b69..7c1dd5cc084a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1869,12 +1869,12 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
unsigned int offset)
{
struct sg_table *sgt;
- unsigned int n_segments;
+ unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;
+ int orig_nents;
if (WARN_ON_ONCE(skb_has_frag_list(skb)))
return NULL;
- n_segments = DIV_ROUND_UP(skb->len - offset, skb_shinfo(skb)->gso_size);
*hdr = iwl_pcie_get_page_hdr(trans,
hdr_room + __alignof__(struct sg_table) +
sizeof(struct sg_table) +
@@ -1889,11 +1889,12 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sg_init_table(sgt->sgl, n_segments);
/* Only map the data, not the header (it is copied to the TSO page) */
- sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, offset,
- skb->len - offset);
- if (WARN_ON_ONCE(sgt->orig_nents <= 0))
+ orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
+ if (WARN_ON_ONCE(orig_nents <= 0))
return NULL;
+ sgt->orig_nents = orig_nents;
+
/* And map the entire SKB */
if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
return NULL;
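The subtle bug fixed here: sgt->orig_nents is unsigned, so the old WARN_ON_ONCE(sgt->orig_nents <= 0) could never fire for a negative skb_to_sgvec() return; the error value wrapped to a huge positive count. Keeping the result in a signed local first makes the check meaningful. The general shape, as a hedged sketch:

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

static int map_payload(struct sg_table *sgt, struct sk_buff *skb,
		       unsigned int offset)
{
	int nents;	/* signed: skb_to_sgvec() may return -EMSGSIZE */

	nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
	if (nents <= 0)
		return nents ? nents : -EIO;

	sgt->orig_nents = nents;	/* safe: known positive here */
	return 0;
}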
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 0e1ede9314d8..4840d0b500b3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -264,8 +264,8 @@ void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
};
dev->beacon_ops = &beacon_ops;
- hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
+ hrtimer_setup(&dev->pre_tbtt_timer, mt76x02u_pre_tbtt_interrupt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);
mt76x02_init_beacon_config(dev);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index 5323acff962a..45775ecdf221 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -842,7 +842,7 @@ int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Set txstatus timer function.
*/
- rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;
+ hrtimer_update_function(&rt2x00dev->txstatus_timer, rt2800mmio_tx_sta_fifo_timeout);
/*
* Overwrite TX done handler
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 160bef79acdb..b51a23300ba2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -618,7 +618,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Set txstatus timer function.
*/
- rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
+ hrtimer_update_function(&rt2x00dev->txstatus_timer, rt2800usb_tx_sta_fifo_timeout);
/*
* Overwrite TX done handler
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 9e7d9dbe954c..432ddfac2c33 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1391,8 +1391,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
mutex_init(&rt2x00dev->conf_mutex);
INIT_LIST_HEAD(&rt2x00dev->bar_list);
spin_lock_init(&rt2x00dev->bar_list_lock);
- hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
+ hrtimer_setup(&rt2x00dev->txstatus_timer, hrtimer_dummy_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index cf6a331d4042..fb187a9e984e 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -5548,10 +5548,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
for (i = 0; i < ARRAY_SIZE(data->link_data); i++) {
- hrtimer_init(&data->link_data[i].beacon_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_SOFT);
- data->link_data[i].beacon_timer.function =
- mac80211_hwsim_beacon;
+ hrtimer_setup(&data->link_data[i].beacon_timer, mac80211_hwsim_beacon,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS_SOFT);
data->link_data[i].link_id = i;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 829515a601b3..530a3ea47a1a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -1381,24 +1381,20 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
/* The phase is set to power off. */
ipc_imem->phase = IPC_P_OFF;
- hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
+ hrtimer_setup(&ipc_imem->startup_timer, ipc_imem_startup_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
+ hrtimer_setup(&ipc_imem->tdupdate_timer, ipc_imem_td_update_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
+ hrtimer_setup(&ipc_imem->fast_update_timer, ipc_imem_fast_update_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+ hrtimer_setup(&ipc_imem->td_alloc_timer, ipc_imem_td_alloc_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
+ hrtimer_setup(&ipc_imem->adb_timer, ipc_imem_adb_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
if (ipc_imem_config(ipc_imem)) {
dev_err(ipc_imem->dev, "failed to initialize the imem");
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 6295e55ef85e..368f6d894bba 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -106,10 +106,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb)
if (!ntb->msi)
return -EINVAL;
- msi_lock_descs(&ntb->pdev->dev);
- desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
- addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
- msi_unlock_descs(&ntb->pdev->dev);
+ scoped_guard (msi_descs_lock, &ntb->pdev->dev) {
+ desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
+ addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
+ }
for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
@@ -289,7 +289,7 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
if (!ntb->msi)
return -EINVAL;
- msi_lock_descs(dev);
+ guard(msi_descs_lock)(dev);
msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
if (irq_has_action(entry->irq))
continue;
@@ -307,17 +307,11 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
if (ret) {
devm_free_irq(&ntb->dev, entry->irq, dev_id);
- goto unlock;
+ return ret;
}
-
- ret = entry->irq;
- goto unlock;
+ return entry->irq;
}
- ret = -ENODEV;
-
-unlock:
- msi_unlock_descs(dev);
- return ret;
+ return -ENODEV;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
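The ntb conversions (and the PCI MSI ones later in this series) replace explicit msi_lock_descs()/msi_unlock_descs() pairs with the cleanup.h guards, so every early return drops the lock automatically and the goto-based unlock labels disappear. The two forms, sketched with a hypothetical some_condition() predicate:

#include <linux/cleanup.h>
#include <linux/msi.h>

static int whole_function_locked(struct device *dev)
{
	guard(msi_descs_lock)(dev);	/* held until any return below */

	if (!some_condition(dev))	/* hypothetical predicate */
		return -ENODEV;		/* lock released automatically */
	return 0;
}

static void block_scoped(struct device *dev)
{
	scoped_guard(msi_descs_lock, dev) {
		/* lock held only inside this block */
	}
	/* lock already dropped here */
}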
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 8aeca7914050..1c1c74f4ff2d 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -284,8 +284,7 @@ static struct pp_ctx *pp_create_data(struct ntb_dev *ntb)
pp->ntb = ntb;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->lock);
- hrtimer_init(&pp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pp->timer.function = pp_timer_func;
+ hrtimer_setup(&pp->timer, pp_timer_func, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return pp;
}
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index a060f69558e7..8971aca41e63 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
}
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
- !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ !blk_mq_add_to_batch(req, iob,
+ nvme_req(req)->status != NVME_SC_SUCCESS,
apple_nvme_complete_batch))
apple_nvme_complete_rq(req);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f028913e2e62..8359d0aa0e44 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -431,6 +431,12 @@ static inline void nvme_end_req_zoned(struct request *req)
static inline void __nvme_end_req(struct request *req)
{
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+ if (blk_rq_is_passthrough(req))
+ nvme_log_err_passthru(req);
+ else
+ nvme_log_error(req);
+ }
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
@@ -441,12 +447,6 @@ void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
- if (blk_rq_is_passthrough(req))
- nvme_log_err_passthru(req);
- else
- nvme_log_error(req);
- }
__nvme_end_req(req);
blk_mq_end_request(req, status);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 640590b21728..3ad7f197c808 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1130,8 +1130,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
- !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
- nvme_pci_complete_batch))
+ !blk_mq_add_to_batch(req, iob,
+ nvme_req(req)->status != NVME_SC_SUCCESS,
+ nvme_pci_complete_batch))
nvme_pci_complete_rq(req);
}
@@ -1411,9 +1412,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
struct nvme_command cmd = { };
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 csts = readl(dev->bar + NVME_REG_CSTS);
u8 opcode;
+ /*
+ * Shut down the device immediately if we see it is disconnected. This
+ * unblocks PCIe error handling if the nvme driver is waiting in
+ * error_resume for a device that has been removed. We can't unbind the
+ * driver while the driver's error callback is waiting to complete, so
+ * we're relying on a timeout to break that deadlock if a removal
+ * occurs while reset work is running.
+ */
+ if (pci_dev_is_disconnected(pdev))
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
if (nvme_state_terminal(&dev->ctrl))
goto disable;
@@ -1421,7 +1433,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* the recovery mechanism will surely fail.
*/
mb();
- if (pci_channel_offline(to_pci_dev(dev->dev)))
+ if (pci_channel_offline(pdev))
return BLK_EH_RESET_TIMER;
/*
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 565d2bd36dcd..b1e31483f157 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1265,15 +1265,12 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
u16 status;
- if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
if (!(flags & NVME_QUEUE_PHYS_CONTIG))
return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
- if (flags & NVME_CQ_IRQ_ENABLED)
- set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
-
cq->pci_addr = pci_addr;
cq->qid = cqid;
cq->depth = qsize + 1;
@@ -1290,24 +1287,27 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
cq->qes = ctrl->io_cqes;
cq->pci_size = cq->qes * cq->depth;
- cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
- if (!cq->iv) {
- status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
- goto err;
+ if (flags & NVME_CQ_IRQ_ENABLED) {
+ cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
+ if (!cq->iv)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
}
status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
if (status != NVME_SC_SUCCESS)
goto err;
+ set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+
dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
cqid, qsize, cq->qes, cq->vector);
return NVME_SC_SUCCESS;
err:
- clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
- clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+ if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
return status;
}
@@ -1333,7 +1333,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
u16 status;
- if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
if (!(flags & NVME_QUEUE_PHYS_CONTIG))
@@ -1355,7 +1355,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
if (status != NVME_SC_SUCCESS)
- goto out_clear_bit;
+ return status;
sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
@@ -1365,6 +1365,8 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
goto out_destroy_sq;
}
+ set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
+
dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
sqid, qsize, sq->qes);
@@ -1372,8 +1374,6 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
out_destroy_sq:
nvmet_sq_destroy(&sq->nvme_sq);
-out_clear_bit:
- clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
return status;
}
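Both the CQ and SQ creation paths now set the NVMET_PCI_EPF_Q_LIVE bit only after every fallible step has succeeded, instead of claiming the queue up front with test_and_set_bit() and clearing the flag on each error path. A half-constructed queue is therefore never visible as live. The ordering, in outline with a minimal hypothetical queue type:

#include <linux/bitops.h>
#include <linux/errno.h>

#define Q_LIVE	0	/* hypothetical flag bit */

struct queue { unsigned long flags; };

static int create_queue(struct queue *q)
{
	if (test_bit(Q_LIVE, &q->flags))
		return -EEXIST;		/* queue already created */

	/* ... all fallible setup goes here; plain returns on error
	 * need no flag cleanup because nothing was published yet ...
	 */

	set_bit(Q_LIVE, &q->flags);	/* publish only when complete */
	return 0;
}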
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2fbd379923fd..5c3054aaec8c 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -203,6 +203,12 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.
+ Enabling this option will reduce the entropy of x86 KASLR memory
+ regions. For example, on a 46-bit system the entropy goes down
+ from 16 bits to 15 bits. The actual reduction depends on the
+ number of physical address bits, processor features, the kernel
+ config (5-level page tables) and the physical memory present.
+
If unsure, say N.
config PCI_LABEL
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6084b38bdda1..178da6b9fc33 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1757,8 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
- false);
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
@@ -3976,24 +3975,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
struct irq_data *irq_data;
struct msi_desc *entry;
- int ret = 0;
if (!pdev->msi_enabled && !pdev->msix_enabled)
return 0;
- msi_lock_descs(&pdev->dev);
+ guard(msi_descs_lock)(&pdev->dev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data)) {
- ret = -EINVAL;
- break;
- }
-
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
hv_compose_msi_msg(irq_data, &entry->msg);
}
- msi_unlock_descs(&pdev->dev);
-
- return ret;
+ return 0;
}
/*
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 9d9596947350..e619accca49d 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -17,6 +17,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <xen/xen.h>
+
#include <asm/irqdomain.h>
#define VMD_CFGBAR 0
@@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vmd_dev *vmd;
int err;
+ if (xen_domain()) {
+ /*
+ * Xen doesn't have knowledge about devices in the VMD bus
+ * because the config space of devices behind the VMD bridge is
+ * not known to Xen, and hence Xen cannot discover or configure
+ * them in any way.
+ *
+ * Bypass of MSI remapping won't work in that case: direct
+ * writes by Linux to the MSI entries won't result in functional
+ * interrupts, as Xen is the entity that manages the host
+ * interrupt controller and must configure interrupts. However,
+ * multiplexing of interrupts by the VMD bridge will work under
+ * Xen, so force the usage of that mode which must always be
+ * supported by VMD bridges.
+ */
+ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+ }
+
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index b956ce591f96..d89f491afdf0 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msi_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
@@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msix_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 2f647cac4cae..d74162880d83 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -10,12 +10,12 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include "../pci.h"
#include "msi.h"
int pci_msi_enable = 1;
-int pci_msi_ignore_mask;
/**
* pci_msi_supported - check whether MSI may be enabled on a device
@@ -285,6 +285,8 @@ static void pci_msi_set_enable(struct pci_dev *dev, int enable)
static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
struct irq_affinity_desc *masks)
{
+ const struct irq_domain *d = dev_get_msi_domain(&dev->dev);
+ const struct msi_domain_info *info = d->host_data;
struct msi_desc desc;
u16 control;
@@ -295,8 +297,7 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
- /* Respect XEN's mask disabling */
- if (pci_msi_ignore_mask)
+ if (info->flags & MSI_FLAG_NO_MASK)
control &= ~PCI_MSI_FLAGS_MASKBIT;
desc.nvec_used = nvec;
@@ -336,41 +337,11 @@ static int msi_verify_entries(struct pci_dev *dev)
return !entry ? 0 : -EIO;
}
-/**
- * msi_capability_init - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: number of interrupts to allocate
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
- *
- * Setup the MSI capability structure of the device with the requested
- * number of interrupts. A return value of zero indicates the successful
- * setup of an entry with the new MSI IRQ. A negative return value indicates
- * an error, and a positive return value indicates the number of interrupts
- * which could have been allocated.
- */
-static int msi_capability_init(struct pci_dev *dev, int nvec,
- struct irq_affinity *affd)
+static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *masks = NULL;
+ int ret = msi_setup_msi_desc(dev, nvec, masks);
struct msi_desc *entry, desc;
- int ret;
-
- /* Reject multi-MSI early on irq domain enabled architectures */
- if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
- return 1;
- /*
- * Disable MSI during setup in the hardware, but mark it enabled
- * so that setup code can evaluate it.
- */
- pci_msi_set_enable(dev, 0);
- dev->msi_enabled = 1;
-
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
-
- msi_lock_descs(&dev->dev);
- ret = msi_setup_msi_desc(dev, nvec, masks);
if (ret)
goto fail;
@@ -399,19 +370,48 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
pcibios_free_irq(dev);
dev->irq = entry->irq;
- goto unlock;
-
+ return 0;
err:
pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
fail:
dev->msi_enabled = 0;
-unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
return ret;
}
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ struct irq_affinity *affd)
+{
+ /* Reject multi-MSI early on irq domain enabled architectures */
+ if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+ return 1;
+
+ /*
+ * Disable MSI during setup in the hardware, but mark it enabled
+ * so that setup code can evaluate it.
+ */
+ pci_msi_set_enable(dev, 0);
+ dev->msi_enabled = 1;
+
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msi_capability_init(dev, nvec, masks);
+}
+
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
struct irq_affinity *affd)
{
@@ -604,12 +604,15 @@ static void __iomem *msix_map_region(struct pci_dev *dev,
*/
void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
{
+ const struct irq_domain *d = dev_get_msi_domain(&dev->dev);
+ const struct msi_domain_info *info = d->host_data;
+
desc->nvec_used = 1;
desc->pci.msi_attrib.is_msix = 1;
desc->pci.msi_attrib.is_64 = 1;
desc->pci.msi_attrib.default_irq = dev->irq;
desc->pci.mask_base = dev->msix_base;
- desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+ desc->pci.msi_attrib.can_mask = !(info->flags & MSI_FLAG_NO_MASK) &&
!desc->pci.msi_attrib.is_virtual;
if (desc->pci.msi_attrib.can_mask) {
@@ -659,47 +662,45 @@ static void msix_mask_all(void __iomem *base, int tsize)
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;
- if (pci_msi_ignore_mask)
- return;
-
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
-static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, struct irq_affinity *affd)
+static int __msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *masks = NULL;
- int ret;
-
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+ int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
- msi_lock_descs(&dev->dev);
- ret = msix_setup_msi_descs(dev, entries, nvec, masks);
if (ret)
- goto out_free;
+ goto fail;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
- goto out_free;
+ goto fail;
/* Check if all MSI entries honor device restrictions */
ret = msi_verify_entries(dev);
if (ret)
- goto out_free;
+ goto fail;
msix_update_entries(dev, entries);
- goto out_unlock;
+ return 0;
-out_free:
+fail:
pci_free_msi_irqs(dev);
-out_unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
return ret;
}
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity *affd)
+{
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msix_setup_interrupts(dev, entries, nvec, masks);
+}
+
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -714,6 +715,8 @@ out_unlock:
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, struct irq_affinity *affd)
{
+ const struct irq_domain *d = dev_get_msi_domain(&dev->dev);
+ const struct msi_domain_info *info = d->host_data;
int ret, tsize;
u16 control;
@@ -744,15 +747,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Disable INTX */
pci_intx_for_msi(dev, 0);
- /*
- * Ensure that all table entries are masked to prevent
- * stale entries from firing in a crash kernel.
- *
- * Done late to deal with a broken Marvell NVME device
- * which takes the MSI-X mask bits into account even
- * when MSI-X is disabled, which prevents MSI delivery.
- */
- msix_mask_all(dev->msix_base, tsize);
+ if (!(info->flags & MSI_FLAG_NO_MASK)) {
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVME device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(dev->msix_base, tsize);
+ }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
@@ -871,13 +876,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
write_msg = arch_restore_msi_irqs(dev);
- msi_lock_descs(&dev->dev);
- msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
- if (write_msg)
- __pci_write_msi_msg(entry, &entry->msg);
- pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ scoped_guard (msi_descs_lock, &dev->dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+ if (write_msg)
+ __pci_write_msi_msg(entry, &entry->msg);
+ pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ }
}
- msi_unlock_descs(&dev->dev);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
@@ -916,6 +921,53 @@ void pci_free_msi_irqs(struct pci_dev *dev)
}
}
+#ifdef CONFIG_PCIE_TPH
+/**
+ * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
+ * @pdev: The PCIe device to update
+ * @index: The MSI-X index to update
+ * @tag: The tag to write
+ *
+ * Returns: 0 on success, error code on failure
+ */
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ struct msi_desc *msi_desc;
+ struct irq_desc *irq_desc;
+ unsigned int virq;
+
+ if (!pdev->msix_enabled)
+ return -ENXIO;
+
+ guard(msi_descs_lock)(&pdev->dev);
+ virq = msi_get_virq(&pdev->dev, index);
+ if (!virq)
+ return -ENXIO;
+ /*
+ * This is a horrible hack, but short of implementing a PCI
+ * specific interrupt chip callback and a huge pile of
+ * infrastructure, this is a minor nuisance. It provides
+ * protection against concurrent operations on this entry and keeps
+ * the control word cache in sync.
+ */
+ irq_desc = irq_to_desc(virq);
+ if (!irq_desc)
+ return -ENXIO;
+
+ guard(raw_spinlock_irq)(&irq_desc->lock);
+ msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
+ if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
+ return -ENXIO;
+
+ msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
+ msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
+ pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
+ /* Flush the write */
+ readl(pci_msix_desc_addr(msi_desc));
+ return 0;
+}
+#endif
+
/* Misc. infrastructure */
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
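
A note on the bit manipulation above: FIELD_PREP() places a value into the bit
positions described by a mask, which is how the new helper folds the steering
tag into the cached vector-control word. A minimal, self-contained sketch with
a hypothetical field layout (EXAMPLE_ST and example_set_tag are illustrations,
not part of the patch):

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Hypothetical 8-bit tag field in a 32-bit control word. */
#define EXAMPLE_ST	GENMASK(31, 24)

static u32 example_set_tag(u32 ctrl, u16 tag)
{
	ctrl &= ~EXAMPLE_ST;			/* clear the old tag */
	ctrl |= FIELD_PREP(EXAMPLE_ST, tag);	/* tag 0x5a yields 0x5a000000 */
	return ctrl;
}
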
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 01e51db8d285..2e9cf26a9ee9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -989,6 +989,15 @@ int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
const char *name);
void pcim_release_region(struct pci_dev *pdev, int bar);
+#ifdef CONFIG_PCI_MSI
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag);
+#else
+static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ return -ENODEV;
+}
+#endif
+
/*
* Config Address for PCI Configuration Mechanism #1
*
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 07de59ca2ebf..77fce5e1b830 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -204,48 +204,6 @@ static u8 get_rp_completer_type(struct pci_dev *pdev)
return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
}
-/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */
-static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag)
-{
-#ifdef CONFIG_PCI_MSI
- struct msi_desc *msi_desc = NULL;
- void __iomem *vec_ctrl;
- u32 val;
- int err = 0;
-
- msi_lock_descs(&pdev->dev);
-
- /* Find the msi_desc entry with matching msix_idx */
- msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) {
- if (msi_desc->msi_index == msix_idx)
- break;
- }
-
- if (!msi_desc) {
- err = -ENXIO;
- goto err_out;
- }
-
- /* Get the vector control register (offset 0xc) pointed by msix_idx */
- vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE;
- vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL;
-
- val = readl(vec_ctrl);
- val &= ~PCI_MSIX_ENTRY_CTRL_ST;
- val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
- writel(val, vec_ctrl);
-
- /* Read back to flush the update */
- val = readl(vec_ctrl);
-
-err_out:
- msi_unlock_descs(&pdev->dev);
- return err;
-#else
- return -ENODEV;
-#endif
-}
-
/* Write tag to ST table - Return 0 if OK, otherwise -errno */
static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
{
@@ -346,7 +304,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
switch (loc) {
case PCI_TPH_LOC_MSIX:
- err = write_tag_to_msix(pdev, index, tag);
+ err = pci_msix_write_tph_tag(pdev, index, tag);
break;
case PCI_TPH_LOC_CAP:
err = write_tag_to_st_table(pdev, index, tag);
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index 06fd317529fc..df9a28ba69dc 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
+#include <linux/perf/arm_pmuv3.h>
#include <linux/platform_device.h>
#include <asm/apple_m1_pmu.h>
@@ -120,6 +121,8 @@ enum m1_pmu_events {
*/
M1_PMU_CFG_COUNT_USER = BIT(8),
M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+ M1_PMU_CFG_COUNT_HOST = BIT(10),
+ M1_PMU_CFG_COUNT_GUEST = BIT(11),
};
/*
@@ -172,6 +175,17 @@ static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_BRANCH_MISSES] = M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
};
+#define M1_PMUV3_EVENT_MAP(pmuv3_event, m1_event) \
+ [ARMV8_PMUV3_PERFCTR_##pmuv3_event] = M1_PMU_PERFCTR_##m1_event
+
+static const u16 m1_pmu_pmceid_map[ARMV8_PMUV3_MAX_COMMON_EVENTS] = {
+ [0 ... ARMV8_PMUV3_MAX_COMMON_EVENTS - 1] = HW_OP_UNSUPPORTED,
+ M1_PMUV3_EVENT_MAP(INST_RETIRED, INST_ALL),
+ M1_PMUV3_EVENT_MAP(CPU_CYCLES, CORE_ACTIVE_CYCLE),
+ M1_PMUV3_EVENT_MAP(BR_RETIRED, INST_BRANCH),
+ M1_PMUV3_EVENT_MAP(BR_MIS_PRED_RETIRED, BRANCH_MISPRED_NONSPEC),
+};
+
/* sysfs definitions */
static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
@@ -327,11 +341,10 @@ static void m1_pmu_disable_counter_interrupt(unsigned int index)
__m1_pmu_enable_counter_interrupt(index, false);
}
-static void m1_pmu_configure_counter(unsigned int index, u8 event,
- bool user, bool kernel)
+static void __m1_pmu_configure_event_filter(unsigned int index, bool user,
+ bool kernel, bool host)
{
- u64 val, user_bit, kernel_bit;
- int shift;
+ u64 clear, set, user_bit, kernel_bit;
switch (index) {
case 0 ... 7:
@@ -346,19 +359,27 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
BUG();
}
- val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
-
+ clear = set = 0;
if (user)
- val |= user_bit;
+ set |= user_bit;
else
- val &= ~user_bit;
+ clear |= user_bit;
if (kernel)
- val |= kernel_bit;
+ set |= kernel_bit;
else
- val &= ~kernel_bit;
+ clear |= kernel_bit;
+
+ if (host)
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL1, clear, set);
+ else if (is_kernel_in_hyp_mode())
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL12, clear, set);
+}
- write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
+static void __m1_pmu_configure_eventsel(unsigned int index, u8 event)
+{
+ u64 clear = 0, set = 0;
+ int shift;
/*
* Counters 0 and 1 have fixed events. For anything else,
@@ -371,21 +392,32 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
break;
case 2 ... 5:
shift = (index - 2) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR0_EL1, clear, set);
break;
case 6 ... 9:
shift = (index - 6) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR1_EL1, clear, set);
break;
}
}
+static void m1_pmu_configure_counter(unsigned int index, unsigned long config_base)
+{
+ bool kernel = config_base & M1_PMU_CFG_COUNT_KERNEL;
+ bool guest = config_base & M1_PMU_CFG_COUNT_GUEST;
+ bool host = config_base & M1_PMU_CFG_COUNT_HOST;
+ bool user = config_base & M1_PMU_CFG_COUNT_USER;
+ u8 evt = config_base & M1_PMU_CFG_EVENT;
+
+ __m1_pmu_configure_event_filter(index, user && host, kernel && host, true);
+ __m1_pmu_configure_event_filter(index, user && guest, kernel && guest, false);
+ __m1_pmu_configure_eventsel(index, evt);
+}
+
/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
@@ -396,11 +428,7 @@ static void m1_pmu_enable_event(struct perf_event *event)
user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;
- m1_pmu_disable_counter_interrupt(event->hw.idx);
- m1_pmu_disable_counter(event->hw.idx);
- isb();
-
- m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
+ m1_pmu_configure_counter(event->hw.idx, event->hw.config_base);
m1_pmu_enable_counter(event->hw.idx);
m1_pmu_enable_counter_interrupt(event->hw.idx);
isb();
@@ -538,6 +566,26 @@ static int m2_pmu_map_event(struct perf_event *event)
return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
+static int m1_pmu_map_pmuv3_event(unsigned int eventsel)
+{
+ u16 m1_event = HW_OP_UNSUPPORTED;
+
+ if (eventsel < ARMV8_PMUV3_MAX_COMMON_EVENTS)
+ m1_event = m1_pmu_pmceid_map[eventsel];
+
+ return m1_event == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : m1_event;
+}
+
+static void m1_pmu_init_pmceid(struct arm_pmu *pmu)
+{
+ unsigned int event;
+
+ for (event = 0; event < ARMV8_PMUV3_MAX_COMMON_EVENTS; event++) {
+ if (m1_pmu_map_pmuv3_event(event) >= 0)
+ set_bit(event, pmu->pmceid_bitmap);
+ }
+}
+
static void m1_pmu_reset(void *info)
{
int i;
@@ -558,7 +606,7 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
{
unsigned long config_base = 0;
- if (!attr->exclude_guest) {
+ if (!attr->exclude_guest && !is_kernel_in_hyp_mode()) {
pr_debug("ARM performance counters do not support mode exclusion\n");
return -EOPNOTSUPP;
}
@@ -566,6 +614,10 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
config_base |= M1_PMU_CFG_COUNT_KERNEL;
if (!attr->exclude_user)
config_base |= M1_PMU_CFG_COUNT_USER;
+ if (!attr->exclude_host)
+ config_base |= M1_PMU_CFG_COUNT_HOST;
+ if (!attr->exclude_guest)
+ config_base |= M1_PMU_CFG_COUNT_GUEST;
event->config_base = config_base;
@@ -594,6 +646,9 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
cpu_pmu->reset = m1_pmu_reset;
cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
+ cpu_pmu->map_pmuv3_event = m1_pmu_map_pmuv3_event;
+ m1_pmu_init_pmceid(cpu_pmu);
+
bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
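
For readers new to sysreg_clear_set_s(): it is a read-modify-write helper from
arm64's <asm/sysreg.h>, which is why the rework above only has to accumulate
clear/set masks. Roughly (the real helper also skips the write when the value
would not change), it behaves like this sketch:

static void example_clear_set_pmcr1(u64 clear, u64 set)
{
	u64 val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);

	val &= ~clear;	/* drop the filter bits we no longer want */
	val |= set;	/* raise the ones we do */
	write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
}
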
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index d5fcea3d4328..1a0d0e1a2263 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1273,9 +1273,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* No overflow interrupt? Have to use a timer instead. */
if (!ccn->irq) {
dev_info(ccn->dev, "No access to interrupts, using timer.\n");
- hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
+ hrtimer_setup(&ccn->dt.hrtimer, arm_ccn_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
/* Pick one CPU which we will use to collect data from CCN... */
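
The hrtimer_init() plus open-coded callback assignment to hrtimer_setup()
conversion above repeats verbatim in several drivers later in this series
(marvell_cn10k_ddr_pmu, thunderx2_pmu, ltc2952-poweroff, ab8500_chargalg,
idle_inject, intel_rapl_common, pps_gen_parport). The general shape, with a
hypothetical driver context:

struct example_ctx {
	struct hrtimer timer;
};

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void example_init(struct example_ctx *ctx)
{
	/* Old form: two steps, and the callback could be left unset. */
	/* hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); */
	/* ctx->timer.function = example_timer_fn; */

	/* New form: callback, clock and mode fixed in one call. */
	hrtimer_setup(&ctx->timer, example_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}
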
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index ef959e66db7c..d4fe30ff225b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -802,8 +802,6 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
CMN_EVENT_ATTR(_model, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
-#define CMN_EVENT_CCLA_RNI(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_HNS(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
@@ -1798,6 +1796,9 @@ static int arm_cmn_event_init(struct perf_event *event)
} else if (type == CMN_TYPE_XP &&
(cmn->part == PART_CMN700 || cmn->part == PART_CMN_S3)) {
hw->wide_sel = true;
+ } else if (type == CMN_TYPE_RND) {
+ /* Secretly permit this as an alias for "rnid" events */
+ type = CMN_TYPE_RNI;
}
/* This is sufficiently annoying to recalculate, so cache it */
diff --git a/drivers/perf/arm_cspmu/ampere_cspmu.c b/drivers/perf/arm_cspmu/ampere_cspmu.c
index f72f5689923c..b8ca69fd9d1d 100644
--- a/drivers/perf/arm_cspmu/ampere_cspmu.c
+++ b/drivers/perf/arm_cspmu/ampere_cspmu.c
@@ -10,10 +10,10 @@
#include "arm_cspmu.h"
-#define PMAUXR0 0xD80
-#define PMAUXR1 0xD84
-#define PMAUXR2 0xD88
-#define PMAUXR3 0xD8C
+#define PMAUXR0 PMIMPDEF
+#define PMAUXR1 (PMIMPDEF + 0x4)
+#define PMAUXR2 (PMIMPDEF + 0x8)
+#define PMAUXR3 (PMIMPDEF + 0xC)
#define to_ampere_cspmu_ctx(cspmu) ((struct ampere_cspmu_ctx *)(cspmu->impl.ctx))
@@ -132,32 +132,20 @@ ampere_cspmu_get_name(const struct arm_cspmu *cspmu)
return ctx->name;
}
-static u32 ampere_cspmu_event_filter(const struct perf_event *event)
+static void ampere_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
/*
- * PMEVFILTR or PMCCFILTR aren't used in Ampere SoC PMU but are marked
- * as RES0. Make sure, PMCCFILTR is written zero.
+ * PMCCFILTR is RES0, so this is just a dummy callback to override
+ * the default implementation and avoid writing to it.
*/
- return 0;
}
static void ampere_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc,
- u32 filter)
+ const struct perf_event *event)
{
- struct perf_event *event;
- unsigned int idx;
u32 threshold, rank, bank;
- /*
- * At this point, all the events have the same filter settings.
- * Therefore, take the first event and use its configuration.
- */
- idx = find_first_bit(cspmu->hw_events.used_ctrs,
- cspmu->cycle_counter_logical_idx);
-
- event = cspmu->hw_events.events[idx];
-
threshold = get_threshold(event);
rank = get_rank(event);
bank = get_bank(event);
@@ -233,7 +221,7 @@ static int ampere_cspmu_init_ops(struct arm_cspmu *cspmu)
cspmu->impl.ctx = ctx;
- impl_ops->event_filter = ampere_cspmu_event_filter;
+ impl_ops->set_cc_filter = ampere_cspmu_set_cc_filter;
impl_ops->set_ev_filter = ampere_cspmu_set_ev_filter;
impl_ops->validate_event = ampere_cspmu_validate_event;
impl_ops->get_name = ampere_cspmu_get_name;
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 81e8b97e9353..efa9b229e701 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -40,51 +40,6 @@
ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show, \
(unsigned long)_config)
-/*
- * CoreSight PMU Arch register offsets.
- */
-#define PMEVCNTR_LO 0x0
-#define PMEVCNTR_HI 0x4
-#define PMEVTYPER 0x400
-#define PMCCFILTR 0x47C
-#define PMEVFILTR 0xA00
-#define PMCNTENSET 0xC00
-#define PMCNTENCLR 0xC20
-#define PMINTENSET 0xC40
-#define PMINTENCLR 0xC60
-#define PMOVSCLR 0xC80
-#define PMOVSSET 0xCC0
-#define PMCFGR 0xE00
-#define PMCR 0xE04
-#define PMIIDR 0xE08
-
-/* PMCFGR register field */
-#define PMCFGR_NCG GENMASK(31, 28)
-#define PMCFGR_HDBG BIT(24)
-#define PMCFGR_TRO BIT(23)
-#define PMCFGR_SS BIT(22)
-#define PMCFGR_FZO BIT(21)
-#define PMCFGR_MSI BIT(20)
-#define PMCFGR_UEN BIT(19)
-#define PMCFGR_NA BIT(17)
-#define PMCFGR_EX BIT(16)
-#define PMCFGR_CCD BIT(15)
-#define PMCFGR_CC BIT(14)
-#define PMCFGR_SIZE GENMASK(13, 8)
-#define PMCFGR_N GENMASK(7, 0)
-
-/* PMCR register field */
-#define PMCR_TRO BIT(11)
-#define PMCR_HDBG BIT(10)
-#define PMCR_FZO BIT(9)
-#define PMCR_NA BIT(8)
-#define PMCR_DP BIT(5)
-#define PMCR_X BIT(4)
-#define PMCR_D BIT(3)
-#define PMCR_C BIT(2)
-#define PMCR_P BIT(1)
-#define PMCR_E BIT(0)
-
/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT 5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM \
@@ -111,7 +66,9 @@ static unsigned long arm_cspmu_cpuhp_state;
static DEFINE_MUTEX(arm_cspmu_lock);
static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc, u32 filter);
+ const struct perf_event *event);
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
@@ -226,6 +183,7 @@ arm_cspmu_event_attr_is_visible(struct kobject *kobj,
static struct attribute *arm_cspmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_FILTER_ATTR,
+ ARM_CSPMU_FORMAT_FILTER2_ATTR,
NULL,
};
@@ -250,11 +208,6 @@ static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}
-static u32 arm_cspmu_event_filter(const struct perf_event *event)
-{
- return event->attr.config1 & ARM_CSPMU_FILTER_MASK;
-}
-
static ssize_t arm_cspmu_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
@@ -416,7 +369,7 @@ static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
DEFAULT_IMPL_OP(get_name),
DEFAULT_IMPL_OP(is_cycle_counter_event),
DEFAULT_IMPL_OP(event_type),
- DEFAULT_IMPL_OP(event_filter),
+ DEFAULT_IMPL_OP(set_cc_filter),
DEFAULT_IMPL_OP(set_ev_filter),
DEFAULT_IMPL_OP(event_attr_is_visible),
};
@@ -812,26 +765,28 @@ static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
}
static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc,
- u32 filter)
+ const struct perf_event *event)
{
- u32 offset = PMEVFILTR + (4 * hwc->idx);
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
+ u32 filter2 = event->attr.config2 & ARM_CSPMU_FILTER_MASK;
+ u32 offset = 4 * event->hw.idx;
- writel(filter, cspmu->base0 + offset);
+ writel(filter, cspmu->base0 + PMEVFILTR + offset);
+ writel(filter2, cspmu->base0 + PMEVFILT2R + offset);
}
-static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
- u32 offset = PMCCFILTR;
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
- writel(filter, cspmu->base0 + offset);
+ writel(filter, cspmu->base0 + PMCCFILTR);
}
static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- u32 filter;
/* We always reprogram the counter */
if (pmu_flags & PERF_EF_RELOAD)
@@ -839,13 +794,11 @@ static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
arm_cspmu_set_event_period(event);
- filter = cspmu->impl.ops.event_filter(event);
-
if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
- arm_cspmu_set_cc_filter(cspmu, filter);
+ cspmu->impl.ops.set_cc_filter(cspmu, event);
} else {
arm_cspmu_set_event(cspmu, hwc);
- cspmu->impl.ops.set_ev_filter(cspmu, hwc, filter);
+ cspmu->impl.ops.set_ev_filter(cspmu, event);
}
hwc->state = 0;
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
index 2621f3111148..19684b76bd96 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.h
+++ b/drivers/perf/arm_cspmu/arm_cspmu.h
@@ -47,6 +47,8 @@
/* Default filter format */
#define ARM_CSPMU_FORMAT_FILTER_ATTR \
ARM_CSPMU_FORMAT_ATTR(filter, "config1:0-31")
+#define ARM_CSPMU_FORMAT_FILTER2_ATTR \
+ ARM_CSPMU_FORMAT_ATTR(filter2, "config2:0-31")
/*
* This is the default event number for cycle count, if supported, since the
@@ -65,6 +67,53 @@
/* The cycle counter, if implemented, is located at counter[31]. */
#define ARM_CSPMU_CYCLE_CNTR_IDX 31
+/*
+ * CoreSight PMU Arch register offsets.
+ */
+#define PMEVCNTR_LO 0x0
+#define PMEVCNTR_HI 0x4
+#define PMEVTYPER 0x400
+#define PMCCFILTR 0x47C
+#define PMEVFILT2R 0x800
+#define PMEVFILTR 0xA00
+#define PMCNTENSET 0xC00
+#define PMCNTENCLR 0xC20
+#define PMINTENSET 0xC40
+#define PMINTENCLR 0xC60
+#define PMOVSCLR 0xC80
+#define PMOVSSET 0xCC0
+#define PMIMPDEF 0xD80
+#define PMCFGR 0xE00
+#define PMCR 0xE04
+#define PMIIDR 0xE08
+
+/* PMCFGR register field */
+#define PMCFGR_NCG GENMASK(31, 28)
+#define PMCFGR_HDBG BIT(24)
+#define PMCFGR_TRO BIT(23)
+#define PMCFGR_SS BIT(22)
+#define PMCFGR_FZO BIT(21)
+#define PMCFGR_MSI BIT(20)
+#define PMCFGR_UEN BIT(19)
+#define PMCFGR_NA BIT(17)
+#define PMCFGR_EX BIT(16)
+#define PMCFGR_CCD BIT(15)
+#define PMCFGR_CC BIT(14)
+#define PMCFGR_SIZE GENMASK(13, 8)
+#define PMCFGR_N GENMASK(7, 0)
+
+/* PMCR register field */
+#define PMCR_TRO BIT(11)
+#define PMCR_HDBG BIT(10)
+#define PMCR_FZO BIT(9)
+#define PMCR_NA BIT(8)
+#define PMCR_DP BIT(5)
+#define PMCR_X BIT(4)
+#define PMCR_D BIT(3)
+#define PMCR_C BIT(2)
+#define PMCR_P BIT(1)
+#define PMCR_E BIT(0)
+
/* PMIIDR register field */
#define ARM_CSPMU_PMIIDR_IMPLEMENTER GENMASK(11, 0)
#define ARM_CSPMU_PMIIDR_PRODUCTID GENMASK(31, 20)
@@ -103,11 +152,11 @@ struct arm_cspmu_impl_ops {
bool (*is_cycle_counter_event)(const struct perf_event *event);
/* Decode event type/id from configs */
u32 (*event_type)(const struct perf_event *event);
- /* Decode filter value from configs */
- u32 (*event_filter)(const struct perf_event *event);
- /* Set event filter */
+ /* Set event filters */
+ void (*set_cc_filter)(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
void (*set_ev_filter)(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc, u32 filter);
+ const struct perf_event *event);
/* Implementation specific event validation */
int (*validate_event)(struct arm_cspmu *cspmu,
struct perf_event *event);
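
With the callback signatures above, an implementation derives everything from
the perf_event itself; the NVIDIA conversion below is one real example. A
hypothetical minimal override, assuming only the default config1 filter
encoding (example_set_ev_filter and example_init_ops are illustrative names):

static void example_set_ev_filter(struct arm_cspmu *cspmu,
				  const struct perf_event *event)
{
	u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;

	/* Per-counter filter register for this event's hardware counter. */
	writel(filter, cspmu->base0 + PMEVFILTR + 4 * event->hw.idx);
}

static int example_init_ops(struct arm_cspmu *cspmu)
{
	cspmu->impl.ops.set_ev_filter = example_set_ev_filter;
	return 0;
}
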
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index 8116c7846a46..dc6d4e3e2a1b 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -6,6 +6,7 @@
/* Support for NVIDIA specific attributes. */
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/topology.h>
@@ -183,6 +184,24 @@ static u32 nv_cspmu_event_filter(const struct perf_event *event)
return filter_val;
}
+static void nv_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter = nv_cspmu_event_filter(event);
+ u32 offset = PMEVFILTR + (4 * event->hw.idx);
+
+ writel(filter, cspmu->base0 + offset);
+}
+
+static void nv_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter = nv_cspmu_event_filter(event);
+
+ writel(filter, cspmu->base0 + PMCCFILTR);
+}
+
enum nv_cspmu_name_fmt {
NAME_FMT_GENERIC,
NAME_FMT_SOCKET
@@ -322,7 +341,8 @@ static int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
cspmu->impl.ctx = ctx;
/* NVIDIA specific callbacks. */
- impl_ops->event_filter = nv_cspmu_event_filter;
+ impl_ops->set_cc_filter = nv_cspmu_set_cc_filter;
+ impl_ops->set_ev_filter = nv_cspmu_set_ev_filter;
impl_ops->get_event_attrs = nv_cspmu_get_event_attrs;
impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
impl_ops->get_name = nv_cspmu_get_name;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 398cce3d76fc..2f33e69a8caf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -342,12 +342,10 @@ armpmu_add(struct perf_event *event, int flags)
if (idx < 0)
return idx;
- /*
- * If there is an event in the counter we are going to use then make
- * sure it is disabled.
- */
+ /* The newly-allocated counter should be empty */
+ WARN_ON_ONCE(hw_events->events[idx]);
+
event->hw.idx = idx;
- armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 0e360feb3432..e506d59654e7 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -795,11 +795,6 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
static void armv8pmu_enable_event(struct perf_event *event)
{
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- armv8pmu_disable_event_counter(event);
armv8pmu_write_event_type(event);
armv8pmu_enable_event_irq(event);
armv8pmu_enable_event_counter(event);
@@ -825,10 +820,10 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
else
armv8pmu_disable_user_access();
+ kvm_vcpu_pmu_resync_el0();
+
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
-
- kvm_vcpu_pmu_resync_el0();
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
@@ -1369,6 +1364,7 @@ PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
PMUV3_INIT_SIMPLE(armv8_neoverse_v2)
PMUV3_INIT_SIMPLE(armv8_neoverse_v3)
PMUV3_INIT_SIMPLE(armv8_neoverse_v3ae)
+PMUV3_INIT_SIMPLE(armv8_rainier)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
@@ -1416,6 +1412,7 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,neoverse-v2-pmu", .data = armv8_neoverse_v2_pmu_init},
{.compatible = "arm,neoverse-v3-pmu", .data = armv8_neoverse_v3_pmu_init},
{.compatible = "arm,neoverse-v3ae-pmu", .data = armv8_neoverse_v3ae_pmu_init},
+ {.compatible = "arm,rainier-pmu", .data = armv8_rainier_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c
index 420cadd108e7..17831e1920bd 100644
--- a/drivers/perf/arm_v7_pmu.c
+++ b/drivers/perf/arm_v7_pmu.c
@@ -858,16 +858,6 @@ static void armv7pmu_enable_event(struct perf_event *event)
}
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We only need to set the event for the cycle counter if we
* have the ability to perform event filtering.
@@ -875,14 +865,7 @@ static void armv7pmu_enable_event(struct perf_event *event)
if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /*
- * Enable interrupt for this counter
- */
armv7_pmnc_enable_intens(idx);
-
- /*
- * Enable counter
- */
armv7_pmnc_enable_counter(idx);
}
@@ -898,18 +881,7 @@ static void armv7pmu_disable_event(struct perf_event *event)
return;
}
- /*
- * Disable counter and interrupt
- */
-
- /*
- * Disable counter
- */
armv7_pmnc_disable_counter(idx);
-
- /*
- * Disable interrupt for this counter
- */
armv7_pmnc_disable_intens(idx);
}
@@ -1477,14 +1449,6 @@ static void krait_pmu_enable_event(struct perf_event *event)
int idx = hwc->idx;
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We set the event for the cycle counter because we
* have the ability to perform event filtering.
@@ -1494,10 +1458,7 @@ static void krait_pmu_enable_event(struct perf_event *event)
else
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /* Enable interrupt for this counter */
armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
armv7_pmnc_enable_counter(idx);
}
@@ -1798,14 +1759,6 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
int idx = hwc->idx;
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We don't set the event for the cycle counter because we
* don't have the ability to perform event filtering.
@@ -1815,10 +1768,7 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
else if (idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /* Enable interrupt for this counter */
armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
armv7_pmnc_enable_counter(idx);
}
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index cccecae9823f..f851e070760c 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -565,15 +565,15 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev)
u32 sbdf;
sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);
- plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf,
- pdev, sizeof(*pdev));
-
+ plat_dev = platform_device_register_simple("dwc_pcie_pmu", sbdf, NULL, 0);
if (IS_ERR(plat_dev))
return PTR_ERR(plat_dev);
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
- if (!dev_info)
+ if (!dev_info) {
+ platform_device_unregister(plat_dev);
return -ENOMEM;
+ }
/* Cache platform device to handle pci device hotplug */
dev_info->plat_dev = plat_dev;
@@ -614,18 +614,26 @@ static struct notifier_block dwc_pcie_pmu_nb = {
static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
{
- struct pci_dev *pdev = plat_dev->dev.platform_data;
+ struct pci_dev *pdev;
struct dwc_pcie_pmu *pcie_pmu;
char *name;
u32 sbdf;
u16 vsec;
int ret;
+ sbdf = plat_dev->id;
+ pdev = pci_get_domain_bus_and_slot(sbdf >> 16, PCI_BUS_NUM(sbdf & 0xffff),
+ sbdf & 0xff);
+ if (!pdev) {
+ pr_err("No pdev found for the sbdf 0x%x\n", sbdf);
+ return -ENODEV;
+ }
+
vsec = dwc_pcie_des_cap(pdev);
if (!vsec)
return -ENODEV;
- sbdf = plat_dev->id;
+ pci_dev_put(pdev);
name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
if (!name)
return -ENOMEM;
@@ -640,7 +648,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
pcie_pmu->on_cpu = -1;
pcie_pmu->pmu = (struct pmu){
.name = name,
- .parent = &pdev->dev,
+ .parent = &plat_dev->dev,
.module = THIS_MODULE,
.attr_groups = dwc_pcie_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
@@ -730,6 +738,15 @@ static struct platform_driver dwc_pcie_pmu_driver = {
.driver = {.name = "dwc_pcie_pmu",},
};
+static void dwc_pcie_cleanup_devices(void)
+{
+ struct dwc_pcie_dev_info *dev_info, *tmp;
+
+ list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) {
+ dwc_pcie_unregister_dev(dev_info);
+ }
+}
+
static int __init dwc_pcie_pmu_init(void)
{
struct pci_dev *pdev = NULL;
@@ -742,7 +759,7 @@ static int __init dwc_pcie_pmu_init(void)
ret = dwc_pcie_register_dev(pdev);
if (ret) {
pci_dev_put(pdev);
- return ret;
+ goto err_cleanup;
}
}
@@ -751,35 +768,35 @@ static int __init dwc_pcie_pmu_init(void)
dwc_pcie_pmu_online_cpu,
dwc_pcie_pmu_offline_cpu);
if (ret < 0)
- return ret;
+ goto err_cleanup;
dwc_pcie_pmu_hp_state = ret;
ret = platform_driver_register(&dwc_pcie_pmu_driver);
if (ret)
- goto platform_driver_register_err;
+ goto err_remove_cpuhp;
ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
if (ret)
- goto platform_driver_register_err;
+ goto err_unregister_driver;
notify = true;
return 0;
-platform_driver_register_err:
+err_unregister_driver:
+ platform_driver_unregister(&dwc_pcie_pmu_driver);
+err_remove_cpuhp:
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
-
+err_cleanup:
+ dwc_pcie_cleanup_devices();
return ret;
}
static void __exit dwc_pcie_pmu_exit(void)
{
- struct dwc_pcie_dev_info *dev_info, *tmp;
-
if (notify)
bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
- list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
- dwc_pcie_unregister_dev(dev_info);
+ dwc_pcie_cleanup_devices();
platform_driver_unregister(&dwc_pcie_pmu_driver);
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
}
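
Since the probe path above now reconstructs the pci_dev from the
platform-device id instead of a platform_data pointer, it helps to see the
sbdf round trip in one place. A worked sketch (the helper name and numeric
values are illustrative):

static struct pci_dev *example_sbdf_roundtrip(struct pci_dev *pdev)
{
	/* Encode: PCI domain in the high 16 bits, PCI_DEVID(bus, devfn)
	 * below; e.g. domain 1, bus 0x3a, devfn 0x08 encodes to 0x13a08. */
	u32 sbdf = (pci_domain_nr(pdev->bus) << 16) |
		   PCI_DEVID(pdev->bus->number, pdev->devfn);

	/* Decode, as the reworked probe() does. Takes a reference: the
	 * caller must pci_dev_put() the result. */
	return pci_get_domain_bus_and_slot(sbdf >> 16,
					   PCI_BUS_NUM(sbdf & 0xffff),
					   sbdf & 0xff);
}
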
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 039feded9152..72ac17efd846 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -1064,8 +1064,8 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
+ hrtimer_setup(&ddr_pmu->hrtimer, cn10k_ddr_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
cpuhp_state_add_instance_nocalls(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index cadd60221b8f..6ed4707bd6bb 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -752,9 +752,8 @@ static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
tx2_pmu->cpu = cpu;
if (tx2_pmu->hrtimer_callback) {
- hrtimer_init(&tx2_pmu->hrtimer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
+ hrtimer_setup(&tx2_pmu->hrtimer, tx2_pmu->hrtimer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = tx2_uncore_pmu_register(tx2_pmu);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index 73dbf29c002f..cf6efa9c0364 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -974,7 +974,7 @@ static const struct regmap_config bcm281xx_pinctrl_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = BCM281XX_PIN_VC_CAM3_SDA,
+ .max_register = BCM281XX_PIN_VC_CAM3_SDA * 4,
};
static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
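
The fix above rests on regmap treating max_register as a register address
rather than an index: with 32-bit registers at .reg_stride = 4, pin index N
lives at byte offset N * 4. A hypothetical standalone illustration
(EXAMPLE_LAST_PIN is made up):

#include <linux/regmap.h>

#define EXAMPLE_LAST_PIN	0x99	/* hypothetical last pin index */

static const struct regmap_config example_cfg = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	/* Address of the last register (0x99 * 4 == 0x264), not its index. */
	.max_register	= EXAMPLE_LAST_PIN * 4,
};
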
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
index 471f644c5eef..d09a5e9b2eca 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -2374,6 +2374,9 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
pctrl->gpio_bank[id].gc.parent = dev;
pctrl->gpio_bank[id].gc.fwnode = child;
pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
+ if (pctrl->gpio_bank[id].gc.label == NULL)
+ return -ENOMEM;
+
pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input;
pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input;
diff --git a/drivers/pinctrl/spacemit/Kconfig b/drivers/pinctrl/spacemit/Kconfig
index 168f8a5ffbb9..d6f6017fd097 100644
--- a/drivers/pinctrl/spacemit/Kconfig
+++ b/drivers/pinctrl/spacemit/Kconfig
@@ -4,9 +4,10 @@
#
config PINCTRL_SPACEMIT_K1
- tristate "SpacemiT K1 SoC Pinctrl driver"
+ bool "SpacemiT K1 SoC Pinctrl driver"
depends on ARCH_SPACEMIT || COMPILE_TEST
depends on OF
+ default ARCH_SPACEMIT
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c
index a32579d73613..59fd555ff38d 100644
--- a/drivers/pinctrl/spacemit/pinctrl-k1.c
+++ b/drivers/pinctrl/spacemit/pinctrl-k1.c
@@ -1044,7 +1044,7 @@ static struct platform_driver k1_pinctrl_driver = {
.of_match_table = k1_pinctrl_ids,
},
};
-module_platform_driver(k1_pinctrl_driver);
+builtin_platform_driver(k1_pinctrl_driver);
MODULE_AUTHOR("Yixun Lan <dlan@gentoo.org>");
MODULE_DESCRIPTION("Pinctrl driver for the SpacemiT K1 SoC");
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index d4f32ad66530..a594d5fcfcfd 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -371,7 +371,7 @@ static const struct software_node *ssam_node_group_sp8[] = {
NULL,
};
-/* Devices for Surface Pro 9 (Intel/x86) and 10 */
+/* Devices for Surface Pro 9, 10 and 11 (Intel/x86) */
static const struct software_node *ssam_node_group_sp9[] = {
&ssam_node_root,
&ssam_node_hub_kip,
@@ -430,6 +430,9 @@ static const struct acpi_device_id ssam_platform_hub_acpi_match[] = {
/* Surface Pro 10 */
{ "MSHW0510", (unsigned long)ssam_node_group_sp9 },
+ /* Surface Pro 11 */
+ { "MSHW0583", (unsigned long)ssam_node_group_sp9 },
+
/* Surface Book 2 */
{ "MSHW0107", (unsigned long)ssam_node_group_gen5 },
diff --git a/drivers/platform/x86/amd/hsmp/Kconfig b/drivers/platform/x86/amd/hsmp/Kconfig
index 7d10d4462a45..d6f7a62d55b5 100644
--- a/drivers/platform/x86/amd/hsmp/Kconfig
+++ b/drivers/platform/x86/amd/hsmp/Kconfig
@@ -7,7 +7,7 @@ config AMD_HSMP
tristate
menu "AMD HSMP Driver"
- depends on AMD_NB || COMPILE_TEST
+ depends on AMD_NODE || COMPILE_TEST
config AMD_HSMP_ACPI
tristate "AMD HSMP ACPI device driver"
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index 444b43be35a2..c1eccb3c80c5 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/device.h>
@@ -24,6 +23,8 @@
#include <uapi/asm-generic/errno-base.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
@@ -321,8 +322,8 @@ static int hsmp_acpi_probe(struct platform_device *pdev)
return -ENOMEM;
if (!hsmp_pdev->is_probed) {
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return -ENODEV;
hsmp_pdev->sock = devm_kcalloc(&pdev->dev, hsmp_pdev->num_sockets,
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 03164e30b3a5..a3ac09a90de4 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -8,7 +8,6 @@
*/
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/delay.h>
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.h b/drivers/platform/x86/amd/hsmp/hsmp.h
index e852f0a947e4..af8b21f821d6 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.h
+++ b/drivers/platform/x86/amd/hsmp/hsmp.h
@@ -21,8 +21,6 @@
#define HSMP_ATTR_GRP_NAME_SIZE 10
-#define MAX_AMD_SOCKETS 8
-
#define HSMP_CDEV_NAME "hsmp_cdev"
#define HSMP_DEVNODE_NAME "hsmp"
@@ -41,7 +39,6 @@ struct hsmp_socket {
void __iomem *virt_base_addr;
struct semaphore hsmp_sem;
char name[HSMP_ATTR_GRP_NAME_SIZE];
- struct pci_dev *root;
struct device *dev;
u16 sock_ind;
int (*amd_hsmp_rdwr)(struct hsmp_socket *sock, u32 off, u32 *val, bool rw);
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
index 02ca85762b68..b9782a078dbd 100644
--- a/drivers/platform/x86/amd/hsmp/plat.c
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -10,14 +10,16 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
+#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
@@ -34,28 +36,12 @@
#define SMN_HSMP_MSG_RESP 0x0010980
#define SMN_HSMP_MSG_DATA 0x00109E0
-#define HSMP_INDEX_REG 0xc4
-#define HSMP_DATA_REG 0xc8
-
static struct hsmp_plat_device *hsmp_pdev;
static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
u32 *value, bool write)
{
- int ret;
-
- if (!sock->root)
- return -ENODEV;
-
- ret = pci_write_config_dword(sock->root, HSMP_INDEX_REG,
- sock->mbinfo.base_addr + offset);
- if (ret)
- return ret;
-
- ret = (write ? pci_write_config_dword(sock->root, HSMP_DATA_REG, *value)
- : pci_read_config_dword(sock->root, HSMP_DATA_REG, value));
-
- return ret;
+ return amd_smn_hsmp_rdwr(sock->sock_ind, sock->mbinfo.base_addr + offset, value, write);
}
static ssize_t hsmp_metric_tbl_plat_read(struct file *filp, struct kobject *kobj,
@@ -95,7 +81,12 @@ static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj,
* Static array of 8 + 1 (for NULL) elements is created below
* to create sysfs groups for sockets.
* is_bin_visible function is used to show / hide the necessary groups.
+ *
+ * Validate the maximum number against MAX_AMD_NUM_NODES. If this changes,
+ * then the attributes and groups below must be adjusted.
*/
+static_assert(MAX_AMD_NUM_NODES == 8);
+
#define HSMP_BIN_ATTR(index, _list) \
static const struct bin_attribute attr##index = { \
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444}, \
@@ -159,10 +150,7 @@ static int init_platform_device(struct device *dev)
int ret, i;
for (i = 0; i < hsmp_pdev->num_sockets; i++) {
- if (!node_to_amd_nb(i))
- return -ENODEV;
sock = &hsmp_pdev->sock[i];
- sock->root = node_to_amd_nb(i)->root;
sock->sock_ind = i;
sock->dev = dev;
sock->mbinfo.base_addr = SMN_HSMP_BASE;
@@ -305,11 +293,11 @@ static int __init hsmp_plt_init(void)
return -ENOMEM;
/*
- * amd_nb_num() returns number of SMN/DF interfaces present in the system
+ * amd_num_nodes() returns the number of SMN/DF interfaces present in the system;
* if we have N SMN/DF interfaces, that ideally means N sockets
*/
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return ret;
ret = platform_driver_register(&amd_hsmp_driver);
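
The deleted rdwr body was the classic index/data pair over the root device's
PCI config space (HSMP_INDEX_REG 0xc4 / HSMP_DATA_REG 0xc8 above);
amd_smn_hsmp_rdwr() now hides that behind the AMD node interface. For
reference, a generic sketch of the old pattern (example_indexed_read is
illustrative):

static int example_indexed_read(struct pci_dev *root, u32 addr, u32 *val)
{
	int ret;

	/* Select the internal SMN address through the index register... */
	ret = pci_write_config_dword(root, 0xc4 /* INDEX */, addr);
	if (ret)
		return ret;

	/* ...then move the payload through the data window. */
	return pci_read_config_dword(root, 0xc8 /* DATA */, val);
}
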
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index f34f3130c330..1d90f9382024 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -219,12 +219,14 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
switch (dev->current_profile) {
case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
val = TA_BEST_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
val = TA_BETTER_PERFORMANCE;
break;
case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
val = TA_BEST_BATTERY;
break;
default:
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index ceaff1ebb7b9..a1e43873a07b 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -510,18 +510,18 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
ret = amd_pmf_set_dram_addr(dev, true);
if (ret)
- goto error;
+ goto err_cancel_work;
dev->policy_base = devm_ioremap_resource(dev->dev, dev->res);
if (IS_ERR(dev->policy_base)) {
ret = PTR_ERR(dev->policy_base);
- goto error;
+ goto err_free_dram_buf;
}
dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
if (!dev->policy_buf) {
ret = -ENOMEM;
- goto error;
+ goto err_free_dram_buf;
}
memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
@@ -531,13 +531,13 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
if (!dev->prev_data) {
ret = -ENOMEM;
- goto error;
+ goto err_free_policy;
}
for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
if (ret)
- return ret;
+ goto err_free_prev_data;
ret = amd_pmf_start_policy_engine(dev);
switch (ret) {
@@ -550,27 +550,41 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
status = false;
break;
default:
- goto error;
+ ret = -EINVAL;
+ amd_pmf_tee_deinit(dev);
+ goto err_free_prev_data;
}
if (status)
break;
}
- if (!status && !pb_side_load)
- goto error;
+ if (!status && !pb_side_load) {
+ ret = -EINVAL;
+ goto err_free_prev_data;
+ }
if (pb_side_load)
amd_pmf_open_pb(dev, dev->dbgfs_dir);
ret = amd_pmf_register_input_device(dev);
if (ret)
- goto error;
+ goto err_pmf_remove_pb;
return 0;
-error:
- amd_pmf_deinit_smart_pc(dev);
+err_pmf_remove_pb:
+ if (pb_side_load && dev->esbin)
+ amd_pmf_remove_pb(dev);
+ amd_pmf_tee_deinit(dev);
+err_free_prev_data:
+ kfree(dev->prev_data);
+err_free_policy:
+ kfree(dev->policy_buf);
+err_free_dram_buf:
+ kfree(dev->buf);
+err_cancel_work:
+ cancel_delayed_work_sync(&dev->pb_work);
return ret;
}
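
The rework above replaces one catch-all error label, which called the full
amd_pmf_deinit_smart_pc() and could tear down state that was never set up,
with the usual kernel unwind ladder where each label frees exactly what was
allocated before the failing step. The idiom in miniature (all names
hypothetical):

static int example_init(struct example_dev *d)
{
	int ret;

	d->a = kzalloc(64, GFP_KERNEL);
	if (!d->a)
		return -ENOMEM;

	d->b = kzalloc(64, GFP_KERNEL);
	if (!d->b) {
		ret = -ENOMEM;
		goto err_free_a;	/* undo only what already succeeded */
	}

	ret = example_start(d);
	if (ret)
		goto err_free_b;

	return 0;

err_free_b:
	kfree(d->b);
err_free_a:
	kfree(d->a);
	return ret;
}
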
diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
index 42ce41a2fe3a..ff76ea36835e 100644
--- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
@@ -221,7 +221,7 @@ static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = {
SEC_PD(T7_VI_CLK2, 0),
/* ETH is for ethernet online wakeup, and should be always on */
SEC_PD(T7_ETH, GENPD_FLAG_ALWAYS_ON),
- SEC_PD(T7_ISP, 0),
+ TOP_PD(T7_ISP, 0, PWRC_T7_MIPI_ISP_ID),
SEC_PD(T7_MIPI_ISP, 0),
TOP_PD(T7_GDC, 0, PWRC_T7_NIC3_ID),
TOP_PD(T7_DEWARP, 0, PWRC_T7_NIC3_ID),
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 1a6fc8d38e20..90c664d344d0 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -162,11 +162,11 @@ static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
data->wde_interval = 300L * NSEC_PER_MSEC;
data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC);
- hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_trigger.function = ltc2952_poweroff_timer_trigger;
+ hrtimer_setup(&data->timer_trigger, ltc2952_poweroff_timer_trigger, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&data->timer_wde, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_wde.function = ltc2952_poweroff_timer_wde;
+ hrtimer_setup(&data->timer_wde, ltc2952_poweroff_timer_wde, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static int ltc2952_poweroff_init(struct platform_device *pdev)
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 7a8d1feb8e90..dc6c8b0dd1cf 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -1787,13 +1787,12 @@ static int ab8500_chargalg_probe(struct platform_device *pdev)
psy_cfg.drv_data = di;
/* Initialize safety timer */
- hrtimer_init(&di->safety_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
+ hrtimer_setup(&di->safety_timer, ab8500_chargalg_safety_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize maintenance timer */
- hrtimer_init(&di->maintenance_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- di->maintenance_timer.function =
- ab8500_chargalg_maintenance_timer_expired;
+ hrtimer_setup(&di->maintenance_timer, ab8500_chargalg_maintenance_timer_expired,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* Init work for chargalg */
INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 04c212953ded..5ad7cc438068 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -339,8 +339,7 @@ struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
return NULL;
cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
- hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ii_dev->timer.function = idle_inject_timer_fn;
+ hrtimer_setup(&ii_dev->timer, idle_inject_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ii_dev->latency_us = UINT_MAX;
ii_dev->update = update;
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 77d75e1f14a9..5ab3feb29686 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1274,7 +1274,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &rapl_defaults_byt),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &rapl_defaults_cht),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &rapl_defaults_tng),
- X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &rapl_defaults_ann),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID2, &rapl_defaults_ann),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &rapl_defaults_core),
@@ -2064,8 +2064,7 @@ int rapl_package_add_pmu(struct rapl_package *rp)
raw_spin_lock_init(&data->lock);
INIT_LIST_HEAD(&data->active_list);
data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms);
- hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->hrtimer.function = rapl_hrtimer_handle;
+ hrtimer_setup(&data->hrtimer, rapl_hrtimer_handle, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return rapl_pmu_update(rp);
}
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index d46eed159495..f5eeb4dd01ad 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -208,8 +208,7 @@ static void parport_attach(struct parport *port)
calibrate_port(&device);
- hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- device.timer.function = hrtimer_event;
+ hrtimer_setup(&device.timer, hrtimer_event, CLOCK_REALTIME, HRTIMER_MODE_ABS);
hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
return;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 4ddf0efead68..00a7f3617cd8 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1830,12 +1830,49 @@ static const struct file_operations constraint_flags_fops = {
#define REG_STR_SIZE 64
+static void link_and_create_debugfs(struct regulator *regulator, struct regulator_dev *rdev,
+ struct device *dev)
+{
+ int err = 0;
+
+ if (dev) {
+ regulator->dev = dev;
+
+ /* Add a link to the device sysfs entry */
+ err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
+ regulator->supply_name);
+ if (err) {
+ rdev_dbg(rdev, "could not add device link %s: %pe\n",
+ dev->kobj.name, ERR_PTR(err));
+ /* non-fatal */
+ }
+ }
+
+ if (err != -EEXIST) {
+ regulator->debugfs = debugfs_create_dir(regulator->supply_name, rdev->debugfs);
+ if (IS_ERR(regulator->debugfs)) {
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ regulator->debugfs = NULL;
+ }
+ }
+
+ if (regulator->debugfs) {
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
+ debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
+ regulator, &constraint_flags_fops);
+ }
+}
+
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name)
{
struct regulator *regulator;
- int err = 0;
lockdep_assert_held_once(&rdev->mutex.base);
@@ -1868,38 +1905,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
list_add(&regulator->list, &rdev->consumer_list);
- if (dev) {
- regulator->dev = dev;
-
- /* Add a link to the device sysfs entry */
- err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
- supply_name);
- if (err) {
- rdev_dbg(rdev, "could not add device link %s: %pe\n",
- dev->kobj.name, ERR_PTR(err));
- /* non-fatal */
- }
- }
-
- if (err != -EEXIST) {
- regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
- if (IS_ERR(regulator->debugfs)) {
- rdev_dbg(rdev, "Failed to create debugfs directory\n");
- regulator->debugfs = NULL;
- }
- }
-
- if (regulator->debugfs) {
- debugfs_create_u32("uA_load", 0444, regulator->debugfs,
- &regulator->uA_load);
- debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].min_uV);
- debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].max_uV);
- debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
- regulator, &constraint_flags_fops);
- }
-
/*
* Check now if the regulator is an always on regulator - if
* it is then we don't need to do nearly so much work for
@@ -2069,6 +2074,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (have_full_constraints()) {
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
@@ -2086,6 +2095,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
goto out;
}
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
}
@@ -2133,6 +2146,9 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
regulator_unlock_two(rdev, r, &ww_ctx);
+ /* rdev->supply was created in set_supply() */
+ link_and_create_debugfs(rdev->supply, r, &rdev->dev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
@@ -2211,8 +2227,10 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
* enabled, even if it isn't hooked up, and just
* provide a dummy.
*/
- dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
rdev = dummy_regulator_rdev;
+ if (!rdev)
+ return ERR_PTR(-EPROBE_DEFER);
+ dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
get_device(&rdev->dev);
break;
@@ -2271,6 +2289,8 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
return regulator;
}
+ link_and_create_debugfs(regulator, rdev, dev);
+
rdev->open_count++;
if (get_type == EXCLUSIVE_GET) {
rdev->exclusive = 1;
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 5b9b9e4e762d..9f59889129ab 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -60,7 +60,7 @@ static struct platform_driver dummy_regulator_driver = {
.probe = dummy_regulator_probe,
.driver = {
.name = "reg-dummy",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
},
};
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
index 5925fa7a9a06..9cde7181b0f0 100644
--- a/drivers/regulator/rtq2208-regulator.c
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -27,6 +27,11 @@
#define RTQ2208_REG_LDO1_CFG 0xB1
#define RTQ2208_REG_LDO2_CFG 0xC1
#define RTQ2208_REG_LDO_DVS_CTRL 0xD0
+#define RTQ2208_REG_HIDDEN_BUCKPH 0x55
+#define RTQ2208_REG_HIDDEN_LDOCFG0 0x8F
+#define RTQ2208_REG_HIDDEN_LDOCFG1 0x96
+#define RTQ2208_REG_HIDDEN0 0xFE
+#define RTQ2208_REG_HIDDEN1 0xFF
/* Mask */
#define RTQ2208_BUCK_NR_MTP_SEL_MASK GENMASK(7, 0)
@@ -45,6 +50,11 @@
#define RTQ2208_LDO1_VOSEL_SD_MASK BIT(5)
#define RTQ2208_LDO2_DISCHG_EN_MASK BIT(6)
#define RTQ2208_LDO2_VOSEL_SD_MASK BIT(7)
+#define RTQ2208_MASK_BUCKPH_GROUP1 GENMASK(6, 4)
+#define RTQ2208_MASK_BUCKPH_GROUP2 GENMASK(2, 0)
+#define RTQ2208_MASK_LDO2_OPT0 BIT(7)
+#define RTQ2208_MASK_LDO2_OPT1 BIT(6)
+#define RTQ2208_MASK_LDO1_FIXED BIT(6)
/* Size */
#define RTQ2208_VOUT_MAXNUM 256
@@ -245,11 +255,6 @@ static const unsigned int rtq2208_ldo_volt_table[] = {
3300000,
};
-static struct of_regulator_match rtq2208_ldo_match[] = {
- {.name = "ldo2", },
- {.name = "ldo1", },
-};
-
static unsigned int rtq2208_of_map_mode(unsigned int mode)
{
switch (mode) {
@@ -344,59 +349,6 @@ static irqreturn_t rtq2208_irq_handler(int irqno, void *devid)
return IRQ_HANDLED;
}
-static int rtq2208_of_get_ldo_dvs_ability(struct device *dev)
-{
- struct device_node *np;
- struct of_regulator_match *match;
- struct regulator_desc *desc;
- struct regulator_init_data *init_data;
- u32 fixed_uV;
- int ret, i;
-
- if (!dev->of_node)
- return -ENODEV;
-
- np = of_get_child_by_name(dev->of_node, "regulators");
- if (!np)
- np = dev->of_node;
-
- ret = of_regulator_match(dev, np, rtq2208_ldo_match, ARRAY_SIZE(rtq2208_ldo_match));
-
- of_node_put(np);
-
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(rtq2208_ldo_match); i++) {
- match = rtq2208_ldo_match + i;
- init_data = match->init_data;
- desc = (struct regulator_desc *)match->desc;
-
- if (!init_data || !desc)
- continue;
-
- /* specify working fixed voltage if the propery exists */
- ret = of_property_read_u32(match->of_node, "richtek,fixed-microvolt", &fixed_uV);
-
- if (!ret) {
- if (fixed_uV != init_data->constraints.min_uV ||
- fixed_uV != init_data->constraints.max_uV)
- return -EINVAL;
- desc->n_voltages = 1;
- desc->fixed_uV = fixed_uV;
- desc->fixed_uV = init_data->constraints.min_uV;
- desc->ops = &rtq2208_regulator_ldo_fix_ops;
- } else {
- desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
- desc->volt_table = rtq2208_ldo_volt_table;
- desc->ops = &rtq2208_regulator_ldo_adj_ops;
- }
- }
-
- return 0;
-}
-
-
#define BUCK_INFO(_name, _id) \
{ \
.name = _name, \
@@ -424,9 +376,11 @@ static const struct linear_range rtq2208_vout_range[] = {
REGULATOR_LINEAR_RANGE(1310000, 181, 255, 10000),
};
-static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx)
+static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
struct regulator_desc *desc;
+ unsigned int fixed_uV;
static const struct {
char *name;
int base;
@@ -462,7 +416,8 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->mode_mask = RTQ2208_BUCK_NRMODE_MASK;
- if (idx >= RTQ2208_BUCK_B && idx <= RTQ2208_BUCK_E) {
+ switch (idx) {
+ case RTQ2208_BUCK_B ... RTQ2208_BUCK_E:
/* init buck desc */
desc->ops = &rtq2208_regulator_buck_ops;
desc->vsel_reg = curr_info->base + VSEL_SHIFT(mtp_sel);
@@ -480,7 +435,19 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = BUCK_RG_SHIFT(curr_info->base, 4);
rdesc->suspend_enable_mask = RTQ2208_BUCK_EN_STR_MASK;
rdesc->suspend_mode_mask = RTQ2208_BUCK_STRMODE_MASK;
- } else {
+ break;
+ default:
+ fixed_uV = idx == RTQ2208_LDO2 ? ldo2_fixed : ldo1_fixed;
+ if (fixed_uV) {
+ desc->n_voltages = 1;
+ desc->fixed_uV = fixed_uV;
+ desc->ops = &rtq2208_regulator_ldo_fix_ops;
+ } else {
+ desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
+ desc->volt_table = rtq2208_ldo_volt_table;
+ desc->ops = &rtq2208_regulator_ldo_adj_ops;
+ }
+
/* init ldo desc */
desc->active_discharge_reg = RTQ2208_REG_LDO_DVS_CTRL;
desc->active_discharge_on = curr_info->dis_on;
@@ -490,13 +457,15 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = curr_info->base;
rdesc->suspend_enable_mask = RTQ2208_LDO_EN_STR_MASK;
+ break;
}
}
static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *regulator_idx_table,
- struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev)
+ struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
- int mtp_sel, i, idx, ret;
+ int mtp_sel, i, idx;
/* get mtp_sel0 or mtp_sel1 */
mtp_sel = device_property_read_bool(dev, "richtek,mtp-sel-high");
@@ -508,43 +477,101 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
if (!rdesc[i])
return -ENOMEM;
- rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx);
-
- /* init ldo dvs ability */
- if (idx >= RTQ2208_LDO2)
- rtq2208_ldo_match[idx - RTQ2208_LDO2].desc = &rdesc[i]->desc;
+ rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx, ldo1_fixed, ldo2_fixed);
}
- /* init ldo fixed_uV */
- ret = rtq2208_of_get_ldo_dvs_ability(dev);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get ldo fixed_uV\n");
-
return 0;
}
-/** different slave addresses correspond to different used bucks
- * slave address 0x10: BUCK[BCA FGE]
- * slave address 0x20: BUCK[BC FGHE]
- * slave address 0x40: BUCK[C G]
- */
-static int rtq2208_regulator_check(int slave_addr, int *num,
- int *regulator_idx_table, unsigned int *buck_masks)
+static int rtq2208_regulator_check(struct device *dev, int *num, int *regulator_idx_table,
+ unsigned int *buck_masks, unsigned int *ldo1_fixed_uV,
+ unsigned int *ldo2_fixed_uV)
{
- static bool rtq2208_used_table[3][RTQ2208_LDO_MAX] = {
- /* BUCK[BCA FGE], LDO[12] */
- {1, 1, 0, 1, 1, 1, 0, 1, 1, 1},
- /* BUCK[BC FGHE], LDO[12]*/
- {1, 1, 0, 0, 1, 1, 1, 1, 1, 1},
- /* BUCK[C G], LDO[12] */
- {0, 1, 0, 0, 0, 1, 0, 0, 1, 1},
- };
- int i, idx = ffs(slave_addr >> 4) - 1;
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+ bool rtq2208_used_table[RTQ2208_LDO_MAX] = {0};
+ u8 entry_key[] = { 0x69, 0x01 };
+ unsigned int buck_phase, ldo_cfg0, ldo_cfg1;
+ int i, ret;
u8 mask;
+ ret = regmap_raw_write(regmap, RTQ2208_REG_HIDDEN0, entry_key, ARRAY_SIZE(entry_key));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enter hidden page\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_BUCKPH, &buck_phase);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read buck phase configuration\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG0, &ldo_cfg0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg0\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG1, &ldo_cfg1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg1\n");
+
+ ret = regmap_write(regmap, RTQ2208_REG_HIDDEN1, 0x00);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to exit hidden page\n");
+
+ dev_info(dev, "BUCK Phase 0x%x\n", buck_phase);
+ /*
+ * Use buck phase configuration to assign used table mask
+ * GROUP1 GROUP2
+ * 0 -> 2P + 2P BC FG
+ * 1 -> 2P + 1P + 1P BCA FGE
+ * 2 -> 1P + 1P + 1P + 1P BCDA FGHE
+ * 3 -> 3P + 1P BC FG
+ * others -> 4P C G
+ */
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP1, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_D] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_A] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_B] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_C] = true;
+ break;
+ }
+
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP2, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_F] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_E] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_H] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_G] = true;
+ break;
+ }
+
+ *ldo1_fixed_uV = FIELD_GET(RTQ2208_MASK_LDO1_FIXED, ldo_cfg1) ? 1200000 : 0;
+
+ if (!FIELD_GET(RTQ2208_MASK_LDO2_OPT0, ldo_cfg0) &&
+ !FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 0;
+ else if (FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 900000;
+ else
+ *ldo2_fixed_uV = 1200000;
+
+ /* By default, LDO1 & LDO2 are always used */
+ rtq2208_used_table[RTQ2208_LDO1] = rtq2208_used_table[RTQ2208_LDO2] = true;
+
for (i = 0; i < RTQ2208_LDO_MAX; i++) {
- if (!rtq2208_used_table[idx][i])
+ if (!rtq2208_used_table[i])
continue;
regulator_idx_table[(*num)++] = i;
@@ -559,7 +586,7 @@ static int rtq2208_regulator_check(int slave_addr, int *num,
static const struct regmap_config rtq2208_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0xEF,
+ .max_register = 0xFF,
};
static int rtq2208_probe(struct i2c_client *i2c)
@@ -573,6 +600,7 @@ static int rtq2208_probe(struct i2c_client *i2c)
int i, ret = 0, idx, n_regulator = 0;
unsigned int regulator_idx_table[RTQ2208_LDO_MAX],
buck_masks[RTQ2208_BUCK_NUM_IRQ_REGS] = {0x33, 0x33, 0x33, 0x33, 0x33};
+ unsigned int ldo1_fixed_uV, ldo2_fixed_uV;
rdev_map = devm_kzalloc(dev, sizeof(struct rtq2208_rdev_map), GFP_KERNEL);
if (!rdev_map)
@@ -583,7 +611,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
return dev_err_probe(dev, PTR_ERR(regmap), "Failed to allocate regmap\n");
/* get needed regulator */
- ret = rtq2208_regulator_check(i2c->addr, &n_regulator, regulator_idx_table, buck_masks);
+ ret = rtq2208_regulator_check(dev, &n_regulator, regulator_idx_table, buck_masks,
+ &ldo1_fixed_uV, &ldo2_fixed_uV);
if (ret)
return dev_err_probe(dev, ret, "Failed to check used regulators\n");
@@ -593,7 +622,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
cfg.dev = dev;
/* init regulator desc */
- ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev);
+ ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev,
+ ldo1_fixed_uV, ldo2_fixed_uV);
if (ret)
return ret;
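The cascading switch above decodes the buck-phase register so that each phase configuration enables its own rails plus everything a narrower configuration enables. A minimal sketch of the same FIELD_GET()/fallthrough idiom, with hypothetical mask and rail names:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/compiler_attributes.h>

#define DEMO_MASK_GROUP1	GENMASK(1, 0)

enum { DEMO_RAIL_A, DEMO_RAIL_B, DEMO_RAIL_C, DEMO_RAIL_D, DEMO_RAIL_MAX };

static void demo_decode_phase(unsigned int reg, bool used[DEMO_RAIL_MAX])
{
	switch (FIELD_GET(DEMO_MASK_GROUP1, reg)) {
	case 2:				/* 1P+1P+1P+1P: four rails */
		used[DEMO_RAIL_D] = true;
		fallthrough;
	case 1:				/* 2P+1P+1P: three rails */
		used[DEMO_RAIL_A] = true;
		fallthrough;
	case 0:				/* 2P+2P */
	case 3:				/* 3P+1P: two rails */
		used[DEMO_RAIL_B] = true;
		fallthrough;
	default:			/* 4P: one rail */
		used[DEMO_RAIL_C] = true;
		break;
	}
}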
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index aa5464be7053..6d3e75b33260 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -8,6 +8,7 @@
*/
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -72,14 +73,22 @@ static struct regmap *mchp_lan966x_syscon_to_regmap(struct device *dev,
struct device_node *syscon_np)
{
struct regmap_config regmap_config = mchp_lan966x_syscon_regmap_config;
- resource_size_t size;
+ struct resource res;
void __iomem *base;
+ int err;
+
+ err = of_address_to_resource(syscon_np, 0, &res);
+ if (err)
+ return ERR_PTR(err);
- base = devm_of_iomap(dev, syscon_np, 0, &size);
- if (IS_ERR(base))
- return ERR_CAST(base);
+ /* It is not possible to use devm_of_iomap because this resource is
+ * shared with other drivers.
+ */
+ base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!base)
+ return ERR_PTR(-ENOMEM);
- regmap_config.max_register = size - 4;
+ regmap_config.max_register = resource_size(&res) - 4;
return devm_regmap_init_mmio(dev, base, &regmap_config);
}
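devm_of_iomap() calls devm_request_mem_region() internally, so it fails with -EBUSY once a second driver maps the same syscon window; mapping with plain devm_ioremap() skips the exclusive claim. A reduced sketch of the pattern, assuming a device node whose first reg entry is the shared window:

#include <linux/io.h>
#include <linux/of_address.h>

static void __iomem *demo_map_shared(struct device *dev,
				     struct device_node *np)
{
	struct resource res;
	void __iomem *base;
	int err;

	err = of_address_to_resource(np, 0, &res);
	if (err)
		return IOMEM_ERR_PTR(err);

	/* no request_mem_region(): the window is shared across drivers */
	base = devm_ioremap(dev, res.start, resource_size(&res));
	if (!base)
		return IOMEM_ERR_PTR(-ENOMEM);

	return base;
}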
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e31fa0ad127e..b88cd4fb295b 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -240,8 +240,7 @@ static struct rtc_device *rtc_allocate_device(void)
/* Init uie timer */
rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, rtc);
/* Init pie timer */
- hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rtc->pie_timer.function = rtc_pie_update_irq;
+ hrtimer_setup(&rtc->pie_timer, rtc_pie_update_irq, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rtc->pie_enabled = 0;
set_bit(RTC_FEATURE_ALARM, rtc->features);
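This is the first of many conversions in this section from the two-step hrtimer_init() plus ->function assignment to a single hrtimer_setup() call, which guarantees the callback can never be left unset. A minimal sketch with hypothetical names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* do the periodic work here, then stop */
	return HRTIMER_NORESTART;
}

static void my_timer_init(void)
{
	/* old style:
	 *   hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *   my_timer.function = my_timer_fn;
	 * new style, one call, callback set atomically with init:
	 */
	hrtimer_setup(&my_timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}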
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 26e1ea1940ec..62feb2c639d5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -2326,8 +2326,7 @@ static inline int __init ap_async_init(void)
*/
if (MACHINE_IS_VM)
poll_high_timeout = 1500000;
- hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ap_poll_timer.function = ap_poll_timeout;
+ hrtimer_setup(&ap_poll_timer, ap_poll_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
queue_work(system_long_wq, &ap_scan_bus_work);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 16d085d56e9d..9e42230e42b8 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2922,9 +2922,7 @@ static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
struct timer_cb *p_timer;
p_timer = &vscsi->rsp_q_timer;
- hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
- p_timer->timer.function = ibmvscsis_service_wait_q;
+ hrtimer_setup(&p_timer->timer, ibmvscsis_service_wait_q, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
p_timer->started = false;
p_timer->timer_pops = 0;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5a9c5a323f8..62438e84e52a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1715,18 +1715,12 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
-static inline unsigned int
+static __always_inline unsigned int
lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
- unsigned int cpu_it;
-
- for_each_cpu_wrap(cpu_it, mask, start) {
- if (cpu_online(cpu_it))
- break;
- }
-
- return cpu_it;
+ return cpumask_next_and_wrap(start, mask, cpu_online_mask);
}
+
/**
* lpfc_next_present_cpu - Finds next present CPU after n
* @n: the cpu prior to search
@@ -1734,16 +1728,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
* Note: If no next present cpu, then fallback to first present cpu.
*
**/
-static inline unsigned int lpfc_next_present_cpu(int n)
+static __always_inline unsigned int lpfc_next_present_cpu(int n)
{
- unsigned int cpu;
-
- cpu = cpumask_next(n, cpu_present_mask);
-
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(cpu_present_mask);
-
- return cpu;
+ return cpumask_next_wrap(n, cpu_present_mask);
}
/**
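cpumask_next_and_wrap(start, mask, cpu_online_mask) folds the open-coded for_each_cpu_wrap() plus cpu_online() test into one call: it returns the next CPU after start that is set in both masks, wrapping around, or nr_cpu_ids when the intersection is empty. A small sketch of a caller, with a hypothetical fallback policy:

#include <linux/cpumask.h>

static unsigned int demo_pick_online_cpu(const struct cpumask *mask,
					 unsigned int start)
{
	unsigned int cpu = cpumask_next_and_wrap(start, mask, cpu_online_mask);

	/* nr_cpu_ids means no online CPU in @mask; fall back to any online */
	return cpu < nr_cpu_ids ? cpu : cpumask_first(cpu_online_mask);
}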
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index bcadf11414c8..919bf9b7ac26 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -7952,11 +7952,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
/* CMF congestion timer */
- hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_timer.function = lpfc_cmf_timer;
+ hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* CMF 1 minute stats collection timer */
- hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
+ hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Control structure for handling external multi-buffer mailbox
@@ -12873,7 +12872,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
if (offline) {
/* Find next online CPU on original mask */
- cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_next = cpumask_next_wrap(cpu, orig_mask);
cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 0ba9e6a6a13c..c8d6ced5640e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -105,10 +105,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
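The __nonstring attribute tells the compiler (GCC 15's -Wunterminated-string-initialization and the fortified string helpers) that these fixed-width SCSI fields are space-padded byte arrays, not NUL-terminated C strings. A minimal sketch of declaring and filling such a field:

#include <linux/compiler_attributes.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_inquiry {
	u8 vendor_id[8] __nonstring;	/* space-padded, no trailing NUL */
};

static void demo_fill(struct demo_inquiry *inq)
{
	/* memcpy(), not strscpy(): the destination is not a C string */
	memcpy(inq->vendor_id, "LINUX   ", sizeof(inq->vendor_id));
}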
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 587f7d248219..d123d3b740e1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -606,7 +606,7 @@ typedef struct _MPI2_CONFIG_REPLY {
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
- U8 ChipName[16]; /*0x04 */
+ U8 ChipName[16] __nonstring; /*0x04 */
U8 ChipRevision[8]; /*0x14 */
U8 BoardName[16]; /*0x1C */
U8 BoardAssembly[16]; /*0x2C */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d84413b77d84..dc74ebc6405a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -328,10 +328,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4f63aff333db..3a2bd953a976 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -282,8 +282,8 @@ struct register_host_info {
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
struct config_info_data {
- uint8_t model_num[16];
- uint8_t model_description[80];
+ uint8_t model_num[16] __nonstring;
+ uint8_t model_description[80] __nonstring;
uint8_t reserved0[160];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5ceaa4665e5d..fe5c30bb2639 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6384,8 +6384,8 @@ static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
sd_dp = &sqcp->sd_dp;
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+ hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
sqcp->scmd = scmd;
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index 8aa8dec14911..444a8f59b7da 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -1539,8 +1539,8 @@ static ssize_t used_types_show(struct kobject *kobj,
u16 i;
for (i = 0; i < hdev->used_type_num - 1; i++)
- len += sysfs_emit(&buf[len], "%s ", hdev->type_name_maps[i].name);
- len += sysfs_emit(&buf[len], "%s\n", hdev->type_name_maps[i].name);
+ len += sysfs_emit_at(buf, len, "%s ", hdev->type_name_maps[i].name);
+ len += sysfs_emit_at(buf, len, "%s\n", hdev->type_name_maps[i].name);
return len;
}
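sysfs_emit() insists on writing to the start of the sysfs page buffer and warns when handed an interior pointer such as &buf[len]; sysfs_emit_at() is the offset-aware variant and keeps the PAGE_SIZE bounds check intact. A minimal sketch:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_names_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	static const char * const names[] = { "alpha", "beta", "gamma" };
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(names) - 1; i++)
		len += sysfs_emit_at(buf, len, "%s ", names[i]);
	len += sysfs_emit_at(buf, len, "%s\n", names[i]);

	return len;
}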
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 8ac7658e3d52..3ed8161d7d28 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -192,9 +192,20 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
devm_kasprintf((dev), GFP_KERNEL, "%d.%d", ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \
"unknown"
+static void imx8m_unregister_soc(void *data)
+{
+ soc_device_unregister(data);
+}
+
+static void imx8m_unregister_cpufreq(void *data)
+{
+ platform_device_unregister(data);
+}
+
static int imx8m_soc_probe(struct platform_device *pdev)
{
struct soc_device_attribute *soc_dev_attr;
+ struct platform_device *cpufreq_dev;
const struct imx8_soc_data *data;
struct device *dev = &pdev->dev;
const struct of_device_id *id;
@@ -239,11 +250,22 @@ static int imx8m_soc_probe(struct platform_device *pdev)
if (IS_ERR(soc_dev))
return PTR_ERR(soc_dev);
+ ret = devm_add_action(dev, imx8m_unregister_soc, soc_dev);
+ if (ret)
+ return ret;
+
pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
soc_dev_attr->revision);
- if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
- platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) {
+ cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ERR(cpufreq_dev))
+ return dev_err_probe(dev, PTR_ERR(cpufreq_dev),
+ "Failed to register imx-cpufreq-dev device\n");
+ ret = devm_add_action(dev, imx8m_unregister_cpufreq, cpufreq_dev);
+ if (ret)
+ return ret;
+ }
return 0;
}
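Registering an unwind action with devm ties teardown to driver detach, which is what lets both registrations above lose their manual error paths. Note that plain devm_add_action() does not run the callback if registration itself fails, so each call is checked; devm_add_action_or_reset() is the variant that invokes the callback on failure. A minimal sketch with hypothetical names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static void demo_unregister(void *data)
{
	platform_device_unregister(data);
}

static int demo_attach(struct device *dev)
{
	struct platform_device *child;

	child = platform_device_register_simple("demo-child", -1, NULL, 0);
	if (IS_ERR(child))
		return PTR_ERR(child);

	/* runs demo_unregister(child) automatically on driver detach;
	 * the _or_reset variant would also run it if this call fails
	 */
	return devm_add_action_or_reset(dev, demo_unregister, child);
}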
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 328b6153b2be..71be378d2e43 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -75,7 +75,6 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
locator_hdl);
- struct pdr_service *pds;
mutex_lock(&pdr->lock);
/* Create a local client port for QMI communication */
@@ -87,12 +86,7 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
mutex_unlock(&pdr->lock);
/* Service pending lookup requests */
- mutex_lock(&pdr->list_lock);
- list_for_each_entry(pds, &pdr->lookups, node) {
- if (pds->need_locator_lookup)
- schedule_work(&pdr->locator_work);
- }
- mutex_unlock(&pdr->list_lock);
+ schedule_work(&pdr->locator_work);
return 0;
}
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index 052c292eeda6..cde19cdfd3c7 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -233,7 +233,7 @@ static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
{
- struct pmic_glink *pg = __pmic_glink;
+ struct pmic_glink *pg;
guard(mutex)(&__pmic_glink_lock);
pg = __pmic_glink;
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index c36364522157..193266f5e3f9 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -103,19 +103,15 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
if (ret)
return ret;
- msi_lock_descs(dev);
+ guard(msi_descs_lock)(dev);
nvec = ti_sci_inta_msi_alloc_descs(dev, res);
- if (nvec <= 0) {
- ret = nvec;
- goto unlock;
- }
+ if (nvec <= 0)
+ return nvec;
/* Use alloc ALL as it's unclear whether there are gaps in the indices */
ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, nvec);
if (ret)
dev_err(dev, "Failed to allocate IRQs %d\n", ret);
-unlock:
- msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
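guard() from <linux/cleanup.h> binds the lock to the enclosing scope, so every return path drops it automatically and the unlock label disappears; guard(msi_descs_lock)(dev) above is the MSI-specific guard class. The same idiom with a plain mutex, as a minimal sketch:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_count;

static int demo_add(int n)
{
	guard(mutex)(&demo_lock);	/* released on every return below */

	if (n < 0)
		return -EINVAL;		/* no goto-unlock needed */

	demo_count += n;
	return 0;
}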
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index d8c9be64d006..244ac0106862 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -930,11 +930,8 @@ static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
/* Release the chip-select. */
ret = atmel_qspi_reg_sync(aq);
- if (ret) {
- pm_runtime_mark_last_busy(&aq->pdev->dev);
- pm_runtime_put_autosuspend(&aq->pdev->dev);
+ if (ret)
return ret;
- }
atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index 5b6af55855ef..62ba0bd9cbb7 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -70,8 +70,7 @@
#define INT_RX_CHANNEL_OVERFLOW BIT(2)
#define INT_TX_CHANNEL_UNDERRUN BIT(3)
-#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \
- CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
+#define INT_ENABLE_MASK (CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
#define REG_CONTROL (0x00)
#define REG_FRAME_SIZE (0x04)
@@ -133,10 +132,15 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi)
mchp_corespi_write(spi, REG_CONTROL, control);
}
-static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max)
{
- while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
- u32 data = mchp_corespi_read(spi, REG_RX_DATA);
+ for (int i = 0; i < fifo_max; i++) {
+ u32 data;
+
+ while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)
+ ;
+
+ data = mchp_corespi_read(spi, REG_RX_DATA);
spi->rx_len -= spi->n_bytes;
@@ -211,11 +215,10 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
mchp_corespi_write(spi, REG_FRAMESUP, len);
}
-static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max)
{
- int fifo_max, i = 0;
+ int i = 0;
- fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
mchp_corespi_set_xfer_size(spi, fifo_max);
while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
@@ -413,19 +416,6 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
if (intfield == 0)
return IRQ_NONE;
- if (intfield & INT_TXDONE)
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
-
- if (intfield & INT_RXRDY) {
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
-
- if (spi->rx_len)
- mchp_corespi_read_fifo(spi);
- }
-
- if (!spi->rx_len && !spi->tx_len)
- finalise = true;
-
if (intfield & INT_RX_CHANNEL_OVERFLOW) {
mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
finalise = true;
@@ -512,9 +502,14 @@ static int mchp_corespi_transfer_one(struct spi_controller *host,
mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
- while (spi->tx_len)
- mchp_corespi_write_fifo(spi);
+ while (spi->tx_len) {
+ int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
+
+ mchp_corespi_write_fifo(spi, fifo_max);
+ mchp_corespi_read_fifo(spi, fifo_max);
+ }
+ spi_finalize_current_transfer(host);
return 1;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a7a4647717d4..ff07c87dbadc 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2954,9 +2954,13 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct device *child;
+ int ret;
child = device_find_any_child(&ctlr->dev);
- return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ put_device(child);
+
+ return ret;
}
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
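device_find_any_child() takes a reference on the child it returns, so the show path must balance it with put_device() even when the child is only inspected; put_device(NULL) is a no-op, which keeps the code flat. A minimal sketch:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_child_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct device *child = device_find_any_child(dev);
	ssize_t ret;

	ret = sysfs_emit(buf, "%s\n", child ? dev_name(child) : "(none)");
	put_device(child);	/* put_device(NULL) is safe */

	return ret;
}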
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index 049246774ced..6146555fe9cf 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -172,10 +172,10 @@ void atomisp_unregister_subdev(struct v4l2_subdev *subdev);
#define IS_BYT __IS_SOC(INTEL_ATOM_SILVERMONT)
#define IS_CHT __IS_SOC(INTEL_ATOM_AIRMONT)
#define IS_MRFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID)
-#define IS_MOFD __IS_SOC(INTEL_ATOM_AIRMONT_MID)
+#define IS_MOFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID2)
/* Both CHT and MOFD come with ISP2401 */
#define IS_ISP2401 __IS_SOCS(INTEL_ATOM_AIRMONT, \
- INTEL_ATOM_AIRMONT_MID)
+ INTEL_ATOM_SILVERMONT_MID2)
#endif /* ATOMISP_PLATFORM_H_ */
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index 4aa2797f5e3c..8b85524beb59 100644
--- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -322,7 +322,8 @@ struct ipu3_uapi_ae_config {
* 0: positive, 1: negative, default 0.
* @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be
* used for building histogram. Range [0, 32], default 8.
- * Rule:
+ *
+ * Rule:
* y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32
* A single Y is calculated based on sum of Gr/R/B/Gb based on
* their contribution ratio.
diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
index 817421508d5c..b2a615aea7c1 100644
--- a/drivers/thermal/intel/intel_tcc.c
+++ b/drivers/thermal/intel/intel_tcc.c
@@ -106,7 +106,7 @@ static const struct x86_cpu_id intel_tcc_cpu_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &temp_broadwell),
- X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID2, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &temp_goldmont),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &temp_goldmont),
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 8229a6fbda5a..717b31d78728 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -1009,6 +1009,8 @@ static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
*/
tb_tunnel_get(tunnel);
+ tunnel->dprx_started = true;
+
if (tunnel->callback) {
tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
@@ -1021,9 +1023,12 @@ static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
{
- tunnel->dprx_canceled = true;
- cancel_delayed_work(&tunnel->dprx_work);
- tb_tunnel_put(tunnel);
+ if (tunnel->dprx_started) {
+ tunnel->dprx_started = false;
+ tunnel->dprx_canceled = true;
+ cancel_delayed_work(&tunnel->dprx_work);
+ tb_tunnel_put(tunnel);
+ }
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 7f6d3a18a41e..8a0a0cb21a89 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -63,6 +63,7 @@ enum tb_tunnel_state {
* @allocated_down: Allocated downstream bandwidth (only for USB3)
* @bw_mode: DP bandwidth allocation mode registers can be used to
* determine consumed and allocated bandwidth
+ * @dprx_started: DPRX negotiation was started (tb_dp_dprx_start() was called for it)
* @dprx_canceled: Was DPRX capabilities read poll canceled
* @dprx_timeout: If set DPRX capabilities read poll work will timeout after this passes
* @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read
@@ -100,6 +101,7 @@ struct tb_tunnel {
int allocated_up;
int allocated_down;
bool bw_mode;
+ bool dprx_started;
bool dprx_canceled;
ktime_t dprx_timeout;
struct delayed_work dprx_work;
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index d0b18358859e..742004d63c6f 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1056,8 +1056,7 @@ static int brcmuart_probe(struct platform_device *pdev)
}
/* setup HR timer */
- hrtimer_init(&priv->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->hrt.function = brcmuart_hrtimer_func;
+ hrtimer_setup(&priv->hrt, brcmuart_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
up.port.shutdown = brcmuart_shutdown;
up.port.startup = brcmuart_startup;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 442967a6cd52..c57f44882abb 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -566,12 +566,10 @@ static int serial8250_em485_init(struct uart_8250_port *p)
if (!p->em485)
return -ENOMEM;
- hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- p->em485->stop_tx_timer.function = &serial8250_em485_handle_stop_tx;
- p->em485->start_tx_timer.function = &serial8250_em485_handle_start_tx;
+ hrtimer_setup(&p->em485->stop_tx_timer, &serial8250_em485_handle_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&p->em485->start_tx_timer, &serial8250_em485_handle_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
p->em485->port = p;
p->em485->active_timer = NULL;
p->em485->tx_stopped = true;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 04212c823a91..98f178bdbcbe 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2867,11 +2867,10 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
return -EINVAL;
}
}
-
- hrtimer_init(&uap->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_init(&uap->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- uap->trigger_start_tx.function = pl011_trigger_start_tx;
- uap->trigger_stop_tx.function = pl011_trigger_stop_tx;
+ hrtimer_setup(&uap->trigger_start_tx, pl011_trigger_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&uap->trigger_stop_tx, pl011_trigger_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
if (ret)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9c59ec128bb4..9a1afe409b98 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2582,10 +2582,10 @@ static int imx_uart_probe(struct platform_device *pdev)
imx_uart_writel(sport, ucr3, UCR3);
}
- hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- sport->trigger_start_tx.function = imx_trigger_start_tx;
- sport->trigger_stop_tx.function = imx_trigger_stop_tx;
+ hrtimer_setup(&sport->trigger_start_tx, imx_trigger_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&sport->trigger_stop_tx, imx_trigger_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Allocate the IRQ(s) i.MX1 has three interrupts whereas later
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index b1ea48f38248..b72c3bc19bfa 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1702,8 +1702,7 @@ static void sci_request_dma(struct uart_port *port)
dma += s->buf_len_rx;
}
- hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- s->rx_timer.function = sci_dma_rx_timer_fn;
+ hrtimer_setup(&s->rx_timer, sci_dma_rx_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
s->chan_rx_saved = s->chan_rx = chan;
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 92ec51870d1d..fe457bf1e15b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -454,7 +454,7 @@ static void cdns_uart_handle_tx(void *dev_id)
if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED &&
(kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port))) {
- cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
+ hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_rx_callback);
hrtimer_start(&cdns_uart->tx_timer,
ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL);
}
@@ -734,7 +734,7 @@ static void cdns_uart_start_tx(struct uart_port *port)
if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
if (!cdns_uart->rs485_tx_started) {
- cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_tx_callback);
cdns_rs485_tx_setup(cdns_uart);
return hrtimer_start(&cdns_uart->tx_timer,
ms_to_ktime(port->rs485.delay_rts_before_send),
@@ -1626,8 +1626,8 @@ static int cdns_rs485_config(struct uart_port *port, struct ktermios *termios,
writel(val, port->membase + CDNS_UART_MODEMCR);
/* Timer setup */
- hrtimer_init(&cdns_uart->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ hrtimer_setup(&cdns_uart->tx_timer, &cdns_rs485_tx_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Disable transmitter and make Rx setup*/
cdns_uart_stop_tx(port);
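Once a timer is initialized through hrtimer_setup(), later callback changes go through hrtimer_update_function(), which can verify the timer is not queued or running, rather than assigning ->function behind the core's back. A minimal sketch with hypothetical names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_tx_done(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void demo_rearm_for_tx(void)
{
	/* retarget the callback, then arm; never poke .function directly */
	hrtimer_update_function(&demo_timer, demo_tx_done);
	hrtimer_start(&demo_timer, ms_to_ktime(1), HRTIMER_MODE_REL);
}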
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 23b9f6efa047..2cfa1774944b 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1782,15 +1782,19 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
ufshcd_mcq_config_esi(hba, msg);
}
+struct ufs_qcom_irq {
+ unsigned int irq;
+ unsigned int idx;
+ struct ufs_hba *hba;
+};
+
static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
- struct msi_desc *desc = data;
- struct device *dev = msi_desc_to_dev(desc);
- struct ufs_hba *hba = dev_get_drvdata(dev);
- u32 id = desc->msi_index;
- struct ufs_hw_queue *hwq = &hba->uhq[id];
+ struct ufs_qcom_irq *qi = data;
+ struct ufs_hba *hba = qi->hba;
+ struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
- ufshcd_mcq_write_cqis(hba, 0x1, id);
+ ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
ufshcd_mcq_poll_cqe_lock(hba, hwq);
return IRQ_HANDLED;
@@ -1799,8 +1803,7 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct msi_desc *desc;
- struct msi_desc *failed_desc = NULL;
+ struct ufs_qcom_irq *qi;
int nr_irqs, ret;
if (host->esi_enabled)
@@ -1811,47 +1814,47 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
* 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+ if (!qi)
+ return -ENOMEM;
+
ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
- return ret;
+ goto cleanup;
}
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- ret = devm_request_irq(hba->dev, desc->irq,
- ufs_qcom_mcq_esi_handler,
- IRQF_SHARED, "qcom-mcq-esi", desc);
+ for (int idx = 0; idx < nr_irqs; idx++) {
+ qi[idx].irq = msi_get_virq(hba->dev, idx);
+ qi[idx].idx = idx;
+ qi[idx].hba = hba;
+
+ ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", qi + idx);
if (ret) {
dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
- __func__, desc->irq, ret);
- failed_desc = desc;
- break;
+ __func__, qi[idx].irq, ret);
+ qi[idx].irq = 0;
+ goto cleanup;
}
}
- msi_unlock_descs(hba->dev);
- if (ret) {
- /* Rewind */
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- if (desc == failed_desc)
- break;
- devm_free_irq(hba->dev, desc->irq, hba);
- }
- msi_unlock_descs(hba->dev);
- platform_device_msi_free_irqs_all(hba->dev);
- } else {
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0)
- ufshcd_rmwl(hba, ESI_VEC_MASK,
- FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
- REG_UFS_CFG3);
- ufshcd_mcq_enable_esi(hba);
- host->esi_enabled = true;
+ if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_rmwl(hba, ESI_VEC_MASK,
+ FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
}
+ ufshcd_mcq_enable_esi(hba);
+ host->esi_enabled = true;
+ return 0;
+cleanup:
+ for (int idx = 0; qi[idx].irq; idx++)
+ devm_free_irq(hba->dev, qi[idx].irq, qi + idx);
+ platform_device_msi_free_irqs_all(hba->dev);
+ devm_kfree(hba->dev, qi);
return ret;
}
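The rework above gives each ESI vector its own ufs_qcom_irq cookie, looked up by index with msi_get_virq(), instead of sharing the msi_desc across handlers. A reduced sketch of the allocate-and-request loop (names hypothetical), with the NULL check spelled the right way around:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/slab.h>

struct demo_irq {
	unsigned int irq;
	unsigned int idx;
	void *ctx;
};

static irqreturn_t demo_handler(int irq, void *data)
{
	struct demo_irq *di = data;	/* per-vector cookie, not shared */

	return di ? IRQ_HANDLED : IRQ_NONE;
}

static int demo_request_vectors(struct device *dev, void *ctx, int nvec)
{
	struct demo_irq *di;
	int idx, ret;

	di = devm_kcalloc(dev, nvec, sizeof(*di), GFP_KERNEL);
	if (!di)
		return -ENOMEM;

	for (idx = 0; idx < nvec; idx++) {
		di[idx].irq = msi_get_virq(dev, idx);
		di[idx].idx = idx;
		di[idx].ctx = ctx;

		/* each vector gets its own cookie as dev_id */
		ret = devm_request_irq(dev, di[idx].irq, demo_handler,
				       IRQF_SHARED, "demo-esi", &di[idx]);
		if (ret)
			return ret;
	}
	return 0;
}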
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index c17516c29b63..a093544482d5 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -424,8 +424,7 @@ static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
/* Initialize timers */
static int ci_otg_init_timers(struct ci_hdrc *ci)
{
- hrtimer_init(&ci->otg_fsm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ci->otg_fsm_hrtimer.function = ci_otg_hrtimer_func;
+ hrtimer_setup(&ci->otg_fsm_hrtimer, ci_otg_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
return 0;
}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 238c6fd50e75..2a542a99ec44 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1459,8 +1459,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
/* Initialize QH */
qh->hsotg = hsotg;
timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
- hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- qh->wait_timer.function = &dwc2_wait_timer_fn;
+ hrtimer_setup(&qh->wait_timer, &dwc2_wait_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
index 3d404d19a205..64c4965a160f 100644
--- a/drivers/usb/fotg210/fotg210-hcd.c
+++ b/drivers/usb/fotg210/fotg210-hcd.c
@@ -4901,8 +4901,7 @@ static int hcd_fotg210_init(struct usb_hcd *hcd)
*/
fotg210->need_io_watchdog = 1;
- hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fotg210->hrtimer.function = fotg210_hrtimer_func;
+ hrtimer_setup(&fotg210->hrtimer, fotg210_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index f60576a65ca6..58b0dd575af3 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1559,8 +1559,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- ncm->task_timer.function = ncm_tx_timeout;
+ hrtimer_setup(&ncm->task_timer, ncm_tx_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
ncm->port.in_ep->name, ncm->port.out_ep->name,
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index bda08c5ba7c0..4f1b5db51dda 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2479,8 +2479,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- dum_hcd->timer.function = dummy_timer;
+ hrtimer_setup(&dum_hcd->timer, dummy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2509,8 +2508,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- dum_hcd->timer.function = dummy_timer;
+ hrtimer_setup(&dum_hcd->timer, dummy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6de79ac5e6a4..6d1d190c914d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -466,8 +466,7 @@ static int ehci_init(struct usb_hcd *hcd)
*/
ehci->need_io_watchdog = 1;
- hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ehci->hrtimer.function = ehci_hrtimer_func;
+ hrtimer_setup(&ehci->hrtimer, ehci_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 9589243e8951..4cde3abb7006 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -760,8 +760,8 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
if (!controller)
goto kzalloc_fail;
- hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- controller->early_tx.function = cppi41_recheck_tx_req;
+ hrtimer_setup(&controller->early_tx, cppi41_recheck_tx_req, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
INIT_LIST_HEAD(&controller->early_tx_list);
controller->controller.channel_alloc = cppi41_dma_channel_allocate;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e07c5e3eb18c..9b34e23b7091 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1079,6 +1079,20 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
/* GMC devices */
{ USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
+ /* Altera USB Blaster 3 */
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6022_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6025_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 3) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6029_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 3) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602C_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 5ee60ba2a73c..52be47d684ea 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1612,3 +1612,16 @@
*/
#define GMC_VID 0x1cd7
#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
+
+/*
+ * Altera USB Blaster 3 (http://www.altera.com).
+ */
+#define ALTERA_VID 0x09fb
+#define ALTERA_UB3_6022_PID 0x6022
+#define ALTERA_UB3_6025_PID 0x6025
+#define ALTERA_UB3_6026_PID 0x6026
+#define ALTERA_UB3_6029_PID 0x6029
+#define ALTERA_UB3_602A_PID 0x602a
+#define ALTERA_UB3_602C_PID 0x602c
+#define ALTERA_UB3_602D_PID 0x602d
+#define ALTERA_UB3_602E_PID 0x602e
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 58bd54e8c483..5cd26dac2069 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1368,13 +1368,13 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */
.driver_info = RSVD(0) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990A (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990A (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990A (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990A (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
@@ -1388,28 +1388,44 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x30), /* Telit FE990B (rmnet) */
+ .driver_info = NCTRL(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x30), /* Telit FE990B (MBIM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x30), /* Telit FE990B (RNDIS) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x30), /* Telit FE990B (ECM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x60) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) }, /* Telit FN990B (rmnet) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x30), /* Telit FN990B (rmnet) */
.driver_info = NCTRL(5) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) }, /* Telit FN990B (MBIM) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */
.driver_info = NCTRL(6) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) }, /* Telit FN990B (RNDIS) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x30), /* Telit FN990B (RNDIS) */
.driver_info = NCTRL(6) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) }, /* Telit FN990B (ECM) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x30), /* Telit FN990B (ECM) */
.driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x60) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 6bf1a22c785a..a99db4e025cd 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5117,16 +5117,16 @@ static void run_state_machine(struct tcpm_port *port)
*/
if (port->vbus_never_low) {
port->vbus_never_low = false;
- tcpm_set_state(port, SNK_SOFT_RESET,
- port->timings.sink_wait_cap_time);
+ upcoming_state = SNK_SOFT_RESET;
} else {
if (!port->self_powered)
upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
else
upcoming_state = hard_reset_state(port);
- tcpm_set_state(port, SNK_WAIT_CAPABILITIES_TIMEOUT,
- port->timings.sink_wait_cap_time);
}
+
+ tcpm_set_state(port, upcoming_state,
+ port->timings.sink_wait_cap_time);
break;
case SNK_WAIT_CAPABILITIES_TIMEOUT:
/*
@@ -7721,14 +7721,14 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
kthread_init_work(&port->event_work, tcpm_pd_event_handler);
kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
- hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->state_machine_timer.function = state_machine_timer_handler;
- hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
- hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->enable_frs_timer.function = enable_frs_timer_handler;
- hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->send_discover_timer.function = send_discover_timer_handler;
+ hrtimer_setup(&port->state_machine_timer, state_machine_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&port->vdm_state_machine_timer, vdm_state_machine_timer_handler,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_setup(&port->enable_frs_timer, enable_frs_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&port->send_discover_timer, send_discover_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->pd_event_lock);
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 49559605177e..c321d442f0da 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -266,24 +266,12 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
if (ret)
goto err_free;
- /*
- * We can't use anon_inode_getfd() because we need to modify
- * the f_mode flags directly to allow more than just ioctls
- */
- filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
- df, O_RDWR);
+ filep = anon_inode_getfile_fmode("[vfio-device]", &vfio_device_fops,
+ df, O_RDWR, FMODE_PREAD | FMODE_PWRITE);
if (IS_ERR(filep)) {
ret = PTR_ERR(filep);
goto err_close_device;
}
-
- /*
- * TODO: add an anon_inode interface to do this.
- * Appears to be missing by lack of need rather than
- * explicitly prevented. Now there's need.
- */
- filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
-
/*
* Use the pseudo fs inode on the device to link all mmaps
* to the same address space, allowing us to unmap all vmas
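anon_inode_getfile_fmode() folds the extra f_mode bits into file creation, replacing the open-coded "filep->f_mode |= ..." that the removed TODO apologized for. A minimal sketch, assuming demo_fops is a populated fops table:

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>

static const struct file_operations demo_fops;

static struct file *demo_open_file(void *priv)
{
	/* FMODE_PREAD | FMODE_PWRITE permit pread()/pwrite() on the fd
	 * in addition to plain ioctl()/read()/write()
	 */
	return anon_inode_getfile_fmode("[demo]", &demo_fops, priv,
					O_RDWR, FMODE_PREAD | FMODE_PWRITE);
}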
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 7fdb5edd7e2e..75338ffc703f 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -282,6 +282,8 @@ static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */
+static void hvfb_putmem(struct fb_info *info);
+
/* Send message to Hyper-V host */
static inline int synthvid_send(struct hv_device *hdev,
struct synthvid_msg *msg)
@@ -863,6 +865,17 @@ static void hvfb_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width,
}
/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup related to
+ * framebuffer here.
+ */
+static void hvfb_destroy(struct fb_info *info)
+{
+ hvfb_putmem(info);
+ framebuffer_release(info);
+}
+
+/*
* TODO: GEN1 codepaths allocate from system or DMA-able memory. Fix the
* driver to use the _SYSMEM_ or _DMAMEM_ helpers in these cases.
*/
@@ -877,6 +890,7 @@ static const struct fb_ops hvfb_ops = {
.fb_set_par = hvfb_set_par,
.fb_setcolreg = hvfb_setcolreg,
.fb_blank = hvfb_blank,
+ .fb_destroy = hvfb_destroy,
};
/* Get options from kernel parameter "video=" */
@@ -952,7 +966,7 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
}
/* Release contiguous physical memory */
-static void hvfb_release_phymem(struct hv_device *hdev,
+static void hvfb_release_phymem(struct device *device,
phys_addr_t paddr, unsigned int size)
{
unsigned int order = get_order(size);
@@ -960,7 +974,7 @@ static void hvfb_release_phymem(struct hv_device *hdev,
if (order <= MAX_PAGE_ORDER)
__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
else
- dma_free_coherent(&hdev->device,
+ dma_free_coherent(device,
round_up(size, PAGE_SIZE),
phys_to_virt(paddr),
paddr);
@@ -989,6 +1003,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
+ aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
/*
* For Gen 1 VM, we can directly use the contiguous memory
@@ -1010,11 +1025,21 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
+ } else {
+ aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
}
/*
- * Cannot use the contiguous physical memory.
- * Allocate mmio space for framebuffer.
+ * Cannot use contiguous physical memory, so allocate MMIO space for
+ * the framebuffer. At this point in the function, conflicting devices
+ * that might have claimed the framebuffer MMIO space based on
+ * screen_info.lfb_base must have already been removed so that
+ * vmbus_allocate_mmio() does not allocate different MMIO space. If the
+ * kdump image were to be loaded using kexec_file_load(), the
+ * framebuffer location in the kdump image would be set from
+ * screen_info.lfb_base at the time that kdump is enabled. If the
+ * framebuffer has moved elsewhere, this could be the wrong location,
+ * causing kdump to hang when efifb (for example) loads.
*/
dio_fb_size =
screen_width * screen_height * screen_depth / 8;
@@ -1051,11 +1076,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->screen_size = dio_fb_size;
getmem_done:
- if (base && size)
- aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
- else
- aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
-
if (!gen2vm)
pci_dev_put(pdev);
@@ -1074,16 +1094,16 @@ err1:
}
/* Release the framebuffer */
-static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
+static void hvfb_putmem(struct fb_info *info)
{
struct hvfb_par *par = info->par;
if (par->need_docopy) {
vfree(par->dio_vp);
- iounmap(info->screen_base);
+ iounmap(par->mmio_vp);
vmbus_free_mmio(par->mem->start, screen_fb_size);
} else {
- hvfb_release_phymem(hdev, info->fix.smem_start,
+ hvfb_release_phymem(info->device, info->fix.smem_start,
screen_fb_size);
}
@@ -1172,7 +1192,7 @@ static int hvfb_probe(struct hv_device *hdev,
if (ret)
goto error;
- ret = register_framebuffer(info);
+ ret = devm_register_framebuffer(&hdev->device, info);
if (ret) {
pr_err("Unable to register framebuffer\n");
goto error;
@@ -1197,7 +1217,7 @@ static int hvfb_probe(struct hv_device *hdev,
error:
fb_deferred_io_cleanup(info);
- hvfb_putmem(hdev, info);
+ hvfb_putmem(info);
error2:
vmbus_close(hdev->channel);
error1:
@@ -1220,14 +1240,10 @@ static void hvfb_remove(struct hv_device *hdev)
fb_deferred_io_cleanup(info);
- unregister_framebuffer(info);
cancel_delayed_work_sync(&par->dwork);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
-
- hvfb_putmem(hdev, info);
- framebuffer_release(info);
}
static int hvfb_suspend(struct hv_device *hdev)
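Routing teardown through fb_ops.fb_destroy ties buffer release to the framebuffer refcount: the hook runs from the final put_fb_info(), after the last userspace fd is gone, so hvfb_remove() can no longer free memory a still-open /dev/fb client uses. The shape of such a hook, as a minimal sketch:

#include <linux/fb.h>
#include <linux/module.h>

static void demo_fb_destroy(struct fb_info *info)
{
	/* runs exactly once, from the final put_fb_info() */
	/* ... release screen memory tracked in info->par here ... */
	framebuffer_release(info);
}

static const struct fb_ops demo_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_destroy	= demo_fb_destroy,
};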
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 70fbc9a3e703..cf3fb61f4d5b 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -23,6 +23,7 @@
#include <linux/cleanup.h>
#include <linux/uuid.h>
#include <linux/configfs.h>
+#include <linux/mm.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 8a294b9cbcf6..56d0dbe62163 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -2950,8 +2950,8 @@ static int virtio_mem_probe(struct virtio_device *vdev)
mutex_init(&vm->hotplug_mutex);
INIT_LIST_HEAD(&vm->next);
spin_lock_init(&vm->removal_lock);
- hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vm->retry_timer.function = virtio_mem_timer_expired;
+ hrtimer_setup(&vm->retry_timer, virtio_mem_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
vm->in_kdump = is_kdump_kernel();
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 7a1096265f18..0820e35ad2e3 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -187,14 +187,12 @@ static int __init softdog_init(void)
watchdog_set_nowayout(&softdog_dev, nowayout);
watchdog_stop_on_reboot(&softdog_dev);
- hrtimer_init(&softdog_ticktock, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- softdog_ticktock.function = softdog_fire;
+ hrtimer_setup(&softdog_ticktock, softdog_fire, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) {
softdog_info.options |= WDIOF_PRETIMEOUT;
- hrtimer_init(&softdog_preticktock, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- softdog_preticktock.function = softdog_pretimeout;
+ hrtimer_setup(&softdog_preticktock, softdog_pretimeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
if (soft_active_on_boot)
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 19698d87dc57..8369fd94fc1a 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -1051,8 +1051,8 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
}
kthread_init_work(&wd_data->work, watchdog_ping_work);
- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
- wd_data->timer.function = watchdog_timer_expired;
+ hrtimer_setup(&wd_data->timer, watchdog_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_HARD);
watchdog_hrtimer_pretimeout_init(wdd);
if (wdd->id == 0) {
diff --git a/drivers/watchdog/watchdog_hrtimer_pretimeout.c b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
index 940b53718a91..fbc7eecd8b20 100644
--- a/drivers/watchdog/watchdog_hrtimer_pretimeout.c
+++ b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
@@ -23,8 +23,8 @@ void watchdog_hrtimer_pretimeout_init(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data = wdd->wd_data;
- hrtimer_init(&wd_data->pretimeout_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- wd_data->pretimeout_timer.function = watchdog_hrtimer_pretimeout;
+ hrtimer_setup(&wd_data->pretimeout_timer, watchdog_hrtimer_pretimeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
void watchdog_hrtimer_pretimeout_start(struct watchdog_device *wdd)
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 416f231809cb..bfe07adb3e3a 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -43,6 +43,18 @@ static int xen_add_device(struct device *dev)
pci_mcfg_reserved = true;
}
#endif
+
+ if (pci_domain_nr(pci_dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16-bit PCI segment
+ * values; do not attempt to register devices with Xen in
+ * segments greater than or equal to 0x10000.
+ */
+ dev_info(dev,
+ "not registering with Xen: invalid PCI segment\n");
+ return 0;
+ }
+
if (pci_seg_supported) {
DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
@@ -149,6 +161,16 @@ static int xen_remove_device(struct device *dev)
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
+ if (pci_domain_nr(pci_dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16-bit PCI segment
+ * values.
+ */
+ dev_info(dev,
+ "not unregistering with Xen: invalid PCI segment\n");
+ return 0;
+ }
+
if (pci_seg_supported) {
struct physdev_pci_device device = {
.seg = pci_domain_nr(pci_dev->bus),
@@ -182,6 +204,16 @@ int xen_reset_device(const struct pci_dev *dev)
.flags = PCI_DEVICE_RESET_FLR,
};
+ if (pci_domain_nr(dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16-bit PCI segment
+ * values.
+ */
+ dev_info(&dev->dev,
+ "unable to notify Xen of device reset: invalid PCI segment\n");
+ return 0;
+ }
+
return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
}
EXPORT_SYMBOL_GPL(xen_reset_device);
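[Editor's note] All three xen/pci.c hunks add the same guard. The physdev hypercall structures carry the PCI segment in a 16-bit field, so a segment (domain) number with any bit at position 16 or above set cannot be represented, and the guard skips such devices rather than passing a truncated value. The check, factored out as a sketch (xen_pci_seg_ok is a hypothetical helper, not part of the patch):

#include <linux/pci.h>

static bool xen_pci_seg_ok(const struct pci_dev *pci_dev)
{
	/* Non-zero high bits mean the segment is >= 0x10000
	 * and does not fit the hypercall's 16-bit seg field. */
	return !(pci_domain_nr(pci_dev->bus) >> 16);
}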
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 544d3f9010b9..1db82da56db6 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -26,6 +26,8 @@
#define DRV_NAME "xen-platform-pci"
+#define PCI_DEVICE_ID_XEN_PLATFORM_XS61 0x0002
+
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
@@ -174,6 +176,8 @@ pci_out:
static const struct pci_device_id platform_pci_tbl[] = {
{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
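[Editor's note] The new XS61 entry matches any subvendor/subdevice, like the existing one. As a style note, the same table could be written with the PCI_DEVICE() helper, which fills the PCI_ANY_ID fields; a sketch only, since the driver keeps the open-coded form above:

static const struct pci_device_id platform_pci_tbl_alt[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM) },
	{ PCI_DEVICE(PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61) },
	{ /* sentinel */ }
};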
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index b616b7768c3b..5c2f829d5b0b 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -262,26 +262,6 @@ struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
return found_dev;
}
-struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
- struct pci_dev *dev)
-{
- struct pcistub_device *psdev;
- struct pci_dev *found_dev = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&pcistub_devices_lock, flags);
-
- list_for_each_entry(psdev, &pcistub_devices, dev_list) {
- if (psdev->dev == dev) {
- found_dev = pcistub_device_get_pci_dev(pdev, psdev);
- break;
- }
- }
-
- spin_unlock_irqrestore(&pcistub_devices_lock, flags);
- return found_dev;
-}
-
/*
* Called when:
* - XenBus state has been reconfigured (pci unplug). See xen_pcibk_remove_device
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index f9599ed2f2e2..b786c1f74f85 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -67,8 +67,6 @@ extern struct list_head xen_pcibk_quirks;
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
int domain, int bus,
int slot, int func);
-struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
- struct pci_dev *dev);
void pcistub_put_pci_dev(struct pci_dev *dev);
static inline bool xen_pcibk_pv_support(void)
diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
index b799bc759c15..088b7f02c358 100644
--- a/drivers/xen/xenfs/xensyms.c
+++ b/drivers/xen/xenfs/xensyms.c
@@ -48,7 +48,7 @@ static int xensyms_next_sym(struct xensyms *xs)
return -ENOMEM;
set_xen_guest_handle(symdata->name, xs->name);
- symdata->symnum--; /* Rewind */
+ symdata->symnum = symnum; /* Rewind */
ret = HYPERVISOR_platform_op(&xs->op);
if (ret < 0)
@@ -78,7 +78,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
{
struct xensyms *xs = m->private;
- xs->op.u.symdata.symnum = ++(*pos);
+ *pos = xs->op.u.symdata.symnum;
if (xensyms_next_sym(xs))
return NULL;
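[Editor's note] The xensyms hunks fix a rewind bug: the hypervisor advances symnum past the symbol it returned, possibly skipping numbers, so "symnum--" before the retry (when the name buffer was too small) could land on a different symbol than the one being re-read, and "++(*pos)" in ->next() could request a symbol number the hypervisor already skipped. An annotated reconstruction of the fixed retry path, assuming the surrounding driver context (struct xensyms, struct xenpf_symdata, HYPERVISOR_platform_op) and eliding the buffer reallocation:

static int xensyms_next_sym(struct xensyms *xs)
{
	struct xenpf_symdata *symdata = &xs->op.u.symdata;
	uint64_t symnum = symdata->symnum;	/* the symbol we are asking for */
	int ret;

	ret = HYPERVISOR_platform_op(&xs->op);
	if (ret < 0)
		return ret;

	if (symdata->namelen > xs->namelen) {
		/*
		 * Name did not fit: grow xs->name, then retry the same
		 * symbol. The hypercall advanced symdata->symnum (possibly
		 * by more than one), so restore the saved value instead of
		 * decrementing.
		 */
		/* ... reallocate xs->name to symdata->namelen ... */
		symdata->symnum = symnum;	/* rewind */
		ret = HYPERVISOR_platform_op(&xs->op);
	}

	return ret;
}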