Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/nfit/core.c1
-rw-r--r--drivers/acpi/numa/hmat.c7
-rw-r--r--drivers/acpi/pci_root.c17
-rw-r--r--drivers/acpi/processor_core.c15
-rw-r--r--drivers/base/platform-msi.c350
-rw-r--r--drivers/block/zram/Kconfig1
-rw-r--r--drivers/crypto/ccp/sev-dev.c36
-rw-r--r--drivers/dax/cxl.c1
-rw-r--r--drivers/dax/device.c1
-rw-r--r--drivers/dax/hmem/hmem.c1
-rw-r--r--drivers/dax/kmem.c1
-rw-r--r--drivers/dax/pmem.c1
-rw-r--r--drivers/dax/super.c1
-rw-r--r--drivers/dma-buf/Kconfig1
-rw-r--r--drivers/dma-buf/udmabuf.c232
-rw-r--r--drivers/firewire/.kunitconfig2
-rw-r--r--drivers/firewire/Kconfig31
-rw-r--r--drivers/firewire/Makefile2
-rw-r--r--drivers/firewire/core-iso.c32
-rw-r--r--drivers/firewire/core-topology.c219
-rw-r--r--drivers/firewire/core-trace.c11
-rw-r--r--drivers/firewire/core-transaction.c24
-rw-r--r--drivers/firewire/ohci-serdes-test.c56
-rw-r--r--drivers/firewire/ohci.c237
-rw-r--r--drivers/firewire/ohci.h43
-rw-r--r--drivers/firewire/packet-header-definitions.h2
-rw-r--r--drivers/firewire/packet-serdes-test.c334
-rw-r--r--drivers/firewire/phy-packet-definitions.h302
-rw-r--r--drivers/firewire/self-id-sequence-helper-test.c152
-rw-r--r--drivers/firmware/efi/libstub/loongarch.c2
-rw-r--r--drivers/firmware/efi/riscv-runtime.c13
-rw-r--r--drivers/fsi/fsi-occ.c17
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c20
-rw-r--r--drivers/hsi/clients/ssi_protocol.c1
-rw-r--r--drivers/hv/hv_balloon.c5
-rw-r--r--drivers/irqchip/Kconfig29
-rw-r--r--drivers/irqchip/Makefile6
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c121
-rw-r--r--drivers/irqchip/irq-bcm2835.c4
-rw-r--r--drivers/irqchip/irq-gic-common.h3
-rw-r--r--drivers/irqchip/irq-gic-v2m.c81
-rw-r--r--drivers/irqchip/irq-gic-v3-its-msi-parent.c210
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c202
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c163
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c57
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c130
-rw-r--r--drivers/irqchip/irq-gic-v3.c22
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c24
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c54
-rw-r--r--drivers/irqchip/irq-lan966x-oic.c278
-rw-r--r--drivers/irqchip/irq-mbigen.c96
-rw-r--r--drivers/irqchip/irq-meson-gpio.c1
-rw-r--r--drivers/irqchip/irq-msi-lib.c140
-rw-r--r--drivers/irqchip/irq-msi-lib.h27
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c44
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c275
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c37
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c1
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c52
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c150
-rw-r--r--drivers/irqchip/irq-riscv-aplic-main.c13
-rw-r--r--drivers/irqchip/irq-riscv-intc.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c670
-rw-r--r--drivers/irqchip/irq-stm32mp-exti.c729
-rw-r--r--drivers/irqchip/irq-ts4800.c1
-rw-r--r--drivers/macintosh/ams/ams-i2c.c2
-rw-r--r--drivers/macintosh/mac_hid.c1
-rw-r--r--drivers/macintosh/therm_windtunnel.c2
-rw-r--r--drivers/macintosh/windfarm_ad7417_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c2
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c2
-rw-r--r--drivers/mailbox/Kconfig8
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c4
-rw-r--r--drivers/mailbox/imx-mailbox.c10
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c100
-rw-r--r--drivers/mailbox/omap-mailbox.c3
-rw-r--r--drivers/mailbox/qcom-cpucp-mbox.c187
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c9
-rw-r--r--drivers/md/bcache/alloc.c64
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/bset.c124
-rw-r--r--drivers/md/bcache/bset.h40
-rw-r--r--drivers/md/bcache/btree.c69
-rw-r--r--drivers/md/bcache/extents.c53
-rw-r--r--drivers/md/bcache/movinggc.c41
-rw-r--r--drivers/md/bcache/super.c3
-rw-r--r--drivers/md/bcache/sysfs.c4
-rw-r--r--drivers/md/bcache/util.c2
-rw-r--r--drivers/md/bcache/util.h67
-rw-r--r--drivers/md/bcache/writeback.c13
-rw-r--r--drivers/md/dm-vdo/repair.c19
-rw-r--r--drivers/md/dm-vdo/slab-depot.c14
-rw-r--r--drivers/md/dm.c1
-rw-r--r--drivers/md/md-cluster.c49
-rw-r--r--drivers/md/md-cluster.h2
-rw-r--r--drivers/md/md.c17
-rw-r--r--drivers/md/raid1.c1
-rw-r--r--drivers/misc/pci_endpoint_test.c87
-rw-r--r--drivers/most/core.c10
-rw-r--r--drivers/most/most_cdev.c6
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c1
-rw-r--r--drivers/mtd/chips/cfi_util.c1
-rw-r--r--drivers/mtd/maps/Makefile11
-rw-r--r--drivers/mtd/maps/map_funcs.c1
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c5
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c20
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h6
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c6
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c26
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c26
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c86
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c700
-rw-r--r--drivers/mtd/nand/spi/macronix.c64
-rw-r--r--drivers/mtd/parsers/brcm_u-boot.c1
-rw-r--r--drivers/mtd/parsers/cmdlinepart.c18
-rw-r--r--drivers/mtd/parsers/tplink_safeloader.c1
-rw-r--r--drivers/mtd/spi-nor/Makefile1
-rw-r--r--drivers/mtd/spi-nor/core.c188
-rw-r--r--drivers/mtd/spi-nor/core.h12
-rw-r--r--drivers/mtd/spi-nor/everspin.c19
-rw-r--r--drivers/mtd/spi-nor/winbond.c2
-rw-r--r--drivers/mtd/spi-nor/xilinx.c169
-rw-r--r--drivers/mtd/tests/Makefile34
-rw-r--r--drivers/mtd/tests/mtd_test.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h2
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c2
-rw-r--r--drivers/nvdimm/btt.c5
-rw-r--r--drivers/nvdimm/core.c1
-rw-r--r--drivers/nvdimm/e820.c1
-rw-r--r--drivers/nvdimm/nd_virtio.c1
-rw-r--r--drivers/nvdimm/of_pmem.c1
-rw-r--r--drivers/nvdimm/pmem.c1
-rw-r--r--drivers/nvme/host/ioctl.c16
-rw-r--r--drivers/pci/bus.c10
-rw-r--r--drivers/pci/controller/Kconfig9
-rw-r--r--drivers/pci/controller/Makefile2
-rw-r--r--drivers/pci/controller/dwc/Kconfig22
-rw-r--r--drivers/pci/controller/dwc/Makefile2
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c8
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c55
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c38
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c202
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c4
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c16
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c157
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c145
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c121
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h46
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c330
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c126
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c50
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c346
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c308
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c2
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c2
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.h2
-rw-r--r--drivers/pci/controller/pci-aardvark.c1
-rw-r--r--drivers/pci/controller/pci-host-common.c5
-rw-r--r--drivers/pci/controller/pci-host-generic.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c4
-rw-r--r--drivers/pci/controller/pci-loongson.c13
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c1
-rw-r--r--drivers/pci/controller/pcie-altera.c1
-rw-r--r--drivers/pci/controller/pcie-apple.c1
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c1
-rw-r--r--drivers/pci/controller/pcie-mediatek.c1
-rw-r--r--drivers/pci/controller/pcie-mt7621.c1
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c6
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c3
-rw-r--r--drivers/pci/controller/pcie-rockchip.c2
-rw-r--r--drivers/pci/controller/plda/Kconfig30
-rw-r--r--drivers/pci/controller/plda/Makefile4
-rw-r--r--drivers/pci/controller/plda/pcie-microchip-host.c (renamed from drivers/pci/controller/pcie-microchip-host.c)615
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c651
-rw-r--r--drivers/pci/controller/plda/pcie-plda.h273
-rw-r--r--drivers/pci/controller/plda/pcie-starfive.c488
-rw-r--r--drivers/pci/controller/vmd.c9
-rw-r--r--drivers/pci/devres.c913
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-mhi.c48
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c115
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c19
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c1
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c79
-rw-r--r--drivers/pci/hotplug/acpiphp_ampere_altra.c1
-rw-r--r--drivers/pci/hotplug/pciehp.h4
-rw-r--r--drivers/pci/hotplug/pciehp_core.c42
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c5
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c4
-rw-r--r--drivers/pci/hotplug/pnv_php.c3
-rw-r--r--drivers/pci/iomap.c16
-rw-r--r--drivers/pci/msi/irqdomain.c20
-rw-r--r--drivers/pci/of.c54
-rw-r--r--drivers/pci/pci-acpi.c22
-rw-r--r--drivers/pci/pci-mid.c4
-rw-r--r--drivers/pci/pci-pf-stub.c1
-rw-r--r--drivers/pci/pci-stub.c1
-rw-r--r--drivers/pci/pci.c306
-rw-r--r--drivers/pci/pci.h100
-rw-r--r--drivers/pci/pcie/aer.c18
-rw-r--r--drivers/pci/pcie/dpc.c60
-rw-r--r--drivers/pci/pcie/portdrv.c2
-rw-r--r--drivers/pci/probe.c34
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pci/setup-bus.c91
-rw-r--r--drivers/pci/switch/switchtec.c16
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c1
-rw-r--r--drivers/pcmcia/i82092.c1
-rw-r--r--drivers/pcmcia/i82365.c1
-rw-r--r--drivers/pcmcia/max1600.c1
-rw-r--r--drivers/pcmcia/rsrc_mgr.c1
-rw-r--r--drivers/pcmcia/yenta_socket.c7
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c10
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c5
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm4908.c1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm63xx.c4
-rw-r--r--drivers/pinctrl/berlin/berlin.c21
-rw-r--r--drivers/pinctrl/berlin/berlin.h6
-rw-r--r--drivers/pinctrl/core.c30
-rw-r--r--drivers/pinctrl/core.h2
-rw-r--r--drivers/pinctrl/freescale/Kconfig18
-rw-r--r--drivers/pinctrl/freescale/Makefile2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx-scmi.c357
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c39
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx91.c271
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c18
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c10
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.h6
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c32
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c42
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7629.c20
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt76x8.c88
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c34
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7986.c24
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-s4.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c4
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/nuvoton/Kconfig19
-rw-r--r--drivers/pinctrl/nuvoton/Makefile2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.c1187
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.h52
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35d1.c1799
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32cc.c31
-rw-r--r--drivers/pinctrl/pinconf-generic.c7
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c7
-rw-r--r--drivers/pinctrl/pinctrl-at91.c14
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c212
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c45
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h12
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c707
-rw-r--r--drivers/pinctrl/pinctrl-k210.c7
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c24
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c1
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_i2c.c1
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_spi.c1
-rw-r--r--drivers/pinctrl/pinctrl-mlxbf3.c12
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c28
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c9
-rw-r--r--drivers/pinctrl/pinctrl-single.c7
-rw-r--r--drivers/pinctrl/pinctrl-st.c37
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c1
-rw-r--r--drivers/pinctrl/pinctrl-tps6594.c4
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c85
-rw-r--r--drivers/pinctrl/pinmux.c19
-rw-r--r--drivers/pinctrl/pinmux.h19
-rw-r--r--drivers/pinctrl/qcom/Kconfig9
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c19
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c236
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c830
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779h0.c82
-rw-r--r--drivers/pinctrl/renesas/pfc-sh73a0.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c14
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c909
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c26
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c7
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c7
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c13
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c14
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c27
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c18
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c7
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c4
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c42
-rw-r--r--drivers/platform/mips/cpu_hwmon.c3
-rw-r--r--drivers/power/reset/piix4-poweroff.c1
-rw-r--r--drivers/power/supply/Kconfig21
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_chargalg.c2
-rw-r--r--drivers/power/supply/ab8500_charger.c52
-rw-r--r--drivers/power/supply/ab8500_fg.c5
-rw-r--r--drivers/power/supply/adp5061.c2
-rw-r--r--drivers/power/supply/bd99954-charger.c7
-rw-r--r--drivers/power/supply/bq24735-charger.c2
-rw-r--r--drivers/power/supply/bq25890_charger.c10
-rw-r--r--drivers/power/supply/cw2015_battery.c2
-rw-r--r--drivers/power/supply/ingenic-battery.c10
-rw-r--r--drivers/power/supply/lenovo_yoga_c630_battery.c501
-rw-r--r--drivers/power/supply/lp8727_charger.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c4
-rw-r--r--drivers/power/supply/max14656_charger_detector.c2
-rw-r--r--drivers/power/supply/max1720x_battery.c337
-rw-r--r--drivers/power/supply/max77976_charger.c4
-rw-r--r--drivers/power/supply/mm8013.c2
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/power_supply_hwmon.c25
-rw-r--r--drivers/power/supply/power_supply_leds.c174
-rw-r--r--drivers/power/supply/power_supply_sysfs.c3
-rw-r--r--drivers/power/supply/qcom_battmgr.c1
-rw-r--r--drivers/power/supply/rt9455_charger.c4
-rw-r--r--drivers/power/supply/samsung-sdi-battery.c26
-rw-r--r--drivers/power/supply/sbs-charger.c2
-rw-r--r--drivers/power/supply/sbs-manager.c4
-rw-r--r--drivers/rtc/interface.c9
-rw-r--r--drivers/rtc/lib_test.c1
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c2
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c2
-rw-r--r--drivers/rtc/rtc-abx80x.c12
-rw-r--r--drivers/rtc/rtc-bq32k.c2
-rw-r--r--drivers/rtc/rtc-cmos.c10
-rw-r--r--drivers/rtc/rtc-ds1307.c7
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-ds3232.c2
-rw-r--r--drivers/rtc/rtc-em3027.c2
-rw-r--r--drivers/rtc/rtc-fm3130.c2
-rw-r--r--drivers/rtc/rtc-goldfish.c1
-rw-r--r--drivers/rtc/rtc-hym8563.c4
-rw-r--r--drivers/rtc/rtc-isl12022.c2
-rw-r--r--drivers/rtc/rtc-isl1208.c36
-rw-r--r--drivers/rtc/rtc-max31335.c2
-rw-r--r--drivers/rtc/rtc-max6900.c2
-rw-r--r--drivers/rtc/rtc-mpc5121.c1
-rw-r--r--drivers/rtc/rtc-nct3018y.c2
-rw-r--r--drivers/rtc/rtc-omap.c1
-rw-r--r--drivers/rtc/rtc-pcf8523.c2
-rw-r--r--drivers/rtc/rtc-pcf8563.c6
-rw-r--r--drivers/rtc/rtc-pcf8583.c2
-rw-r--r--drivers/rtc/rtc-rc5t583.c1
-rw-r--r--drivers/rtc/rtc-rv3029c2.c4
-rw-r--r--drivers/rtc/rtc-rx6110.c2
-rw-r--r--drivers/rtc/rtc-rx8010.c2
-rw-r--r--drivers/rtc/rtc-rx8581.c2
-rw-r--r--drivers/rtc/rtc-s35390a.c2
-rw-r--r--drivers/rtc/rtc-sd3078.c2
-rw-r--r--drivers/rtc/rtc-stm32.c78
-rw-r--r--drivers/rtc/rtc-tps65910.c1
-rw-r--r--drivers/rtc/rtc-tps6594.c75
-rw-r--r--drivers/rtc/rtc-twl.c1
-rw-r--r--drivers/rtc/rtc-x1205.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c10
-rw-r--r--drivers/s390/block/dasd_diag.c1
-rw-r--r--drivers/s390/block/dasd_eckd.c1
-rw-r--r--drivers/s390/block/dasd_fba.c1
-rw-r--r--drivers/s390/char/sclp.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c2
-rw-r--r--drivers/thermal/thermal_core.c51
-rw-r--r--drivers/thermal/thermal_core.h3
-rw-r--r--drivers/thermal/thermal_helpers.c2
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c2
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-pci.c2
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c13
-rw-r--r--drivers/video/fbdev/core/fb_defio.c13
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c2
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.h63
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_mem.c29
-rw-r--r--drivers/xen/balloon.c9
394 files changed, 16792 insertions, 7035 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index d4595d1985b1..5429ec9ef06f 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3531,5 +3531,6 @@ static __exit void nfit_exit(void)
module_init(nfit_init);
module_exit(nfit_exit);
+MODULE_DESCRIPTION("ACPI NVDIMM Firmware Interface Table (NFIT) driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index febd9e51350b..1a902a02390f 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -933,17 +933,14 @@ static int hmat_callback(struct notifier_block *self,
return NOTIFY_OK;
}
-static int hmat_set_default_dram_perf(void)
+static int __init hmat_set_default_dram_perf(void)
{
int rc;
int nid, pxm;
struct memory_target *target;
struct access_coordinate *attrs;
- if (!default_dram_type)
- return -EIO;
-
- for_each_node_mask(nid, default_dram_type->nodes) {
+ for_each_node_mask(nid, default_dram_nodes) {
pxm = node_to_pxm(nid);
target = find_mem_target(pxm);
if (!target)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 58b89b8d950e..d0bfb3706801 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -293,11 +293,6 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
}
EXPORT_SYMBOL_GPL(acpi_pci_find_root);
-struct acpi_handle_node {
- struct list_head node;
- acpi_handle handle;
-};
-
/**
* acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
* @handle: the handle in question
@@ -1008,7 +1003,6 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
int node = acpi_get_node(device->handle);
struct pci_bus *bus;
struct pci_host_bridge *host_bridge;
- union acpi_object *obj;
info->root = root;
info->bridge = device;
@@ -1050,17 +1044,6 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
if (!(root->osc_ext_control_set & OSC_CXL_ERROR_REPORTING_CONTROL))
host_bridge->native_cxl_error = 0;
- /*
- * Evaluate the "PCI Boot Configuration" _DSM Function. If it
- * exists and returns 0, we must preserve any PCI resource
- * assignments made by firmware for this host bridge.
- */
- obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 1,
- DSM_PCI_PRESERVE_BOOT_CONFIG, NULL, ACPI_TYPE_INTEGER);
- if (obj && obj->integer.value == 0)
- host_bridge->preserve_config = 1;
- ACPI_FREE(obj);
-
acpi_dev_power_up_children_with_adr(device);
pci_scan_child_bus(bus);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index b04b684f3190..9b6b71a2ffb5 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -216,6 +216,21 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
return rv;
}
+int __init acpi_get_madt_revision(void)
+{
+ struct acpi_table_header *madt = NULL;
+ int revision;
+
+ if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, &madt)))
+ return -EINVAL;
+
+ revision = madt->revision;
+
+ acpi_put_table(madt);
+
+ return revision;
+}
+
static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
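[Editor's note: the following is an illustrative sketch, not part of the diff.]
The new acpi_get_madt_revision() helper in the hunk above exposes the MADT table revision to early boot code, returning the revision on success or -EINVAL when the MADT cannot be mapped. A minimal, hypothetical caller is sketched below; the caller name and the revision threshold are illustrative only, and since the helper is marked __init the caller must be __init as well.

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/printk.h>

/*
 * Hypothetical example: enable a feature only when the firmware ships a
 * sufficiently new MADT. acpi_get_madt_revision() returns the table's
 * revision field, or -EINVAL when the MADT cannot be obtained.
 */
static bool __init example_madt_is_recent(void)
{
	int rev = acpi_get_madt_revision();

	if (rev < 0) {
		pr_info("MADT not available (%d)\n", rev);
		return false;
	}

	/* Revision threshold is illustrative only. */
	return rev >= 5;
}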
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 11f5fdf65b9e..0e60dd650b5e 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -4,346 +4,12 @@
*
* Copyright (C) 2015 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
+ * Copyright (C) 2022 Linutronix GmbH
*/
#include <linux/device.h>
-#include <linux/idr.h>
-#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
-#include <linux/slab.h>
-
-/* Begin of removal area. Once everything is converted over. Cleanup the includes too! */
-
-#define DEV_ID_SHIFT 21
-#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
-
-/*
- * Internal data structure containing a (made up, but unique) devid
- * and the callback to write the MSI message.
- */
-struct platform_msi_priv_data {
- struct device *dev;
- void *host_data;
- msi_alloc_info_t arg;
- irq_write_msi_msg_t write_msg;
- int devid;
-};
-
-/* The devid allocator */
-static DEFINE_IDA(platform_msi_devid_ida);
-
-#ifdef GENERIC_MSI_DOMAIN_OPS
-/*
- * Convert an msi_desc to a globaly unique identifier (per-device
- * devid + msi_desc position in the msi_list).
- */
-static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
-{
- u32 devid = desc->dev->msi.data->platform_data->devid;
-
- return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index;
-}
-
-static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
-{
- arg->desc = desc;
- arg->hwirq = platform_msi_calc_hwirq(desc);
-}
-
-static int platform_msi_init(struct irq_domain *domain,
- struct msi_domain_info *info,
- unsigned int virq, irq_hw_number_t hwirq,
- msi_alloc_info_t *arg)
-{
- return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- info->chip, info->chip_data);
-}
-
-static void platform_msi_set_proxy_dev(msi_alloc_info_t *arg)
-{
- arg->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
-}
-#else
-#define platform_msi_set_desc NULL
-#define platform_msi_init NULL
-#define platform_msi_set_proxy_dev(x) do {} while(0)
-#endif
-
-static void platform_msi_update_dom_ops(struct msi_domain_info *info)
-{
- struct msi_domain_ops *ops = info->ops;
-
- BUG_ON(!ops);
-
- if (ops->msi_init == NULL)
- ops->msi_init = platform_msi_init;
- if (ops->set_desc == NULL)
- ops->set_desc = platform_msi_set_desc;
-}
-
-static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
-{
- struct msi_desc *desc = irq_data_get_msi_desc(data);
-
- desc->dev->msi.data->platform_data->write_msg(desc, msg);
-}
-
-static void platform_msi_update_chip_ops(struct msi_domain_info *info)
-{
- struct irq_chip *chip = info->chip;
-
- BUG_ON(!chip);
- if (!chip->irq_mask)
- chip->irq_mask = irq_chip_mask_parent;
- if (!chip->irq_unmask)
- chip->irq_unmask = irq_chip_unmask_parent;
- if (!chip->irq_eoi)
- chip->irq_eoi = irq_chip_eoi_parent;
- if (!chip->irq_set_affinity)
- chip->irq_set_affinity = msi_domain_set_affinity;
- if (!chip->irq_write_msi_msg)
- chip->irq_write_msi_msg = platform_msi_write_msg;
- if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
- !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
- info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
-}
-
-/**
- * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
- * @fwnode: Optional fwnode of the interrupt controller
- * @info: MSI domain info
- * @parent: Parent irq domain
- *
- * Updates the domain and chip ops and creates a platform MSI
- * interrupt domain.
- *
- * Returns:
- * A domain pointer or NULL in case of failure.
- */
-struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent)
-{
- struct irq_domain *domain;
-
- if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
- platform_msi_update_dom_ops(info);
- if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
- platform_msi_update_chip_ops(info);
- info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
- MSI_FLAG_FREE_MSI_DESCS;
-
- domain = msi_create_irq_domain(fwnode, info, parent);
- if (domain)
- irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);
-
- return domain;
-}
-EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain);
-
-static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
-{
- struct platform_msi_priv_data *datap;
- int err;
-
- /*
- * Limit the number of interrupts to 2048 per device. Should we
- * need to bump this up, DEV_ID_SHIFT should be adjusted
- * accordingly (which would impact the max number of MSI
- * capable devices).
- */
- if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
- return -EINVAL;
-
- if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
- dev_err(dev, "Incompatible msi_domain, giving up\n");
- return -EINVAL;
- }
-
- err = msi_setup_device_data(dev);
- if (err)
- return err;
-
- /* Already initialized? */
- if (dev->msi.data->platform_data)
- return -EBUSY;
-
- datap = kzalloc(sizeof(*datap), GFP_KERNEL);
- if (!datap)
- return -ENOMEM;
-
- datap->devid = ida_alloc_max(&platform_msi_devid_ida,
- (1 << DEV_ID_SHIFT) - 1, GFP_KERNEL);
- if (datap->devid < 0) {
- err = datap->devid;
- kfree(datap);
- return err;
- }
-
- datap->write_msg = write_msi_msg;
- datap->dev = dev;
- dev->msi.data->platform_data = datap;
- return 0;
-}
-
-static void platform_msi_free_priv_data(struct device *dev)
-{
- struct platform_msi_priv_data *data = dev->msi.data->platform_data;
-
- dev->msi.data->platform_data = NULL;
- ida_free(&platform_msi_devid_ida, data->devid);
- kfree(data);
-}
-
-/**
- * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
- * @dev: The device for which to allocate interrupts
- * @nvec: The number of interrupts to allocate
- * @write_msi_msg: Callback to write an interrupt message for @dev
- *
- * Returns:
- * Zero for success, or an error code in case of failure
- */
-static int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
-{
- int err;
-
- err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (err)
- return err;
-
- err = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);
- if (err)
- platform_msi_free_priv_data(dev);
-
- return err;
-}
-
-/**
- * platform_msi_get_host_data - Query the private data associated with
- * a platform-msi domain
- * @domain: The platform-msi domain
- *
- * Return: The private data provided when calling
- * platform_msi_create_device_domain().
- */
-void *platform_msi_get_host_data(struct irq_domain *domain)
-{
- struct platform_msi_priv_data *data = domain->host_data;
-
- return data->host_data;
-}
-
-static struct lock_class_key platform_device_msi_lock_class;
-
-/**
- * __platform_msi_create_device_domain - Create a platform-msi device domain
- *
- * @dev: The device generating the MSIs
- * @nvec: The number of MSIs that need to be allocated
- * @is_tree: flag to indicate tree hierarchy
- * @write_msi_msg: Callback to write an interrupt message for @dev
- * @ops: The hierarchy domain operations to use
- * @host_data: Private data associated to this domain
- *
- * Return: An irqdomain for @nvec interrupts on success, NULL in case of error.
- *
- * This is for interrupt domains which stack on a platform-msi domain
- * created by platform_msi_create_irq_domain(). @dev->msi.domain points to
- * that platform-msi domain which is the parent for the new domain.
- */
-struct irq_domain *
-__platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- bool is_tree,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- struct platform_msi_priv_data *data;
- struct irq_domain *domain;
- int err;
-
- err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (err)
- return NULL;
-
- /*
- * Use a separate lock class for the MSI descriptor mutex on
- * platform MSI device domains because the descriptor mutex nests
- * into the domain mutex. See alloc/free below.
- */
- lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);
-
- data = dev->msi.data->platform_data;
- data->host_data = host_data;
- domain = irq_domain_create_hierarchy(dev->msi.domain, 0,
- is_tree ? 0 : nvec,
- dev->fwnode, ops, data);
- if (!domain)
- goto free_priv;
-
- platform_msi_set_proxy_dev(&data->arg);
- err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
- if (err)
- goto free_domain;
-
- return domain;
-
-free_domain:
- irq_domain_remove(domain);
-free_priv:
- platform_msi_free_priv_data(dev);
- return NULL;
-}
-
-/**
- * platform_msi_device_domain_free - Free interrupts associated with a platform-msi
- * device domain
- *
- * @domain: The platform-msi device domain
- * @virq: The base irq from which to perform the free operation
- * @nr_irqs: How many interrupts to free from @virq
- */
-void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct platform_msi_priv_data *data = domain->host_data;
-
- msi_lock_descs(data->dev);
- msi_domain_depopulate_descs(data->dev, virq, nr_irqs);
- irq_domain_free_irqs_common(domain, virq, nr_irqs);
- msi_free_msi_descs_range(data->dev, virq, virq + nr_irqs - 1);
- msi_unlock_descs(data->dev);
-}
-
-/**
- * platform_msi_device_domain_alloc - Allocate interrupts associated with
- * a platform-msi device domain
- *
- * @domain: The platform-msi device domain
- * @virq: The base irq from which to perform the allocate operation
- * @nr_irqs: How many interrupts to allocate from @virq
- *
- * Return 0 on success, or an error code on failure. Must be called
- * with irq_domain_mutex held (which can only be done as part of a
- * top-level interrupt allocation).
- */
-int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct platform_msi_priv_data *data = domain->host_data;
- struct device *dev = data->dev;
-
- return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
-}
-
-/* End of removal area */
-
-/* Real per device domain interfaces */
/*
* This indirection can go when platform_device_msi_init_and_alloc_irqs()
@@ -357,7 +23,7 @@ static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
cb(irq_data_get_msi_desc(d), msg);
}
-static void platform_msi_set_desc_byindex(msi_alloc_info_t *arg, struct msi_desc *desc)
+static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
arg->desc = desc;
arg->hwirq = desc->msi_index;
@@ -373,7 +39,7 @@ static const struct msi_domain_template platform_msi_template = {
},
.ops = {
- .set_desc = platform_msi_set_desc_byindex,
+ .set_desc = platform_msi_set_desc,
},
.info = {
@@ -408,10 +74,6 @@ int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nve
if (!domain || !write_msi_msg)
return -EINVAL;
- /* Migration support. Will go away once everything is converted */
- if (!irq_domain_is_msi_parent(domain))
- return platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg);
-
/*
* @write_msi_msg is stored in the resulting msi_domain_info::data.
* The underlying domain creation mechanism will assign that
@@ -432,12 +94,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
*/
void platform_device_msi_free_irqs_all(struct device *dev)
{
- struct irq_domain *domain = dev->msi.domain;
-
msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
-
- /* Migration support. Will go away once everything is converted */
- if (!irq_domain_is_msi_parent(domain))
- platform_msi_free_priv_data(dev);
}
EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
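[Editor's note: the following is an illustrative sketch, not part of the diff.]
With the legacy platform-msi migration paths removed above, consumers are expected to use only the per-device MSI domain interface that remains: platform_device_msi_init_and_alloc_irqs() and platform_device_msi_free_irqs_all(). A rough sketch of a consumer follows, assuming the device already sits under an MSI parent domain (dev->msi.domain); the driver name and the doorbell write are hypothetical.

#include <linux/device.h>
#include <linux/msi.h>

/* Hypothetical callback: program the device's doorbell with the message. */
static void example_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* Device-specific register writes would go here. */
}

static int example_setup_irqs(struct device *dev, unsigned int nvec)
{
	int ret;

	/* Allocates nvec MSIs from the device's MSI parent domain. */
	ret = platform_device_msi_init_and_alloc_irqs(dev, nvec,
						      example_write_msi_msg);
	if (ret)
		return ret;

	/* ... request_irq() on the allocated Linux IRQ numbers ... */
	return 0;
}

static void example_teardown_irqs(struct device *dev)
{
	platform_device_msi_free_irqs_all(dev);
}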
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 7b29cce60ab2..eacf1cba7bf4 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -2,6 +2,7 @@
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && MMU
+ depends on HAVE_ZSMALLOC
depends on CRYPTO_LZO || CRYPTO_ZSTD || CRYPTO_LZ4 || CRYPTO_LZ4HC || CRYPTO_842
select ZSMALLOC
help
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 1912bee22dd4..9810edbb272d 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -2033,6 +2033,39 @@ static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable
return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
}
+static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_snp_vlek_load input;
+ void *blob;
+ int ret;
+
+ if (!sev->snp_initialized || !argp->data)
+ return -EINVAL;
+
+ if (!writable)
+ return -EPERM;
+
+ if (copy_from_user(&input, u64_to_user_ptr(argp->data), sizeof(input)))
+ return -EFAULT;
+
+ if (input.len != sizeof(input) || input.vlek_wrapped_version != 0)
+ return -EINVAL;
+
+ blob = psp_copy_user_blob(input.vlek_wrapped_address,
+ sizeof(struct sev_user_data_snp_wrapped_vlek_hashstick));
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ input.vlek_wrapped_address = __psp_pa(blob);
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error);
+
+ kfree(blob);
+
+ return ret;
+}
+
static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
void __user *argp = (void __user *)arg;
@@ -2093,6 +2126,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
case SNP_SET_CONFIG:
ret = sev_ioctl_do_snp_set_config(&input, writable);
break;
+ case SNP_VLEK_LOAD:
+ ret = sev_ioctl_do_snp_vlek_load(&input, writable);
+ break;
default:
ret = -EINVAL;
goto out;
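[Editor's note: the following is an illustrative sketch, not part of the diff.]
From userspace the new command is reached through the regular /dev/sev issue-command ioctl. The sketch below is hedged: the field names (len, vlek_wrapped_version, vlek_wrapped_address) come from the handler above, but the availability of SNP_VLEK_LOAD and struct sev_user_data_snp_vlek_load in <linux/psp-sev.h> is assumed, and error handling is abbreviated. Write access to /dev/sev is required, matching the writable check in the handler.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-sev.h>

/* Sketch: load a wrapped VLEK hashstick into the PSP via SNP_VLEK_LOAD. */
static int example_snp_vlek_load(const void *wrapped_blob)
{
	struct sev_user_data_snp_vlek_load input = {
		.len = sizeof(input),
		.vlek_wrapped_version = 0,
		.vlek_wrapped_address = (uint64_t)(uintptr_t)wrapped_blob,
	};
	struct sev_issue_cmd arg = {
		.cmd = SNP_VLEK_LOAD,
		.data = (uint64_t)(uintptr_t)&input,
	};
	int fd, ret;

	fd = open("/dev/sev", O_RDWR);	/* needs write access for -EPERM check */
	if (fd < 0)
		return -1;

	ret = ioctl(fd, SEV_ISSUE_CMD, &arg);
	close(fd);
	return ret ? -1 : 0;
}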
diff --git a/drivers/dax/cxl.c b/drivers/dax/cxl.c
index c696837ab23c..9b29e732b39a 100644
--- a/drivers/dax/cxl.c
+++ b/drivers/dax/cxl.c
@@ -43,6 +43,7 @@ static struct cxl_driver cxl_dax_region_driver = {
module_cxl_driver(cxl_dax_region_driver);
MODULE_ALIAS_CXL(CXL_DEVICE_DAX_REGION);
+MODULE_DESCRIPTION("CXL DAX: direct access to CXL regions");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(CXL);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index eb61598247a9..2051e4f73c8a 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -482,6 +482,7 @@ static void __exit dax_exit(void)
}
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Device DAX: direct access device driver");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index b9da69f92697..5e7c53f18491 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -168,5 +168,6 @@ MODULE_SOFTDEP("pre: cxl_acpi");
MODULE_ALIAS("platform:hmem*");
MODULE_ALIAS("platform:hmem_platform*");
+MODULE_DESCRIPTION("HMEM DAX: direct access to 'specific purpose' memory");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 4fe9d040e375..e97d47f42ee2 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -299,6 +299,7 @@ static void __exit dax_kmem_exit(void)
}
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("KMEM DAX: map dax-devices as System-RAM");
MODULE_LICENSE("GPL v2");
module_init(dax_kmem_init);
module_exit(dax_kmem_exit);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index f3c6c67b8412..c8ebf4e281f2 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -94,6 +94,7 @@ static void __exit dax_pmem_exit(void)
}
module_exit(dax_pmem_exit);
+MODULE_DESCRIPTION("PMEM DAX: direct access to persistent memory");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index aca71d7fccc1..e16d1d40d773 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -606,6 +606,7 @@ static void __exit dax_core_exit(void)
}
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("DAX: direct access to differentiated memory");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index e4dc53a36428..b46eb8a552d7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -35,6 +35,7 @@ config UDMABUF
default n
depends on DMA_SHARED_BUFFER
depends on MEMFD_CREATE || COMPILE_TEST
+ depends on MMU
help
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index c40645999648..047c3cd2ceff 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -10,6 +10,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
+#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
@@ -25,9 +26,16 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is
struct udmabuf {
pgoff_t pagecount;
- struct page **pages;
+ struct folio **folios;
struct sg_table *sg;
struct miscdevice *device;
+ pgoff_t *offsets;
+ struct list_head unpin_list;
+};
+
+struct udmabuf_folio {
+ struct folio *folio;
+ struct list_head list;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -35,12 +43,15 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct udmabuf *ubuf = vma->vm_private_data;
pgoff_t pgoff = vmf->pgoff;
+ unsigned long pfn;
if (pgoff >= ubuf->pagecount)
return VM_FAULT_SIGBUS;
- vmf->page = ubuf->pages[pgoff];
- get_page(vmf->page);
- return 0;
+
+ pfn = folio_pfn(ubuf->folios[pgoff]);
+ pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
+
+ return vmf_insert_pfn(vma, vmf->address, pfn);
}
static const struct vm_operations_struct udmabuf_vm_ops = {
@@ -56,17 +67,28 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
vma->vm_ops = &udmabuf_vm_ops;
vma->vm_private_data = ubuf;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
struct udmabuf *ubuf = buf->priv;
+ struct page **pages;
void *vaddr;
+ pgoff_t pg;
dma_resv_assert_held(buf->resv);
- vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
+ pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (pg = 0; pg < ubuf->pagecount; pg++)
+ pages[pg] = &ubuf->folios[pg]->page;
+
+ vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
+ kfree(pages);
if (!vaddr)
return -EINVAL;
@@ -88,23 +110,30 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
{
struct udmabuf *ubuf = buf->priv;
struct sg_table *sg;
+ struct scatterlist *sgl;
+ unsigned int i = 0;
int ret;
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
if (!sg)
return ERR_PTR(-ENOMEM);
- ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
- 0, ubuf->pagecount << PAGE_SHIFT,
- GFP_KERNEL);
+
+ ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
if (ret < 0)
- goto err;
+ goto err_alloc;
+
+ for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
+ sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
+ ubuf->offsets[i]);
+
ret = dma_map_sgtable(dev, sg, direction, 0);
if (ret < 0)
- goto err;
+ goto err_map;
return sg;
-err:
+err_map:
sg_free_table(sg);
+err_alloc:
kfree(sg);
return ERR_PTR(ret);
}
@@ -130,18 +159,45 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
return put_sg_table(at->dev, sg, direction);
}
+static void unpin_all_folios(struct list_head *unpin_list)
+{
+ struct udmabuf_folio *ubuf_folio;
+
+ while (!list_empty(unpin_list)) {
+ ubuf_folio = list_first_entry(unpin_list,
+ struct udmabuf_folio, list);
+ unpin_folio(ubuf_folio->folio);
+
+ list_del(&ubuf_folio->list);
+ kfree(ubuf_folio);
+ }
+}
+
+static int add_to_unpin_list(struct list_head *unpin_list,
+ struct folio *folio)
+{
+ struct udmabuf_folio *ubuf_folio;
+
+ ubuf_folio = kzalloc(sizeof(*ubuf_folio), GFP_KERNEL);
+ if (!ubuf_folio)
+ return -ENOMEM;
+
+ ubuf_folio->folio = folio;
+ list_add_tail(&ubuf_folio->list, unpin_list);
+ return 0;
+}
+
static void release_udmabuf(struct dma_buf *buf)
{
struct udmabuf *ubuf = buf->priv;
struct device *dev = ubuf->device->this_device;
- pgoff_t pg;
if (ubuf->sg)
put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
- for (pg = 0; pg < ubuf->pagecount; pg++)
- put_page(ubuf->pages[pg]);
- kfree(ubuf->pages);
+ unpin_all_folios(&ubuf->unpin_list);
+ kfree(ubuf->offsets);
+ kfree(ubuf->folios);
kfree(ubuf);
}
@@ -194,24 +250,64 @@ static const struct dma_buf_ops udmabuf_ops = {
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
+static int check_memfd_seals(struct file *memfd)
+{
+ int seals;
+
+ if (!memfd)
+ return -EBADFD;
+
+ if (!shmem_file(memfd) && !is_file_hugepages(memfd))
+ return -EBADFD;
+
+ seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
+ if (seals == -EINVAL)
+ return -EBADFD;
+
+ if ((seals & SEALS_WANTED) != SEALS_WANTED ||
+ (seals & SEALS_DENIED) != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int export_udmabuf(struct udmabuf *ubuf,
+ struct miscdevice *device,
+ u32 flags)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *buf;
+
+ ubuf->device = device;
+ exp_info.ops = &udmabuf_ops;
+ exp_info.size = ubuf->pagecount << PAGE_SHIFT;
+ exp_info.priv = ubuf;
+ exp_info.flags = O_RDWR;
+
+ buf = dma_buf_export(&exp_info);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ return dma_buf_fd(buf, flags);
+}
+
static long udmabuf_create(struct miscdevice *device,
struct udmabuf_create_list *head,
struct udmabuf_create_item *list)
{
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ pgoff_t pgoff, pgcnt, pglimit, pgbuf = 0;
+ long nr_folios, ret = -EINVAL;
struct file *memfd = NULL;
- struct address_space *mapping = NULL;
+ struct folio **folios;
struct udmabuf *ubuf;
- struct dma_buf *buf;
- pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
- struct page *page;
- int seals, ret = -EINVAL;
- u32 i, flags;
+ u32 i, j, k, flags;
+ loff_t end;
ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
if (!ubuf)
return -ENOMEM;
+ INIT_LIST_HEAD(&ubuf->unpin_list);
pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
@@ -226,66 +322,84 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf->pagecount)
goto err;
- ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
+ ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
GFP_KERNEL);
- if (!ubuf->pages) {
+ if (!ubuf->folios) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
+ GFP_KERNEL);
+ if (!ubuf->offsets) {
ret = -ENOMEM;
goto err;
}
pgbuf = 0;
for (i = 0; i < head->count; i++) {
- ret = -EBADFD;
memfd = fget(list[i].memfd);
- if (!memfd)
+ ret = check_memfd_seals(memfd);
+ if (ret < 0)
goto err;
- mapping = memfd->f_mapping;
- if (!shmem_mapping(mapping))
- goto err;
- seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
- if (seals == -EINVAL)
+
+ pgcnt = list[i].size >> PAGE_SHIFT;
+ folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
+ if (!folios) {
+ ret = -ENOMEM;
goto err;
- ret = -EINVAL;
- if ((seals & SEALS_WANTED) != SEALS_WANTED ||
- (seals & SEALS_DENIED) != 0)
+ }
+
+ end = list[i].offset + (pgcnt << PAGE_SHIFT) - 1;
+ ret = memfd_pin_folios(memfd, list[i].offset, end,
+ folios, pgcnt, &pgoff);
+ if (ret <= 0) {
+ kfree(folios);
+ if (!ret)
+ ret = -EINVAL;
goto err;
- pgoff = list[i].offset >> PAGE_SHIFT;
- pgcnt = list[i].size >> PAGE_SHIFT;
- for (pgidx = 0; pgidx < pgcnt; pgidx++) {
- page = shmem_read_mapping_page(mapping, pgoff + pgidx);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto err;
+ }
+
+ nr_folios = ret;
+ pgoff >>= PAGE_SHIFT;
+ for (j = 0, k = 0; j < pgcnt; j++) {
+ ubuf->folios[pgbuf] = folios[k];
+ ubuf->offsets[pgbuf] = pgoff << PAGE_SHIFT;
+
+ if (j == 0 || ubuf->folios[pgbuf-1] != folios[k]) {
+ ret = add_to_unpin_list(&ubuf->unpin_list,
+ folios[k]);
+ if (ret < 0) {
+ kfree(folios);
+ goto err;
+ }
+ }
+
+ pgbuf++;
+ if (++pgoff == folio_nr_pages(folios[k])) {
+ pgoff = 0;
+ if (++k == nr_folios)
+ break;
}
- ubuf->pages[pgbuf++] = page;
}
+
+ kfree(folios);
fput(memfd);
memfd = NULL;
}
- exp_info.ops = &udmabuf_ops;
- exp_info.size = ubuf->pagecount << PAGE_SHIFT;
- exp_info.priv = ubuf;
- exp_info.flags = O_RDWR;
-
- ubuf->device = device;
- buf = dma_buf_export(&exp_info);
- if (IS_ERR(buf)) {
- ret = PTR_ERR(buf);
+ flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
+ ret = export_udmabuf(ubuf, device, flags);
+ if (ret < 0)
goto err;
- }
- flags = 0;
- if (head->flags & UDMABUF_FLAGS_CLOEXEC)
- flags |= O_CLOEXEC;
- return dma_buf_fd(buf, flags);
+ return ret;
err:
- while (pgbuf > 0)
- put_page(ubuf->pages[--pgbuf]);
if (memfd)
fput(memfd);
- kfree(ubuf->pages);
+ unpin_all_folios(&ubuf->unpin_list);
+ kfree(ubuf->offsets);
+ kfree(ubuf->folios);
kfree(ubuf);
return ret;
}
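[Editor's note: the following is an illustrative sketch, not part of the diff.]
The rework above replaces the per-page shmem walk with memfd_pin_folios() and, via the new is_file_hugepages() check, also accepts hugetlb-backed memfds. The userspace contract is unchanged; a minimal sketch of creating a dma-buf from a sealed memfd using the existing UAPI in <linux/udmabuf.h> follows. As in check_memfd_seals(), the memfd must carry F_SEAL_SHRINK and must not carry F_SEAL_WRITE, and offset/size must be page-aligned.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

/* Turn a page-aligned, sealed memfd region into a dma-buf fd. */
static int example_udmabuf_from_memfd(size_t size)
{
	struct udmabuf_create create = { 0 };
	int memfd, devfd, buffd;

	memfd = memfd_create("example", MFD_ALLOW_SEALING);
	if (memfd < 0)
		return -1;
	ftruncate(memfd, size);
	/* udmabuf requires F_SEAL_SHRINK and rejects F_SEAL_WRITE. */
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	devfd = open("/dev/udmabuf", O_RDWR);
	if (devfd < 0)
		return -1;

	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;
	create.size   = size;

	buffd = ioctl(devfd, UDMABUF_CREATE, &create);	/* returns dma-buf fd */
	close(devfd);
	close(memfd);
	return buffd;
}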
diff --git a/drivers/firewire/.kunitconfig b/drivers/firewire/.kunitconfig
index 60d9e7c35417..21b7e9eef63d 100644
--- a/drivers/firewire/.kunitconfig
+++ b/drivers/firewire/.kunitconfig
@@ -4,3 +4,5 @@ CONFIG_FIREWIRE=y
CONFIG_FIREWIRE_KUNIT_UAPI_TEST=y
CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST=y
CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST=y
+CONFIG_FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST=y
+CONFIG_FIREWIRE_KUNIT_OHCI_SERDES_TEST=y
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 5268b3f0a25a..905c82e26ce7 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -66,6 +66,21 @@ config FIREWIRE_KUNIT_PACKET_SERDES_TEST
For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
+config FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST
+ tristate "KUnit tests for helpers of self ID sequence" if !KUNIT_ALL_TESTS
+ depends on FIREWIRE && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the KUnit tests for helpers of self ID sequence.
+
+ KUnit tests run during boot and output the results to the debug
+ log in TAP format (https://testanything.org/). Only useful for
+ kernel devs running KUnit test harness and are not for inclusion
+ into a production build.
+
+ For more information on KUnit and unit tests in general, refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
depends on PCI && FIREWIRE && MMU
@@ -77,6 +92,22 @@ config FIREWIRE_OHCI
To compile this driver as a module, say M here: The module will be
called firewire-ohci.
+config FIREWIRE_KUNIT_OHCI_SERDES_TEST
+ tristate "KUnit tests for serialization/deserialization of data in buffers/registers" if !KUNIT_ALL_TESTS
+ depends on FIREWIRE && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the KUnit tests to check serialization and deserialization
+ of data in buffers and registers defined in 1394 OHCI specification.
+
+ KUnit tests run during boot and output the results to the debug
+ log in TAP format (https://testanything.org/). Only useful for
+ kernel devs running KUnit test harness and are not for inclusion
+ into a production build.
+
+ For more information on KUnit and unit tests in general, refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
config FIREWIRE_SBP2
tristate "Storage devices (SBP-2 protocol)"
depends on FIREWIRE && SCSI
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index 75c47d046925..1ff550e93a8c 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -18,3 +18,5 @@ obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o
obj-$(CONFIG_FIREWIRE_KUNIT_UAPI_TEST) += uapi-test.o
obj-$(CONFIG_FIREWIRE_KUNIT_PACKET_SERDES_TEST) += packet-serdes-test.o
+obj-$(CONFIG_FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST) += self-id-sequence-helper-test.o
+obj-$(CONFIG_FIREWIRE_KUNIT_OHCI_SERDES_TEST) += ohci-serdes-test.o
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index af70e74f9a7e..b3eda38a36f3 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -22,6 +22,8 @@
#include "core.h"
+#include <trace/events/firewire.h>
+
/*
* Isochronous DMA context management
*/
@@ -148,12 +150,20 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
ctx->callback.sc = callback;
ctx->callback_data = callback_data;
+ trace_isoc_outbound_allocate(ctx, channel, speed);
+ trace_isoc_inbound_single_allocate(ctx, channel, header_size);
+ trace_isoc_inbound_multiple_allocate(ctx);
+
return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
+ trace_isoc_outbound_destroy(ctx);
+ trace_isoc_inbound_single_destroy(ctx);
+ trace_isoc_inbound_multiple_destroy(ctx);
+
ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
@@ -161,12 +171,18 @@ EXPORT_SYMBOL(fw_iso_context_destroy);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags)
{
+ trace_isoc_outbound_start(ctx, cycle);
+ trace_isoc_inbound_single_start(ctx, cycle, sync, tags);
+ trace_isoc_inbound_multiple_start(ctx, cycle, sync, tags);
+
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
+ trace_isoc_inbound_multiple_channels(ctx, *channels);
+
return ctx->card->driver->set_iso_channels(ctx, channels);
}
@@ -175,24 +191,40 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
+ trace_isoc_outbound_queue(ctx, payload, packet);
+ trace_isoc_inbound_single_queue(ctx, payload, packet);
+ trace_isoc_inbound_multiple_queue(ctx, payload, packet);
+
return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
{
+ trace_isoc_outbound_flush(ctx);
+ trace_isoc_inbound_single_flush(ctx);
+ trace_isoc_inbound_multiple_flush(ctx);
+
ctx->card->driver->flush_queue_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
+ trace_isoc_outbound_flush_completions(ctx);
+ trace_isoc_inbound_single_flush_completions(ctx);
+ trace_isoc_inbound_multiple_flush_completions(ctx);
+
return ctx->card->driver->flush_iso_completions(ctx);
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
+ trace_isoc_outbound_stop(ctx);
+ trace_isoc_inbound_single_stop(ctx);
+ trace_isoc_inbound_multiple_stop(ctx);
+
return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 8107eebd4296..b4e637aa6932 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -20,84 +20,9 @@
#include <asm/byteorder.h>
#include "core.h"
+#include "phy-packet-definitions.h"
#include <trace/events/firewire.h>
-#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
-#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
-#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
-#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
-#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
-#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
-#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
-#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
-
-#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
-
-#define SELFID_PORT_CHILD 0x3
-#define SELFID_PORT_PARENT 0x2
-#define SELFID_PORT_NCONN 0x1
-#define SELFID_PORT_NONE 0x0
-
-static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
-{
- u32 q;
- int port_type, shift, seq;
-
- *total_port_count = 0;
- *child_port_count = 0;
-
- shift = 6;
- q = *sid;
- seq = 0;
-
- while (1) {
- port_type = (q >> shift) & 0x03;
- switch (port_type) {
- case SELFID_PORT_CHILD:
- (*child_port_count)++;
- fallthrough;
- case SELFID_PORT_PARENT:
- case SELFID_PORT_NCONN:
- (*total_port_count)++;
- fallthrough;
- case SELFID_PORT_NONE:
- break;
- }
-
- shift -= 2;
- if (shift == 0) {
- if (!SELF_ID_MORE_PACKETS(q))
- return sid + 1;
-
- shift = 16;
- sid++;
- q = *sid;
-
- /*
- * Check that the extra packets actually are
- * extended self ID packets and that the
- * sequence numbers in the extended self ID
- * packets increase as expected.
- */
-
- if (!SELF_ID_EXTENDED(q) ||
- seq != SELF_ID_EXT_SEQUENCE(q))
- return NULL;
-
- seq++;
- }
- }
-}
-
-static int get_port_type(u32 *sid, int port_index)
-{
- int index, shift;
-
- index = (port_index + 5) / 8;
- shift = 16 - ((port_index + 5) & 7) * 2;
- return (sid[index] >> shift) & 0x03;
-}
-
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
struct fw_node *node;
@@ -107,10 +32,11 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
return NULL;
node->color = color;
- node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
- node->link_on = SELF_ID_LINK_ON(sid);
- node->phy_speed = SELF_ID_PHY_SPEED(sid);
- node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
+ node->node_id = LOCAL_BUS | phy_packet_self_id_get_phy_id(sid);
+ node->link_on = phy_packet_self_id_zero_get_link_active(sid);
+ // NOTE: Only two bits, thus only for SCODE_100, SCODE_200, SCODE_400, and SCODE_BETA.
+ node->phy_speed = phy_packet_self_id_zero_get_scode(sid);
+ node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid);
node->port_count = port_count;
refcount_set(&node->ref_count, 1);
@@ -169,13 +95,16 @@ static inline struct fw_node *fw_node(struct list_head *l)
* internally consistent. On success this function returns the
* fw_node corresponding to the local card otherwise NULL.
*/
-static struct fw_node *build_tree(struct fw_card *card,
- u32 *sid, int self_id_count)
+static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self_id_count,
+ unsigned int generation)
{
+ struct self_id_sequence_enumerator enumerator = {
+ .cursor = sid,
+ .quadlet_count = self_id_count,
+ };
struct fw_node *node, *child, *local_node, *irm_node;
- struct list_head stack, *h;
- u32 *next_sid, *end, q;
- int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
+ struct list_head stack;
+ int phy_id, stack_depth;
int gap_count;
bool beta_repeaters_present;
@@ -183,24 +112,56 @@ static struct fw_node *build_tree(struct fw_card *card,
node = NULL;
INIT_LIST_HEAD(&stack);
stack_depth = 0;
- end = sid + self_id_count;
phy_id = 0;
irm_node = NULL;
- gap_count = SELF_ID_GAP_COUNT(*sid);
+ gap_count = phy_packet_self_id_zero_get_gap_count(*sid);
beta_repeaters_present = false;
- while (sid < end) {
- next_sid = count_ports(sid, &port_count, &child_port_count);
+ while (enumerator.quadlet_count > 0) {
+ unsigned int child_port_count = 0;
+ unsigned int total_port_count = 0;
+ unsigned int parent_count = 0;
+ unsigned int quadlet_count;
+ const u32 *self_id_sequence;
+ unsigned int port_capacity;
+ enum phy_packet_self_id_port_status port_status;
+ unsigned int port_index;
+ struct list_head *h;
+ int i;
+
+ self_id_sequence = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+ if (IS_ERR(self_id_sequence)) {
+ if (PTR_ERR(self_id_sequence) != -ENODATA) {
+ fw_err(card, "inconsistent extended self IDs: %ld\n",
+ PTR_ERR(self_id_sequence));
+ return NULL;
+ }
+ break;
+ }
- if (next_sid == NULL) {
- fw_err(card, "inconsistent extended self IDs\n");
- return NULL;
+ port_capacity = self_id_sequence_get_port_capacity(quadlet_count);
+ trace_self_id_sequence(card->index, self_id_sequence, quadlet_count, generation);
+
+ for (port_index = 0; port_index < port_capacity; ++port_index) {
+ port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count,
+ port_index);
+ switch (port_status) {
+ case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
+ ++child_port_count;
+ fallthrough;
+ case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT:
+ case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN:
+ ++total_port_count;
+ fallthrough;
+ case PHY_PACKET_SELF_ID_PORT_STATUS_NONE:
+ default:
+ break;
+ }
}
- q = *sid;
- if (phy_id != SELF_ID_PHY_ID(q)) {
+ if (phy_id != phy_packet_self_id_get_phy_id(self_id_sequence[0])) {
fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
- phy_id, SELF_ID_PHY_ID(q));
+ phy_id, phy_packet_self_id_get_phy_id(self_id_sequence[0]));
return NULL;
}
@@ -221,7 +182,7 @@ static struct fw_node *build_tree(struct fw_card *card,
*/
child = fw_node(h);
- node = fw_node_create(q, port_count, card->color);
+ node = fw_node_create(self_id_sequence[0], total_port_count, card->color);
if (node == NULL) {
fw_err(card, "out of memory while building topology\n");
return NULL;
@@ -230,48 +191,40 @@ static struct fw_node *build_tree(struct fw_card *card,
if (phy_id == (card->node_id & 0x3f))
local_node = node;
- if (SELF_ID_CONTENDER(q))
+ if (phy_packet_self_id_zero_get_contender(self_id_sequence[0]))
irm_node = node;
- parent_count = 0;
-
- for (i = 0; i < port_count; i++) {
- switch (get_port_type(sid, i)) {
- case SELFID_PORT_PARENT:
- /*
- * Who's your daddy? We dont know the
- * parent node at this time, so we
- * temporarily abuse node->color for
- * remembering the entry in the
- * node->ports array where the parent
- * node should be. Later, when we
- * handle the parent node, we fix up
- * the reference.
- */
- parent_count++;
+ for (port_index = 0; port_index < total_port_count; ++port_index) {
+ port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count,
+ port_index);
+ switch (port_status) {
+ case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT:
+ // Who's your daddy? We don't know the parent node at this time, so
+ // we temporarily abuse node->color for remembering the entry in
+ // the node->ports array where the parent node should be. Later,
+ // when we handle the parent node, we fix up the reference.
+ ++parent_count;
- node->color = i;
+ node->color = port_index;
break;
- case SELFID_PORT_CHILD:
- node->ports[i] = child;
- /*
- * Fix up parent reference for this
- * child node.
- */
+ case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
+ node->ports[port_index] = child;
+ // Fix up parent reference for this child node.
child->ports[child->color] = node;
child->color = card->color;
child = fw_node(child->link.next);
break;
+ case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN:
+ case PHY_PACKET_SELF_ID_PORT_STATUS_NONE:
+ default:
+ break;
}
}
- /*
- * Check that the node reports exactly one parent
- * port, except for the root, which of course should
- * have no parents.
- */
- if ((next_sid == end && parent_count != 0) ||
- (next_sid < end && parent_count != 1)) {
+ // Check that the node reports exactly one parent port, except for the root, which
+ // of course should have no parents.
+ if ((enumerator.quadlet_count == 0 && parent_count != 0) ||
+ (enumerator.quadlet_count > 0 && parent_count != 1)) {
fw_err(card, "parent port inconsistency for node %d: "
"parent_count=%d\n", phy_id, parent_count);
return NULL;
@@ -282,20 +235,16 @@ static struct fw_node *build_tree(struct fw_card *card,
list_add_tail(&node->link, &stack);
stack_depth += 1 - child_port_count;
- if (node->phy_speed == SCODE_BETA &&
- parent_count + child_port_count > 1)
+ if (node->phy_speed == SCODE_BETA && parent_count + child_port_count > 1)
beta_repeaters_present = true;
- /*
- * If PHYs report different gap counts, set an invalid count
- * which will force a gap count reconfiguration and a reset.
- */
- if (SELF_ID_GAP_COUNT(q) != gap_count)
+ // If PHYs report different gap counts, set an invalid count which will force a gap
+ // count reconfiguration and a reset.
+ if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count)
gap_count = 0;
update_hop_count(node);
- sid = next_sid;
phy_id++;
}
@@ -536,7 +485,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
- local_node = build_tree(card, self_ids, self_id_count);
+ local_node = build_tree(card, self_ids, self_id_count, generation);
update_topology_map(card, self_ids, self_id_count);
diff --git a/drivers/firewire/core-trace.c b/drivers/firewire/core-trace.c
index 96cbd9d384dc..b70947fc7b8d 100644
--- a/drivers/firewire/core-trace.c
+++ b/drivers/firewire/core-trace.c
@@ -1,5 +1,16 @@
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2024 Takashi Sakamoto
+#include <linux/types.h>
+#include <linux/err.h>
+#include "packet-header-definitions.h"
+#include "phy-packet-definitions.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/firewire.h>
+
+#ifdef TRACEPOINTS_ENABLED
+EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_single_completions);
+EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_multiple_completions);
+EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_outbound_completions);
+#endif
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 76ab6a209768..4d2fc1f31fec 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -29,20 +29,13 @@
#include <asm/byteorder.h>
#include "core.h"
-#include <trace/events/firewire.h>
#include "packet-header-definitions.h"
+#include "phy-packet-definitions.h"
+#include <trace/events/firewire.h>
#define HEADER_DESTINATION_IS_BROADCAST(header) \
((async_header_get_destination(header) & 0x3f) == 0x3f)
-#define PHY_PACKET_CONFIG 0x0
-#define PHY_PACKET_LINK_ON 0x1
-#define PHY_PACKET_SELF_ID 0x2
-
-#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
-#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
-#define PHY_IDENTIFIER(id) ((id) << 30)
-
/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
@@ -481,10 +474,14 @@ void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
long timeout = DIV_ROUND_UP(HZ, 10);
- u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);
+ u32 data = 0;
+
+ phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);
- if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
- data |= PHY_CONFIG_ROOT_ID(node_id);
+ if (node_id != FW_PHY_CONFIG_NO_NODE_ID) {
+ phy_packet_phy_config_set_root_id(&data, node_id);
+ phy_packet_phy_config_set_force_root_node(&data, true);
+ }
if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
gap_count = card->driver->read_phy_reg(card, 1);
@@ -495,7 +492,8 @@ void fw_send_phy_config(struct fw_card *card,
if (gap_count == 63)
return;
}
- data |= PHY_CONFIG_GAP_COUNT(gap_count);
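+ // Setting the gap count only takes effect when the gap count optimization (T) bit is also set.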
+ phy_packet_phy_config_set_gap_count(&data, gap_count);
+ phy_packet_phy_config_set_gap_count_optimization(&data, true);
mutex_lock(&phy_config_mutex);
diff --git a/drivers/firewire/ohci-serdes-test.c b/drivers/firewire/ohci-serdes-test.c
new file mode 100644
index 000000000000..304a09ff528e
--- /dev/null
+++ b/drivers/firewire/ohci-serdes-test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// ohci-serdes-test.c - An application of KUnit to check serialization/deserialization of data in
+// buffers and registers defined in the 1394 OHCI specification.
+//
+// Copyright (c) 2024 Takashi Sakamoto
+
+#include <kunit/test.h>
+
+#include "ohci.h"
+
+
+static void test_self_id_count_register_deserialization(struct kunit *test)
+{
+ const u32 expected = 0x803d0594;
+
+ bool is_error = ohci1394_self_id_count_is_error(expected);
+ u8 generation = ohci1394_self_id_count_get_generation(expected);
+ u32 size = ohci1394_self_id_count_get_size(expected);
+
+ KUNIT_EXPECT_TRUE(test, is_error);
+ KUNIT_EXPECT_EQ(test, 0x3d, generation);
+ KUNIT_EXPECT_EQ(test, 0x165, size);
+}
+
+static void test_self_id_receive_buffer_deserialization(struct kunit *test)
+{
+ const u32 buffer[] = {
+ 0x0006f38b,
+ 0x807fcc56,
+ 0x7f8033a9,
+ 0x8145cc5e,
+ 0x7eba33a1,
+ };
+
+ u8 generation = ohci1394_self_id_receive_q0_get_generation(buffer[0]);
+ u16 timestamp = ohci1394_self_id_receive_q0_get_timestamp(buffer[0]);
+
+ KUNIT_EXPECT_EQ(test, 0x6, generation);
+ KUNIT_EXPECT_EQ(test, 0xf38b, timestamp);
+}
+
+static struct kunit_case ohci_serdes_test_cases[] = {
+ KUNIT_CASE(test_self_id_count_register_deserialization),
+ KUNIT_CASE(test_self_id_receive_buffer_deserialization),
+ {}
+};
+
+static struct kunit_suite ohci_serdes_test_suite = {
+ .name = "firewire-ohci-serdes",
+ .test_cases = ohci_serdes_test_cases,
+};
+kunit_test_suite(ohci_serdes_test_suite);
+
+MODULE_DESCRIPTION("FireWire buffers and registers serialization/deserialization unit test suite");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index f6de0b3a9a55..314a29c0fd3e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -41,6 +41,14 @@
#include "core.h"
#include "ohci.h"
#include "packet-header-definitions.h"
+#include "phy-packet-definitions.h"
+
+#include <trace/events/firewire.h>
+
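+// Forward declaration so that the tracepoints created below can use the helper.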
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk);
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/firewire_ohci.h>
#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
@@ -437,23 +445,25 @@ static void log_irqs(struct fw_ohci *ohci, u32 evt)
? " ?" : "");
}
-static const char *speed[] = {
- [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
-};
-static const char *power[] = {
- [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
- [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
-};
-static const char port[] = { '.', '-', 'p', 'c', };
-
-static char _p(u32 *s, int shift)
-{
- return port[*s >> shift & 3];
-}
-
static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
- u32 *s;
+ static const char *const speed[] = {
+ [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
+ };
+ static const char *const power[] = {
+ [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
+ [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
+ };
+ static const char port[] = {
+ [PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
+ [PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
+ [PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
+ [PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
+ };
+ struct self_id_sequence_enumerator enumerator = {
+ .cursor = ohci->self_id_buffer,
+ .quadlet_count = self_id_count,
+ };
if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
return;
@@ -461,20 +471,46 @@ static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
self_id_count, generation, ohci->node_id);
- for (s = ohci->self_id_buffer; self_id_count--; ++s)
- if ((*s & 1 << 23) == 0)
- ohci_notice(ohci,
- "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
- *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
- speed[*s >> 14 & 3], *s >> 16 & 63,
- power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
- *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
- else
+ while (enumerator.quadlet_count > 0) {
+ unsigned int quadlet_count;
+ unsigned int port_index;
+ const u32 *s;
+ int i;
+
+ s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+ if (IS_ERR(s))
+ break;
+
+ ohci_notice(ohci,
+ "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
+ *s,
+ phy_packet_self_id_get_phy_id(*s),
+ port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
+ speed[*s >> 14 & 3], *s >> 16 & 63,
+ power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
+ *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
+
+ port_index = 3;
+ for (i = 1; i < quadlet_count; ++i) {
ohci_notice(ohci,
"selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
- *s, *s >> 24 & 63,
- _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
- _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
+ s[i],
+ phy_packet_self_id_get_phy_id(s[i]),
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
+ port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
+ );
+
+ port_index += 8;
+ }
+ }
}
static const char *evts[] = {
@@ -841,10 +877,25 @@ static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
-#define cond_le32_to_cpu(v) \
- (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk)
+{
+ return has_be_header_quirk ? (__force __u32)value : le32_to_cpu(value);
+}
+
+static bool has_be_header_quirk(const struct fw_ohci *ohci)
+{
+ return !!(ohci->quirks & QUIRK_BE_HEADERS);
+}
#else
-#define cond_le32_to_cpu(v) le32_to_cpu(v)
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused)
+{
+ return le32_to_cpu(value);
+}
+
+static bool has_be_header_quirk(const struct fw_ohci *ohci)
+{
+ return false;
+}
#endif
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
@@ -854,9 +905,9 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
u32 status, length, tcode;
int evt;
- p.header[0] = cond_le32_to_cpu(buffer[0]);
- p.header[1] = cond_le32_to_cpu(buffer[1]);
- p.header[2] = cond_le32_to_cpu(buffer[2]);
+ p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci));
+ p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci));
+ p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci));
tcode = async_header_get_tcode(p.header);
switch (tcode) {
@@ -868,7 +919,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
break;
case TCODE_READ_BLOCK_REQUEST :
- p.header[3] = cond_le32_to_cpu(buffer[3]);
+ p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
p.header_length = 16;
p.payload_length = 0;
break;
@@ -877,7 +928,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
- p.header[3] = cond_le32_to_cpu(buffer[3]);
+ p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
p.header_length = 16;
p.payload_length = async_header_get_data_length(p.header);
if (p.payload_length > MAX_ASYNC_PAYLOAD) {
@@ -902,7 +953,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
/* FIXME: What to do about evt_* errors? */
length = (p.header_length + p.payload_length + 3) / 4;
- status = cond_le32_to_cpu(buffer[length]);
+ status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci));
evt = (status >> 16) & 0x1f;
p.ack = evt - 16;
@@ -1817,7 +1868,8 @@ static u32 update_bus_time(struct fw_ohci *ohci)
return ohci->bus_time | cycle_time_seconds;
}
-static int get_status_for_port(struct fw_ohci *ohci, int port_index)
+static int get_status_for_port(struct fw_ohci *ohci, int port_index,
+ enum phy_packet_self_id_port_status *status)
{
int reg;
@@ -1831,33 +1883,44 @@ static int get_status_for_port(struct fw_ohci *ohci, int port_index)
switch (reg & 0x0f) {
case 0x06:
- return 2; /* is child node (connected to parent node) */
+ // is child node (connected to parent node)
+ *status = PHY_PACKET_SELF_ID_PORT_STATUS_PARENT;
+ break;
case 0x0e:
- return 3; /* is parent node (connected to child node) */
+ // is parent node (connected to child node)
+ *status = PHY_PACKET_SELF_ID_PORT_STATUS_CHILD;
+ break;
+ default:
+ // not connected
+ *status = PHY_PACKET_SELF_ID_PORT_STATUS_NCONN;
+ break;
}
- return 1; /* not connected */
+
+ return 0;
}
static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
int self_id_count)
{
+ unsigned int left_phy_id = phy_packet_self_id_get_phy_id(self_id);
int i;
- u32 entry;
for (i = 0; i < self_id_count; i++) {
- entry = ohci->self_id_buffer[i];
- if ((self_id & 0xff000000) == (entry & 0xff000000))
+ u32 entry = ohci->self_id_buffer[i];
+ unsigned int right_phy_id = phy_packet_self_id_get_phy_id(entry);
+
+ if (left_phy_id == right_phy_id)
return -1;
- if ((self_id & 0xff000000) < (entry & 0xff000000))
+ if (left_phy_id < right_phy_id)
return i;
}
return i;
}
-static int initiated_reset(struct fw_ohci *ohci)
+static bool initiated_reset(struct fw_ohci *ohci)
{
int reg;
- int ret = 0;
+ bool ret = false;
mutex_lock(&ohci->phy_reg_mutex);
reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
@@ -1870,7 +1933,7 @@ static int initiated_reset(struct fw_ohci *ohci)
if (reg >= 0) {
if ((reg & 0x08) == 0x08) {
/* bit 3 indicates "initiated reset" */
- ret = 0x2;
+ ret = true;
}
}
}
@@ -1886,9 +1949,14 @@ static int initiated_reset(struct fw_ohci *ohci)
*/
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
- int reg, i, pos, status;
- /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
- u32 self_id = 0x8040c800;
+ int reg, i, pos;
+ u32 self_id = 0;
+
+ // link active 1, speed 3, bridge 0, contender 1, more packets 0.
+ phy_packet_set_packet_identifier(&self_id, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID);
+ phy_packet_self_id_zero_set_link_active(&self_id, true);
+ phy_packet_self_id_zero_set_scode(&self_id, SCODE_800);
+ phy_packet_self_id_zero_set_contender(&self_id, true);
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
@@ -1896,26 +1964,30 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
"node ID not valid, new bus reset in progress\n");
return -EBUSY;
}
- self_id |= ((reg & 0x3f) << 24); /* phy ID */
+ phy_packet_self_id_set_phy_id(&self_id, reg & 0x3f);
reg = ohci_read_phy_reg(&ohci->card, 4);
if (reg < 0)
return reg;
- self_id |= ((reg & 0x07) << 8); /* power class */
+ phy_packet_self_id_zero_set_power_class(&self_id, reg & 0x07);
reg = ohci_read_phy_reg(&ohci->card, 1);
if (reg < 0)
return reg;
- self_id |= ((reg & 0x3f) << 16); /* gap count */
+ phy_packet_self_id_zero_set_gap_count(&self_id, reg & 0x3f);
for (i = 0; i < 3; i++) {
- status = get_status_for_port(ohci, i);
- if (status < 0)
- return status;
- self_id |= ((status & 0x3) << (6 - (i * 2)));
+ enum phy_packet_self_id_port_status status;
+ int err;
+
+ err = get_status_for_port(ohci, i, &status);
+ if (err < 0)
+ return err;
+
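+ // All three ports fit in self ID packet zero, hence the quadlet count of 1.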
+ self_id_sequence_set_port_status(&self_id, 1, i, status);
}
- self_id |= initiated_reset(ohci);
+ phy_packet_self_id_zero_set_initiated_reset(&self_id, initiated_reset(ohci));
pos = get_self_id_pos(ohci, self_id, self_id_count);
if (pos >= 0) {
@@ -1933,7 +2005,7 @@ static void bus_reset_work(struct work_struct *work)
struct fw_ohci *ohci =
container_of(work, struct fw_ohci, bus_reset_work);
int self_id_count, generation, new_generation, i, j;
- u32 reg;
+ u32 reg, quadlet;
void *free_rom = NULL;
dma_addr_t free_rom_bus = 0;
bool is_new_root;
@@ -1958,7 +2030,7 @@ static void bus_reset_work(struct work_struct *work)
ohci->is_root = is_new_root;
reg = reg_read(ohci, OHCI1394_SelfIDCount);
- if (reg & OHCI1394_SelfIDCount_selfIDError) {
+ if (ohci1394_self_id_count_is_error(reg)) {
ohci_notice(ohci, "self ID receive error\n");
return;
}
@@ -1968,19 +2040,20 @@ static void bus_reset_work(struct work_struct *work)
* the inverted quadlets and a header quadlet, we shift one
* bit extra to get the actual number of self IDs.
*/
- self_id_count = (reg >> 3) & 0xff;
+ self_id_count = ohci1394_self_id_count_get_size(reg) >> 1;
if (self_id_count > 252) {
ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
return;
}
- generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
+ quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
+ generation = ohci1394_self_id_receive_q0_get_generation(quadlet);
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
- u32 id = cond_le32_to_cpu(ohci->self_id[i]);
- u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
+ u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
+ u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));
if (id != ~id2) {
/*
@@ -2032,7 +2105,8 @@ static void bus_reset_work(struct work_struct *work)
* of self IDs.
*/
- new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
+ reg = reg_read(ohci, OHCI1394_SelfIDCount);
+ new_generation = ohci1394_self_id_count_get_generation(reg);
if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n");
return;
@@ -2130,13 +2204,21 @@ static irqreturn_t irq_handler(int irq, void *data)
*/
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
+ trace_irqs(ohci->card.index, event);
log_irqs(ohci, event);
// The flag is masked again at bus_reset_work() scheduled by selfID event.
if (event & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
- if (event & OHCI1394_selfIDComplete)
+ if (event & OHCI1394_selfIDComplete) {
+ if (trace_self_id_complete_enabled()) {
+ u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);
+
+ trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
+ has_be_header_quirk(ohci));
+ }
queue_work(selfid_workqueue, &ohci->bus_reset_work);
+ }
if (event & OHCI1394_RQPkt)
tasklet_schedule(&ohci->ar_request_ctx.tasklet);
@@ -2781,8 +2863,13 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
}
}
-static void flush_iso_completions(struct iso_context *ctx)
+static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause)
{
+ trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
+ ctx->header_length);
+ trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
+ ctx->header_length);
+
ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
ctx->header_length, ctx->header,
ctx->base.callback_data);
@@ -2796,7 +2883,7 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers)
return;
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
}
ctx_hdr = ctx->header + ctx->header_length;
@@ -2845,7 +2932,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
copy_iso_headers(ctx, (u32 *) (last + 1));
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
return 1;
}
@@ -2880,6 +2967,9 @@ static int handle_ir_buffer_fill(struct context *context,
completed, DMA_FROM_DEVICE);
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
+ trace_isoc_inbound_multiple_completions(&ctx->base, completed,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
+
ctx->base.callback.mc(&ctx->base,
buffer_dma + completed,
ctx->base.callback_data);
@@ -2896,6 +2986,9 @@ static void flush_ir_buffer_fill(struct iso_context *ctx)
ctx->mc_buffer_bus & ~PAGE_MASK,
ctx->mc_completed, DMA_FROM_DEVICE);
+ trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
+
ctx->base.callback.mc(&ctx->base,
ctx->mc_buffer_bus + ctx->mc_completed,
ctx->base.callback_data);
@@ -2960,7 +3053,7 @@ static int handle_it_packet(struct context *context,
if (ctx->header_length + 4 > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers)
return 1;
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
}
ctx_hdr = ctx->header + ctx->header_length;
@@ -2971,7 +3064,7 @@ static int handle_it_packet(struct context *context,
ctx->header_length += 4;
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
return 1;
}
@@ -3536,7 +3629,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
case FW_ISO_CONTEXT_TRANSMIT:
case FW_ISO_CONTEXT_RECEIVE:
if (ctx->header_length != 0)
- flush_iso_completions(ctx);
+ flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
if (ctx->mc_completed != 0)
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index c4d005a9901a..71c2ed84cafb 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -31,7 +31,6 @@
#define OHCI1394_HCControl_softReset 0x00010000
#define OHCI1394_SelfIDBuffer 0x064
#define OHCI1394_SelfIDCount 0x068
-#define OHCI1394_SelfIDCount_selfIDError 0x80000000
#define OHCI1394_IRMultiChanMaskHiSet 0x070
#define OHCI1394_IRMultiChanMaskHiClear 0x074
#define OHCI1394_IRMultiChanMaskLoSet 0x078
@@ -156,4 +155,46 @@
#define OHCI1394_phy_tcode 0xe
+// Self-ID DMA.
+
+#define OHCI1394_SelfIDCount_selfIDError_MASK 0x80000000
+#define OHCI1394_SelfIDCount_selfIDError_SHIFT 31
+#define OHCI1394_SelfIDCount_selfIDGeneration_MASK 0x00ff0000
+#define OHCI1394_SelfIDCount_selfIDGeneration_SHIFT 16
+#define OHCI1394_SelfIDCount_selfIDSize_MASK 0x000007fc
+#define OHCI1394_SelfIDCount_selfIDSize_SHIFT 2
+
+static inline bool ohci1394_self_id_count_is_error(u32 value)
+{
+ return !!((value & OHCI1394_SelfIDCount_selfIDError_MASK) >> OHCI1394_SelfIDCount_selfIDError_SHIFT);
+}
+
+static inline u8 ohci1394_self_id_count_get_generation(u32 value)
+{
+ return (value & OHCI1394_SelfIDCount_selfIDGeneration_MASK) >> OHCI1394_SelfIDCount_selfIDGeneration_SHIFT;
+}
+
+// In the 1394 OHCI specification, the maximum size of the self ID stream is 504 quadlets
+// (= 63 devices * 4 self ID packets * 2 quadlets). The selfIDSize field accommodates this plus
+// the additional first quadlet, since the field is 9 bits wide (0x1ff = 511).
+static inline u32 ohci1394_self_id_count_get_size(u32 value)
+{
+ return (value & OHCI1394_SelfIDCount_selfIDSize_MASK) >> OHCI1394_SelfIDCount_selfIDSize_SHIFT;
+}
+
+#define OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK 0x00ff0000
+#define OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT 16
+#define OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK 0x0000ffff
+#define OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT 0
+
+static inline u8 ohci1394_self_id_receive_q0_get_generation(u32 quadlet0)
+{
+ return (quadlet0 & OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT;
+}
+
+static inline u16 ohci1394_self_id_receive_q0_get_timestamp(u32 quadlet0)
+{
+ return (quadlet0 & OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT;
+}
+
#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/firewire/packet-header-definitions.h b/drivers/firewire/packet-header-definitions.h
index ab9d0fa790d4..87a5a31845c3 100644
--- a/drivers/firewire/packet-header-definitions.h
+++ b/drivers/firewire/packet-header-definitions.h
@@ -7,6 +7,8 @@
#ifndef _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
#define _FIREWIRE_PACKET_HEADER_DEFINITIONS_H
+#include <linux/types.h>
+
#define ASYNC_HEADER_QUADLET_COUNT 4
#define ASYNC_HEADER_Q0_DESTINATION_SHIFT 16
diff --git a/drivers/firewire/packet-serdes-test.c b/drivers/firewire/packet-serdes-test.c
index e83b1fece780..62ba433756ae 100644
--- a/drivers/firewire/packet-serdes-test.c
+++ b/drivers/firewire/packet-serdes-test.c
@@ -10,6 +10,7 @@
#include <linux/firewire-constants.h>
#include "packet-header-definitions.h"
+#include "phy-packet-definitions.h"
static void serialize_async_header_common(u32 header[ASYNC_HEADER_QUADLET_COUNT],
unsigned int dst_id, unsigned int tlabel,
@@ -187,6 +188,89 @@ static void deserialize_isoc_header(u32 header, unsigned int *data_length, unsig
*sy = isoc_header_get_sy(header);
}
+static void serialize_phy_packet_self_id_zero(u32 *quadlet, unsigned int packet_identifier,
+ unsigned int phy_id, bool extended,
+ bool link_is_active, unsigned int gap_count,
+ unsigned int scode, bool is_contender,
+ unsigned int power_class, bool is_initiated_reset,
+ bool has_more_packets)
+{
+ phy_packet_set_packet_identifier(quadlet, packet_identifier);
+ phy_packet_self_id_set_phy_id(quadlet, phy_id);
+ phy_packet_self_id_set_extended(quadlet, extended);
+ phy_packet_self_id_zero_set_link_active(quadlet, link_is_active);
+ phy_packet_self_id_zero_set_gap_count(quadlet, gap_count);
+ phy_packet_self_id_zero_set_scode(quadlet, scode);
+ phy_packet_self_id_zero_set_contender(quadlet, is_contender);
+ phy_packet_self_id_zero_set_power_class(quadlet, power_class);
+ phy_packet_self_id_zero_set_initiated_reset(quadlet, is_initiated_reset);
+ phy_packet_self_id_set_more_packets(quadlet, has_more_packets);
+}
+
+static void deserialize_phy_packet_self_id_zero(u32 quadlet, unsigned int *packet_identifier,
+ unsigned int *phy_id, bool *extended,
+ bool *link_is_active, unsigned int *gap_count,
+ unsigned int *scode, bool *is_contender,
+ unsigned int *power_class,
+ bool *is_initiated_reset, bool *has_more_packets)
+{
+ *packet_identifier = phy_packet_get_packet_identifier(quadlet);
+ *phy_id = phy_packet_self_id_get_phy_id(quadlet);
+ *extended = phy_packet_self_id_get_extended(quadlet);
+ *link_is_active = phy_packet_self_id_zero_get_link_active(quadlet);
+ *gap_count = phy_packet_self_id_zero_get_gap_count(quadlet);
+ *scode = phy_packet_self_id_zero_get_scode(quadlet);
+ *is_contender = phy_packet_self_id_zero_get_contender(quadlet);
+ *power_class = phy_packet_self_id_zero_get_power_class(quadlet);
+ *is_initiated_reset = phy_packet_self_id_zero_get_initiated_reset(quadlet);
+ *has_more_packets = phy_packet_self_id_get_more_packets(quadlet);
+}
+
+static void serialize_phy_packet_self_id_extended(u32 *quadlet, unsigned int packet_identifier,
+ unsigned int phy_id, bool extended,
+ unsigned int sequence, bool has_more_packets)
+{
+ phy_packet_set_packet_identifier(quadlet, packet_identifier);
+ phy_packet_self_id_set_phy_id(quadlet, phy_id);
+ phy_packet_self_id_set_extended(quadlet, extended);
+ phy_packet_self_id_extended_set_sequence(quadlet, sequence);
+ phy_packet_self_id_set_more_packets(quadlet, has_more_packets);
+}
+
+static void deserialize_phy_packet_self_id_extended(u32 quadlet, unsigned int *packet_identifier,
+ unsigned int *phy_id, bool *extended,
+ unsigned int *sequence, bool *has_more_packets)
+{
+ *packet_identifier = phy_packet_get_packet_identifier(quadlet);
+ *phy_id = phy_packet_self_id_get_phy_id(quadlet);
+ *extended = phy_packet_self_id_get_extended(quadlet);
+ *sequence = phy_packet_self_id_extended_get_sequence(quadlet);
+ *has_more_packets = phy_packet_self_id_get_more_packets(quadlet);
+}
+
+static void serialize_phy_packet_phy_config(u32 *quadlet, unsigned int packet_identifier,
+ unsigned int root_id, bool has_force_root_node,
+ bool has_gap_count_optimization, unsigned int gap_count)
+{
+ phy_packet_set_packet_identifier(quadlet, packet_identifier);
+ phy_packet_phy_config_set_root_id(quadlet, root_id);
+ phy_packet_phy_config_set_force_root_node(quadlet, has_force_root_node);
+ phy_packet_phy_config_set_gap_count_optimization(quadlet, has_gap_count_optimization);
+ phy_packet_phy_config_set_gap_count(quadlet, gap_count);
+}
+
+static void deserialize_phy_packet_phy_config(u32 quadlet, unsigned int *packet_identifier,
+ unsigned int *root_id, bool *has_force_root_node,
+ bool *has_gap_count_optimization,
+ unsigned int *gap_count)
+{
+ *packet_identifier = phy_packet_get_packet_identifier(quadlet);
+ *root_id = phy_packet_phy_config_get_root_id(quadlet);
+ *has_force_root_node = phy_packet_phy_config_get_force_root_node(quadlet);
+ *has_gap_count_optimization = phy_packet_phy_config_get_gap_count_optimization(quadlet);
+ *gap_count = phy_packet_phy_config_get_gap_count(quadlet);
+}
+
static void test_async_header_write_quadlet_request(struct kunit *test)
{
static const u32 expected[ASYNC_HEADER_QUADLET_COUNT] = {
@@ -559,6 +643,251 @@ static void test_isoc_header(struct kunit *test)
KUNIT_EXPECT_EQ(test, header, expected);
}
+static void test_phy_packet_self_id_zero_case0(struct kunit *test)
+{
+ // TSB41AB1/2 with 1 port.
+ const u32 expected[] = {0x80458c80};
+ u32 quadlets[] = {0};
+
+ unsigned int packet_identifier;
+ unsigned int phy_id;
+ bool extended;
+ bool link_is_active;
+ unsigned int gap_count;
+ unsigned int scode;
+ bool is_contender;
+ unsigned int power_class;
+ enum phy_packet_self_id_port_status port_status[3];
+ bool is_initiated_reset;
+ bool has_more_packets;
+ unsigned int port_index;
+
+ deserialize_phy_packet_self_id_zero(expected[0], &packet_identifier, &phy_id, &extended,
+ &link_is_active, &gap_count, &scode, &is_contender,
+ &power_class, &is_initiated_reset, &has_more_packets);
+
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
+ KUNIT_EXPECT_EQ(test, 0, phy_id);
+ KUNIT_EXPECT_FALSE(test, extended);
+ KUNIT_EXPECT_TRUE(test, link_is_active);
+ KUNIT_EXPECT_EQ(test, 0x05, gap_count);
+ KUNIT_EXPECT_EQ(test, SCODE_400, scode);
+ KUNIT_EXPECT_TRUE(test, is_contender);
+ KUNIT_EXPECT_EQ(test, 0x4, power_class);
+ KUNIT_EXPECT_FALSE(test, is_initiated_reset);
+ KUNIT_EXPECT_FALSE(test, has_more_packets);
+
+ serialize_phy_packet_self_id_zero(quadlets, packet_identifier, phy_id, extended,
+ link_is_active, gap_count, scode, is_contender,
+ power_class, is_initiated_reset, has_more_packets);
+
+ for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
+ port_status[port_index] =
+ self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
+ }
+
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[0]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[1]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[2]);
+
+ for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
+ self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
+ port_status[port_index]);
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
+static void test_phy_packet_self_id_zero_case1(struct kunit *test)
+{
+ // XIO2213 and TSB81BA3E with 3 ports.
+ const u32 expected[] = {0x817fcc5e};
+ u32 quadlets[] = {0};
+
+ unsigned int packet_identifier;
+ unsigned int phy_id;
+ bool extended;
+ bool link_is_active;
+ unsigned int gap_count;
+ unsigned int scode;
+ bool is_contender;
+ unsigned int power_class;
+ enum phy_packet_self_id_port_status port_status[3];
+ bool is_initiated_reset;
+ bool has_more_packets;
+ unsigned int port_index;
+
+ deserialize_phy_packet_self_id_zero(expected[0], &packet_identifier, &phy_id, &extended,
+ &link_is_active, &gap_count, &scode, &is_contender,
+ &power_class, &is_initiated_reset, &has_more_packets);
+
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
+ KUNIT_EXPECT_EQ(test, 1, phy_id);
+ KUNIT_EXPECT_FALSE(test, extended);
+ KUNIT_EXPECT_TRUE(test, link_is_active);
+ KUNIT_EXPECT_EQ(test, 0x3f, gap_count);
+ KUNIT_EXPECT_EQ(test, SCODE_800, scode);
+ KUNIT_EXPECT_TRUE(test, is_contender);
+ KUNIT_EXPECT_EQ(test, 0x4, power_class);
+ KUNIT_EXPECT_TRUE(test, is_initiated_reset);
+ KUNIT_EXPECT_FALSE(test, has_more_packets);
+
+ serialize_phy_packet_self_id_zero(quadlets, packet_identifier, phy_id, extended,
+ link_is_active, gap_count, scode, is_contender,
+ power_class, is_initiated_reset, has_more_packets);
+
+ for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
+ port_status[port_index] =
+ self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
+ }
+
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[0]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[1]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[2]);
+
+ for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
+ self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
+ port_status[port_index]);
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
+static void test_phy_packet_self_id_zero_and_one(struct kunit *test)
+{
+ // TSB41LV06A with 6 ports.
+ const u32 expected[] = {
+ 0x803f8459,
+ 0x80815000,
+ };
+ u32 quadlets[] = {0, 0};
+
+ unsigned int packet_identifier;
+ unsigned int phy_id;
+ bool extended;
+ bool link_is_active;
+ unsigned int gap_count;
+ unsigned int scode;
+ bool is_contender;
+ unsigned int power_class;
+ enum phy_packet_self_id_port_status port_status[11];
+ bool is_initiated_reset;
+ bool has_more_packets;
+
+ unsigned int sequence;
+ unsigned int port_index;
+
+ deserialize_phy_packet_self_id_zero(expected[0], &packet_identifier, &phy_id, &extended,
+ &link_is_active, &gap_count, &scode, &is_contender,
+ &power_class, &is_initiated_reset, &has_more_packets);
+
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
+ KUNIT_EXPECT_EQ(test, 0, phy_id);
+ KUNIT_EXPECT_FALSE(test, extended);
+ KUNIT_EXPECT_FALSE(test, link_is_active);
+ KUNIT_EXPECT_EQ(test, 0x3f, gap_count);
+ KUNIT_EXPECT_EQ(test, SCODE_400, scode);
+ KUNIT_EXPECT_FALSE(test, is_contender);
+ KUNIT_EXPECT_EQ(test, 0x4, power_class);
+ KUNIT_EXPECT_FALSE(test, is_initiated_reset);
+ KUNIT_EXPECT_TRUE(test, has_more_packets);
+
+ serialize_phy_packet_self_id_zero(quadlets, packet_identifier, phy_id, extended,
+ link_is_active, gap_count, scode, is_contender,
+ power_class, is_initiated_reset, has_more_packets);
+
+ deserialize_phy_packet_self_id_extended(expected[1], &packet_identifier, &phy_id, &extended,
+ &sequence, &has_more_packets);
+
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_SELF_ID, packet_identifier);
+ KUNIT_EXPECT_EQ(test, 0, phy_id);
+ KUNIT_EXPECT_TRUE(test, extended);
+ KUNIT_EXPECT_EQ(test, 0, sequence);
+ KUNIT_EXPECT_FALSE(test, has_more_packets);
+
+ serialize_phy_packet_self_id_extended(&quadlets[1], packet_identifier, phy_id, extended,
+ sequence, has_more_packets);
+
+ for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
+ port_status[port_index] =
+ self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
+ }
+
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[0]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[1]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[2]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[3]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[4]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[5]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[6]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[7]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[8]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[9]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[10]);
+
+ for (port_index = 0; port_index < ARRAY_SIZE(port_status); ++port_index) {
+ self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
+ port_status[port_index]);
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
+static void test_phy_packet_phy_config_force_root_node(struct kunit *test)
+{
+ const u32 expected = 0x02800000;
+ u32 quadlet = 0;
+
+ unsigned int packet_identifier;
+ unsigned int root_id;
+ bool has_force_root_node;
+ bool has_gap_count_optimization;
+ unsigned int gap_count;
+
+ deserialize_phy_packet_phy_config(expected, &packet_identifier, &root_id,
+ &has_force_root_node, &has_gap_count_optimization,
+ &gap_count);
+
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG, packet_identifier);
+ KUNIT_EXPECT_EQ(test, 0x02, root_id);
+ KUNIT_EXPECT_TRUE(test, has_force_root_node);
+ KUNIT_EXPECT_FALSE(test, has_gap_count_optimization);
+ KUNIT_EXPECT_EQ(test, 0, gap_count);
+
+ serialize_phy_packet_phy_config(&quadlet, packet_identifier, root_id, has_force_root_node,
+ has_gap_count_optimization, gap_count);
+
+ KUNIT_EXPECT_EQ(test, quadlet, expected);
+}
+
+static void test_phy_packet_phy_config_gap_count_optimization(struct kunit *test)
+{
+ const u32 expected = 0x034f0000;
+ u32 quadlet = 0;
+
+ unsigned int packet_identifier;
+ unsigned int root_id;
+ bool has_force_root_node;
+ bool has_gap_count_optimization;
+ unsigned int gap_count;
+
+ deserialize_phy_packet_phy_config(expected, &packet_identifier, &root_id,
+ &has_force_root_node, &has_gap_count_optimization,
+ &gap_count);
+
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG, packet_identifier);
+ KUNIT_EXPECT_EQ(test, 0x03, root_id);
+ KUNIT_EXPECT_FALSE(test, has_force_root_node);
+ KUNIT_EXPECT_TRUE(test, has_gap_count_optimization);
+ KUNIT_EXPECT_EQ(test, 0x0f, gap_count);
+
+ serialize_phy_packet_phy_config(&quadlet, packet_identifier, root_id, has_force_root_node,
+ has_gap_count_optimization, gap_count);
+
+ KUNIT_EXPECT_EQ(test, quadlet, expected);
+}
+
static struct kunit_case packet_serdes_test_cases[] = {
KUNIT_CASE(test_async_header_write_quadlet_request),
KUNIT_CASE(test_async_header_write_block_request),
@@ -570,6 +899,11 @@ static struct kunit_case packet_serdes_test_cases[] = {
KUNIT_CASE(test_async_header_lock_request),
KUNIT_CASE(test_async_header_lock_response),
KUNIT_CASE(test_isoc_header),
+ KUNIT_CASE(test_phy_packet_self_id_zero_case0),
+ KUNIT_CASE(test_phy_packet_self_id_zero_case1),
+ KUNIT_CASE(test_phy_packet_self_id_zero_and_one),
+ KUNIT_CASE(test_phy_packet_phy_config_force_root_node),
+ KUNIT_CASE(test_phy_packet_phy_config_gap_count_optimization),
{}
};
diff --git a/drivers/firewire/phy-packet-definitions.h b/drivers/firewire/phy-packet-definitions.h
new file mode 100644
index 000000000000..03c7c606759f
--- /dev/null
+++ b/drivers/firewire/phy-packet-definitions.h
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// phy-packet-definitions.h - Definitions of PHY packets for IEEE 1394.
+//
+// Copyright (c) 2024 Takashi Sakamoto
+
+#ifndef _FIREWIRE_PHY_PACKET_DEFINITIONS_H
+#define _FIREWIRE_PHY_PACKET_DEFINITIONS_H
+
+#include <linux/err.h>
+#include <linux/types.h>
+#define PACKET_IDENTIFIER_MASK 0xc0000000
+#define PACKET_IDENTIFIER_SHIFT 30
+
+static inline unsigned int phy_packet_get_packet_identifier(u32 quadlet)
+{
+ return (quadlet & PACKET_IDENTIFIER_MASK) >> PACKET_IDENTIFIER_SHIFT;
+}
+
+static inline void phy_packet_set_packet_identifier(u32 *quadlet, unsigned int packet_identifier)
+{
+ *quadlet &= ~PACKET_IDENTIFIER_MASK;
+ *quadlet |= (packet_identifier << PACKET_IDENTIFIER_SHIFT) & PACKET_IDENTIFIER_MASK;
+}
+
+#define PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG 0
+
+#define PHY_CONFIG_ROOT_ID_MASK 0x3f000000
+#define PHY_CONFIG_ROOT_ID_SHIFT 24
+#define PHY_CONFIG_FORCE_ROOT_NODE_MASK 0x00800000
+#define PHY_CONFIG_FORCE_ROOT_NODE_SHIFT 23
+#define PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK 0x00400000
+#define PHY_CONFIG_GAP_COUNT_OPTIMIZATION_SHIFT 22
+#define PHY_CONFIG_GAP_COUNT_MASK 0x003f0000
+#define PHY_CONFIG_GAP_COUNT_SHIFT 16
+
+static inline unsigned int phy_packet_phy_config_get_root_id(u32 quadlet)
+{
+ return (quadlet & PHY_CONFIG_ROOT_ID_MASK) >> PHY_CONFIG_ROOT_ID_SHIFT;
+}
+
+static inline void phy_packet_phy_config_set_root_id(u32 *quadlet, unsigned int root_id)
+{
+ *quadlet &= ~PHY_CONFIG_ROOT_ID_MASK;
+ *quadlet |= (root_id << PHY_CONFIG_ROOT_ID_SHIFT) & PHY_CONFIG_ROOT_ID_MASK;
+}
+
+static inline bool phy_packet_phy_config_get_force_root_node(u32 quadlet)
+{
+ return (quadlet & PHY_CONFIG_FORCE_ROOT_NODE_MASK) >> PHY_CONFIG_FORCE_ROOT_NODE_SHIFT;
+}
+
+static inline void phy_packet_phy_config_set_force_root_node(u32 *quadlet, bool has_force_root_node)
+{
+ *quadlet &= ~PHY_CONFIG_FORCE_ROOT_NODE_MASK;
+ *quadlet |= (has_force_root_node << PHY_CONFIG_FORCE_ROOT_NODE_SHIFT) & PHY_CONFIG_FORCE_ROOT_NODE_MASK;
+}
+
+static inline bool phy_packet_phy_config_get_gap_count_optimization(u32 quadlet)
+{
+ return (quadlet & PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK) >> PHY_CONFIG_GAP_COUNT_OPTIMIZATION_SHIFT;
+}
+
+static inline void phy_packet_phy_config_set_gap_count_optimization(u32 *quadlet, bool has_gap_count_optimization)
+{
+ *quadlet &= ~PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK;
+ *quadlet |= (has_gap_count_optimization << PHY_CONFIG_GAP_COUNT_OPTIMIZATION_SHIFT) & PHY_CONFIG_GAP_COUNT_OPTIMIZATION_MASK;
+}
+
+static inline unsigned int phy_packet_phy_config_get_gap_count(u32 quadlet)
+{
+ return (quadlet & PHY_CONFIG_GAP_COUNT_MASK) >> PHY_CONFIG_GAP_COUNT_SHIFT;
+}
+
+static inline void phy_packet_phy_config_set_gap_count(u32 *quadlet, unsigned int gap_count)
+{
+ *quadlet &= ~PHY_CONFIG_GAP_COUNT_MASK;
+ *quadlet |= (gap_count << PHY_CONFIG_GAP_COUNT_SHIFT) & PHY_CONFIG_GAP_COUNT_MASK;
+}
+
+#define PHY_PACKET_PACKET_IDENTIFIER_SELF_ID 2
+
+#define SELF_ID_PHY_ID_MASK 0x3f000000
+#define SELF_ID_PHY_ID_SHIFT 24
+#define SELF_ID_EXTENDED_MASK 0x00800000
+#define SELF_ID_EXTENDED_SHIFT 23
+#define SELF_ID_MORE_PACKETS_MASK 0x00000001
+#define SELF_ID_MORE_PACKETS_SHIFT 0
+
+#define SELF_ID_ZERO_LINK_ACTIVE_MASK 0x00400000
+#define SELF_ID_ZERO_LINK_ACTIVE_SHIFT 22
+#define SELF_ID_ZERO_GAP_COUNT_MASK 0x003f0000
+#define SELF_ID_ZERO_GAP_COUNT_SHIFT 16
+#define SELF_ID_ZERO_SCODE_MASK 0x0000c000
+#define SELF_ID_ZERO_SCODE_SHIFT 14
+#define SELF_ID_ZERO_CONTENDER_MASK 0x00000800
+#define SELF_ID_ZERO_CONTENDER_SHIFT 11
+#define SELF_ID_ZERO_POWER_CLASS_MASK 0x00000700
+#define SELF_ID_ZERO_POWER_CLASS_SHIFT 8
+#define SELF_ID_ZERO_INITIATED_RESET_MASK 0x00000002
+#define SELF_ID_ZERO_INITIATED_RESET_SHIFT 1
+
+#define SELF_ID_EXTENDED_SEQUENCE_MASK 0x00700000
+#define SELF_ID_EXTENDED_SEQUENCE_SHIFT 20
+
+#define SELF_ID_PORT_STATUS_MASK 0x3
+
+#define SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT 4
+
+static inline unsigned int phy_packet_self_id_get_phy_id(u32 quadlet)
+{
+ return (quadlet & SELF_ID_PHY_ID_MASK) >> SELF_ID_PHY_ID_SHIFT;
+}
+
+static inline void phy_packet_self_id_set_phy_id(u32 *quadlet, unsigned int phy_id)
+{
+ *quadlet &= ~SELF_ID_PHY_ID_MASK;
+ *quadlet |= (phy_id << SELF_ID_PHY_ID_SHIFT) & SELF_ID_PHY_ID_MASK;
+}
+
+static inline bool phy_packet_self_id_get_extended(u32 quadlet)
+{
+ return (quadlet & SELF_ID_EXTENDED_MASK) >> SELF_ID_EXTENDED_SHIFT;
+}
+
+static inline void phy_packet_self_id_set_extended(u32 *quadlet, bool extended)
+{
+ *quadlet &= ~SELF_ID_EXTENDED_MASK;
+ *quadlet |= (extended << SELF_ID_EXTENDED_SHIFT) & SELF_ID_EXTENDED_MASK;
+}
+
+static inline bool phy_packet_self_id_zero_get_link_active(u32 quadlet)
+{
+ return (quadlet & SELF_ID_ZERO_LINK_ACTIVE_MASK) >> SELF_ID_ZERO_LINK_ACTIVE_SHIFT;
+}
+
+static inline void phy_packet_self_id_zero_set_link_active(u32 *quadlet, bool is_active)
+{
+ *quadlet &= ~SELF_ID_ZERO_LINK_ACTIVE_MASK;
+ *quadlet |= (is_active << SELF_ID_ZERO_LINK_ACTIVE_SHIFT) & SELF_ID_ZERO_LINK_ACTIVE_MASK;
+}
+
+static inline unsigned int phy_packet_self_id_zero_get_gap_count(u32 quadlet)
+{
+ return (quadlet & SELF_ID_ZERO_GAP_COUNT_MASK) >> SELF_ID_ZERO_GAP_COUNT_SHIFT;
+}
+
+static inline void phy_packet_self_id_zero_set_gap_count(u32 *quadlet, unsigned int gap_count)
+{
+ *quadlet &= ~SELF_ID_ZERO_GAP_COUNT_MASK;
+ *quadlet |= (gap_count << SELF_ID_ZERO_GAP_COUNT_SHIFT) & SELF_ID_ZERO_GAP_COUNT_MASK;
+}
+
+static inline unsigned int phy_packet_self_id_zero_get_scode(u32 quadlet)
+{
+ return (quadlet & SELF_ID_ZERO_SCODE_MASK) >> SELF_ID_ZERO_SCODE_SHIFT;
+}
+
+static inline void phy_packet_self_id_zero_set_scode(u32 *quadlet, unsigned int speed)
+{
+ *quadlet &= ~SELF_ID_ZERO_SCODE_MASK;
+ *quadlet |= (speed << SELF_ID_ZERO_SCODE_SHIFT) & SELF_ID_ZERO_SCODE_MASK;
+}
+
+static inline bool phy_packet_self_id_zero_get_contender(u32 quadlet)
+{
+ return (quadlet & SELF_ID_ZERO_CONTENDER_MASK) >> SELF_ID_ZERO_CONTENDER_SHIFT;
+}
+
+static inline void phy_packet_self_id_zero_set_contender(u32 *quadlet, bool is_contender)
+{
+ *quadlet &= ~SELF_ID_ZERO_CONTENDER_MASK;
+ *quadlet |= (is_contender << SELF_ID_ZERO_CONTENDER_SHIFT) & SELF_ID_ZERO_CONTENDER_MASK;
+}
+
+static inline unsigned int phy_packet_self_id_zero_get_power_class(u32 quadlet)
+{
+ return (quadlet & SELF_ID_ZERO_POWER_CLASS_MASK) >> SELF_ID_ZERO_POWER_CLASS_SHIFT;
+}
+
+static inline void phy_packet_self_id_zero_set_power_class(u32 *quadlet, unsigned int power_class)
+{
+ *quadlet &= ~SELF_ID_ZERO_POWER_CLASS_MASK;
+ *quadlet |= (power_class << SELF_ID_ZERO_POWER_CLASS_SHIFT) & SELF_ID_ZERO_POWER_CLASS_MASK;
+}
+
+static inline bool phy_packet_self_id_zero_get_initiated_reset(u32 quadlet)
+{
+ return (quadlet & SELF_ID_ZERO_INITIATED_RESET_MASK) >> SELF_ID_ZERO_INITIATED_RESET_SHIFT;
+}
+
+static inline void phy_packet_self_id_zero_set_initiated_reset(u32 *quadlet, bool is_initiated_reset)
+{
+ *quadlet &= ~SELF_ID_ZERO_INITIATED_RESET_MASK;
+ *quadlet |= (is_initiated_reset << SELF_ID_ZERO_INITIATED_RESET_SHIFT) & SELF_ID_ZERO_INITIATED_RESET_MASK;
+}
+
+static inline bool phy_packet_self_id_get_more_packets(u32 quadlet)
+{
+ return (quadlet & SELF_ID_MORE_PACKETS_MASK) >> SELF_ID_MORE_PACKETS_SHIFT;
+}
+
+static inline void phy_packet_self_id_set_more_packets(u32 *quadlet, bool is_more_packets)
+{
+ *quadlet &= ~SELF_ID_MORE_PACKETS_MASK;
+ *quadlet |= (is_more_packets << SELF_ID_MORE_PACKETS_SHIFT) & SELF_ID_MORE_PACKETS_MASK;
+}
+
+static inline unsigned int phy_packet_self_id_extended_get_sequence(u32 quadlet)
+{
+ return (quadlet & SELF_ID_EXTENDED_SEQUENCE_MASK) >> SELF_ID_EXTENDED_SEQUENCE_SHIFT;
+}
+
+static inline void phy_packet_self_id_extended_set_sequence(u32 *quadlet, unsigned int sequence)
+{
+ *quadlet &= ~SELF_ID_EXTENDED_SEQUENCE_MASK;
+ *quadlet |= (sequence << SELF_ID_EXTENDED_SEQUENCE_SHIFT) & SELF_ID_EXTENDED_SEQUENCE_MASK;
+}
+
+struct self_id_sequence_enumerator {
+ const u32 *cursor;
+ unsigned int quadlet_count;
+};
+
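+// Returns a pointer to the head of the next self ID sequence and stores its length in quadlets
+// via quadlet_count. Returns ERR_PTR(-ENODATA) once the enumerator is exhausted, and
+// ERR_PTR(-EPROTO) when the extended self ID packets are inconsistent or too many.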
+static inline const u32 *self_id_sequence_enumerator_next(
+ struct self_id_sequence_enumerator *enumerator, unsigned int *quadlet_count)
+{
+ const u32 *self_id_sequence, *cursor;
+ u32 quadlet;
+ unsigned int count;
+ unsigned int sequence;
+
+ if (enumerator->cursor == NULL || enumerator->quadlet_count == 0)
+ return ERR_PTR(-ENODATA);
+ cursor = enumerator->cursor;
+ count = 1;
+
+ quadlet = *cursor;
+ sequence = 0;
+ while (phy_packet_self_id_get_more_packets(quadlet)) {
+ if (count >= enumerator->quadlet_count ||
+ count >= SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT)
+ return ERR_PTR(-EPROTO);
+ ++cursor;
+ ++count;
+ quadlet = *cursor;
+
+ if (!phy_packet_self_id_get_extended(quadlet) ||
+ sequence != phy_packet_self_id_extended_get_sequence(quadlet))
+ return ERR_PTR(-EPROTO);
+ ++sequence;
+ }
+
+ *quadlet_count = count;
+ self_id_sequence = enumerator->cursor;
+
+ enumerator->cursor += count;
+ enumerator->quadlet_count -= count;
+
+ return self_id_sequence;
+}
+
+enum phy_packet_self_id_port_status {
+ PHY_PACKET_SELF_ID_PORT_STATUS_NONE = 0,
+ PHY_PACKET_SELF_ID_PORT_STATUS_NCONN = 1,
+ PHY_PACKET_SELF_ID_PORT_STATUS_PARENT = 2,
+ PHY_PACKET_SELF_ID_PORT_STATUS_CHILD = 3,
+};
+
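+// Self ID zero carries up to 3 port entries; each following extended quadlet carries up to 8 more,
+// hence 8 * quadlet_count - 5.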
+static inline unsigned int self_id_sequence_get_port_capacity(unsigned int quadlet_count)
+{
+ return quadlet_count * 8 - 5;
+}
+
+static inline enum phy_packet_self_id_port_status self_id_sequence_get_port_status(
+ const u32 *self_id_sequence, unsigned int quadlet_count, unsigned int port_index)
+{
+ unsigned int index, shift;
+
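+ // Each port entry is 2 bits wide. The offset of 5 places entry 0 at bits 7-6 of quadlet 0;
+ // from entry 3 on, eight entries pack into each extended quadlet starting at bits 17-16.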
+ index = (port_index + 5) / 8;
+ shift = 16 - ((port_index + 5) % 8) * 2;
+
+ if (index < quadlet_count && index < SELF_ID_SEQUENCE_MAXIMUM_QUADLET_COUNT)
+ return (self_id_sequence[index] >> shift) & SELF_ID_PORT_STATUS_MASK;
+
+ return PHY_PACKET_SELF_ID_PORT_STATUS_NONE;
+}
+
+static inline void self_id_sequence_set_port_status(u32 *self_id_sequence, unsigned int quadlet_count,
+ unsigned int port_index,
+ enum phy_packet_self_id_port_status status)
+{
+ unsigned int index, shift;
+
+ index = (port_index + 5) / 8;
+ shift = 16 - ((port_index + 5) % 8) * 2;
+
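+ // Writes beyond the given quadlet count are silently ignored.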
+ if (index < quadlet_count) {
+ self_id_sequence[index] &= ~(SELF_ID_PORT_STATUS_MASK << shift);
+ self_id_sequence[index] |= status << shift;
+ }
+}
+
+#endif // _FIREWIRE_PHY_PACKET_DEFINITIONS_H
diff --git a/drivers/firewire/self-id-sequence-helper-test.c b/drivers/firewire/self-id-sequence-helper-test.c
new file mode 100644
index 000000000000..eed7a2294e64
--- /dev/null
+++ b/drivers/firewire/self-id-sequence-helper-test.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// self-id-sequence-helper-test.c - An application of KUnit to test the helpers for self ID sequences.
+//
+// Copyright (c) 2024 Takashi Sakamoto
+
+#include <kunit/test.h>
+
+#include "phy-packet-definitions.h"
+
+static void test_self_id_sequence_enumerator_valid(struct kunit *test)
+{
+ static const u32 valid_sequences[] = {
+ 0x00000000,
+ 0x00000001, 0x00800000,
+ 0x00000001, 0x00800001, 0x00900000,
+ 0x00000000,
+ };
+ struct self_id_sequence_enumerator enumerator;
+ const u32 *entry;
+ unsigned int quadlet_count;
+
+ enumerator.cursor = valid_sequences;
+ enumerator.quadlet_count = ARRAY_SIZE(valid_sequences);
+
+ entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+ KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[0]);
+ KUNIT_EXPECT_EQ(test, quadlet_count, 1);
+ KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 6);
+
+ entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+ KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[1]);
+ KUNIT_EXPECT_EQ(test, quadlet_count, 2);
+ KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 4);
+
+ entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+ KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[3]);
+ KUNIT_EXPECT_EQ(test, quadlet_count, 3);
+ KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 1);
+
+ entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+ KUNIT_EXPECT_PTR_EQ(test, entry, &valid_sequences[6]);
+ KUNIT_EXPECT_EQ(test, quadlet_count, 1);
+ KUNIT_EXPECT_EQ(test, enumerator.quadlet_count, 0);
+
+ entry = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
+ KUNIT_EXPECT_EQ(test, PTR_ERR(entry), -ENODATA);
+}
+
+static void test_self_id_sequence_enumerator_invalid(struct kunit *test)
+{
+ static const u32 invalid_sequences[] = {
+ 0x00000001,
+ };
+ struct self_id_sequence_enumerator enumerator;
+ const u32 *entry;
+ unsigned int count;
+
+ enumerator.cursor = invalid_sequences;
+ enumerator.quadlet_count = ARRAY_SIZE(invalid_sequences);
+
+ entry = self_id_sequence_enumerator_next(&enumerator, &count);
+ KUNIT_EXPECT_EQ(test, PTR_ERR(entry), -EPROTO);
+}
+
+static void test_self_id_sequence_get_port_status(struct kunit *test)
+{
+ static const u32 expected[] = {
+ 0x000000e5,
+ 0x00839e79,
+ 0x0091e79d,
+ 0x00a279e4,
+ };
+ u32 quadlets[] = {
+ 0x00000001,
+ 0x00800001,
+ 0x00900001,
+ 0x00a00000,
+ };
+ enum phy_packet_self_id_port_status port_status[28];
+ unsigned int port_capacity;
+ unsigned int port_index;
+
+ KUNIT_ASSERT_EQ(test, ARRAY_SIZE(expected), ARRAY_SIZE(quadlets));
+
+ // With an extra port.
+ port_capacity = self_id_sequence_get_port_capacity(ARRAY_SIZE(expected)) + 1;
+ KUNIT_ASSERT_EQ(test, port_capacity, ARRAY_SIZE(port_status));
+
+ for (port_index = 0; port_index < port_capacity; ++port_index) {
+ port_status[port_index] =
+ self_id_sequence_get_port_status(expected, ARRAY_SIZE(expected), port_index);
+ self_id_sequence_set_port_status(quadlets, ARRAY_SIZE(quadlets), port_index,
+ port_status[port_index]);
+ }
+
+ // Self ID zero.
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[0]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[1]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[2]);
+
+ // Self ID one.
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[3]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[4]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[5]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[6]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[7]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[8]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[9]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[10]);
+
+ // Self ID two.
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[11]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[12]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[13]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[14]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[15]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[16]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[17]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[18]);
+
+ // Self ID three.
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[19]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[20]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[21]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[22]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[23]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_CHILD, port_status[24]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_PARENT, port_status[25]);
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NCONN, port_status[26]);
+
+ // Out of order.
+ KUNIT_EXPECT_EQ(test, PHY_PACKET_SELF_ID_PORT_STATUS_NONE, port_status[27]);
+
+ KUNIT_EXPECT_MEMEQ(test, quadlets, expected, sizeof(expected));
+}
+
+static struct kunit_case self_id_sequence_helper_test_cases[] = {
+ KUNIT_CASE(test_self_id_sequence_enumerator_valid),
+ KUNIT_CASE(test_self_id_sequence_enumerator_invalid),
+ KUNIT_CASE(test_self_id_sequence_get_port_status),
+ {}
+};
+
+static struct kunit_suite self_id_sequence_helper_test_suite = {
+ .name = "self-id-sequence-helper",
+ .test_cases = self_id_sequence_helper_test_cases,
+};
+kunit_test_suite(self_id_sequence_helper_test_suite);
+
+MODULE_DESCRIPTION("Unit test suite for helpers of self ID sequence");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
index d0ef93551c44..3782d0a187d1 100644
--- a/drivers/firmware/efi/libstub/loongarch.c
+++ b/drivers/firmware/efi/libstub/loongarch.c
@@ -74,6 +74,8 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
/* Config Direct Mapping */
csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
+ csr_write64(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2);
+ csr_write64(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3);
real_kernel_entry = (void *)kernel_entry_address(kernel_addr, image);
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
index 01f0f90ea418..fa71cd898120 100644
--- a/drivers/firmware/efi/riscv-runtime.c
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -152,3 +152,16 @@ void arch_efi_call_virt_teardown(void)
{
efi_virtmap_unload();
}
+
+static int __init riscv_dmi_init(void)
+{
+ /*
+ * On riscv, DMI depends on UEFI, and dmi_setup() needs to
+ * be called early because dmi_id_init(), which is an arch_initcall
+ * itself, depends on dmi_scan_machine() having been called already.
+ */
+ dmi_setup();
+
+ return 0;
+}
+core_initcall(riscv_dmi_init);
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index da35ca9e84a6..f7157c1d77d8 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -656,17 +656,16 @@ static int occ_probe(struct platform_device *pdev)
rc = of_property_read_u32(dev->of_node, "reg", &reg);
if (!rc) {
/* make sure we don't have a duplicate from dts */
- occ->idx = ida_simple_get(&occ_ida, reg, reg + 1,
- GFP_KERNEL);
+ occ->idx = ida_alloc_range(&occ_ida, reg, reg,
+ GFP_KERNEL);
if (occ->idx < 0)
- occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX,
- GFP_KERNEL);
+ occ->idx = ida_alloc_min(&occ_ida, 1,
+ GFP_KERNEL);
} else {
- occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX,
- GFP_KERNEL);
+ occ->idx = ida_alloc_min(&occ_ida, 1, GFP_KERNEL);
}
} else {
- occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX, GFP_KERNEL);
+ occ->idx = ida_alloc_min(&occ_ida, 1, GFP_KERNEL);
}
platform_set_drvdata(pdev, occ);
@@ -680,7 +679,7 @@ static int occ_probe(struct platform_device *pdev)
rc = misc_register(&occ->mdev);
if (rc) {
dev_err(dev, "failed to register miscdevice: %d\n", rc);
- ida_simple_remove(&occ_ida, occ->idx);
+ ida_free(&occ_ida, occ->idx);
kvfree(occ->buffer);
return rc;
}
@@ -719,7 +718,7 @@ static int occ_remove(struct platform_device *pdev)
else
device_for_each_child(&pdev->dev, NULL, occ_unregister_of_child);
- ida_simple_remove(&occ_ida, occ->idx);
+ ida_free(&occ_ida, occ->idx);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 090724fa766c..d54162ce0f99 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -340,7 +340,7 @@ static int eb_create(struct i915_execbuffer *eb)
* Without a 1:1 association between relocation handles and
* the execobject[] index, we instead create a hashtable.
* We size it dynamically based on available memory, starting
- * first with 1:1 assocative hash and scaling back until
+ * first with 1:1 associative hash and scaling back until
* the allocation succeeds.
*
* Later on we use a positive lut_size to indicate we are
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 42c2d8a99509..d4ade9325401 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -42,12 +42,11 @@ static int vbox_accel_init(struct vbox_private *vbox)
/* Take a command buffer for each screen from the end of usable VRAM. */
vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
- vbox->vbva_buffers = pci_iomap_range(pdev, 0,
- vbox->available_vram_size,
- vbox->num_crtcs *
- VBVA_MIN_BUFFER_SIZE);
- if (!vbox->vbva_buffers)
- return -ENOMEM;
+ vbox->vbva_buffers = pcim_iomap_range(
+ pdev, 0, vbox->available_vram_size,
+ vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE);
+ if (IS_ERR(vbox->vbva_buffers))
+ return PTR_ERR(vbox->vbva_buffers);
for (i = 0; i < vbox->num_crtcs; ++i) {
vbva_setup_buffer_context(&vbox->vbva_info[i],
@@ -116,11 +115,10 @@ int vbox_hw_init(struct vbox_private *vbox)
DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
/* Map guest-heap at end of vram */
- vbox->guest_heap =
- pci_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox),
- GUEST_HEAP_SIZE);
- if (!vbox->guest_heap)
- return -ENOMEM;
+ vbox->guest_heap = pcim_iomap_range(pdev, 0,
+ GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_SIZE);
+ if (IS_ERR(vbox->guest_heap))
+ return PTR_ERR(vbox->guest_heap);
/* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1,
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 10926359e6d2..afe470f3661c 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -14,7 +14,6 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/if_phonet.h>
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 0e7427c2baf5..c38dcdfcb914 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -683,9 +683,8 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
if (!PageOffline(pg))
__SetPageOffline(pg);
return;
- }
- if (PageOffline(pg))
- __ClearPageOffline(pg);
+ } else if (!PageOffline(pg))
+ return;
/* This frame is currently backed; online the page. */
generic_online_page(pg, 0);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 14464716bacb..d078bdc48c38 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -26,6 +26,7 @@ config ARM_GIC_V2M
bool
depends on PCI
select ARM_GIC
+ select IRQ_MSI_LIB
select PCI_MSI
config GIC_NON_BANKED
@@ -41,6 +42,7 @@ config ARM_GIC_V3
config ARM_GIC_V3_ITS
bool
select GENERIC_MSI_IRQ
+ select IRQ_MSI_LIB
default ARM_GIC_V3
config ARM_GIC_V3_ITS_PCI
@@ -74,6 +76,9 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
+config IRQ_MSI_LIB
+ bool
+
config ARMADA_370_XP_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -169,6 +174,18 @@ config IXP4XX_IRQ
select IRQ_DOMAIN
select SPARSE_IRQ
+config LAN966X_OIC
+ tristate "Microchip LAN966x OIC Support"
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ help
+ Enable support for the LAN966x Outbound Interrupt Controller.
+ This controller is present on the Microchip LAN966x PCI device and
+ maps the internal interrupt sources to a PCIe interrupt.
+
+ To compile this driver as a module, choose M here: the module
+ will be called irq-lan966x-oic.
+
config MADERA_IRQ
tristate
@@ -366,6 +383,7 @@ config MSCC_OCELOT_IRQ
select GENERIC_IRQ_CHIP
config MVEBU_GICP
+ select IRQ_MSI_LIB
bool
config MVEBU_ICU
@@ -373,6 +391,7 @@ config MVEBU_ICU
config MVEBU_ODMI
bool
+ select IRQ_MSI_LIB
select GENERIC_MSI_IRQ
config MVEBU_PIC
@@ -392,6 +411,15 @@ config LS_SCFG_MSI
config PARTITION_PERCPU
bool
+config STM32MP_EXTI
+ tristate "STM32MP extended interrupts and event controller"
+ depends on (ARCH_STM32 && !ARM_SINGLE_ARMV7M) || COMPILE_TEST
+ default y
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_CHIP
+ help
+ Support the STM32MP EXTI (extended interrupts and event) controller.
+
config STM32_EXTI
bool
select IRQ_DOMAIN
@@ -487,6 +515,7 @@ config IMX_MU_MSI
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
select GENERIC_MSI_IRQ
+ select IRQ_MSI_LIB
help
Provide a driver for the i.MX Messaging Unit block used as a
CPU-to-CPU MSI controller. This requires a specially crafted DT
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d9dc3d99aaa8..15635812b2d6 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -29,10 +29,10 @@ obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
+obj-$(CONFIG_IRQ_MSI_LIB) += irq-msi-lib.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
-obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o irq-gic-v3-its-msi-parent.o
obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
@@ -84,6 +84,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
+obj-$(CONFIG_STM32MP_EXTI) += irq-stm32mp-exti.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
@@ -104,6 +105,7 @@ obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
+obj-$(CONFIG_LAN966X_OIC) += irq-lan966x-oic.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 4b021a67bdfe..dce2b80bf439 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -13,6 +13,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -29,6 +30,7 @@
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
+#include <linux/types.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
@@ -135,6 +137,7 @@
#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
+/* IPI and MSI interrupt definitions for IPI platforms */
#define IPI_DOORBELL_START (0)
#define IPI_DOORBELL_END (8)
#define IPI_DOORBELL_MASK 0xFF
@@ -143,6 +146,14 @@
#define PCI_MSI_DOORBELL_END (32)
#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
+/* MSI interrupt definitions for non-IPI platforms */
+#define PCI_MSI_FULL_DOORBELL_START 0
+#define PCI_MSI_FULL_DOORBELL_NR 32
+#define PCI_MSI_FULL_DOORBELL_END 32
+#define PCI_MSI_FULL_DOORBELL_MASK GENMASK(31, 0)
+#define PCI_MSI_FULL_DOORBELL_SRC0_MASK GENMASK(15, 0)
+#define PCI_MSI_FULL_DOORBELL_SRC1_MASK GENMASK(31, 16)
+
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
@@ -151,11 +162,46 @@ static int parent_irq;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static struct irq_domain *armada_370_xp_msi_inner_domain;
-static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
+static DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif
+static inline bool is_ipi_available(void)
+{
+ /*
+ * We distinguish IPI availability by whether the IC has a parent IRQ
+ * defined. If a parent IRQ is defined, there is a parent
+ * interrupt controller (e.g. GIC) that takes care of inter-processor
+ * interrupts.
+ */
+ return parent_irq <= 0;
+}
+
+static inline u32 msi_doorbell_mask(void)
+{
+ return is_ipi_available() ? PCI_MSI_DOORBELL_MASK :
+ PCI_MSI_FULL_DOORBELL_MASK;
+}
+
+static inline unsigned int msi_doorbell_start(void)
+{
+ return is_ipi_available() ? PCI_MSI_DOORBELL_START :
+ PCI_MSI_FULL_DOORBELL_START;
+}
+
+static inline unsigned int msi_doorbell_size(void)
+{
+ return is_ipi_available() ? PCI_MSI_DOORBELL_NR :
+ PCI_MSI_FULL_DOORBELL_NR;
+}
+
+static inline unsigned int msi_doorbell_end(void)
+{
+ return is_ipi_available() ? PCI_MSI_DOORBELL_END :
+ PCI_MSI_FULL_DOORBELL_END;
+}
+
static inline bool is_percpu_irq(irq_hw_number_t irq)
{
if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
@@ -213,7 +259,7 @@ static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg
msg->address_lo = lower_32_bits(msi_doorbell_addr);
msg->address_hi = upper_32_bits(msi_doorbell_addr);
- msg->data = BIT(cpu + 8) | (data->hwirq + PCI_MSI_DOORBELL_START);
+ msg->data = BIT(cpu + 8) | (data->hwirq + msi_doorbell_start());
}
static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
@@ -246,7 +292,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
int hwirq, i;
mutex_lock(&msi_used_lock);
- hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
+ hwirq = bitmap_find_free_region(msi_used, msi_doorbell_size(),
order_base_2(nr_irqs));
mutex_unlock(&msi_used_lock);
@@ -283,9 +329,10 @@ static void armada_370_xp_msi_reenable_percpu(void)
u32 reg;
/* Enable MSI doorbell mask and combined cpu local interrupt */
- reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
- | PCI_MSI_DOORBELL_MASK;
+ reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ reg |= msi_doorbell_mask();
writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
/* Unmask local doorbell interrupt */
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
@@ -297,7 +344,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
ARMADA_370_XP_SW_TRIG_INT_OFFS;
armada_370_xp_msi_inner_domain =
- irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+ irq_domain_add_linear(NULL, msi_doorbell_size(),
&armada_370_xp_msi_domain_ops, NULL);
if (!armada_370_xp_msi_inner_domain)
return -ENOMEM;
@@ -313,6 +360,10 @@ static int armada_370_xp_msi_init(struct device_node *node,
armada_370_xp_msi_reenable_percpu();
+ /* Unmask low 16 MSI irqs on non-IPI platforms */
+ if (!is_ipi_available())
+ writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
return 0;
}
#else
@@ -461,24 +512,18 @@ static __init void armada_xp_ipi_init(struct device_node *node)
set_smp_ipi_range(base_ipi, IPI_DOORBELL_END);
}
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
-
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long reg, mask;
int cpu;
/* Select a single core from the affinity mask which is online */
cpu = cpumask_any_and(mask_val, cpu_online_mask);
- mask = 1UL << cpu_logical_map(cpu);
- raw_spin_lock(&irq_controller_lock);
- reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
- reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
- writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
- raw_spin_unlock(&irq_controller_lock);
+ atomic_io_modify(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq),
+ ARMADA_370_XP_INT_SOURCE_CPU_MASK,
+ BIT(cpu_logical_map(cpu)));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -496,6 +541,9 @@ static void armada_xp_mpic_smp_cpu_init(void)
for (i = 0; i < nr_irqs; i++)
writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+ if (!is_ipi_available())
+ return;
+
/* Disable all IPIs */
writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
@@ -527,7 +575,8 @@ static void armada_xp_mpic_reenable_percpu(void)
armada_370_xp_irq_unmask(data);
}
- ipi_resume();
+ if (is_ipi_available())
+ ipi_resume();
armada_370_xp_msi_reenable_percpu();
}
@@ -566,6 +615,10 @@ static struct irq_chip armada_370_xp_irq_chip = {
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
unsigned int virq, irq_hw_number_t hw)
{
+ /* IRQs 0 and 1 cannot be mapped, they are handled internally */
+ if (hw <= 1)
+ return -EINVAL;
+
armada_370_xp_irq_mask(irq_get_irq_data(virq));
if (!is_percpu_irq(hw))
writel(hw, per_cpu_int_base +
@@ -599,20 +652,20 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
u32 msimask, msinr;
msimask = readl_relaxed(per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
- & PCI_MSI_DOORBELL_MASK;
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+ msimask &= msi_doorbell_mask();
writel(~msimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
- for (msinr = PCI_MSI_DOORBELL_START;
- msinr < PCI_MSI_DOORBELL_END; msinr++) {
+ for (msinr = msi_doorbell_start();
+ msinr < msi_doorbell_end(); msinr++) {
unsigned int irq;
if (!(msimask & BIT(msinr)))
continue;
- irq = msinr - PCI_MSI_DOORBELL_START;
+ irq = msinr - msi_doorbell_start();
generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq);
}
@@ -641,7 +694,7 @@ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
continue;
- if (irqn == 1) {
+ if (irqn == 0 || irqn == 1) {
armada_370_xp_handle_msi_irq(NULL, true);
continue;
}
@@ -702,6 +755,7 @@ static int armada_370_xp_mpic_suspend(void)
static void armada_370_xp_mpic_resume(void)
{
+ bool src0, src1;
int nirqs;
irq_hw_number_t irq;
@@ -741,12 +795,22 @@ static void armada_370_xp_mpic_resume(void)
/* Reconfigure doorbells for IPIs and MSIs */
writel(doorbell_mask_reg,
per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
- if (doorbell_mask_reg & IPI_DOORBELL_MASK)
+
+ if (is_ipi_available()) {
+ src0 = doorbell_mask_reg & IPI_DOORBELL_MASK;
+ src1 = doorbell_mask_reg & PCI_MSI_DOORBELL_MASK;
+ } else {
+ src0 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
+ src1 = doorbell_mask_reg & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
+ }
+
+ if (src0)
writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
- if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
+ if (src1)
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
- ipi_resume();
+ if (is_ipi_available())
+ ipi_resume();
}
static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
@@ -791,13 +855,18 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
BUG_ON(!armada_370_xp_mpic_domain);
irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
+ /*
+ * Initialize parent_irq before calling any other functions, since it is
+ * used to distinguish between IPI and non-IPI platforms.
+ */
+ parent_irq = irq_of_parse_and_map(node, 0);
+
/* Setup for the boot CPU */
armada_xp_mpic_perf_init();
armada_xp_mpic_smp_cpu_init();
armada_370_xp_msi_init(node, main_int_res.start);
- parent_irq = irq_of_parse_and_map(node, 0);
if (parent_irq <= 0) {
irq_set_default_host(armada_370_xp_mpic_domain);
set_handle_irq(armada_370_xp_handle_irq);
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index e94e2882286c..6c20604c2242 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -102,7 +102,9 @@ static void armctrl_unmask_irq(struct irq_data *d)
static struct irq_chip armctrl_chip = {
.name = "ARMCTRL-level",
.irq_mask = armctrl_mask_irq,
- .irq_unmask = armctrl_unmask_irq
+ .irq_unmask = armctrl_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
};
static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index e8eab72ef195..020ecdf16901 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/irqdomain.h>
+#include <linux/msi.h>
#include <linux/irqchip/arm-gic-common.h>
struct gic_quirk {
@@ -28,6 +29,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void gic_enable_of_quirks(const struct device_node *np,
const struct gic_quirk *quirks, void *data);
+extern const struct msi_parent_ops gic_v3_its_msi_parent_ops;
+
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2)
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index f2ff4387870d..51af63c046ed 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -26,6 +26,8 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-common.h>
+#include "irq-msi-lib.h"
+
/*
* MSI_TYPER:
* [31:26] Reserved
@@ -72,31 +74,6 @@ struct v2m_data {
u32 flags; /* v2m flags for specific implementation */
};
-static void gicv2m_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void gicv2m_unmask_msi_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip gicv2m_msi_irq_chip = {
- .name = "MSI",
- .irq_mask = gicv2m_mask_msi_irq,
- .irq_unmask = gicv2m_unmask_msi_irq,
- .irq_eoi = irq_chip_eoi_parent,
-};
-
-static struct msi_domain_info gicv2m_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
- .chip = &gicv2m_msi_irq_chip,
-};
-
static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
{
if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
@@ -230,6 +207,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops gicv2m_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = gicv2m_irq_domain_alloc,
.free = gicv2m_irq_domain_free,
};
@@ -250,19 +228,6 @@ static bool is_msi_spi_valid(u32 base, u32 num)
return true;
}
-static struct irq_chip gicv2m_pmsi_irq_chip = {
- .name = "pMSI",
-};
-
-static struct msi_domain_ops gicv2m_pmsi_ops = {
-};
-
-static struct msi_domain_info gicv2m_pmsi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .ops = &gicv2m_pmsi_ops,
- .chip = &gicv2m_pmsi_irq_chip,
-};
-
static void __init gicv2m_teardown(void)
{
struct v2m_data *v2m, *tmp;
@@ -278,9 +243,27 @@ static void __init gicv2m_teardown(void)
}
}
+
+#define GICV2M_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define GICV2M_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static struct msi_parent_ops gicv2m_msi_parent_ops = {
+ .supported_flags = GICV2M_MSI_FLAGS_SUPPORTED,
+ .required_flags = GICV2M_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .prefix = "GICv2m-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static __init int gicv2m_allocate_domains(struct irq_domain *parent)
{
- struct irq_domain *inner_domain, *pci_domain, *plat_domain;
+ struct irq_domain *inner_domain;
struct v2m_data *v2m;
v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
@@ -295,22 +278,8 @@ static __init int gicv2m_allocate_domains(struct irq_domain *parent)
}
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
- pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
- &gicv2m_msi_domain_info,
- inner_domain);
- plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
- &gicv2m_pmsi_domain_info,
- inner_domain);
- if (!pci_domain || !plat_domain) {
- pr_err("Failed to create MSI domains\n");
- if (plat_domain)
- irq_domain_remove(plat_domain);
- if (pci_domain)
- irq_domain_remove(pci_domain);
- irq_domain_remove(inner_domain);
- return -ENOMEM;
- }
-
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ inner_domain->msi_parent_ops = &gicv2m_msi_parent_ops;
return 0;
}
@@ -511,7 +480,7 @@ acpi_parse_madt_msi(union acpi_subtable_headers *header,
pr_info("applying Amazon Graviton quirk\n");
res.end = res.start + SZ_8K - 1;
flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
- gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
+ gicv2m_msi_parent_ops.supported_flags &= ~MSI_FLAG_MULTI_PCI_MSI;
}
if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
new file mode 100644
index 000000000000..e150365fbe89
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+// Author: Marc Zyngier <marc.zyngier@arm.com>
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#include <linux/acpi_iort.h>
+#include <linux/pci.h>
+
+#include "irq-gic-common.h"
+#include "irq-msi-lib.h"
+
+#define ITS_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define ITS_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+#ifdef CONFIG_PCI_MSI
+static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
+{
+ int msi, msix, *count = data;
+
+ msi = max(pci_msi_vec_count(pdev), 0);
+ msix = max(pci_msix_vec_count(pdev), 0);
+ *count += max(msi, msix);
+
+ return 0;
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct pci_dev **alias_dev = data;
+
+ *alias_dev = pdev;
+
+ return 0;
+}
+
+static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct pci_dev *pdev, *alias_dev;
+ struct msi_domain_info *msi_info;
+ int alias_count = 0, minnvec = 1;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ pdev = to_pci_dev(dev);
+ /*
+ * If pdev is downstream of any aliasing bridges, take an upper
+ * bound of how many other vectors could map to the same DevID.
+ * Also tell the ITS that the signalling will come from a proxy
+ * device, and that special allocation rules apply.
+ */
+ pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
+ if (alias_dev != pdev) {
+ if (alias_dev->subordinate)
+ pci_walk_bus(alias_dev->subordinate,
+ its_pci_msi_vec_count, &alias_count);
+ info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
+ }
+
+ /* ITS specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev);
+
+ /*
+ * @domain->msi_domain_info->hwsize contains the size of the
+ * MSI[-X] domain, but vector allocation happens one by one. This
+ * needs some thought when MSI comes into play as the size of MSI
+ * might be unknown at domain creation time and therefore set to
+ * MSI_MAX_INDEX.
+ */
+ msi_info = msi_get_domain_info(domain);
+ if (msi_info->hwsize > nvec)
+ nvec = msi_info->hwsize;
+
+ /*
+ * Always allocate a power of 2, and special case device 0 for
+ * broken systems where the DevID is not wired (and all devices
+ * appear as DevID 0). For that reason, we generously allocate a
+ * minimum of 32 MSIs for DevID 0. If you want more because all
+ * your devices are aliasing to DevID 0, consider fixing your HW.
+ */
+ nvec = max(nvec, alias_count);
+ if (!info->scratchpad[0].ul)
+ minnvec = 32;
+ nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+
+ msi_info = msi_get_domain_info(domain->parent);
+ return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
+}
+#else /* CONFIG_PCI_MSI */
+#define its_pci_msi_prepare NULL
+#endif /* !CONFIG_PCI_MSI */
+
+static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
+ u32 *dev_id)
+{
+ int ret, index = 0;
+
+ /* Suck the DeviceID out of the msi-parent property */
+ do {
+ struct of_phandle_args args;
+
+ ret = of_parse_phandle_with_args(dev->of_node,
+ "msi-parent", "#msi-cells",
+ index, &args);
+ if (args.np == irq_domain_get_of_node(domain)) {
+ if (WARN_ON(args.args_count != 1))
+ return -EINVAL;
+ *dev_id = args.args[0];
+ break;
+ }
+ index++;
+ } while (!ret);
+
+ return ret;
+}
+
+int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
+{
+ return -1;
+}
+
+static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
+ int ret;
+
+ if (dev->of_node)
+ ret = of_pmsi_get_dev_id(domain->parent, dev, &dev_id);
+ else
+ ret = iort_pmsi_get_dev_id(dev, &dev_id);
+ if (ret)
+ return ret;
+
+ /* ITS specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = dev_id;
+
+ /*
+ * @domain->msi_domain_info->hwsize contains the size of the device
+ * domain, but vector allocation happens one by one.
+ */
+ msi_info = msi_get_domain_info(domain);
+ if (msi_info->hwsize > nvec)
+ nvec = msi_info->hwsize;
+
+ /* Allocate at least 32 MSIs, and always as a power of 2 */
+ nvec = max_t(int, 32, roundup_pow_of_two(nvec));
+
+ msi_info = msi_get_domain_info(domain->parent);
+ return msi_info->ops->msi_prepare(domain->parent,
+ dev, nvec, info);
+}
+
+static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ switch (info->bus_token) {
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ /*
+ * FIXME: This probably should be done after a (not yet
+ * existing) post domain creation callback once to make
+ * support for dynamic post-enable MSI-X allocations
+ * work without having to reevaluate the domain size
+ * over and over. It is known already at allocation
+ * time via info->hwsize.
+ *
+ * That should work perfectly fine for MSI/MSI-X but needs
+ * some thoughts for purely software managed MSI domains
+ * where the index space is only limited artificially via
+ * %MSI_MAX_INDEX.
+ */
+ info->ops->msi_prepare = its_pci_msi_prepare;
+ break;
+ case DOMAIN_BUS_DEVICE_MSI:
+ case DOMAIN_BUS_WIRED_TO_MSI:
+ /*
+ * FIXME: See the above PCI prepare comment. The domain
+ * size is also known at domain creation time.
+ */
+ info->ops->msi_prepare = its_pmsi_prepare;
+ break;
+ default:
+ /* Confused. How did the lib return true? */
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ return true;
+}
+
+const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
+ .supported_flags = ITS_MSI_FLAGS_SUPPORTED,
+ .required_flags = ITS_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .prefix = "ITS-",
+ .init_dev_msi_info = its_init_dev_msi_info,
+};
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
deleted file mode 100644
index 93f77a8196da..000000000000
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/acpi_iort.h>
-#include <linux/pci.h>
-#include <linux/msi.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_pci.h>
-
-static void its_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void its_unmask_msi_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip its_msi_irq_chip = {
- .name = "ITS-MSI",
- .irq_unmask = its_unmask_msi_irq,
- .irq_mask = its_mask_msi_irq,
- .irq_eoi = irq_chip_eoi_parent,
-};
-
-static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
-{
- int msi, msix, *count = data;
-
- msi = max(pci_msi_vec_count(pdev), 0);
- msix = max(pci_msix_vec_count(pdev), 0);
- *count += max(msi, msix);
-
- return 0;
-}
-
-static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
- struct pci_dev **alias_dev = data;
-
- *alias_dev = pdev;
-
- return 0;
-}
-
-static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *info)
-{
- struct pci_dev *pdev, *alias_dev;
- struct msi_domain_info *msi_info;
- int alias_count = 0, minnvec = 1;
-
- if (!dev_is_pci(dev))
- return -EINVAL;
-
- msi_info = msi_get_domain_info(domain->parent);
-
- pdev = to_pci_dev(dev);
- /*
- * If pdev is downstream of any aliasing bridges, take an upper
- * bound of how many other vectors could map to the same DevID.
- * Also tell the ITS that the signalling will come from a proxy
- * device, and that special allocation rules apply.
- */
- pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
- if (alias_dev != pdev) {
- if (alias_dev->subordinate)
- pci_walk_bus(alias_dev->subordinate,
- its_pci_msi_vec_count, &alias_count);
- info->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
- }
-
- /* ITS specific DeviceID, as the core ITS ignores dev. */
- info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
-
- /*
- * Always allocate a power of 2, and special case device 0 for
- * broken systems where the DevID is not wired (and all devices
- * appear as DevID 0). For that reason, we generously allocate a
- * minimum of 32 MSIs for DevID 0. If you want more because all
- * your devices are aliasing to DevID 0, consider fixing your HW.
- */
- nvec = max(nvec, alias_count);
- if (!info->scratchpad[0].ul)
- minnvec = 32;
- nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
- return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
-}
-
-static struct msi_domain_ops its_pci_msi_ops = {
- .msi_prepare = its_pci_msi_prepare,
-};
-
-static struct msi_domain_info its_pci_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .ops = &its_pci_msi_ops,
- .chip = &its_msi_irq_chip,
-};
-
-static struct of_device_id its_device_id[] = {
- { .compatible = "arm,gic-v3-its", },
- {},
-};
-
-static int __init its_pci_msi_init_one(struct fwnode_handle *handle,
- const char *name)
-{
- struct irq_domain *parent;
-
- parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS);
- if (!parent || !msi_get_domain_info(parent)) {
- pr_err("%s: Unable to locate ITS domain\n", name);
- return -ENXIO;
- }
-
- if (!pci_msi_create_irq_domain(handle, &its_pci_msi_domain_info,
- parent)) {
- pr_err("%s: Unable to create PCI domain\n", name);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int __init its_pci_of_msi_init(void)
-{
- struct device_node *np;
-
- for (np = of_find_matching_node(NULL, its_device_id); np;
- np = of_find_matching_node(np, its_device_id)) {
- if (!of_device_is_available(np))
- continue;
- if (!of_property_read_bool(np, "msi-controller"))
- continue;
-
- if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
- continue;
-
- pr_info("PCI/MSI: %pOF domain created\n", np);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_ACPI
-
-static int __init
-its_pci_msi_parse_madt(union acpi_subtable_headers *header,
- const unsigned long end)
-{
- struct acpi_madt_generic_translator *its_entry;
- struct fwnode_handle *dom_handle;
- const char *node_name;
- int err = -ENXIO;
-
- its_entry = (struct acpi_madt_generic_translator *)header;
- node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
- (long)its_entry->base_address);
- dom_handle = iort_find_domain_token(its_entry->translation_id);
- if (!dom_handle) {
- pr_err("%s: Unable to locate ITS domain handle\n", node_name);
- goto out;
- }
-
- err = its_pci_msi_init_one(dom_handle, node_name);
- if (!err)
- pr_info("PCI/MSI: %s domain created\n", node_name);
-
-out:
- kfree(node_name);
- return err;
-}
-
-static int __init its_pci_acpi_msi_init(void)
-{
- acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
- its_pci_msi_parse_madt, 0);
- return 0;
-}
-#else
-static int __init its_pci_acpi_msi_init(void)
-{
- return 0;
-}
-#endif
-
-static int __init its_pci_msi_init(void)
-{
- its_pci_of_msi_init();
- its_pci_acpi_msi_init();
-
- return 0;
-}
-early_initcall(its_pci_msi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
deleted file mode 100644
index daa6d5053bc3..000000000000
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/acpi_iort.h>
-#include <linux/device.h>
-#include <linux/msi.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-
-static struct irq_chip its_pmsi_irq_chip = {
- .name = "ITS-pMSI",
-};
-
-static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
- u32 *dev_id)
-{
- int ret, index = 0;
-
- /* Suck the DeviceID out of the msi-parent property */
- do {
- struct of_phandle_args args;
-
- ret = of_parse_phandle_with_args(dev->of_node,
- "msi-parent", "#msi-cells",
- index, &args);
- if (args.np == irq_domain_get_of_node(domain)) {
- if (WARN_ON(args.args_count != 1))
- return -EINVAL;
- *dev_id = args.args[0];
- break;
- }
- index++;
- } while (!ret);
-
- return ret;
-}
-
-int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
-{
- return -1;
-}
-
-static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *info)
-{
- struct msi_domain_info *msi_info;
- u32 dev_id;
- int ret;
-
- msi_info = msi_get_domain_info(domain->parent);
-
- if (dev->of_node)
- ret = of_pmsi_get_dev_id(domain, dev, &dev_id);
- else
- ret = iort_pmsi_get_dev_id(dev, &dev_id);
- if (ret)
- return ret;
-
- /* ITS specific DeviceID, as the core ITS ignores dev. */
- info->scratchpad[0].ul = dev_id;
-
- /* Allocate at least 32 MSIs, and always as a power of 2 */
- nvec = max_t(int, 32, roundup_pow_of_two(nvec));
- return msi_info->ops->msi_prepare(domain->parent,
- dev, nvec, info);
-}
-
-static struct msi_domain_ops its_pmsi_ops = {
- .msi_prepare = its_pmsi_prepare,
-};
-
-static struct msi_domain_info its_pmsi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .ops = &its_pmsi_ops,
- .chip = &its_pmsi_irq_chip,
-};
-
-static const struct of_device_id its_device_id[] = {
- { .compatible = "arm,gic-v3-its", },
- {},
-};
-
-static int __init its_pmsi_init_one(struct fwnode_handle *fwnode,
- const char *name)
-{
- struct irq_domain *parent;
-
- parent = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_NEXUS);
- if (!parent || !msi_get_domain_info(parent)) {
- pr_err("%s: unable to locate ITS domain\n", name);
- return -ENXIO;
- }
-
- if (!platform_msi_create_irq_domain(fwnode, &its_pmsi_domain_info,
- parent)) {
- pr_err("%s: unable to create platform domain\n", name);
- return -ENXIO;
- }
-
- pr_info("Platform MSI: %s domain created\n", name);
- return 0;
-}
-
-#ifdef CONFIG_ACPI
-static int __init
-its_pmsi_parse_madt(union acpi_subtable_headers *header,
- const unsigned long end)
-{
- struct acpi_madt_generic_translator *its_entry;
- struct fwnode_handle *domain_handle;
- const char *node_name;
- int err = -ENXIO;
-
- its_entry = (struct acpi_madt_generic_translator *)header;
- node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
- (long)its_entry->base_address);
- domain_handle = iort_find_domain_token(its_entry->translation_id);
- if (!domain_handle) {
- pr_err("%s: Unable to locate ITS domain handle\n", node_name);
- goto out;
- }
-
- err = its_pmsi_init_one(domain_handle, node_name);
-
-out:
- kfree(node_name);
- return err;
-}
-
-static void __init its_pmsi_acpi_init(void)
-{
- acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
- its_pmsi_parse_madt, 0);
-}
-#else
-static inline void its_pmsi_acpi_init(void) { }
-#endif
-
-static void __init its_pmsi_of_init(void)
-{
- struct device_node *np;
-
- for (np = of_find_matching_node(NULL, its_device_id); np;
- np = of_find_matching_node(np, its_device_id)) {
- if (!of_device_is_available(np))
- continue;
- if (!of_property_read_bool(np, "msi-controller"))
- continue;
-
- its_pmsi_init_one(of_node_to_fwnode(np), np->full_name);
- }
-}
-
-static int __init its_pmsi_init(void)
-{
- its_pmsi_of_init();
- its_pmsi_acpi_init();
- return 0;
-}
-early_initcall(its_pmsi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 42e63272154e..9b34596b3542 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -38,6 +38,7 @@
#include <asm/exception.h>
#include "irq-gic-common.h"
+#include "irq-msi-lib.h"
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
@@ -1317,7 +1318,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
{
struct its_cmd_desc desc = {};
struct its_node *its;
- unsigned long flags;
int col_id = vpe->col_idx;
desc.its_vmovp_cmd.vpe = vpe;
@@ -1330,6 +1330,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
}
/*
+ * Protect against concurrent updates of the mapping state on
+ * individual VMs.
+ */
+ guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
+
+ /*
* Yet another marvel of the architecture. If using the
* its_list "feature", we need to make sure that all ITSs
* receive all VMOVP commands in the same order. The only way
@@ -1337,8 +1343,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
*
* Wall <-- Head.
*/
- raw_spin_lock_irqsave(&vmovp_lock, flags);
-
+ guard(raw_spinlock)(&vmovp_lock);
desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
@@ -1353,8 +1358,6 @@ static void its_send_vmovp(struct its_vpe *vpe)
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
}
-
- raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
@@ -1791,12 +1794,10 @@ static bool gic_requires_eager_mapping(void)
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
- unsigned long flags;
-
if (gic_requires_eager_mapping())
return;
- raw_spin_lock_irqsave(&vmovp_lock, flags);
+ guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
/*
* If the VM wasn't mapped yet, iterate over the vpes and get
@@ -1809,37 +1810,31 @@ static void its_map_vm(struct its_node *its, struct its_vm *vm)
for (i = 0; i < vm->nr_vpes; i++) {
struct its_vpe *vpe = vm->vpes[i];
- struct irq_data *d = irq_get_irq_data(vpe->irq);
- /* Map the VPE to the first possible CPU */
- vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(its, vpe, true);
+ scoped_guard(raw_spinlock, &vpe->vpe_lock)
+ its_send_vmapp(its, vpe, true);
+
its_send_vinvall(its, vpe);
- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
}
}
-
- raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
- unsigned long flags;
-
/* Not using the ITS list? Everything is always mapped. */
if (gic_requires_eager_mapping())
return;
- raw_spin_lock_irqsave(&vmovp_lock, flags);
+ guard(raw_spinlock_irqsave)(&vm->vmapp_lock);
if (!--vm->vlpi_count[its->list_nr]) {
int i;
- for (i = 0; i < vm->nr_vpes; i++)
+ for (i = 0; i < vm->nr_vpes; i++) {
+ guard(raw_spinlock)(&vm->vpes[i]->vpe_lock);
its_send_vmapp(its, vm->vpes[i], false);
+ }
}
-
- raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
@@ -3688,6 +3683,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops its_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = its_irq_domain_alloc,
.free = its_irq_domain_free,
.activate = its_irq_domain_activate,
@@ -3926,6 +3922,8 @@ static void its_vpe_invall(struct its_vpe *vpe)
{
struct its_node *its;
+ guard(raw_spinlock_irqsave)(&vpe->its_vm->vmapp_lock);
+
list_for_each_entry(its, &its_nodes, entry) {
if (!is_v4(its))
continue;
@@ -4531,6 +4529,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
vm->db_lpi_base = base;
vm->nr_db_lpis = nr_ids;
vm->vprop_page = vprop_page;
+ raw_spin_lock_init(&vm->vmapp_lock);
if (gic_rdists->has_rvpeid)
irqchip = &its_vpe_4_1_irq_chip;
@@ -4562,6 +4561,10 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
/*
* If we use the list map, we issue VMAPP on demand... Unless
* we're on a GICv4.1 and we eagerly map the VPE on all ITSs
@@ -4570,9 +4573,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
if (!gic_requires_eager_mapping())
return 0;
- /* Map the VPE to the first possible CPU */
- vpe->col_idx = cpumask_first(cpu_online_mask);
-
list_for_each_entry(its, &its_nodes, entry) {
if (!is_v4(its))
continue;
@@ -4581,8 +4581,6 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
its_send_vinvall(its, vpe);
}
- irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
-
return 0;
}
@@ -4993,6 +4991,9 @@ static int its_init_domain(struct its_node *its)
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
+ inner_domain->msi_parent_ops = &gic_v3_its_msi_parent_ops;
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+
return 0;
}
@@ -5580,6 +5581,10 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
goto node_err;
}
+ if (acpi_get_madt_revision() >= 7 &&
+ (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT))
+ its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
+
err = its_probe_one(its);
if (!err)
return 0;
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index dbb8b1efda44..3fe870f8ee17 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -18,6 +18,8 @@
#include <linux/irqchip/arm-gic-v3.h>
+#include "irq-msi-lib.h"
+
struct mbi_range {
u32 spi_start;
u32 nr_spis;
@@ -138,6 +140,7 @@ static void mbi_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops mbi_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = mbi_irq_domain_alloc,
.free = mbi_irq_domain_free,
};
@@ -151,54 +154,6 @@ static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}
-#ifdef CONFIG_PCI_MSI
-/* PCI-specific irqchip */
-static void mbi_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void mbi_unmask_msi_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip mbi_msi_irq_chip = {
- .name = "MSI",
- .irq_mask = mbi_mask_msi_irq,
- .irq_unmask = mbi_unmask_msi_irq,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_compose_msi_msg = mbi_compose_msi_msg,
-};
-
-static struct msi_domain_info mbi_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
- .chip = &mbi_msi_irq_chip,
-};
-
-static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
- struct irq_domain **pci_domain)
-{
- *pci_domain = pci_msi_create_irq_domain(nexus_domain->parent->fwnode,
- &mbi_msi_domain_info,
- nexus_domain);
- if (!*pci_domain)
- return -ENOMEM;
-
- return 0;
-}
-#else
-static int mbi_allocate_pci_domain(struct irq_domain *nexus_domain,
- struct irq_domain **pci_domain)
-{
- *pci_domain = NULL;
- return 0;
-}
-#endif
-
static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
{
mbi_compose_msi_msg(data, msg);
@@ -210,28 +165,51 @@ static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
}
-/* Platform-MSI specific irqchip */
-static struct irq_chip mbi_pmsi_irq_chip = {
- .name = "pMSI",
- .irq_set_type = irq_chip_set_type_parent,
- .irq_compose_msi_msg = mbi_compose_mbi_msg,
- .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
-};
-
-static struct msi_domain_ops mbi_pmsi_ops = {
-};
+static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ switch (info->bus_token) {
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ info->chip->irq_compose_msi_msg = mbi_compose_msi_msg;
+ return true;
+
+ case DOMAIN_BUS_DEVICE_MSI:
+ info->chip->irq_compose_msi_msg = mbi_compose_mbi_msg;
+ info->chip->irq_set_type = irq_chip_set_type_parent;
+ info->chip->flags |= IRQCHIP_SUPPORTS_LEVEL_MSI;
+ info->flags |= MSI_FLAG_LEVEL_CAPABLE;
+ return true;
+
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+}
-static struct msi_domain_info mbi_pmsi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_LEVEL_CAPABLE),
- .ops = &mbi_pmsi_ops,
- .chip = &mbi_pmsi_irq_chip,
+#define MBI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define MBI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
+ .supported_flags = MBI_MSI_FLAGS_SUPPORTED,
+ .required_flags = MBI_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .prefix = "MBI-",
+ .init_dev_msi_info = mbi_init_dev_msi_info,
};
-static int mbi_allocate_domains(struct irq_domain *parent)
+static int mbi_allocate_domain(struct irq_domain *parent)
{
- struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
- int err;
+ struct irq_domain *nexus_domain;
nexus_domain = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode,
&mbi_domain_ops, NULL);
@@ -239,22 +217,8 @@ static int mbi_allocate_domains(struct irq_domain *parent)
return -ENOMEM;
irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
-
- err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);
-
- plat_domain = platform_msi_create_irq_domain(parent->fwnode,
- &mbi_pmsi_domain_info,
- nexus_domain);
-
- if (err || !plat_domain) {
- if (plat_domain)
- irq_domain_remove(plat_domain);
- if (pci_domain)
- irq_domain_remove(pci_domain);
- irq_domain_remove(nexus_domain);
- return -ENOMEM;
- }
-
+ nexus_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ nexus_domain->msi_parent_ops = &gic_v3_mbi_msi_parent_ops;
return 0;
}
@@ -317,7 +281,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
pr_info("Using MBI frame %pa\n", &mbi_phys_base);
- ret = mbi_allocate_domains(parent);
+ ret = mbi_allocate_domain(parent);
if (ret)
goto err_free_mbi;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6393f3d780e9..c19083bfb943 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -2203,11 +2203,10 @@ out_put_node:
of_node_put(parts_node);
}
-static void __init gic_of_setup_kvm_info(struct device_node *node)
+static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions)
{
int ret;
struct resource r;
- u32 gicv_idx;
gic_v3_kvm_info.type = GIC_V3;
@@ -2215,12 +2214,8 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (!gic_v3_kvm_info.maint_irq)
return;
- if (of_property_read_u32(node, "#redistributor-regions",
- &gicv_idx))
- gicv_idx = 1;
-
- gicv_idx += 3; /* Also skip GICD, GICC, GICH */
- ret = of_address_to_resource(node, gicv_idx, &r);
+ /* Also skip GICD, GICC, GICH */
+ ret = of_address_to_resource(node, nr_redist_regions + 3, &r);
if (!ret)
gic_v3_kvm_info.vcpu = r;
@@ -2310,7 +2305,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
gic_populate_ppi_partitions(node);
if (static_branch_likely(&supports_deactivate_key))
- gic_of_setup_kvm_info(node);
+ gic_of_setup_kvm_info(node, nr_redist_regions);
return 0;
out_unmap_rdist:
@@ -2362,6 +2357,11 @@ gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
return -ENOMEM;
}
+
+ if (acpi_get_madt_revision() >= 7 &&
+ (redist->flags & ACPI_MADT_GICR_NON_COHERENT))
+ gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
+
gic_request_region(redist->base_address, redist->length, "GICR");
gic_acpi_register_redist(redist->base_address, redist_base);
@@ -2402,6 +2402,10 @@ gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
return -ENOMEM;
gic_request_region(gicc->gicr_base_address, size, "GICR");
+ if (acpi_get_madt_revision() >= 7 &&
+ (gicc->flags & ACPI_MADT_GICC_NON_COHERENT))
+ gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
+
gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
return 0;
}
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 20cf7a9e9ece..75a0e980ff35 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -36,6 +36,7 @@ struct irqsteer_data {
int channel;
struct irq_domain *domain;
u32 *saved_reg;
+ struct device *dev;
};
static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
@@ -72,10 +73,26 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&data->lock, flags);
}
+static void imx_irqsteer_irq_bus_lock(struct irq_data *d)
+{
+ struct irqsteer_data *data = d->chip_data;
+
+ pm_runtime_get_sync(data->dev);
+}
+
+static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct irqsteer_data *data = d->chip_data;
+
+ pm_runtime_put_autosuspend(data->dev);
+}
+
static const struct irq_chip imx_irqsteer_irq_chip = {
- .name = "irqsteer",
- .irq_mask = imx_irqsteer_irq_mask,
- .irq_unmask = imx_irqsteer_irq_unmask,
+ .name = "irqsteer",
+ .irq_mask = imx_irqsteer_irq_mask,
+ .irq_unmask = imx_irqsteer_irq_unmask,
+ .irq_bus_lock = imx_irqsteer_irq_bus_lock,
+ .irq_bus_sync_unlock = imx_irqsteer_irq_bus_sync_unlock,
};
static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
@@ -150,6 +167,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ data->dev = &pdev->dev;
data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
dev_err(&pdev->dev, "failed to initialize reg\n");
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index 90d41c1407ac..4342a21de1eb 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -24,6 +24,8 @@
#include <linux/pm_domain.h>
#include <linux/spinlock.h>
+#include "irq-msi-lib.h"
+
#define IMX_MU_CHANS 4
enum imx_mu_xcr {
@@ -114,20 +116,6 @@ static void imx_mu_msi_parent_ack_irq(struct irq_data *data)
imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4);
}
-static struct irq_chip imx_mu_msi_irq_chip = {
- .name = "MU-MSI",
- .irq_ack = irq_chip_ack_parent,
-};
-
-static struct msi_domain_ops imx_mu_msi_irq_ops = {
-};
-
-static struct msi_domain_info imx_mu_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .ops = &imx_mu_msi_irq_ops,
- .chip = &imx_mu_msi_irq_chip,
-};
-
static void imx_mu_msi_parent_compose_msg(struct irq_data *data,
struct msi_msg *msg)
{
@@ -195,6 +183,7 @@ static void imx_mu_msi_domain_irq_free(struct irq_domain *domain,
}
static const struct irq_domain_ops imx_mu_msi_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = imx_mu_msi_domain_irq_alloc,
.free = imx_mu_msi_domain_irq_free,
};
@@ -216,35 +205,38 @@ static void imx_mu_msi_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+#define IMX_MU_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PARENT_PM_DEV)
+
+#define IMX_MU_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops imx_mu_msi_parent_ops = {
+ .supported_flags = IMX_MU_MSI_FLAGS_SUPPORTED,
+ .required_flags = IMX_MU_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PLATFORM_MSI,
+ .prefix = "MU-MSI-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev)
{
struct fwnode_handle *fwnodes = dev_fwnode(dev);
struct irq_domain *parent;
/* Initialize MSI domain parent */
- parent = irq_domain_create_linear(fwnodes,
- IMX_MU_CHANS,
- &imx_mu_msi_domain_ops,
- msi_data);
+ parent = irq_domain_create_linear(fwnodes, IMX_MU_CHANS,
+ &imx_mu_msi_domain_ops, msi_data);
if (!parent) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- msi_data->msi_domain = platform_msi_create_irq_domain(fwnodes,
- &imx_mu_msi_domain_info,
- parent);
-
- if (!msi_data->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
- return -ENOMEM;
- }
-
- irq_domain_set_pm_device(msi_data->msi_domain, dev);
-
+ parent->dev = parent->pm_dev = dev;
+ parent->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ parent->msi_parent_ops = &imx_mu_msi_parent_ops;
return 0;
}
diff --git a/drivers/irqchip/irq-lan966x-oic.c b/drivers/irqchip/irq-lan966x-oic.c
new file mode 100644
index 000000000000..41ac880e3b87
--- /dev/null
+++ b/drivers/irqchip/irq-lan966x-oic.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Microchip LAN966x outbound interrupt controller
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ *
+ * Authors:
+ * Horatiu Vultur <horatiu.vultur@microchip.com>
+ * Clément Léger <clement.leger@bootlin.com>
+ * Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct lan966x_oic_chip_regs {
+ int reg_off_ena_set;
+ int reg_off_ena_clr;
+ int reg_off_sticky;
+ int reg_off_ident;
+ int reg_off_map;
+};
+
+struct lan966x_oic_data {
+ void __iomem *regs;
+ int irq;
+};
+
+#define LAN966X_OIC_NR_IRQ 86
+
+/* Interrupt sticky status */
+#define LAN966X_OIC_INTR_STICKY 0x30
+#define LAN966X_OIC_INTR_STICKY1 0x34
+#define LAN966X_OIC_INTR_STICKY2 0x38
+
+/* Interrupt enable */
+#define LAN966X_OIC_INTR_ENA 0x48
+#define LAN966X_OIC_INTR_ENA1 0x4c
+#define LAN966X_OIC_INTR_ENA2 0x50
+
+/* Atomic clear of interrupt enable */
+#define LAN966X_OIC_INTR_ENA_CLR 0x54
+#define LAN966X_OIC_INTR_ENA_CLR1 0x58
+#define LAN966X_OIC_INTR_ENA_CLR2 0x5c
+
+/* Atomic set of interrupt */
+#define LAN966X_OIC_INTR_ENA_SET 0x60
+#define LAN966X_OIC_INTR_ENA_SET1 0x64
+#define LAN966X_OIC_INTR_ENA_SET2 0x68
+
+/* Mapping of source to destination interrupts (_n = 0..8) */
+#define LAN966X_OIC_DST_INTR_MAP(_n) (0x78 + (_n) * 4)
+#define LAN966X_OIC_DST_INTR_MAP1(_n) (0x9c + (_n) * 4)
+#define LAN966X_OIC_DST_INTR_MAP2(_n) (0xc0 + (_n) * 4)
+
+/* Currently active interrupt sources per destination (_n = 0..8) */
+#define LAN966X_OIC_DST_INTR_IDENT(_n) (0xe4 + (_n) * 4)
+#define LAN966X_OIC_DST_INTR_IDENT1(_n) (0x108 + (_n) * 4)
+#define LAN966X_OIC_DST_INTR_IDENT2(_n) (0x12c + (_n) * 4)
+
+static unsigned int lan966x_oic_irq_startup(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct irq_chip_type *ct = irq_data_get_chip_type(data);
+ struct lan966x_oic_chip_regs *chip_regs = gc->private;
+ u32 map;
+
+ irq_gc_lock(gc);
+
+ /* Map the source interrupt to the destination */
+ map = irq_reg_readl(gc, chip_regs->reg_off_map);
+ map |= data->mask;
+ irq_reg_writel(gc, map, chip_regs->reg_off_map);
+
+ irq_gc_unlock(gc);
+
+ ct->chip.irq_ack(data);
+ ct->chip.irq_unmask(data);
+
+ return 0;
+}
+
+static void lan966x_oic_irq_shutdown(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct irq_chip_type *ct = irq_data_get_chip_type(data);
+ struct lan966x_oic_chip_regs *chip_regs = gc->private;
+ u32 map;
+
+ ct->chip.irq_mask(data);
+
+ irq_gc_lock(gc);
+
+ /* Unmap the interrupt */
+ map = irq_reg_readl(gc, chip_regs->reg_off_map);
+ map &= ~data->mask;
+ irq_reg_writel(gc, map, chip_regs->reg_off_map);
+
+ irq_gc_unlock(gc);
+}
+
+static int lan966x_oic_irq_set_type(struct irq_data *data,
+ unsigned int flow_type)
+{
+ if (flow_type != IRQ_TYPE_LEVEL_HIGH) {
+ pr_err("lan966x oic doesn't support flow type %d\n", flow_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void lan966x_oic_irq_handler_domain(struct irq_domain *d, u32 first_irq)
+{
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, first_irq);
+ struct lan966x_oic_chip_regs *chip_regs = gc->private;
+ unsigned long ident;
+ unsigned int hwirq;
+
+ ident = irq_reg_readl(gc, chip_regs->reg_off_ident);
+ if (!ident)
+ return;
+
+ for_each_set_bit(hwirq, &ident, 32)
+ generic_handle_domain_irq(d, hwirq + first_irq);
+}
+
+static void lan966x_oic_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+ lan966x_oic_irq_handler_domain(d, 0);
+ lan966x_oic_irq_handler_domain(d, 32);
+ lan966x_oic_irq_handler_domain(d, 64);
+ chained_irq_exit(chip, desc);
+}
+
+static struct lan966x_oic_chip_regs lan966x_oic_chip_regs[3] = {
+ {
+ .reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET,
+ .reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR,
+ .reg_off_sticky = LAN966X_OIC_INTR_STICKY,
+ .reg_off_ident = LAN966X_OIC_DST_INTR_IDENT(0),
+ .reg_off_map = LAN966X_OIC_DST_INTR_MAP(0),
+ }, {
+ .reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET1,
+ .reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR1,
+ .reg_off_sticky = LAN966X_OIC_INTR_STICKY1,
+ .reg_off_ident = LAN966X_OIC_DST_INTR_IDENT1(0),
+ .reg_off_map = LAN966X_OIC_DST_INTR_MAP1(0),
+ }, {
+ .reg_off_ena_set = LAN966X_OIC_INTR_ENA_SET2,
+ .reg_off_ena_clr = LAN966X_OIC_INTR_ENA_CLR2,
+ .reg_off_sticky = LAN966X_OIC_INTR_STICKY2,
+ .reg_off_ident = LAN966X_OIC_DST_INTR_IDENT2(0),
+ .reg_off_map = LAN966X_OIC_DST_INTR_MAP2(0),
+ }
+};
+
+static int lan966x_oic_chip_init(struct irq_chip_generic *gc)
+{
+ struct lan966x_oic_data *lan966x_oic = gc->domain->host_data;
+ struct lan966x_oic_chip_regs *chip_regs;
+
+ chip_regs = &lan966x_oic_chip_regs[gc->irq_base / 32];
+
+ gc->reg_base = lan966x_oic->regs;
+ gc->chip_types[0].regs.enable = chip_regs->reg_off_ena_set;
+ gc->chip_types[0].regs.disable = chip_regs->reg_off_ena_clr;
+ gc->chip_types[0].regs.ack = chip_regs->reg_off_sticky;
+ gc->chip_types[0].chip.irq_startup = lan966x_oic_irq_startup;
+ gc->chip_types[0].chip.irq_shutdown = lan966x_oic_irq_shutdown;
+ gc->chip_types[0].chip.irq_set_type = lan966x_oic_irq_set_type;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
+ gc->private = chip_regs;
+
+ /* Disable all interrupts handled by this chip */
+ irq_reg_writel(gc, ~0U, chip_regs->reg_off_ena_clr);
+
+ return 0;
+}
+
+static void lan966x_oic_chip_exit(struct irq_chip_generic *gc)
+{
+ /* Disable and ack all interrupts handled by this chip */
+ irq_reg_writel(gc, ~0U, gc->chip_types[0].regs.disable);
+ irq_reg_writel(gc, ~0U, gc->chip_types[0].regs.ack);
+}
+
+static int lan966x_oic_domain_init(struct irq_domain *d)
+{
+ struct lan966x_oic_data *lan966x_oic = d->host_data;
+
+ irq_set_chained_handler_and_data(lan966x_oic->irq, lan966x_oic_irq_handler, d);
+
+ return 0;
+}
+
+static void lan966x_oic_domain_exit(struct irq_domain *d)
+{
+ struct lan966x_oic_data *lan966x_oic = d->host_data;
+
+ irq_set_chained_handler_and_data(lan966x_oic->irq, NULL, NULL);
+}
+
+static int lan966x_oic_probe(struct platform_device *pdev)
+{
+ struct irq_domain_chip_generic_info dgc_info = {
+ .name = "lan966x-oic",
+ .handler = handle_level_irq,
+ .irqs_per_chip = 32,
+ .num_ct = 1,
+ .init = lan966x_oic_chip_init,
+ .exit = lan966x_oic_chip_exit,
+ };
+ struct irq_domain_info d_info = {
+ .fwnode = of_node_to_fwnode(pdev->dev.of_node),
+ .domain_flags = IRQ_DOMAIN_FLAG_DESTROY_GC,
+ .size = LAN966X_OIC_NR_IRQ,
+ .hwirq_max = LAN966X_OIC_NR_IRQ,
+ .ops = &irq_generic_chip_ops,
+ .dgc_info = &dgc_info,
+ .init = lan966x_oic_domain_init,
+ .exit = lan966x_oic_domain_exit,
+ };
+ struct lan966x_oic_data *lan966x_oic;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *domain;
+
+ lan966x_oic = devm_kmalloc(dev, sizeof(*lan966x_oic), GFP_KERNEL);
+ if (!lan966x_oic)
+ return -ENOMEM;
+
+ lan966x_oic->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lan966x_oic->regs))
+ return dev_err_probe(dev, PTR_ERR(lan966x_oic->regs),
+ "failed to map resource\n");
+
+ lan966x_oic->irq = platform_get_irq(pdev, 0);
+ if (lan966x_oic->irq < 0)
+ return dev_err_probe(dev, lan966x_oic->irq, "failed to get the IRQ\n");
+
+ d_info.host_data = lan966x_oic;
+ domain = devm_irq_domain_instantiate(dev, &d_info);
+ if (IS_ERR(domain))
+ return dev_err_probe(dev, PTR_ERR(domain),
+ "failed to instantiate the IRQ domain\n");
+ return 0;
+}
+
+static const struct of_device_id lan966x_oic_of_match[] = {
+ { .compatible = "microchip,lan966x-oic" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, lan966x_oic_of_match);
+
+static struct platform_driver lan966x_oic_driver = {
+ .probe = lan966x_oic_probe,
+ .driver = {
+ .name = "lan966x-oic",
+ .of_match_table = lan966x_oic_of_match,
+ },
+};
+module_platform_driver(lan966x_oic_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("Microchip LAN966x OIC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 58881d313979..093fd42893a7 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -135,24 +135,14 @@ static int mbigen_set_type(struct irq_data *data, unsigned int type)
return 0;
}
-static struct irq_chip mbigen_irq_chip = {
- .name = "mbigen-v2",
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_eoi = mbigen_eoi_irq,
- .irq_set_type = mbigen_set_type,
- .irq_set_affinity = irq_chip_set_affinity_parent,
-};
-
-static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+static void mbigen_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct irq_data *d = irq_get_irq_data(desc->irq);
void __iomem *base = d->chip_data;
u32 val;
if (!msg->address_lo && !msg->address_hi)
return;
-
+
base += get_mbigen_vec_reg(d->hwirq);
val = readl_relaxed(base);
@@ -165,10 +155,8 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
writel_relaxed(val, base);
}
-static int mbigen_domain_translate(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *hwirq,
- unsigned int *type)
+static int mbigen_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
{
if (is_of_node(fwspec->fwnode) || is_acpi_device_node(fwspec->fwnode)) {
if (fwspec->param_count != 2)
@@ -192,51 +180,48 @@ static int mbigen_domain_translate(struct irq_domain *d,
return -EINVAL;
}
-static int mbigen_irq_domain_alloc(struct irq_domain *domain,
- unsigned int virq,
- unsigned int nr_irqs,
- void *args)
+static void mbigen_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
- struct irq_fwspec *fwspec = args;
- irq_hw_number_t hwirq;
- unsigned int type;
- struct mbigen_device *mgn_chip;
- int i, err;
-
- err = mbigen_domain_translate(domain, fwspec, &hwirq, &type);
- if (err)
- return err;
-
- err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
- if (err)
- return err;
+ arg->desc = desc;
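+	/* The wired hwirq is carried in the low 32 bits of the MSI instance cookie */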
+ arg->hwirq = (u32)desc->data.icookie.value;
+}
- mgn_chip = platform_msi_get_host_data(domain);
+static const struct msi_domain_template mbigen_msi_template = {
+ .chip = {
+ .name = "mbigen-v2",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = mbigen_eoi_irq,
+ .irq_set_type = mbigen_set_type,
+ .irq_write_msi_msg = mbigen_write_msi_msg,
+ },
- for (i = 0; i < nr_irqs; i++)
- irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &mbigen_irq_chip, mgn_chip->base);
+ .ops = {
+ .set_desc = mbigen_domain_set_desc,
+ .msi_translate = mbigen_domain_translate,
+ },
- return 0;
-}
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_USE_DEV_FWNODE,
+ },
+};
-static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+static bool mbigen_create_device_domain(struct device *dev, unsigned int size,
+ struct mbigen_device *mgn_chip)
{
- platform_msi_device_domain_free(domain, virq, nr_irqs);
-}
+ if (WARN_ON_ONCE(!dev->msi.domain))
+ return false;
-static const struct irq_domain_ops mbigen_domain_ops = {
- .translate = mbigen_domain_translate,
- .alloc = mbigen_irq_domain_alloc,
- .free = mbigen_irq_domain_free,
-};
+ return msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
+ &mbigen_msi_template, size,
+ NULL, mgn_chip->base);
+}
static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
struct platform_device *child;
- struct irq_domain *domain;
struct device_node *np;
u32 num_pins;
int ret = 0;
@@ -258,11 +243,7 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
break;
}
- domain = platform_msi_create_device_domain(&child->dev, num_pins,
- mbigen_write_msg,
- &mbigen_domain_ops,
- mgn_chip);
- if (!domain) {
+ if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip)) {
ret = -ENOMEM;
break;
}
@@ -284,7 +265,6 @@ MODULE_DEVICE_TABLE(acpi, mbigen_acpi_match);
static int mbigen_acpi_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
- struct irq_domain *domain;
u32 num_pins = 0;
int ret;
@@ -315,11 +295,7 @@ static int mbigen_acpi_create_domain(struct platform_device *pdev,
if (ret || num_pins == 0)
return -EINVAL;
- domain = platform_msi_create_device_domain(&pdev->dev, num_pins,
- mbigen_write_msg,
- &mbigen_domain_ops,
- mgn_chip);
- if (!domain)
+ if (!mbigen_create_device_domain(&pdev->dev, num_pins, mgn_chip))
return -ENOMEM;
return 0;
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 9a1791908598..27e30ce41db3 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -608,5 +608,6 @@ IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init)
IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc)
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_DESCRIPTION("Meson GPIO Interrupt Multiplexer driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:meson-gpio-intc");
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
new file mode 100644
index 000000000000..b5b90003311a
--- /dev/null
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#include <linux/export.h>
+
+#include "irq-msi-lib.h"
+
+/**
+ * msi_lib_init_dev_msi_info - Domain info setup for MSI domains
+ * @dev:	The device for which the domain is created
+ * @domain: The domain providing this callback
+ * @real_parent: The real parent domain of the domain to be initialized
+ * which might be a domain built on top of @domain or
+ * @domain itself
+ * @info:	The domain info for the domain to be initialized
+ *
+ * This function is to be used for all types of MSI domains above the root
+ * parent domain and any intermediates. The topmost parent domain specific
+ * functionality is determined via @real_parent.
+ *
+ * All intermediate domains between the root and the device domain must
+ * have either msi_parent_ops.init_dev_msi_info = msi_parent_init_dev_msi_info
+ * or invoke it down the line.
+ */
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info)
+{
+ const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+ u32 required_flags;
+
+ /* Parent ops available? */
+ if (WARN_ON_ONCE(!pops))
+ return false;
+
+ /*
+ * MSI parent domain specific settings. For now there is only the
+	 * root parent domain, e.g. NEXUS, acting as an MSI parent, but it is
+ * possible to stack MSI parents. See x86 vector -> irq remapping
+ */
+ if (domain->bus_token == pops->bus_select_token) {
+ if (WARN_ON_ONCE(domain != real_parent))
+ return false;
+ } else {
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ required_flags = pops->required_flags;
+
+ /* Is the target domain bus token supported? */
+	switch (info->bus_token) {
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PCI_MSI)))
+ return false;
+
+ break;
+ case DOMAIN_BUS_DEVICE_MSI:
+ /*
+ * Per device MSI should never have any MSI feature bits
+		 * set. Its sole purpose is to create a dumb interrupt
+ * chip which has a device specific irq_write_msi_msg()
+ * callback.
+ */
+ if (WARN_ON_ONCE(info->flags))
+ return false;
+
+ /* Core managed MSI descriptors */
+ info->flags = MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
+ fallthrough;
+ case DOMAIN_BUS_WIRED_TO_MSI:
+ /* Remove PCI specific flags */
+ required_flags &= ~MSI_FLAG_PCI_MSI_MASK_PARENT;
+ break;
+ default:
+ /*
+ * This should never be reached. See
+ * msi_lib_irq_domain_select()
+ */
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ /*
+ * Mask out the domain specific MSI feature flags which are not
+ * supported by the real parent.
+ */
+ info->flags &= pops->supported_flags;
+ /* Enforce the required flags */
+ info->flags |= required_flags;
+
+ /* Chip updates for all child bus types */
+ if (!info->chip->irq_eoi)
+ info->chip->irq_eoi = irq_chip_eoi_parent;
+ if (!info->chip->irq_ack)
+ info->chip->irq_ack = irq_chip_ack_parent;
+
+ /*
+ * The device MSI domain can never have a set affinity callback. It
+ * always has to rely on the parent domain to handle affinity
+	 * settings. The device MSI domain just has to write the resulting
+	 * MSI message into the hardware, which is the whole purpose of the
+	 * device MSI domain aside from mask/unmask, which is provided e.g. by
+	 * PCI/MSI device domains.
+ */
+ info->chip->irq_set_affinity = msi_domain_set_affinity;
+ return true;
+}
+EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
+
+/**
+ * msi_lib_irq_domain_select - Shared select function for NEXUS domains
+ * @d: Pointer to the irq domain on which select is invoked
+ * @fwspec: Firmware spec describing what is searched
+ * @bus_token: The bus token for which a matching irq domain is looked up
+ *
+ * Returns: %0 if @d is not what is being looked for
+ *
+ * %1 if @d is either the domain which is directly searched for or
+ * if @d is providing the parent MSI domain for the functionality
+ * requested with @bus_token.
+ */
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+	const struct msi_parent_ops *ops = d->msi_parent_ops;
+	u32 busmask = BIT(bus_token);
+
+	if (!ops)
+		return 0;
+
+	if (fwspec->fwnode != d->fwnode || fwspec->param_count != 0)
+		return 0;
+
+	/* Handle pure domain searches */
+	if (bus_token == ops->bus_select_token)
+		return 1;
+
+	return !!(ops->bus_select_mask & busmask);
+}
+EXPORT_SYMBOL_GPL(msi_lib_irq_domain_select);
diff --git a/drivers/irqchip/irq-msi-lib.h b/drivers/irqchip/irq-msi-lib.h
new file mode 100644
index 000000000000..681ceabb7bc7
--- /dev/null
+++ b/drivers/irqchip/irq-msi-lib.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#ifndef _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
+#define _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
+
+#include <linux/bits.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+
+#ifdef CONFIG_PCI_MSI
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+#else
+#define MATCH_PCI_MSI (0)
+#endif
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info);
+
+#endif /* _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H */
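For orientation, a minimal sketch of how a root parent domain is expected to use these helpers. The foo_* names are purely illustrative and not part of this series; the gicp, odmi and sei conversions below follow exactly this pattern:

	#define FOO_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS)
	#define FOO_MSI_FLAGS_SUPPORTED	(MSI_GENERIC_FLAGS_MASK)

	static const struct msi_parent_ops foo_msi_parent_ops = {
		.supported_flags	= FOO_MSI_FLAGS_SUPPORTED,
		.required_flags		= FOO_MSI_FLAGS_REQUIRED,
		.bus_select_token	= DOMAIN_BUS_GENERIC_MSI,
		.bus_select_mask	= MATCH_PLATFORM_MSI,
		.prefix			= "FOO-",
		.init_dev_msi_info	= msi_lib_init_dev_msi_info,
	};

	static const struct irq_domain_ops foo_domain_ops = {
		.select	= msi_lib_irq_domain_select,	/* lets child MSI domains find this parent */
		.alloc	= foo_domain_alloc,		/* driver specific, unchanged */
		.free	= foo_domain_free,
	};

	/* At probe time, instead of stacking a platform MSI domain on top: */
	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
	inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
	inner_domain->msi_parent_ops = &foo_msi_parent_ops;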
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index c43a345061d5..2b6183919ea4 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -17,6 +17,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include "irq-msi-lib.h"
+
#include <dt-bindings/interrupt-controller/arm-gic.h>
#define GICP_SETSPI_NSR_OFFSET 0x0
@@ -145,32 +147,32 @@ static void gicp_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops gicp_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = gicp_irq_domain_alloc,
.free = gicp_irq_domain_free,
};
-static struct irq_chip gicp_msi_irq_chip = {
- .name = "GICP",
- .irq_set_type = irq_chip_set_type_parent,
- .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
-};
+#define GICP_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
-static struct msi_domain_ops gicp_msi_ops = {
-};
+#define GICP_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_LEVEL_CAPABLE)
-static struct msi_domain_info gicp_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_LEVEL_CAPABLE),
- .ops = &gicp_msi_ops,
- .chip = &gicp_msi_irq_chip,
+static const struct msi_parent_ops gicp_msi_parent_ops = {
+ .supported_flags = GICP_MSI_FLAGS_SUPPORTED,
+ .required_flags = GICP_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_GENERIC_MSI,
+ .bus_select_mask = MATCH_PLATFORM_MSI,
+ .prefix = "GICP-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int mvebu_gicp_probe(struct platform_device *pdev)
{
- struct mvebu_gicp *gicp;
- struct irq_domain *inner_domain, *plat_domain, *parent_domain;
+ struct irq_domain *inner_domain, *parent_domain;
struct device_node *node = pdev->dev.of_node;
struct device_node *irq_parent_dn;
+ struct mvebu_gicp *gicp;
int ret, i;
gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL);
@@ -234,17 +236,9 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
if (!inner_domain)
return -ENOMEM;
-
- plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
- &gicp_msi_domain_info,
- inner_domain);
- if (!plat_domain) {
- irq_domain_remove(inner_domain);
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, gicp);
-
+ irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ inner_domain->msi_parent_ops = &gicp_msi_parent_ops;
return 0;
}
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 3c77acc7ec6a..b337f6c05f18 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -20,6 +20,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include "irq-msi-lib.h"
+
#include <dt-bindings/interrupt-controller/mvebu-icu.h>
/* ICU registers */
@@ -60,99 +62,13 @@ struct mvebu_icu_msi_data {
const struct mvebu_icu_subset_data *subset_data;
};
-struct mvebu_icu_irq_data {
- struct mvebu_icu *icu;
- unsigned int icu_group;
- unsigned int type;
-};
-
static DEFINE_STATIC_KEY_FALSE(legacy_bindings);
-static void mvebu_icu_init(struct mvebu_icu *icu,
- struct mvebu_icu_msi_data *msi_data,
- struct msi_msg *msg)
-{
- const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
-
- if (atomic_cmpxchg(&msi_data->initialized, false, true))
- return;
-
- /* Set 'SET' ICU SPI message address in AP */
- writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
- writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
-
- if (subset->icu_group != ICU_GRP_NSR)
- return;
-
- /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
- writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
- writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
-}
-
-static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
-{
- struct irq_data *d = irq_get_irq_data(desc->irq);
- struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain);
- struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
- struct mvebu_icu *icu = icu_irqd->icu;
- unsigned int icu_int;
-
- if (msg->address_lo || msg->address_hi) {
- /* One off initialization per domain */
- mvebu_icu_init(icu, msi_data, msg);
- /* Configure the ICU with irq number & type */
- icu_int = msg->data | ICU_INT_ENABLE;
- if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
- icu_int |= ICU_IS_EDGE;
- icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT;
- } else {
- /* De-configure the ICU */
- icu_int = 0;
- }
-
- writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
-
- /*
- * The SATA unit has 2 ports, and a dedicated ICU entry per
- * port. The ahci sata driver supports only one irq interrupt
- * per SATA unit. To solve this conflict, we configure the 2
- * SATA wired interrupts in the south bridge into 1 GIC
- * interrupt in the north bridge. Even if only a single port
- * is enabled, if sata node is enabled, both interrupts are
- * configured (regardless of which port is actually in use).
- */
- if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
- writel_relaxed(icu_int,
- icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
- writel_relaxed(icu_int,
- icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
- }
-}
-
-static struct irq_chip mvebu_icu_nsr_chip = {
- .name = "ICU-NSR",
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_set_type = irq_chip_set_type_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
-};
-
-static struct irq_chip mvebu_icu_sei_chip = {
- .name = "ICU-SEI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_set_type = irq_chip_set_type_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
-};
-
-static int
-mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
- struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d);
+ struct mvebu_icu_msi_data *msi_data = d->host_data;
struct mvebu_icu *icu = msi_data->icu;
/* Check the count of the parameters in dt */
@@ -192,81 +108,126 @@ mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
return 0;
}
-static int
-mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *args)
+static void mvebu_icu_init(struct mvebu_icu *icu,
+ struct mvebu_icu_msi_data *msi_data,
+ struct msi_msg *msg)
{
- int err;
- unsigned long hwirq;
- struct irq_fwspec *fwspec = args;
- struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain);
- struct mvebu_icu *icu = msi_data->icu;
- struct mvebu_icu_irq_data *icu_irqd;
- struct irq_chip *chip = &mvebu_icu_nsr_chip;
+ const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
- icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
- if (!icu_irqd)
- return -ENOMEM;
+ if (atomic_cmpxchg(&msi_data->initialized, false, true))
+ return;
- err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq,
- &icu_irqd->type);
- if (err) {
- dev_err(icu->dev, "failed to translate ICU parameters\n");
- goto free_irqd;
- }
+ /* Set 'SET' ICU SPI message address in AP */
+ writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
+ writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
- if (static_branch_unlikely(&legacy_bindings))
- icu_irqd->icu_group = fwspec->param[0];
- else
- icu_irqd->icu_group = msi_data->subset_data->icu_group;
- icu_irqd->icu = icu;
+ if (subset->icu_group != ICU_GRP_NSR)
+ return;
- err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
- if (err) {
- dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
- goto free_irqd;
- }
+ /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
+ writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
+ writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
+}
- /* Make sure there is no interrupt left pending by the firmware */
- err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
- if (err)
- goto free_msi;
+static int mvebu_icu_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg)
+{
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, info->chip_data);
+ return irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
+}
- if (icu_irqd->icu_group == ICU_GRP_SEI)
- chip = &mvebu_icu_sei_chip;
+static void mvebu_icu_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
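+	/*
+	 * The low 32 bits of the instance cookie carry the wired hwirq,
+	 * the upper 32 bits carry the trigger type consumed by
+	 * mvebu_icu_write_msi_msg().
+	 */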
+ arg->hwirq = (u32)desc->data.icookie.value;
+}
- err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- chip, icu_irqd);
- if (err) {
- dev_err(icu->dev, "failed to set the data to IRQ domain\n");
- goto free_msi;
+static void mvebu_icu_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct mvebu_icu_msi_data *msi_data = d->chip_data;
+ unsigned int icu_group = msi_data->subset_data->icu_group;
+ struct msi_desc *desc = irq_data_get_msi_desc(d);
+ struct mvebu_icu *icu = msi_data->icu;
+ unsigned int type;
+ u32 icu_int;
+
+ if (msg->address_lo || msg->address_hi) {
+ /* One off initialization per domain */
+ mvebu_icu_init(icu, msi_data, msg);
+ /* Configure the ICU with irq number & type */
+ icu_int = msg->data | ICU_INT_ENABLE;
+ type = (unsigned int)(desc->data.icookie.value >> 32);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ icu_int |= ICU_IS_EDGE;
+ icu_int |= icu_group << ICU_GROUP_SHIFT;
+ } else {
+ /* De-configure the ICU */
+ icu_int = 0;
}
- return 0;
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
-free_msi:
- platform_msi_device_domain_free(domain, virq, nr_irqs);
-free_irqd:
- kfree(icu_irqd);
- return err;
+ /*
+ * The SATA unit has 2 ports, and a dedicated ICU entry per
+ * port. The ahci sata driver supports only one irq interrupt
+ * per SATA unit. To solve this conflict, we configure the 2
+ * SATA wired interrupts in the south bridge into 1 GIC
+ * interrupt in the north bridge. Even if only a single port
+ * is enabled, if sata node is enabled, both interrupts are
+ * configured (regardless of which port is actually in use).
+ */
+ if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
+ }
}
-static void
-mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d = irq_get_irq_data(virq);
- struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
+static const struct msi_domain_template mvebu_icu_nsr_msi_template = {
+ .chip = {
+ .name = "ICU-NSR",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_write_msi_msg = mvebu_icu_write_msi_msg,
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+ },
- kfree(icu_irqd);
+ .ops = {
+ .msi_translate = mvebu_icu_translate,
+ .msi_init = mvebu_icu_msi_init,
+ .set_desc = mvebu_icu_set_desc,
+ },
- platform_msi_device_domain_free(domain, virq, nr_irqs);
-}
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_LEVEL_CAPABLE |
+ MSI_FLAG_USE_DEV_FWNODE,
+ },
+};
+
+static const struct msi_domain_template mvebu_icu_sei_msi_template = {
+ .chip = {
+ .name = "ICU-SEI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_write_msi_msg = mvebu_icu_write_msi_msg,
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+ },
-static const struct irq_domain_ops mvebu_icu_domain_ops = {
- .translate = mvebu_icu_irq_domain_translate,
- .alloc = mvebu_icu_irq_domain_alloc,
- .free = mvebu_icu_irq_domain_free,
+ .ops = {
+ .msi_translate = mvebu_icu_translate,
+ .msi_init = mvebu_icu_msi_init,
+ .set_desc = mvebu_icu_set_desc,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_LEVEL_CAPABLE |
+ MSI_FLAG_USE_DEV_FWNODE,
+ },
};
static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
@@ -297,10 +258,10 @@ static const struct of_device_id mvebu_icu_subset_of_match[] = {
static int mvebu_icu_subset_probe(struct platform_device *pdev)
{
+ const struct msi_domain_template *tmpl;
struct mvebu_icu_msi_data *msi_data;
- struct device_node *msi_parent_dn;
struct device *dev = &pdev->dev;
- struct irq_domain *irq_domain;
+ bool sei;
msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
if (!msi_data)
@@ -314,20 +275,18 @@ static int mvebu_icu_subset_probe(struct platform_device *pdev)
msi_data->subset_data = of_device_get_match_data(dev);
}
- dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
- DOMAIN_BUS_PLATFORM_MSI);
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_PLATFORM_MSI);
if (!dev->msi.domain)
return -EPROBE_DEFER;
- msi_parent_dn = irq_domain_get_of_node(dev->msi.domain);
- if (!msi_parent_dn)
+ if (!irq_domain_get_of_node(dev->msi.domain))
return -ENODEV;
- irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
- mvebu_icu_write_msg,
- &mvebu_icu_domain_ops,
- msi_data);
- if (!irq_domain) {
+ sei = msi_data->subset_data->icu_group == ICU_GRP_SEI;
+ tmpl = sei ? &mvebu_icu_sei_msi_template : &mvebu_icu_nsr_msi_template;
+
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, tmpl,
+ ICU_MAX_IRQS, NULL, msi_data)) {
dev_err(dev, "Failed to create ICU MSI domain\n");
return -ENOMEM;
}
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
index 108091533e10..ff19bfd258dc 100644
--- a/drivers/irqchip/irq-mvebu-odmi.c
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -17,6 +17,9 @@
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/slab.h>
+
+#include "irq-msi-lib.h"
+
#include <dt-bindings/interrupt-controller/arm-gic.h>
#define GICP_ODMIN_SET 0x40
@@ -141,27 +144,29 @@ static void odmi_irq_domain_free(struct irq_domain *domain,
}
static const struct irq_domain_ops odmi_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = odmi_irq_domain_alloc,
.free = odmi_irq_domain_free,
};
-static struct irq_chip odmi_msi_irq_chip = {
- .name = "ODMI",
-};
+#define ODMI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
-static struct msi_domain_ops odmi_msi_ops = {
-};
+#define ODMI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)
-static struct msi_domain_info odmi_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .ops = &odmi_msi_ops,
- .chip = &odmi_msi_irq_chip,
+static const struct msi_parent_ops odmi_msi_parent_ops = {
+ .supported_flags = ODMI_MSI_FLAGS_SUPPORTED,
+ .required_flags = ODMI_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_GENERIC_MSI,
+ .bus_select_mask = MATCH_PLATFORM_MSI,
+ .prefix = "ODMI-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int __init mvebu_odmi_init(struct device_node *node,
struct device_node *parent)
{
- struct irq_domain *parent_domain, *inner_domain, *plat_domain;
+ struct irq_domain *parent_domain, *inner_domain;
int ret, i;
if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
@@ -208,18 +213,12 @@ static int __init mvebu_odmi_init(struct device_node *node,
goto err_unmap;
}
- plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
- &odmi_msi_domain_info,
- inner_domain);
- if (!plat_domain) {
- ret = -ENOMEM;
- goto err_remove_inner;
- }
+ irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ inner_domain->msi_parent_ops = &odmi_msi_parent_ops;
return 0;
-err_remove_inner:
- irq_domain_remove(inner_domain);
err_unmap:
for (i = 0; i < odmis_count; i++) {
struct odmi_data *odmi = &odmis[i];
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index d17d9c0e2880..08b0cc862adf 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -193,6 +193,7 @@ module_platform_driver(mvebu_pic_driver);
MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 7K/8K PIC driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mvebu_pic");
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index a48dbe91b036..f8c70f2d100a 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -14,6 +14,8 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include "irq-msi-lib.h"
+
/* Cause register */
#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
/* Mask register */
@@ -190,6 +192,7 @@ static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops mvebu_sei_domain_ops = {
+ .select = msi_lib_irq_domain_select,
.alloc = mvebu_sei_domain_alloc,
.free = mvebu_sei_domain_free,
};
@@ -307,21 +310,6 @@ static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
.free = mvebu_sei_cp_domain_free,
};
-static struct irq_chip mvebu_sei_msi_irq_chip = {
- .name = "SEI pMSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_set_type = irq_chip_set_type_parent,
-};
-
-static struct msi_domain_ops mvebu_sei_msi_ops = {
-};
-
-static struct msi_domain_info mvebu_sei_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
- .ops = &mvebu_sei_msi_ops,
- .chip = &mvebu_sei_msi_irq_chip,
-};
-
static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
{
struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
@@ -360,10 +348,23 @@ static void mvebu_sei_reset(struct mvebu_sei *sei)
}
}
+#define SEI_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+
+#define SEI_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops sei_msi_parent_ops = {
+ .supported_flags = SEI_MSI_FLAGS_SUPPORTED,
+ .required_flags = SEI_MSI_FLAGS_REQUIRED,
+ .bus_select_mask = MATCH_PLATFORM_MSI,
+ .bus_select_token = DOMAIN_BUS_GENERIC_MSI,
+ .prefix = "SEI-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int mvebu_sei_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct irq_domain *plat_domain;
struct mvebu_sei *sei;
u32 parent_irq;
int ret;
@@ -440,33 +441,20 @@ static int mvebu_sei_probe(struct platform_device *pdev)
}
irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
-
- plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
- &mvebu_sei_msi_domain_info,
- sei->cp_domain);
- if (!plat_domain) {
- pr_err("Failed to create CPs MSI domain\n");
- ret = -ENOMEM;
- goto remove_cp_domain;
- }
+ sei->cp_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ sei->cp_domain->msi_parent_ops = &sei_msi_parent_ops;
mvebu_sei_reset(sei);
- irq_set_chained_handler_and_data(parent_irq,
- mvebu_sei_handle_cascade_irq,
- sei);
-
+ irq_set_chained_handler_and_data(parent_irq, mvebu_sei_handle_cascade_irq, sei);
return 0;
-remove_cp_domain:
- irq_domain_remove(sei->cp_domain);
remove_ap_domain:
irq_domain_remove(sei->ap_domain);
remove_sei_domain:
irq_domain_remove(sei->sei_domain);
dispose_irq:
irq_dispose_mapping(parent_irq);
-
return ret;
}
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index f6484bf15e0b..693ff285ca2c 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -37,6 +37,8 @@
#define TSSEL_SHIFT(n) (8 * (n))
#define TSSEL_MASK GENMASK(7, 0)
#define IRQ_MASK 0x3
+#define IMSK 0x10010
+#define TMSK 0x10020
#define TSSR_OFFSET(n) ((n) % 4)
#define TSSR_INDEX(n) ((n) / 4)
@@ -69,12 +71,14 @@ struct rzg2l_irqc_reg_cache {
/**
* struct rzg2l_irqc_priv - IRQ controller private data structure
* @base: Controller's base address
+ * @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
* @cache: Registers cache for suspend/resume
*/
static struct rzg2l_irqc_priv {
void __iomem *base;
+ const struct irq_chip *irqchip;
struct irq_fwspec fwspec[IRQC_NUM_IRQ];
raw_spinlock_t lock;
struct rzg2l_irqc_reg_cache cache;
@@ -138,6 +142,111 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
irq_chip_eoi_parent(d);
}
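+/*
+ * RZ/Five provides additional mask registers on top of the common RZ/G2L
+ * ones: IMSK gates the IRQ0-7 pins and TMSK gates the TINT interrupts.
+ * Both this level and the parent controller have to be masked/unmasked.
+ */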
+static void rzfive_irqc_mask_irq_interrupt(struct rzg2l_irqc_priv *priv,
+ unsigned int hwirq)
+{
+ u32 bit = BIT(hwirq - IRQC_IRQ_START);
+
+ writel_relaxed(readl_relaxed(priv->base + IMSK) | bit, priv->base + IMSK);
+}
+
+static void rzfive_irqc_unmask_irq_interrupt(struct rzg2l_irqc_priv *priv,
+ unsigned int hwirq)
+{
+ u32 bit = BIT(hwirq - IRQC_IRQ_START);
+
+ writel_relaxed(readl_relaxed(priv->base + IMSK) & ~bit, priv->base + IMSK);
+}
+
+static void rzfive_irqc_mask_tint_interrupt(struct rzg2l_irqc_priv *priv,
+ unsigned int hwirq)
+{
+ u32 bit = BIT(hwirq - IRQC_TINT_START);
+
+ writel_relaxed(readl_relaxed(priv->base + TMSK) | bit, priv->base + TMSK);
+}
+
+static void rzfive_irqc_unmask_tint_interrupt(struct rzg2l_irqc_priv *priv,
+ unsigned int hwirq)
+{
+ u32 bit = BIT(hwirq - IRQC_TINT_START);
+
+ writel_relaxed(readl_relaxed(priv->base + TMSK) & ~bit, priv->base + TMSK);
+}
+
+static void rzfive_irqc_mask(struct irq_data *d)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ raw_spin_lock(&priv->lock);
+ if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
+ rzfive_irqc_mask_irq_interrupt(priv, hwirq);
+ else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
+ rzfive_irqc_mask_tint_interrupt(priv, hwirq);
+ raw_spin_unlock(&priv->lock);
+ irq_chip_mask_parent(d);
+}
+
+static void rzfive_irqc_unmask(struct irq_data *d)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ raw_spin_lock(&priv->lock);
+ if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
+ rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
+ else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
+ rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
+ raw_spin_unlock(&priv->lock);
+ irq_chip_unmask_parent(d);
+}
+
+static void rzfive_tint_irq_endisable(struct irq_data *d, bool enable)
+{
+ struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+
+ if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ) {
+ u32 offset = hwirq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(offset);
+ u8 tssr_index = TSSR_INDEX(offset);
+ u32 reg;
+
+ raw_spin_lock(&priv->lock);
+ if (enable)
+ rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
+ else
+ rzfive_irqc_mask_tint_interrupt(priv, hwirq);
+ reg = readl_relaxed(priv->base + TSSR(tssr_index));
+ if (enable)
+ reg |= TIEN << TSSEL_SHIFT(tssr_offset);
+ else
+ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+ raw_spin_unlock(&priv->lock);
+ } else {
+ raw_spin_lock(&priv->lock);
+ if (enable)
+ rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
+ else
+ rzfive_irqc_mask_irq_interrupt(priv, hwirq);
+ raw_spin_unlock(&priv->lock);
+ }
+}
+
+static void rzfive_irqc_irq_disable(struct irq_data *d)
+{
+ irq_chip_disable_parent(d);
+ rzfive_tint_irq_endisable(d, false);
+}
+
+static void rzfive_irqc_irq_enable(struct irq_data *d)
+{
+ rzfive_tint_irq_endisable(d, true);
+ irq_chip_enable_parent(d);
+}
+
static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
{
unsigned int hw_irq = irqd_to_hwirq(d);
@@ -162,8 +271,8 @@ static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
static void rzg2l_irqc_irq_disable(struct irq_data *d)
{
- rzg2l_tint_irq_endisable(d, false);
irq_chip_disable_parent(d);
+ rzg2l_tint_irq_endisable(d, false);
}
static void rzg2l_irqc_irq_enable(struct irq_data *d)
@@ -321,7 +430,7 @@ static struct syscore_ops rzg2l_irqc_syscore_ops = {
.resume = rzg2l_irqc_irq_resume,
};
-static const struct irq_chip irqc_chip = {
+static const struct irq_chip rzg2l_irqc_chip = {
.name = "rzg2l-irqc",
.irq_eoi = rzg2l_irqc_eoi,
.irq_mask = irq_chip_mask_parent,
@@ -338,6 +447,23 @@ static const struct irq_chip irqc_chip = {
IRQCHIP_SKIP_SET_WAKE,
};
+static const struct irq_chip rzfive_irqc_chip = {
+ .name = "rzfive-irqc",
+ .irq_eoi = rzg2l_irqc_eoi,
+ .irq_mask = rzfive_irqc_mask,
+ .irq_unmask = rzfive_irqc_unmask,
+ .irq_disable = rzfive_irqc_irq_disable,
+ .irq_enable = rzfive_irqc_irq_enable,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = rzg2l_irqc_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE,
+};
+
static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -369,7 +495,7 @@ static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
if (hwirq > (IRQC_NUM_IRQ - 1))
return -EINVAL;
- ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip,
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
(void *)(uintptr_t)tint);
if (ret)
return ret;
@@ -401,7 +527,8 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
return 0;
}
-static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
+static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
+ const struct irq_chip *irq_chip)
{
struct irq_domain *irq_domain, *parent_domain;
struct platform_device *pdev;
@@ -422,6 +549,8 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
if (!rzg2l_irqc_data)
return -ENOMEM;
+ rzg2l_irqc_data->irqchip = irq_chip;
+
rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
if (IS_ERR(rzg2l_irqc_data->base))
return PTR_ERR(rzg2l_irqc_data->base);
@@ -472,8 +601,21 @@ pm_disable:
return ret;
}
+static int __init rzg2l_irqc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return rzg2l_irqc_common_init(node, parent, &rzg2l_irqc_chip);
+}
+
+static int __init rzfive_irqc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return rzg2l_irqc_common_init(node, parent, &rzfive_irqc_chip);
+}
+
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
+IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_init)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c
index 774a0c97fdab..28dd175b5764 100644
--- a/drivers/irqchip/irq-riscv-aplic-main.c
+++ b/drivers/irqchip/irq-riscv-aplic-main.c
@@ -127,6 +127,7 @@ static void aplic_init_hw_irqs(struct aplic_priv *priv)
int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs)
{
+ struct device_node *np = to_of_node(dev->fwnode);
struct of_phandle_args parent;
int rc;
@@ -134,7 +135,7 @@ int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *
* Currently, only OF fwnode is supported so extend this
* function for ACPI support.
*/
- if (!is_of_node(dev->fwnode))
+ if (!np)
return -EINVAL;
/* Save device pointer and register base */
@@ -142,8 +143,7 @@ int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *
priv->regs = regs;
/* Find out number of interrupt sources */
- rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,num-sources",
- &priv->nr_irqs);
+ rc = of_property_read_u32(np, "riscv,num-sources", &priv->nr_irqs);
if (rc) {
dev_err(dev, "failed to get number of interrupt sources\n");
return rc;
@@ -155,8 +155,8 @@ int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *
* If "msi-parent" property is present then we ignore the
* APLIC IDCs which forces the APLIC driver to use MSI mode.
*/
- if (!of_property_present(to_of_node(dev->fwnode), "msi-parent")) {
- while (!of_irq_parse_one(to_of_node(dev->fwnode), priv->nr_idcs, &parent))
+ if (!of_property_present(np, "msi-parent")) {
+ while (!of_irq_parse_one(np, priv->nr_idcs, &parent))
priv->nr_idcs++;
}
@@ -184,8 +184,7 @@ static int aplic_probe(struct platform_device *pdev)
* If msi-parent property is present then setup APLIC MSI
* mode otherwise setup APLIC direct mode.
*/
- if (is_of_node(dev->fwnode))
- msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent");
+ msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent");
if (msi_mode)
rc = aplic_msi_setup(dev, regs);
else
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index 4f3a12383a1e..47f3200476da 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -26,7 +26,7 @@ static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
-static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
+static void riscv_intc_irq(struct pt_regs *regs)
{
unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
@@ -34,7 +34,7 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
}
-static asmlinkage void riscv_intc_aia_irq(struct pt_regs *regs)
+static void riscv_intc_aia_irq(struct pt_regs *regs)
{
unsigned long topi;
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 2cc9f3b7d669..7c6a0080c330 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -1,45 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Maxime Coquelin 2015
- * Copyright (C) STMicroelectronics 2017
+ * Copyright (C) STMicroelectronics 2017-2024
* Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
*/
#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/hwspinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-
-#include <dt-bindings/interrupt-controller/arm-gic.h>
#define IRQS_PER_BANK 32
-#define HWSPNLCK_TIMEOUT 1000 /* usec */
-
-#define EXTI_EnCIDCFGR(n) (0x180 + (n) * 4)
-#define EXTI_HWCFGR1 0x3f0
-
-/* Register: EXTI_EnCIDCFGR(n) */
-#define EXTI_CIDCFGR_CFEN_MASK BIT(0)
-#define EXTI_CIDCFGR_CID_MASK GENMASK(6, 4)
-#define EXTI_CIDCFGR_CID_SHIFT 4
-
-/* Register: EXTI_HWCFGR1 */
-#define EXTI_HWCFGR1_CIDWIDTH_MASK GENMASK(27, 24)
-
-#define EXTI_CID1 1
-
struct stm32_exti_bank {
u32 imr_ofst;
u32 emr_ofst;
@@ -47,13 +24,8 @@ struct stm32_exti_bank {
u32 ftsr_ofst;
u32 swier_ofst;
u32 rpr_ofst;
- u32 fpr_ofst;
- u32 trg_ofst;
- u32 seccfgr_ofst;
};
-#define UNDEF_REG ~0
-
struct stm32_exti_drv_data {
const struct stm32_exti_bank **exti_banks;
const u8 *desc_irqs;
@@ -63,7 +35,6 @@ struct stm32_exti_drv_data {
struct stm32_exti_chip_data {
struct stm32_exti_host_data *host_data;
const struct stm32_exti_bank *reg_bank;
- struct raw_spinlock rlock;
u32 wake_active;
u32 mask_cache;
u32 rtsr_cache;
@@ -76,8 +47,6 @@ struct stm32_exti_host_data {
struct device *dev;
struct stm32_exti_chip_data *chips_data;
const struct stm32_exti_drv_data *drv_data;
- struct hwspinlock *hwlock;
- bool dt_has_irqs_desc; /* skip internal desc_irqs array and get it from DT */
};
static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
@@ -87,9 +56,6 @@ static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
.ftsr_ofst = 0x0C,
.swier_ofst = 0x10,
.rpr_ofst = 0x14,
- .fpr_ofst = UNDEF_REG,
- .trg_ofst = UNDEF_REG,
- .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
@@ -108,9 +74,6 @@ static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
.ftsr_ofst = 0x04,
.swier_ofst = 0x08,
.rpr_ofst = 0x88,
- .fpr_ofst = UNDEF_REG,
- .trg_ofst = UNDEF_REG,
- .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
@@ -120,9 +83,6 @@ static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
.ftsr_ofst = 0x24,
.swier_ofst = 0x28,
.rpr_ofst = 0x98,
- .fpr_ofst = UNDEF_REG,
- .trg_ofst = UNDEF_REG,
- .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
@@ -132,9 +92,6 @@ static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
.ftsr_ofst = 0x44,
.swier_ofst = 0x48,
.rpr_ofst = 0xA8,
- .fpr_ofst = UNDEF_REG,
- .trg_ofst = UNDEF_REG,
- .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
@@ -148,183 +105,12 @@ static const struct stm32_exti_drv_data stm32h7xx_drv_data = {
.bank_nr = ARRAY_SIZE(stm32h7xx_exti_banks),
};
-static const struct stm32_exti_bank stm32mp1_exti_b1 = {
- .imr_ofst = 0x80,
- .emr_ofst = UNDEF_REG,
- .rtsr_ofst = 0x00,
- .ftsr_ofst = 0x04,
- .swier_ofst = 0x08,
- .rpr_ofst = 0x0C,
- .fpr_ofst = 0x10,
- .trg_ofst = 0x3EC,
- .seccfgr_ofst = 0x14,
-};
-
-static const struct stm32_exti_bank stm32mp1_exti_b2 = {
- .imr_ofst = 0x90,
- .emr_ofst = UNDEF_REG,
- .rtsr_ofst = 0x20,
- .ftsr_ofst = 0x24,
- .swier_ofst = 0x28,
- .rpr_ofst = 0x2C,
- .fpr_ofst = 0x30,
- .trg_ofst = 0x3E8,
- .seccfgr_ofst = 0x34,
-};
-
-static const struct stm32_exti_bank stm32mp1_exti_b3 = {
- .imr_ofst = 0xA0,
- .emr_ofst = UNDEF_REG,
- .rtsr_ofst = 0x40,
- .ftsr_ofst = 0x44,
- .swier_ofst = 0x48,
- .rpr_ofst = 0x4C,
- .fpr_ofst = 0x50,
- .trg_ofst = 0x3E4,
- .seccfgr_ofst = 0x54,
-};
-
-static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
- &stm32mp1_exti_b1,
- &stm32mp1_exti_b2,
- &stm32mp1_exti_b3,
-};
-
-static struct irq_chip stm32_exti_h_chip;
-static struct irq_chip stm32_exti_h_chip_direct;
-
-#define EXTI_INVALID_IRQ U8_MAX
-#define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK)
-
-/*
- * Use some intentionally tricky logic here to initialize the whole array to
- * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate
- * that we "know" that there are overrides in this structure, and we'll need to
- * disable that warning from W=1 builds.
- */
-__diag_push();
-__diag_ignore_all("-Woverride-init",
- "logic to initialize all and then override some is OK");
-
-static const u8 stm32mp1_desc_irq[] = {
- /* default value */
- [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
-
- [0] = 6,
- [1] = 7,
- [2] = 8,
- [3] = 9,
- [4] = 10,
- [5] = 23,
- [6] = 64,
- [7] = 65,
- [8] = 66,
- [9] = 67,
- [10] = 40,
- [11] = 42,
- [12] = 76,
- [13] = 77,
- [14] = 121,
- [15] = 127,
- [16] = 1,
- [19] = 3,
- [21] = 31,
- [22] = 33,
- [23] = 72,
- [24] = 95,
- [25] = 107,
- [26] = 37,
- [27] = 38,
- [28] = 39,
- [29] = 71,
- [30] = 52,
- [31] = 53,
- [32] = 82,
- [33] = 83,
- [46] = 151,
- [47] = 93,
- [48] = 138,
- [50] = 139,
- [52] = 140,
- [53] = 141,
- [54] = 135,
- [61] = 100,
- [65] = 144,
- [68] = 143,
- [70] = 62,
- [73] = 129,
-};
-
-static const u8 stm32mp13_desc_irq[] = {
- /* default value */
- [0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
-
- [0] = 6,
- [1] = 7,
- [2] = 8,
- [3] = 9,
- [4] = 10,
- [5] = 24,
- [6] = 65,
- [7] = 66,
- [8] = 67,
- [9] = 68,
- [10] = 41,
- [11] = 43,
- [12] = 77,
- [13] = 78,
- [14] = 106,
- [15] = 109,
- [16] = 1,
- [19] = 3,
- [21] = 32,
- [22] = 34,
- [23] = 73,
- [24] = 93,
- [25] = 114,
- [26] = 38,
- [27] = 39,
- [28] = 40,
- [29] = 72,
- [30] = 53,
- [31] = 54,
- [32] = 83,
- [33] = 84,
- [44] = 96,
- [47] = 92,
- [48] = 116,
- [50] = 117,
- [52] = 118,
- [53] = 119,
- [68] = 63,
- [70] = 98,
-};
-
-__diag_pop();
-
-static const struct stm32_exti_drv_data stm32mp1_drv_data = {
- .exti_banks = stm32mp1_exti_banks,
- .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
- .desc_irqs = stm32mp1_desc_irq,
-};
-
-static const struct stm32_exti_drv_data stm32mp13_drv_data = {
- .exti_banks = stm32mp1_exti_banks,
- .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
- .desc_irqs = stm32mp13_desc_irq,
-};
-
static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
{
struct stm32_exti_chip_data *chip_data = gc->private;
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- unsigned long pending;
- pending = irq_reg_readl(gc, stm32_bank->rpr_ofst);
- if (stm32_bank->fpr_ofst != UNDEF_REG)
- pending |= irq_reg_readl(gc, stm32_bank->fpr_ofst);
-
- return pending;
+ return irq_reg_readl(gc, stm32_bank->rpr_ofst);
}
static void stm32_irq_handler(struct irq_desc *desc)
@@ -380,33 +166,21 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct stm32_exti_chip_data *chip_data = gc->private;
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- struct hwspinlock *hwlock = chip_data->host_data->hwlock;
u32 rtsr, ftsr;
int err;
irq_gc_lock(gc);
- if (hwlock) {
- err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
- if (err) {
- pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
- goto unlock;
- }
- }
-
rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
if (err)
- goto unspinlock;
+ goto unlock;
irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
-unspinlock:
- if (hwlock)
- hwspin_unlock_in_atomic(hwlock);
unlock:
irq_gc_unlock(gc);
@@ -494,287 +268,10 @@ static void stm32_irq_ack(struct irq_data *d)
irq_gc_lock(gc);
irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
- if (stm32_bank->fpr_ofst != UNDEF_REG)
- irq_reg_writel(gc, d->mask, stm32_bank->fpr_ofst);
irq_gc_unlock(gc);
}
-/* directly set the target bit without reading first. */
-static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- void __iomem *base = chip_data->host_data->base;
- u32 val = BIT(d->hwirq % IRQS_PER_BANK);
-
- writel_relaxed(val, base + reg);
-}
-
-static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- void __iomem *base = chip_data->host_data->base;
- u32 val;
-
- val = readl_relaxed(base + reg);
- val |= BIT(d->hwirq % IRQS_PER_BANK);
- writel_relaxed(val, base + reg);
-
- return val;
-}
-
-static inline u32 stm32_exti_clr_bit(struct irq_data *d, u32 reg)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- void __iomem *base = chip_data->host_data->base;
- u32 val;
-
- val = readl_relaxed(base + reg);
- val &= ~BIT(d->hwirq % IRQS_PER_BANK);
- writel_relaxed(val, base + reg);
-
- return val;
-}
-
-static void stm32_exti_h_eoi(struct irq_data *d)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
-
- raw_spin_lock(&chip_data->rlock);
-
- stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
- if (stm32_bank->fpr_ofst != UNDEF_REG)
- stm32_exti_write_bit(d, stm32_bank->fpr_ofst);
-
- raw_spin_unlock(&chip_data->rlock);
-
- if (d->parent_data->chip)
- irq_chip_eoi_parent(d);
-}
-
-static void stm32_exti_h_mask(struct irq_data *d)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
-
- raw_spin_lock(&chip_data->rlock);
- chip_data->mask_cache = stm32_exti_clr_bit(d, stm32_bank->imr_ofst);
- raw_spin_unlock(&chip_data->rlock);
-
- if (d->parent_data->chip)
- irq_chip_mask_parent(d);
-}
-
-static void stm32_exti_h_unmask(struct irq_data *d)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
-
- raw_spin_lock(&chip_data->rlock);
- chip_data->mask_cache = stm32_exti_set_bit(d, stm32_bank->imr_ofst);
- raw_spin_unlock(&chip_data->rlock);
-
- if (d->parent_data->chip)
- irq_chip_unmask_parent(d);
-}
-
-static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- struct hwspinlock *hwlock = chip_data->host_data->hwlock;
- void __iomem *base = chip_data->host_data->base;
- u32 rtsr, ftsr;
- int err;
-
- raw_spin_lock(&chip_data->rlock);
-
- if (hwlock) {
- err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
- if (err) {
- pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
- goto unlock;
- }
- }
-
- rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
- ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);
-
- err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
- if (err)
- goto unspinlock;
-
- writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst);
- writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);
-
-unspinlock:
- if (hwlock)
- hwspin_unlock_in_atomic(hwlock);
-unlock:
- raw_spin_unlock(&chip_data->rlock);
-
- return err;
-}
-
-static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
-
- raw_spin_lock(&chip_data->rlock);
-
- if (on)
- chip_data->wake_active |= mask;
- else
- chip_data->wake_active &= ~mask;
-
- raw_spin_unlock(&chip_data->rlock);
-
- return 0;
-}
-
-static int stm32_exti_h_set_affinity(struct irq_data *d,
- const struct cpumask *dest, bool force)
-{
- if (d->parent_data->chip)
- return irq_chip_set_affinity_parent(d, dest, force);
-
- return IRQ_SET_MASK_OK_DONE;
-}
-
-static int stm32_exti_h_suspend(struct device *dev)
-{
- struct stm32_exti_host_data *host_data = dev_get_drvdata(dev);
- struct stm32_exti_chip_data *chip_data;
- int i;
-
- for (i = 0; i < host_data->drv_data->bank_nr; i++) {
- chip_data = &host_data->chips_data[i];
- stm32_chip_suspend(chip_data, chip_data->wake_active);
- }
-
- return 0;
-}
-
-static int stm32_exti_h_resume(struct device *dev)
-{
- struct stm32_exti_host_data *host_data = dev_get_drvdata(dev);
- struct stm32_exti_chip_data *chip_data;
- int i;
-
- for (i = 0; i < host_data->drv_data->bank_nr; i++) {
- chip_data = &host_data->chips_data[i];
- stm32_chip_resume(chip_data, chip_data->mask_cache);
- }
-
- return 0;
-}
-
-static int stm32_exti_h_retrigger(struct irq_data *d)
-{
- struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
- const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- void __iomem *base = chip_data->host_data->base;
- u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
-
- writel_relaxed(mask, base + stm32_bank->swier_ofst);
-
- return 0;
-}
-
-static struct irq_chip stm32_exti_h_chip = {
- .name = "stm32-exti-h",
- .irq_eoi = stm32_exti_h_eoi,
- .irq_mask = stm32_exti_h_mask,
- .irq_unmask = stm32_exti_h_unmask,
- .irq_retrigger = stm32_exti_h_retrigger,
- .irq_set_type = stm32_exti_h_set_type,
- .irq_set_wake = stm32_exti_h_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
- .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
-};
-
-static struct irq_chip stm32_exti_h_chip_direct = {
- .name = "stm32-exti-h-direct",
- .irq_eoi = irq_chip_eoi_parent,
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = stm32_exti_h_mask,
- .irq_unmask = stm32_exti_h_unmask,
- .irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_set_type = irq_chip_set_type_parent,
- .irq_set_wake = stm32_exti_h_set_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
- .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL,
-};
-
-static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
- unsigned int virq,
- unsigned int nr_irqs, void *data)
-{
- struct stm32_exti_host_data *host_data = dm->host_data;
- struct stm32_exti_chip_data *chip_data;
- u8 desc_irq;
- struct irq_fwspec *fwspec = data;
- struct irq_fwspec p_fwspec;
- irq_hw_number_t hwirq;
- int bank;
- u32 event_trg;
- struct irq_chip *chip;
-
- hwirq = fwspec->param[0];
- if (hwirq >= host_data->drv_data->bank_nr * IRQS_PER_BANK)
- return -EINVAL;
-
- bank = hwirq / IRQS_PER_BANK;
- chip_data = &host_data->chips_data[bank];
-
- /* Check if event is reserved (Secure) */
- if (chip_data->event_reserved & BIT(hwirq % IRQS_PER_BANK)) {
- dev_err(host_data->dev, "event %lu is reserved, secure\n", hwirq);
- return -EPERM;
- }
-
- event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst);
- chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ?
- &stm32_exti_h_chip : &stm32_exti_h_chip_direct;
-
- irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
-
- if (host_data->dt_has_irqs_desc) {
- struct of_phandle_args out_irq;
- int ret;
-
- ret = of_irq_parse_one(host_data->dev->of_node, hwirq, &out_irq);
- if (ret)
- return ret;
- /* we only support one parent, so far */
- if (of_node_to_fwnode(out_irq.np) != dm->parent->fwnode)
- return -EINVAL;
-
- of_phandle_args_to_fwspec(out_irq.np, out_irq.args,
- out_irq.args_count, &p_fwspec);
-
- return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
- }
-
- if (!host_data->drv_data->desc_irqs)
- return -EINVAL;
-
- desc_irq = host_data->drv_data->desc_irqs[hwirq];
- if (desc_irq != EXTI_INVALID_IRQ) {
- p_fwspec.fwnode = dm->parent->fwnode;
- p_fwspec.param_count = 3;
- p_fwspec.param[0] = GIC_SPI;
- p_fwspec.param[1] = desc_irq;
- p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
-
- return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
- }
-
- return 0;
-}
-
static struct
stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
struct device_node *node)
@@ -822,19 +319,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
chip_data->host_data = h_data;
chip_data->reg_bank = stm32_bank;
- raw_spin_lock_init(&chip_data->rlock);
-
/*
* This IP has no reset, so after hot reboot we should
* clear registers to avoid residue
*/
writel_relaxed(0, base + stm32_bank->imr_ofst);
- if (stm32_bank->emr_ofst != UNDEF_REG)
- writel_relaxed(0, base + stm32_bank->emr_ofst);
-
- /* reserve Secure events */
- if (stm32_bank->seccfgr_ofst != UNDEF_REG)
- chip_data->event_reserved = readl_relaxed(base + stm32_bank->seccfgr_ofst);
+ writel_relaxed(0, base + stm32_bank->emr_ofst);
pr_info("%pOF: bank%d\n", node, bank_idx);
@@ -914,158 +404,6 @@ out_unmap:
return ret;
}
-static const struct irq_domain_ops stm32_exti_h_domain_ops = {
- .alloc = stm32_exti_h_domain_alloc,
- .free = irq_domain_free_irqs_common,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static void stm32_exti_check_rif(struct stm32_exti_host_data *host_data)
-{
- unsigned int bank, i, event;
- u32 cid, cidcfgr, hwcfgr1;
-
- /* quit on CID not supported */
- hwcfgr1 = readl_relaxed(host_data->base + EXTI_HWCFGR1);
- if ((hwcfgr1 & EXTI_HWCFGR1_CIDWIDTH_MASK) == 0)
- return;
-
- for (bank = 0; bank < host_data->drv_data->bank_nr; bank++) {
- for (i = 0; i < IRQS_PER_BANK; i++) {
- event = bank * IRQS_PER_BANK + i;
- cidcfgr = readl_relaxed(host_data->base + EXTI_EnCIDCFGR(event));
- cid = (cidcfgr & EXTI_CIDCFGR_CID_MASK) >> EXTI_CIDCFGR_CID_SHIFT;
- if ((cidcfgr & EXTI_CIDCFGR_CFEN_MASK) && cid != EXTI_CID1)
- host_data->chips_data[bank].event_reserved |= BIT(i);
- }
- }
-}
-
-static void stm32_exti_remove_irq(void *data)
-{
- struct irq_domain *domain = data;
-
- irq_domain_remove(domain);
-}
-
-static int stm32_exti_probe(struct platform_device *pdev)
-{
- int ret, i;
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct irq_domain *parent_domain, *domain;
- struct stm32_exti_host_data *host_data;
- const struct stm32_exti_drv_data *drv_data;
-
- host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
- if (!host_data)
- return -ENOMEM;
-
- dev_set_drvdata(dev, host_data);
- host_data->dev = dev;
-
- /* check for optional hwspinlock which may be not available yet */
- ret = of_hwspin_lock_get_id(np, 0);
- if (ret == -EPROBE_DEFER)
- /* hwspinlock framework not yet ready */
- return ret;
-
- if (ret >= 0) {
- host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
- if (!host_data->hwlock) {
- dev_err(dev, "Failed to request hwspinlock\n");
- return -EINVAL;
- }
- } else if (ret != -ENOENT) {
- /* note: ENOENT is a valid case (means 'no hwspinlock') */
- dev_err(dev, "Failed to get hwspinlock\n");
- return ret;
- }
-
- /* initialize host_data */
- drv_data = of_device_get_match_data(dev);
- if (!drv_data) {
- dev_err(dev, "no of match data\n");
- return -ENODEV;
- }
- host_data->drv_data = drv_data;
-
- host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr,
- sizeof(*host_data->chips_data),
- GFP_KERNEL);
- if (!host_data->chips_data)
- return -ENOMEM;
-
- host_data->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(host_data->base))
- return PTR_ERR(host_data->base);
-
- for (i = 0; i < drv_data->bank_nr; i++)
- stm32_exti_chip_init(host_data, i, np);
-
- stm32_exti_check_rif(host_data);
-
- parent_domain = irq_find_host(of_irq_find_parent(np));
- if (!parent_domain) {
- dev_err(dev, "GIC interrupt-parent not found\n");
- return -EINVAL;
- }
-
- domain = irq_domain_add_hierarchy(parent_domain, 0,
- drv_data->bank_nr * IRQS_PER_BANK,
- np, &stm32_exti_h_domain_ops,
- host_data);
-
- if (!domain) {
- dev_err(dev, "Could not register exti domain\n");
- return -ENOMEM;
- }
-
- ret = devm_add_action_or_reset(dev, stm32_exti_remove_irq, domain);
- if (ret)
- return ret;
-
- if (of_property_read_bool(np, "interrupts-extended"))
- host_data->dt_has_irqs_desc = true;
-
- return 0;
-}
-
-/* platform driver only for MP1 */
-static const struct of_device_id stm32_exti_ids[] = {
- { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
- { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data},
- {},
-};
-MODULE_DEVICE_TABLE(of, stm32_exti_ids);
-
-static const struct dev_pm_ops stm32_exti_dev_pm_ops = {
- NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_exti_h_suspend, stm32_exti_h_resume)
-};
-
-static struct platform_driver stm32_exti_driver = {
- .probe = stm32_exti_probe,
- .driver = {
- .name = "stm32_exti",
- .of_match_table = stm32_exti_ids,
- .pm = &stm32_exti_dev_pm_ops,
- },
-};
-
-static int __init stm32_exti_arch_init(void)
-{
- return platform_driver_register(&stm32_exti_driver);
-}
-
-static void __exit stm32_exti_arch_exit(void)
-{
- return platform_driver_unregister(&stm32_exti_driver);
-}
-
-arch_initcall(stm32_exti_arch_init);
-module_exit(stm32_exti_arch_exit);
-
-/* no platform driver for F4 and H7 */
static int __init stm32f4_exti_of_init(struct device_node *np,
struct device_node *parent)
{
diff --git a/drivers/irqchip/irq-stm32mp-exti.c b/drivers/irqchip/irq-stm32mp-exti.c
new file mode 100644
index 000000000000..33e0cfdea654
--- /dev/null
+++ b/drivers/irqchip/irq-stm32mp-exti.c
@@ -0,0 +1,729 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Copyright (C) STMicroelectronics 2017-2024
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwspinlock.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define IRQS_PER_BANK 32
+
+#define HWSPNLCK_TIMEOUT 1000 /* usec */
+
+#define EXTI_EnCIDCFGR(n) (0x180 + (n) * 4)
+#define EXTI_HWCFGR1 0x3f0
+
+/* Register: EXTI_EnCIDCFGR(n) */
+#define EXTI_CIDCFGR_CFEN_MASK BIT(0)
+#define EXTI_CIDCFGR_CID_MASK GENMASK(6, 4)
+#define EXTI_CIDCFGR_CID_SHIFT 4
+
+/* Register: EXTI_HWCFGR1 */
+#define EXTI_HWCFGR1_CIDWIDTH_MASK GENMASK(27, 24)
+
+#define EXTI_CID1 1
+
+struct stm32mp_exti_bank {
+ u32 imr_ofst;
+ u32 rtsr_ofst;
+ u32 ftsr_ofst;
+ u32 swier_ofst;
+ u32 rpr_ofst;
+ u32 fpr_ofst;
+ u32 trg_ofst;
+ u32 seccfgr_ofst;
+};
+
+struct stm32mp_exti_drv_data {
+ const struct stm32mp_exti_bank **exti_banks;
+ const u8 *desc_irqs;
+ u32 bank_nr;
+};
+
+struct stm32mp_exti_chip_data {
+ struct stm32mp_exti_host_data *host_data;
+ const struct stm32mp_exti_bank *reg_bank;
+ struct raw_spinlock rlock;
+ u32 wake_active;
+ u32 mask_cache;
+ u32 rtsr_cache;
+ u32 ftsr_cache;
+ u32 event_reserved;
+};
+
+struct stm32mp_exti_host_data {
+ void __iomem *base;
+ struct device *dev;
+ struct stm32mp_exti_chip_data *chips_data;
+ const struct stm32mp_exti_drv_data *drv_data;
+ struct hwspinlock *hwlock;
+ /* skip internal desc_irqs array and get it from DT */
+ bool dt_has_irqs_desc;
+};
+
+static const struct stm32mp_exti_bank stm32mp_exti_b1 = {
+ .imr_ofst = 0x80,
+ .rtsr_ofst = 0x00,
+ .ftsr_ofst = 0x04,
+ .swier_ofst = 0x08,
+ .rpr_ofst = 0x0C,
+ .fpr_ofst = 0x10,
+ .trg_ofst = 0x3EC,
+ .seccfgr_ofst = 0x14,
+};
+
+static const struct stm32mp_exti_bank stm32mp_exti_b2 = {
+ .imr_ofst = 0x90,
+ .rtsr_ofst = 0x20,
+ .ftsr_ofst = 0x24,
+ .swier_ofst = 0x28,
+ .rpr_ofst = 0x2C,
+ .fpr_ofst = 0x30,
+ .trg_ofst = 0x3E8,
+ .seccfgr_ofst = 0x34,
+};
+
+static const struct stm32mp_exti_bank stm32mp_exti_b3 = {
+ .imr_ofst = 0xA0,
+ .rtsr_ofst = 0x40,
+ .ftsr_ofst = 0x44,
+ .swier_ofst = 0x48,
+ .rpr_ofst = 0x4C,
+ .fpr_ofst = 0x50,
+ .trg_ofst = 0x3E4,
+ .seccfgr_ofst = 0x54,
+};
+
+static const struct stm32mp_exti_bank *stm32mp_exti_banks[] = {
+ &stm32mp_exti_b1,
+ &stm32mp_exti_b2,
+ &stm32mp_exti_b3,
+};
+
+static struct irq_chip stm32mp_exti_chip;
+static struct irq_chip stm32mp_exti_chip_direct;
+
+#define EXTI_INVALID_IRQ U8_MAX
+#define STM32MP_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp_exti_banks) * IRQS_PER_BANK)
+
+/*
+ * Use some intentionally tricky logic here to initialize the whole array to
+ * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate
+ * that we "know" that there are overrides in this structure, and we'll need to
+ * disable that warning from W=1 builds.
+ */
+__diag_push();
+__diag_ignore_all("-Woverride-init",
+ "logic to initialize all and then override some is OK");
+
+static const u8 stm32mp1_desc_irq[] = {
+ /* default value */
+ [0 ... (STM32MP_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+
+ [0] = 6,
+ [1] = 7,
+ [2] = 8,
+ [3] = 9,
+ [4] = 10,
+ [5] = 23,
+ [6] = 64,
+ [7] = 65,
+ [8] = 66,
+ [9] = 67,
+ [10] = 40,
+ [11] = 42,
+ [12] = 76,
+ [13] = 77,
+ [14] = 121,
+ [15] = 127,
+ [16] = 1,
+ [19] = 3,
+ [21] = 31,
+ [22] = 33,
+ [23] = 72,
+ [24] = 95,
+ [25] = 107,
+ [26] = 37,
+ [27] = 38,
+ [28] = 39,
+ [29] = 71,
+ [30] = 52,
+ [31] = 53,
+ [32] = 82,
+ [33] = 83,
+ [46] = 151,
+ [47] = 93,
+ [48] = 138,
+ [50] = 139,
+ [52] = 140,
+ [53] = 141,
+ [54] = 135,
+ [61] = 100,
+ [65] = 144,
+ [68] = 143,
+ [70] = 62,
+ [73] = 129,
+};
+
+static const u8 stm32mp13_desc_irq[] = {
+ /* default value */
+ [0 ... (STM32MP_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
+
+ [0] = 6,
+ [1] = 7,
+ [2] = 8,
+ [3] = 9,
+ [4] = 10,
+ [5] = 24,
+ [6] = 65,
+ [7] = 66,
+ [8] = 67,
+ [9] = 68,
+ [10] = 41,
+ [11] = 43,
+ [12] = 77,
+ [13] = 78,
+ [14] = 106,
+ [15] = 109,
+ [16] = 1,
+ [19] = 3,
+ [21] = 32,
+ [22] = 34,
+ [23] = 73,
+ [24] = 93,
+ [25] = 114,
+ [26] = 38,
+ [27] = 39,
+ [28] = 40,
+ [29] = 72,
+ [30] = 53,
+ [31] = 54,
+ [32] = 83,
+ [33] = 84,
+ [44] = 96,
+ [47] = 92,
+ [48] = 116,
+ [50] = 117,
+ [52] = 118,
+ [53] = 119,
+ [68] = 63,
+ [70] = 98,
+};
+
+__diag_pop();
+
+static const struct stm32mp_exti_drv_data stm32mp1_drv_data = {
+ .exti_banks = stm32mp_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32mp_exti_banks),
+ .desc_irqs = stm32mp1_desc_irq,
+};
+
+static const struct stm32mp_exti_drv_data stm32mp13_drv_data = {
+ .exti_banks = stm32mp_exti_banks,
+ .bank_nr = ARRAY_SIZE(stm32mp_exti_banks),
+ .desc_irqs = stm32mp13_desc_irq,
+};
+
+static int stm32mp_exti_convert_type(struct irq_data *d, unsigned int type, u32 *rtsr, u32 *ftsr)
+{
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ *rtsr |= mask;
+ *ftsr &= ~mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ *rtsr &= ~mask;
+ *ftsr |= mask;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ *rtsr |= mask;
+ *ftsr |= mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void stm32mp_chip_suspend(struct stm32mp_exti_chip_data *chip_data, u32 wake_active)
+{
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+
+ /* save rtsr, ftsr registers */
+ chip_data->rtsr_cache = readl_relaxed(base + bank->rtsr_ofst);
+ chip_data->ftsr_cache = readl_relaxed(base + bank->ftsr_ofst);
+
+ writel_relaxed(wake_active, base + bank->imr_ofst);
+}
+
+static void stm32mp_chip_resume(struct stm32mp_exti_chip_data *chip_data, u32 mask_cache)
+{
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+
+ /* restore rtsr, ftsr, registers */
+ writel_relaxed(chip_data->rtsr_cache, base + bank->rtsr_ofst);
+ writel_relaxed(chip_data->ftsr_cache, base + bank->ftsr_ofst);
+
+ writel_relaxed(mask_cache, base + bank->imr_ofst);
+}
+
+/* directly set the target bit without reading first. */
+static inline void stm32mp_exti_write_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(val, base + reg);
+}
+
+static inline u32 stm32mp_exti_set_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val;
+
+ val = readl_relaxed(base + reg);
+ val |= BIT(d->hwirq % IRQS_PER_BANK);
+ writel_relaxed(val, base + reg);
+
+ return val;
+}
+
+static inline u32 stm32mp_exti_clr_bit(struct irq_data *d, u32 reg)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ void __iomem *base = chip_data->host_data->base;
+ u32 val;
+
+ val = readl_relaxed(base + reg);
+ val &= ~BIT(d->hwirq % IRQS_PER_BANK);
+ writel_relaxed(val, base + reg);
+
+ return val;
+}
+
+static void stm32mp_exti_eoi(struct irq_data *d)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+
+ raw_spin_lock(&chip_data->rlock);
+
+ stm32mp_exti_write_bit(d, bank->rpr_ofst);
+ stm32mp_exti_write_bit(d, bank->fpr_ofst);
+
+ raw_spin_unlock(&chip_data->rlock);
+
+ if (d->parent_data->chip)
+ irq_chip_eoi_parent(d);
+}
+
+static void stm32mp_exti_mask(struct irq_data *d)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+
+ raw_spin_lock(&chip_data->rlock);
+ chip_data->mask_cache = stm32mp_exti_clr_bit(d, bank->imr_ofst);
+ raw_spin_unlock(&chip_data->rlock);
+
+ if (d->parent_data->chip)
+ irq_chip_mask_parent(d);
+}
+
+static void stm32mp_exti_unmask(struct irq_data *d)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+
+ raw_spin_lock(&chip_data->rlock);
+ chip_data->mask_cache = stm32mp_exti_set_bit(d, bank->imr_ofst);
+ raw_spin_unlock(&chip_data->rlock);
+
+ if (d->parent_data->chip)
+ irq_chip_unmask_parent(d);
+}
+
+static int stm32mp_exti_set_type(struct irq_data *d, unsigned int type)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+ struct hwspinlock *hwlock = chip_data->host_data->hwlock;
+ void __iomem *base = chip_data->host_data->base;
+ u32 rtsr, ftsr;
+ int err;
+
+ raw_spin_lock(&chip_data->rlock);
+
+ if (hwlock) {
+ err = hwspin_lock_timeout_in_atomic(hwlock, HWSPNLCK_TIMEOUT);
+ if (err) {
+ pr_err("%s can't get hwspinlock (%d)\n", __func__, err);
+ goto unlock;
+ }
+ }
+
+ rtsr = readl_relaxed(base + bank->rtsr_ofst);
+ ftsr = readl_relaxed(base + bank->ftsr_ofst);
+
+ err = stm32mp_exti_convert_type(d, type, &rtsr, &ftsr);
+ if (!err) {
+ writel_relaxed(rtsr, base + bank->rtsr_ofst);
+ writel_relaxed(ftsr, base + bank->ftsr_ofst);
+ }
+
+ if (hwlock)
+ hwspin_unlock_in_atomic(hwlock);
+unlock:
+ raw_spin_unlock(&chip_data->rlock);
+ return err;
+}
+
+static int stm32mp_exti_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ raw_spin_lock(&chip_data->rlock);
+
+ if (on)
+ chip_data->wake_active |= mask;
+ else
+ chip_data->wake_active &= ~mask;
+
+ raw_spin_unlock(&chip_data->rlock);
+
+ return 0;
+}
+
+static int stm32mp_exti_set_affinity(struct irq_data *d, const struct cpumask *dest, bool force)
+{
+ if (d->parent_data->chip)
+ return irq_chip_set_affinity_parent(d, dest, force);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static int stm32mp_exti_suspend(struct device *dev)
+{
+ struct stm32mp_exti_host_data *host_data = dev_get_drvdata(dev);
+ struct stm32mp_exti_chip_data *chip_data;
+ int i;
+
+ for (i = 0; i < host_data->drv_data->bank_nr; i++) {
+ chip_data = &host_data->chips_data[i];
+ stm32mp_chip_suspend(chip_data, chip_data->wake_active);
+ }
+
+ return 0;
+}
+
+static int stm32mp_exti_resume(struct device *dev)
+{
+ struct stm32mp_exti_host_data *host_data = dev_get_drvdata(dev);
+ struct stm32mp_exti_chip_data *chip_data;
+ int i;
+
+ for (i = 0; i < host_data->drv_data->bank_nr; i++) {
+ chip_data = &host_data->chips_data[i];
+ stm32mp_chip_resume(chip_data, chip_data->mask_cache);
+ }
+
+ return 0;
+}
+
+static int stm32mp_exti_retrigger(struct irq_data *d)
+{
+ struct stm32mp_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ const struct stm32mp_exti_bank *bank = chip_data->reg_bank;
+ void __iomem *base = chip_data->host_data->base;
+ u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
+
+ writel_relaxed(mask, base + bank->swier_ofst);
+
+ return 0;
+}
+
+static struct irq_chip stm32mp_exti_chip = {
+ .name = "stm32mp-exti",
+ .irq_eoi = stm32mp_exti_eoi,
+ .irq_mask = stm32mp_exti_mask,
+ .irq_unmask = stm32mp_exti_unmask,
+ .irq_retrigger = stm32mp_exti_retrigger,
+ .irq_set_type = stm32mp_exti_set_type,
+ .irq_set_wake = stm32mp_exti_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32mp_exti_set_affinity : NULL,
+};
+
+static struct irq_chip stm32mp_exti_chip_direct = {
+ .name = "stm32mp-exti-direct",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = stm32mp_exti_mask,
+ .irq_unmask = stm32mp_exti_unmask,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_wake = stm32mp_exti_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
+ .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? irq_chip_set_affinity_parent : NULL,
+};
+
+static int stm32mp_exti_domain_alloc(struct irq_domain *dm,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct stm32mp_exti_host_data *host_data = dm->host_data;
+ struct stm32mp_exti_chip_data *chip_data;
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec p_fwspec;
+ irq_hw_number_t hwirq;
+ struct irq_chip *chip;
+ u32 event_trg;
+ u8 desc_irq;
+ int bank;
+
+ hwirq = fwspec->param[0];
+ if (hwirq >= host_data->drv_data->bank_nr * IRQS_PER_BANK)
+ return -EINVAL;
+
+ bank = hwirq / IRQS_PER_BANK;
+ chip_data = &host_data->chips_data[bank];
+
+ /* Check if event is reserved (Secure) */
+ if (chip_data->event_reserved & BIT(hwirq % IRQS_PER_BANK)) {
+ dev_err(host_data->dev, "event %lu is reserved, secure\n", hwirq);
+ return -EPERM;
+ }
+
+ event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst);
+ chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ?
+ &stm32mp_exti_chip : &stm32mp_exti_chip_direct;
+
+ irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
+
+ if (host_data->dt_has_irqs_desc) {
+ struct of_phandle_args out_irq;
+ int ret;
+
+ ret = of_irq_parse_one(host_data->dev->of_node, hwirq, &out_irq);
+ if (ret)
+ return ret;
+ /* we only support one parent, so far */
+ if (of_node_to_fwnode(out_irq.np) != dm->parent->fwnode)
+ return -EINVAL;
+
+ of_phandle_args_to_fwspec(out_irq.np, out_irq.args,
+ out_irq.args_count, &p_fwspec);
+
+ return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
+ }
+
+ if (!host_data->drv_data->desc_irqs)
+ return -EINVAL;
+
+ desc_irq = host_data->drv_data->desc_irqs[hwirq];
+ if (desc_irq != EXTI_INVALID_IRQ) {
+ p_fwspec.fwnode = dm->parent->fwnode;
+ p_fwspec.param_count = 3;
+ p_fwspec.param[0] = GIC_SPI;
+ p_fwspec.param[1] = desc_irq;
+ p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
+ }
+
+ return 0;
+}
+
+static struct stm32mp_exti_chip_data *stm32mp_exti_chip_init(struct stm32mp_exti_host_data *h_data,
+ u32 bank_idx, struct device_node *node)
+{
+ struct stm32mp_exti_chip_data *chip_data;
+ const struct stm32mp_exti_bank *bank;
+ void __iomem *base = h_data->base;
+
+ bank = h_data->drv_data->exti_banks[bank_idx];
+ chip_data = &h_data->chips_data[bank_idx];
+ chip_data->host_data = h_data;
+ chip_data->reg_bank = bank;
+
+ raw_spin_lock_init(&chip_data->rlock);
+
+ /*
+ * This IP has no reset, so after hot reboot we should
+ * clear registers to avoid residue
+ */
+ writel_relaxed(0, base + bank->imr_ofst);
+
+ /* reserve Secure events */
+ chip_data->event_reserved = readl_relaxed(base + bank->seccfgr_ofst);
+
+ pr_info("%pOF: bank%d\n", node, bank_idx);
+
+ return chip_data;
+}
+
+static const struct irq_domain_ops stm32mp_exti_domain_ops = {
+ .alloc = stm32mp_exti_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void stm32mp_exti_check_rif(struct stm32mp_exti_host_data *host_data)
+{
+ unsigned int bank, i, event;
+ u32 cid, cidcfgr, hwcfgr1;
+
+ /* quit on CID not supported */
+ hwcfgr1 = readl_relaxed(host_data->base + EXTI_HWCFGR1);
+ if ((hwcfgr1 & EXTI_HWCFGR1_CIDWIDTH_MASK) == 0)
+ return;
+
+ for (bank = 0; bank < host_data->drv_data->bank_nr; bank++) {
+ for (i = 0; i < IRQS_PER_BANK; i++) {
+ event = bank * IRQS_PER_BANK + i;
+ cidcfgr = readl_relaxed(host_data->base + EXTI_EnCIDCFGR(event));
+ cid = (cidcfgr & EXTI_CIDCFGR_CID_MASK) >> EXTI_CIDCFGR_CID_SHIFT;
+ if ((cidcfgr & EXTI_CIDCFGR_CFEN_MASK) && cid != EXTI_CID1)
+ host_data->chips_data[bank].event_reserved |= BIT(i);
+ }
+ }
+}
+
+static void stm32mp_exti_remove_irq(void *data)
+{
+ struct irq_domain *domain = data;
+
+ irq_domain_remove(domain);
+}
+
+static int stm32mp_exti_probe(struct platform_device *pdev)
+{
+ const struct stm32mp_exti_drv_data *drv_data;
+ struct irq_domain *parent_domain, *domain;
+ struct stm32mp_exti_host_data *host_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret, i;
+
+ host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
+ if (!host_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, host_data);
+ host_data->dev = dev;
+
+ /* check for optional hwspinlock which may not be available yet */
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret == -EPROBE_DEFER)
+ /* hwspinlock framework not yet ready */
+ return ret;
+
+ if (ret >= 0) {
+ host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
+ if (!host_data->hwlock) {
+ dev_err(dev, "Failed to request hwspinlock\n");
+ return -EINVAL;
+ }
+ } else if (ret != -ENOENT) {
+ /* note: ENOENT is a valid case (means 'no hwspinlock') */
+ dev_err(dev, "Failed to get hwspinlock\n");
+ return ret;
+ }
+
+ /* initialize host_data */
+ drv_data = of_device_get_match_data(dev);
+ if (!drv_data) {
+ dev_err(dev, "no of match data\n");
+ return -ENODEV;
+ }
+ host_data->drv_data = drv_data;
+
+ host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr,
+ sizeof(*host_data->chips_data),
+ GFP_KERNEL);
+ if (!host_data->chips_data)
+ return -ENOMEM;
+
+ host_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host_data->base))
+ return PTR_ERR(host_data->base);
+
+ for (i = 0; i < drv_data->bank_nr; i++)
+ stm32mp_exti_chip_init(host_data, i, np);
+
+ stm32mp_exti_check_rif(host_data);
+
+ parent_domain = irq_find_host(of_irq_find_parent(np));
+ if (!parent_domain) {
+ dev_err(dev, "GIC interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0,
+ drv_data->bank_nr * IRQS_PER_BANK,
+ np, &stm32mp_exti_domain_ops,
+ host_data);
+
+ if (!domain) {
+ dev_err(dev, "Could not register exti domain\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_add_action_or_reset(dev, stm32mp_exti_remove_irq, domain);
+ if (ret)
+ return ret;
+
+ if (of_property_read_bool(np, "interrupts-extended"))
+ host_data->dt_has_irqs_desc = true;
+
+ return 0;
+}
+
+static const struct of_device_id stm32mp_exti_ids[] = {
+ { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
+ { .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32mp_exti_ids);
+
+static const struct dev_pm_ops stm32mp_exti_dev_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32mp_exti_suspend, stm32mp_exti_resume)
+};
+
+static struct platform_driver stm32mp_exti_driver = {
+ .probe = stm32mp_exti_probe,
+ .driver = {
+ .name = "stm32mp_exti",
+ .of_match_table = stm32mp_exti_ids,
+ .pm = &stm32mp_exti_dev_pm_ops,
+ },
+};
+
+module_platform_driver(stm32mp_exti_driver);
+
+MODULE_AUTHOR("Maxime Coquelin <mcoquelin.stm32@gmail.com>");
+MODULE_DESCRIPTION("STM32MP EXTI driver");
+MODULE_LICENSE("GPL");
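For orientation, a hedged sketch (hypothetical consumer, not part of this patch) of how a client ends up in stm32mp_exti_domain_alloc(): the driver just requests its interrupt through the usual platform API, and the hierarchical EXTI domain allocates the matching GIC parent underneath.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical consumer whose DT interrupt specifier points at the EXTI node. */
static irqreturn_t foo_wake_handler(int irq, void *data)
{
        /* rpr/fpr ack and the parent eoi are handled by the EXTI irqchip */
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);    /* resolved through the EXTI domain */

        if (irq < 0)
                return irq;

        return devm_request_irq(&pdev->dev, irq, foo_wake_handler,
                                IRQF_TRIGGER_RISING, "foo-wake", pdev);
}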
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 57f610dab6b8..b5dddb3c1568 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -163,5 +163,6 @@ static struct platform_driver ts4800_ic_driver = {
module_platform_driver(ts4800_ic_driver);
MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Multiplexed-IRQs driver for TS-4800's FPGA");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ts4800_irqc");
diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c
index f9bfe84b1c73..d5cdbba6e7c7 100644
--- a/drivers/macintosh/ams/ams-i2c.c
+++ b/drivers/macintosh/ams/ams-i2c.c
@@ -60,7 +60,7 @@ static int ams_i2c_probe(struct i2c_client *client);
static void ams_i2c_remove(struct i2c_client *client);
static const struct i2c_device_id ams_id[] = {
- { "MAC,accelerometer_1", 0 },
+ { "MAC,accelerometer_1" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ams_id);
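This change and the windfarm ones further below drop the same redundant trailing 0: with partial aggregate initialization, omitted members such as .driver_data are already zero. A minimal hedged sketch with a hypothetical device name:

#include <linux/i2c.h>
#include <linux/module.h>

static const struct i2c_device_id acme_id[] = {
        { "acme,sensor" },      /* .driver_data is implicitly 0 */
        { }                     /* sentinel */
};
MODULE_DEVICE_TABLE(i2c, acme_id);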
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 1ae3539beff5..b7b3ef1e58dc 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+MODULE_DESCRIPTION("Mouse button 2+3 emulation");
MODULE_LICENSE("GPL");
static int mouse_emulate_buttons;
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 37cdc6931f6d..2576a53f247e 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -549,7 +549,7 @@ g4fan_exit( void )
platform_driver_unregister( &therm_of_driver );
if( x.of_dev )
- of_device_unregister( x.of_dev );
+ of_platform_device_destroy(&x.of_dev->dev, NULL);
}
module_init(g4fan_init);
diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c
index 49ce37fde930..3ff4577ba847 100644
--- a/drivers/macintosh/windfarm_ad7417_sensor.c
+++ b/drivers/macintosh/windfarm_ad7417_sensor.c
@@ -304,7 +304,7 @@ static void wf_ad7417_remove(struct i2c_client *client)
}
static const struct i2c_device_id wf_ad7417_id[] = {
- { "MAC,ad7417", 0 },
+ { "MAC,ad7417" },
{ }
};
MODULE_DEVICE_TABLE(i2c, wf_ad7417_id);
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index 603ef6c600ba..82365f19adb4 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -573,7 +573,7 @@ static void wf_fcu_remove(struct i2c_client *client)
}
static const struct i2c_device_id wf_fcu_id[] = {
- { "MAC,fcu", 0 },
+ { "MAC,fcu" },
{ }
};
MODULE_DEVICE_TABLE(i2c, wf_fcu_id);
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
index 975361c23a93..16635e2b180b 100644
--- a/drivers/macintosh/windfarm_lm87_sensor.c
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -156,7 +156,7 @@ static void wf_lm87_remove(struct i2c_client *client)
}
static const struct i2c_device_id wf_lm87_id[] = {
- { "MAC,lm87cimt", 0 },
+ { "MAC,lm87cimt" },
{ }
};
MODULE_DEVICE_TABLE(i2c, wf_lm87_id);
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 02856d1f0313..d734b31b8236 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -112,7 +112,7 @@ static void wf_max6690_remove(struct i2c_client *client)
}
static const struct i2c_device_id wf_max6690_id[] = {
- { "MAC,max6690", 0 },
+ { "MAC,max6690" },
{ }
};
MODULE_DEVICE_TABLE(i2c, wf_max6690_id);
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 50baa062c9df..ff8805ecf2e5 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -333,7 +333,7 @@ static void wf_sat_remove(struct i2c_client *client)
}
static const struct i2c_device_id wf_sat_id[] = {
- { "MAC,smu-sat", 0 },
+ { "MAC,smu-sat" },
{ }
};
MODULE_DEVICE_TABLE(i2c, wf_sat_id);
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 3b8842c4a340..4eed97295927 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -276,6 +276,14 @@ config SPRD_MBOX
to send messages between application processors and MCU. Say Y here if
you want to build the Spreadtrum mailbox controller driver.
+config QCOM_CPUCP_MBOX
+ tristate "Qualcomm Technologies, Inc. CPUCP mailbox driver"
+ depends on (ARCH_QCOM || COMPILE_TEST) && 64BIT
+ help
+ Qualcomm Technologies, Inc. CPUSS Control Processor (CPUCP) mailbox
+ controller driver enables communication between AP and CPUCP. Say
+ Y here if you want to build this driver.
+
config QCOM_IPCC
tristate "Qualcomm Technologies, Inc. IPCC driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 5cf2f54debaf..3c3c27d54c13 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -61,4 +61,6 @@ obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
+obj-$(CONFIG_QCOM_CPUCP_MBOX) += qcom-cpucp-mbox.o
+
obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 242e7504a628..a873672a9082 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -158,10 +158,6 @@ enum pdc_hw {
PDC_HW /* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};
-struct pdc_dma_map {
- void *ctx; /* opaque context associated with frame */
-};
-
/* dma descriptor */
struct dma64dd {
u32 ctrl1; /* misc control bits */
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 933727f89431..d17efb1dd0cb 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -225,6 +225,8 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
void *data)
{
u32 *arg = data;
+ u32 val;
+ int ret;
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -236,7 +238,13 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
queue_work(system_bh_wq, &cp->txdb_work);
break;
case IMX_MU_TYPE_TXDB_V2:
- imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
+ imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
+ priv->dcfg->xCR[IMX_MU_GCR]);
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
+ !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
+ 0, 1000);
+ if (ret)
+ dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type);
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
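The TXDB_V2 branch above replaces a fire-and-forget read-modify-write with a doorbell write followed by a bounded poll for the GIRn bit to clear. The underlying pattern, shown as a hedged standalone sketch with hypothetical names, is readl_poll_timeout(addr, val, condition, sleep_us, timeout_us):

#include <linux/io.h>
#include <linux/iopoll.h>

/* Ring a doorbell bit and wait up to 1 ms for the remote side to clear it. */
static int ring_doorbell(void __iomem *reg, u32 bit)
{
        u32 val;

        writel(bit, reg);
        return readl_poll_timeout(reg, val, !(val & bit), 0, 1000);
}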
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 4aa394e91109..4bff73532085 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -22,7 +22,6 @@
#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
-#define CMDQ_GCE_NUM_MAX (2)
#define CMDQ_CURR_IRQ_STATUS 0x10
#define CMDQ_SYNC_TOKEN_UPDATE 0x68
@@ -81,7 +80,7 @@ struct cmdq {
u32 irq_mask;
const struct gce_plat *pdata;
struct cmdq_thread *thread;
- struct clk_bulk_data clocks[CMDQ_GCE_NUM_MAX];
+ struct clk_bulk_data *clocks;
bool suspended;
};
@@ -578,16 +577,64 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
return &mbox->chans[ind];
}
+static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq)
+{
+ static const char * const gce_name = "gce";
+ struct device_node *node, *parent = dev->of_node->parent;
+ struct clk_bulk_data *clks;
+
+ cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
+ sizeof(*cmdq->clocks), GFP_KERNEL);
+ if (!cmdq->clocks)
+ return -ENOMEM;
+
+ if (cmdq->pdata->gce_num == 1) {
+ clks = &cmdq->clocks[0];
+
+ clks->id = gce_name;
+ clks->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clks->clk))
+ return dev_err_probe(dev, PTR_ERR(clks->clk),
+ "failed to get gce clock\n");
+
+ return 0;
+ }
+
+ /*
+ * If there is more than one GCE, get the clocks for the others too,
+ * as the clock of the main GCE must be enabled for additional IPs
+ * to be reachable.
+ */
+ for_each_child_of_node(parent, node) {
+ int alias_id = of_alias_get_id(node, gce_name);
+
+ if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num)
+ continue;
+
+ clks = &cmdq->clocks[alias_id];
+
+ clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id);
+ if (!clks->id) {
+ of_node_put(node);
+ return -ENOMEM;
+ }
+
+ clks->clk = of_clk_get(node, 0);
+ if (IS_ERR(clks->clk)) {
+ of_node_put(node);
+ return dev_err_probe(dev, PTR_ERR(clks->clk),
+ "failed to get gce%d clock\n", alias_id);
+ }
+ }
+
+ return 0;
+}
+
static int cmdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cmdq *cmdq;
int err, i;
- struct device_node *phandle = dev->of_node;
- struct device_node *node;
- int alias_id = 0;
- static const char * const clk_name = "gce";
- static const char * const clk_names[] = { "gce0", "gce1" };
cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -612,29 +659,9 @@ static int cmdq_probe(struct platform_device *pdev)
dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
dev, cmdq->base, cmdq->irq);
- if (cmdq->pdata->gce_num > 1) {
- for_each_child_of_node(phandle->parent, node) {
- alias_id = of_alias_get_id(node, clk_name);
- if (alias_id >= 0 && alias_id < cmdq->pdata->gce_num) {
- cmdq->clocks[alias_id].id = clk_names[alias_id];
- cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
- if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- of_node_put(node);
- return dev_err_probe(dev,
- PTR_ERR(cmdq->clocks[alias_id].clk),
- "failed to get gce clk: %d\n",
- alias_id);
- }
- }
- }
- } else {
- cmdq->clocks[alias_id].id = clk_name;
- cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
- if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
- "failed to get gce clk\n");
- }
- }
+ err = cmdq_get_clocks(dev, cmdq);
+ if (err)
+ return err;
cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
@@ -662,12 +689,6 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
}
- err = devm_mbox_controller_register(dev, &cmdq->mbox);
- if (err < 0) {
- dev_err(dev, "failed to register mailbox: %d\n", err);
- return err;
- }
-
platform_set_drvdata(pdev, cmdq);
WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
@@ -695,6 +716,12 @@ static int cmdq_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
+ err = devm_mbox_controller_register(dev, &cmdq->mbox);
+ if (err < 0) {
+ dev_err(dev, "failed to register mailbox: %d\n", err);
+ return err;
+ }
+
return 0;
}
@@ -790,4 +817,5 @@ static void __exit cmdq_drv_exit(void)
subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);
+MODULE_DESCRIPTION("Mediatek Command Queue (CMDQ) Mailbox driver");
MODULE_LICENSE("GPL v2");
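Once cmdq_get_clocks() has filled the dynamically sized array, the rest of the driver keeps operating on it with the clk_bulk helpers, as the probe path above already does with clk_bulk_prepare(). A hedged sketch of the enable/disable pairing (the function names are illustrative, not the driver's):

#include <linux/clk.h>

static int cmdq_clks_on(struct cmdq *cmdq)
{
        /* covers all gce_num entries built by cmdq_get_clocks() */
        return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
}

static void cmdq_clks_off(struct cmdq *cmdq)
{
        clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}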
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 46747559b438..7a87424657a1 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -230,7 +230,8 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
int ret = 0;
ret = request_threaded_irq(mbox->irq, NULL, mbox_interrupt,
- IRQF_ONESHOT, mbox->name, mbox);
+ IRQF_SHARED | IRQF_ONESHOT, mbox->name,
+ mbox);
if (unlikely(ret)) {
pr_err("failed to register mailbox interrupt:%d\n", ret);
return ret;
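Adding IRQF_SHARED means the handler can now be invoked for interrupts raised by another device sharing the line, so a shared handler is expected to check its own status and return IRQ_NONE when the event is not its own. A generic hedged sketch (struct and register names are hypothetical):

#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_STATUS       0x08            /* hypothetical status register offset */

struct my_dev {
        void __iomem *base;
};

static irqreturn_t shared_line_handler(int irq, void *data)
{
        struct my_dev *md = data;
        u32 status = readl(md->base + MY_STATUS);

        if (!status)
                return IRQ_NONE;        /* not ours; let the other sharer run */

        /* ... acknowledge and process the events reported in status ... */
        return IRQ_HANDLED;
}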
diff --git a/drivers/mailbox/qcom-cpucp-mbox.c b/drivers/mailbox/qcom-cpucp-mbox.c
new file mode 100644
index 000000000000..e5437c294803
--- /dev/null
+++ b/drivers/mailbox/qcom-cpucp-mbox.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define APSS_CPUCP_IPC_CHAN_SUPPORTED 3
+#define APSS_CPUCP_MBOX_CMD_OFF 0x4
+
+/* Tx Registers */
+#define APSS_CPUCP_TX_MBOX_CMD(i) (0x100 + ((i) * 8))
+
+/* Rx Registers */
+#define APSS_CPUCP_RX_MBOX_CMD(i) (0x100 + ((i) * 8))
+#define APSS_CPUCP_RX_MBOX_MAP 0x4000
+#define APSS_CPUCP_RX_MBOX_STAT 0x4400
+#define APSS_CPUCP_RX_MBOX_CLEAR 0x4800
+#define APSS_CPUCP_RX_MBOX_EN 0x4c00
+#define APSS_CPUCP_RX_MBOX_CMD_MASK GENMASK_ULL(63, 0)
+
+/**
+ * struct qcom_cpucp_mbox - Holder for the mailbox driver
+ * @chans: The mailbox channel
+ * @mbox: The mailbox controller
+ * @tx_base: Base address of the CPUCP tx registers
+ * @rx_base: Base address of the CPUCP rx registers
+ */
+struct qcom_cpucp_mbox {
+ struct mbox_chan chans[APSS_CPUCP_IPC_CHAN_SUPPORTED];
+ struct mbox_controller mbox;
+ void __iomem *tx_base;
+ void __iomem *rx_base;
+};
+
+static inline int channel_number(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static irqreturn_t qcom_cpucp_mbox_irq_fn(int irq, void *data)
+{
+ struct qcom_cpucp_mbox *cpucp = data;
+ u64 status;
+ int i;
+
+ status = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_STAT);
+
+ for_each_set_bit(i, (unsigned long *)&status, APSS_CPUCP_IPC_CHAN_SUPPORTED) {
+ u32 val = readl(cpucp->rx_base + APSS_CPUCP_RX_MBOX_CMD(i) + APSS_CPUCP_MBOX_CMD_OFF);
+ struct mbox_chan *chan = &cpucp->chans[i];
+ unsigned long flags;
+
+ /* Provide mutual exclusion with changes to chan->cl */
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->cl)
+ mbox_chan_received_data(chan, &val);
+ writeq(BIT(i), cpucp->rx_base + APSS_CPUCP_RX_MBOX_CLEAR);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_cpucp_mbox_startup(struct mbox_chan *chan)
+{
+ struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox);
+ unsigned long chan_id = channel_number(chan);
+ u64 val;
+
+ val = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+ val |= BIT(chan_id);
+ writeq(val, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+
+ return 0;
+}
+
+static void qcom_cpucp_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox);
+ unsigned long chan_id = channel_number(chan);
+ u64 val;
+
+ val = readq(cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+ val &= ~BIT(chan_id);
+ writeq(val, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+}
+
+static int qcom_cpucp_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_cpucp_mbox *cpucp = container_of(chan->mbox, struct qcom_cpucp_mbox, mbox);
+ unsigned long chan_id = channel_number(chan);
+ u32 *val = data;
+
+ writel(*val, cpucp->tx_base + APSS_CPUCP_TX_MBOX_CMD(chan_id) + APSS_CPUCP_MBOX_CMD_OFF);
+
+ return 0;
+}
+
+static const struct mbox_chan_ops qcom_cpucp_mbox_chan_ops = {
+ .startup = qcom_cpucp_mbox_startup,
+ .send_data = qcom_cpucp_mbox_send_data,
+ .shutdown = qcom_cpucp_mbox_shutdown
+};
+
+static int qcom_cpucp_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_cpucp_mbox *cpucp;
+ struct mbox_controller *mbox;
+ int irq, ret;
+
+ cpucp = devm_kzalloc(dev, sizeof(*cpucp), GFP_KERNEL);
+ if (!cpucp)
+ return -ENOMEM;
+
+ cpucp->rx_base = devm_of_iomap(dev, dev->of_node, 0, NULL);
+ if (IS_ERR(cpucp->rx_base))
+ return PTR_ERR(cpucp->rx_base);
+
+ cpucp->tx_base = devm_of_iomap(dev, dev->of_node, 1, NULL);
+ if (IS_ERR(cpucp->tx_base))
+ return PTR_ERR(cpucp->tx_base);
+
+ writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_EN);
+ writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_CLEAR);
+ writeq(0, cpucp->rx_base + APSS_CPUCP_RX_MBOX_MAP);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, qcom_cpucp_mbox_irq_fn,
+ IRQF_TRIGGER_HIGH, "apss_cpucp_mbox", cpucp);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register irq: %d\n", irq);
+
+ writeq(APSS_CPUCP_RX_MBOX_CMD_MASK, cpucp->rx_base + APSS_CPUCP_RX_MBOX_MAP);
+
+ mbox = &cpucp->mbox;
+ mbox->dev = dev;
+ mbox->num_chans = APSS_CPUCP_IPC_CHAN_SUPPORTED;
+ mbox->chans = cpucp->chans;
+ mbox->ops = &qcom_cpucp_mbox_chan_ops;
+
+ ret = devm_mbox_controller_register(dev, mbox);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to create mailbox\n");
+
+ return 0;
+}
+
+static const struct of_device_id qcom_cpucp_mbox_of_match[] = {
+ { .compatible = "qcom,x1e80100-cpucp-mbox" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_cpucp_mbox_of_match);
+
+static struct platform_driver qcom_cpucp_mbox_driver = {
+ .probe = qcom_cpucp_mbox_probe,
+ .driver = {
+ .name = "qcom_cpucp_mbox",
+ .of_match_table = qcom_cpucp_mbox_of_match,
+ },
+};
+
+static int __init qcom_cpucp_mbox_init(void)
+{
+ return platform_driver_register(&qcom_cpucp_mbox_driver);
+}
+core_initcall(qcom_cpucp_mbox_init);
+
+static void __exit qcom_cpucp_mbox_exit(void)
+{
+ platform_driver_unregister(&qcom_cpucp_mbox_driver);
+}
+module_exit(qcom_cpucp_mbox_exit);
+
+MODULE_DESCRIPTION("QTI CPUCP MBOX Driver");
+MODULE_LICENSE("GPL");
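A hedged sketch of the client side (device, channel index and blocking policy are assumptions, not part of this patch): the consumer requests a channel through the mailbox framework, and mbox_send_message() lands in qcom_cpucp_mbox_send_data() with a pointer to the 32-bit command.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

static int send_cpucp_cmd(struct device *dev, u32 cmd)
{
        struct mbox_client cl = { .dev = dev, .tx_block = false };
        struct mbox_chan *chan;
        int ret;

        chan = mbox_request_channel(&cl, 0);    /* index 0 of the "mboxes" list */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        ret = mbox_send_message(chan, &cmd);    /* delivered via ..._send_data() */
        mbox_free_channel(chan);

        return ret < 0 ? ret : 0;
}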
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 4acf5612487c..521d08b9ab47 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -64,6 +64,13 @@
#define MAX_SGI 16
+/*
+ * Module parameters
+ */
+static int tx_poll_period = 5;
+module_param_named(tx_poll_period, tx_poll_period, int, 0644);
+MODULE_PARM_DESC(tx_poll_period, "Poll period waiting for ack after send.");
+
/**
* struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
* @is_opened: indicate if the IPI channel is opened
@@ -537,7 +544,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mbox->num_chans = 2;
mbox->txdone_irq = false;
mbox->txdone_poll = true;
- mbox->txpoll_period = 5;
+ mbox->txpoll_period = tx_poll_period;
mbox->of_xlate = zynqmp_ipi_of_xlate;
chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
if (!chans)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 48ce750bf70a..da50f6661bae 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -164,40 +164,68 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
* prio is worth 1/8th of what INITIAL_PRIO is worth.
*/
-#define bucket_prio(b) \
-({ \
- unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
- \
- (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
-})
+static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)
+{
+ unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;
+
+ return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);
+}
+
+static inline bool new_bucket_max_cmp(const void *l, const void *r, void *args)
+{
+ struct bucket **lhs = (struct bucket **)l;
+ struct bucket **rhs = (struct bucket **)r;
+ struct cache *ca = args;
+
+ return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs);
+}
-#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
-#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
+static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args)
+{
+ struct bucket **lhs = (struct bucket **)l;
+ struct bucket **rhs = (struct bucket **)r;
+ struct cache *ca = args;
+
+ return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
+}
+
+static inline void new_bucket_swap(void *l, void *r, void __always_unused *args)
+{
+ struct bucket **lhs = l, **rhs = r;
+
+ swap(*lhs, *rhs);
+}
static void invalidate_buckets_lru(struct cache *ca)
{
struct bucket *b;
- ssize_t i;
+ const struct min_heap_callbacks bucket_max_cmp_callback = {
+ .less = new_bucket_max_cmp,
+ .swp = new_bucket_swap,
+ };
+ const struct min_heap_callbacks bucket_min_cmp_callback = {
+ .less = new_bucket_min_cmp,
+ .swp = new_bucket_swap,
+ };
- ca->heap.used = 0;
+ ca->heap.nr = 0;
for_each_bucket(b, ca) {
if (!bch_can_invalidate_bucket(ca, b))
continue;
- if (!heap_full(&ca->heap))
- heap_add(&ca->heap, b, bucket_max_cmp);
- else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
+ if (!min_heap_full(&ca->heap))
+ min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca);
+ else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) {
ca->heap.data[0] = b;
- heap_sift(&ca->heap, 0, bucket_max_cmp);
+ min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca);
}
}
- for (i = ca->heap.used / 2 - 1; i >= 0; --i)
- heap_sift(&ca->heap, i, bucket_min_cmp);
+ min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca);
while (!fifo_full(&ca->free_inc)) {
- if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
+ if (!ca->heap.nr) {
/*
* We don't want to be calling invalidate_buckets()
* multiple times when it can't do anything
@@ -206,6 +234,8 @@ static void invalidate_buckets_lru(struct cache *ca)
wake_up_gc(ca->set);
return;
}
+ b = min_heap_peek(&ca->heap)[0];
+ min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca);
bch_invalidate_one_bucket(ca, b);
}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 1d33e40d26ea..785b0d9008fa 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -458,7 +458,7 @@ struct cache {
/* Allocation stuff: */
struct bucket *buckets;
- DECLARE_HEAP(struct bucket *, heap);
+ DEFINE_MIN_HEAP(struct bucket *, cache_heap) heap;
/*
* If nonzero, we know we aren't going to find any buckets to invalidate
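The bcache changes replace the old DECLARE_HEAP()/heap_*() macros with the generic min_heap API: a DEFINE_MIN_HEAP() type plus a min_heap_callbacks with less()/swp() hooks. The same pattern in miniature, as a hedged standalone sketch over plain ints rather than bcache buckets:

#include <linux/min_heap.h>
#include <linux/minmax.h>

DEFINE_MIN_HEAP(int, int_heap);         /* declares struct int_heap with nr/size/data */

static bool int_less(const void *l, const void *r, void *args)
{
        return *(const int *)l < *(const int *)r;
}

static void int_swap(void *l, void *r, void *args)
{
        swap(*(int *)l, *(int *)r);
}

static int pop_smallest(struct int_heap *heap)
{
        const struct min_heap_callbacks cbs = {
                .less = int_less,
                .swp = int_swap,
        };
        int smallest = heap->data[0];   /* same element as min_heap_peek(heap)[0] */

        min_heap_pop(heap, &cbs, NULL); /* caller must ensure heap->nr > 0 */
        return smallest;
}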
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 463eb13bd0b2..bd97d8626887 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -54,9 +54,11 @@ void bch_dump_bucket(struct btree_keys *b)
int __bch_count_data(struct btree_keys *b)
{
unsigned int ret = 0;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
struct bkey *k;
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
if (b->ops->is_extents)
for_each_key(b, k, &iter)
ret += KEY_SIZE(k);
@@ -67,9 +69,11 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
va_list args;
struct bkey *k, *p = NULL;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
const char *err;
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
for_each_key(b, k, &iter) {
if (b->ops->is_extents) {
err = "Keys out of order";
@@ -110,9 +114,9 @@ bug:
static void bch_btree_iter_next_check(struct btree_iter *iter)
{
- struct bkey *k = iter->data->k, *next = bkey_next(k);
+ struct bkey *k = iter->heap.data->k, *next = bkey_next(k);
- if (next < iter->data->end &&
+ if (next < iter->heap.data->end &&
bkey_cmp(k, iter->b->ops->is_extents ?
&START_KEY(next) : next) > 0) {
bch_dump_bucket(iter->b);
@@ -879,12 +883,14 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
struct bkey preceding_key_on_stack = ZERO_KEY;
struct bkey *preceding_key_p = &preceding_key_on_stack;
BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
/*
* If k has preceding key, preceding_key_p will be set to address
* of k's preceding key; otherwise preceding_key_p will be set
@@ -895,9 +901,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
else
preceding_key(k, &preceding_key_p);
- m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
+ m = bch_btree_iter_init(b, &iter, preceding_key_p);
- if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
+ if (b->ops->insert_fixup(b, k, &iter, replace_key))
return status;
status = BTREE_INSERT_STATUS_INSERT;
@@ -1077,79 +1083,102 @@ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
/* Btree iterator */
-typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
- struct btree_iter_set);
+typedef bool (new_btree_iter_cmp_fn)(const void *, const void *, void *);
+
+static inline bool new_btree_iter_cmp(const void *l, const void *r, void __always_unused *args)
+{
+ const struct btree_iter_set *_l = l;
+ const struct btree_iter_set *_r = r;
+
+ return bkey_cmp(_l->k, _r->k) <= 0;
+}
-static inline bool btree_iter_cmp(struct btree_iter_set l,
- struct btree_iter_set r)
+static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
{
- return bkey_cmp(l.k, r.k) > 0;
+ struct btree_iter_set *_iter1 = iter1;
+ struct btree_iter_set *_iter2 = iter2;
+
+ swap(*_iter1, *_iter2);
}
static inline bool btree_iter_end(struct btree_iter *iter)
{
- return !iter->used;
+ return !iter->heap.nr;
}
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end)
{
+ const struct min_heap_callbacks callbacks = {
+ .less = new_btree_iter_cmp,
+ .swp = new_btree_iter_swap,
+ };
+
if (k != end)
- BUG_ON(!heap_add(iter,
- ((struct btree_iter_set) { k, end }),
- btree_iter_cmp));
+ BUG_ON(!min_heap_push(&iter->heap,
+ &((struct btree_iter_set) { k, end }),
+ &callbacks,
+ NULL));
}
-static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
- struct btree_iter_stack *iter,
- struct bkey *search,
- struct bset_tree *start)
+static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search,
+ struct bset_tree *start)
{
struct bkey *ret = NULL;
- iter->iter.size = ARRAY_SIZE(iter->stack_data);
- iter->iter.used = 0;
+ iter->heap.size = ARRAY_SIZE(iter->heap.preallocated);
+ iter->heap.nr = 0;
#ifdef CONFIG_BCACHE_DEBUG
- iter->iter.b = b;
+ iter->b = b;
#endif
for (; start <= bset_tree_last(b); start++) {
ret = bch_bset_search(b, start, search);
- bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
+ bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
}
return ret;
}
-struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
- struct btree_iter_stack *iter,
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
struct bkey *search)
{
- return __bch_btree_iter_stack_init(b, iter, search, b->set);
+ return __bch_btree_iter_init(b, iter, search, b->set);
}
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
- btree_iter_cmp_fn *cmp)
+ new_btree_iter_cmp_fn *cmp)
{
struct btree_iter_set b __maybe_unused;
struct bkey *ret = NULL;
+ const struct min_heap_callbacks callbacks = {
+ .less = cmp,
+ .swp = new_btree_iter_swap,
+ };
if (!btree_iter_end(iter)) {
bch_btree_iter_next_check(iter);
- ret = iter->data->k;
- iter->data->k = bkey_next(iter->data->k);
+ ret = iter->heap.data->k;
+ iter->heap.data->k = bkey_next(iter->heap.data->k);
- if (iter->data->k > iter->data->end) {
+ if (iter->heap.data->k > iter->heap.data->end) {
WARN_ONCE(1, "bset was corrupt!\n");
- iter->data->k = iter->data->end;
+ iter->heap.data->k = iter->heap.data->end;
}
- if (iter->data->k == iter->data->end)
- heap_pop(iter, b, cmp);
+ if (iter->heap.data->k == iter->heap.data->end) {
+ if (iter->heap.nr) {
+ b = min_heap_peek(&iter->heap)[0];
+ min_heap_pop(&iter->heap, &callbacks, NULL);
+ }
+ }
else
- heap_sift(iter, 0, cmp);
+ min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
}
return ret;
@@ -1157,7 +1186,7 @@ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
- return __bch_btree_iter_next(iter, btree_iter_cmp);
+ return __bch_btree_iter_next(iter, new_btree_iter_cmp);
}
@@ -1195,16 +1224,18 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
bool fixup, bool remove_stale)
{
- int i;
struct bkey *k, *last = NULL;
BKEY_PADDED(k) tmp;
bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
? bch_ptr_bad
: bch_ptr_invalid;
+ const struct min_heap_callbacks callbacks = {
+ .less = b->ops->sort_cmp,
+ .swp = new_btree_iter_swap,
+ };
/* Heapify the iterator, using our comparison function */
- for (i = iter->used / 2 - 1; i >= 0; --i)
- heap_sift(iter, i, b->ops->sort_cmp);
+ min_heapify_all(&iter->heap, &callbacks, NULL);
while (!btree_iter_end(iter)) {
if (b->ops->sort_fixup && fixup)
@@ -1293,10 +1324,11 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
int oldsize = bch_count_data(b);
- __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+ __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
if (start) {
unsigned int i;
@@ -1307,7 +1339,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
order = get_order(__set_bytes(b->set->data, keys));
}
- __btree_sort(b, &iter.iter, start, order, false, state);
+ __btree_sort(b, &iter, start, order, false, state);
EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
@@ -1323,11 +1355,13 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
- struct btree_iter_stack iter;
+ struct btree_iter iter;
+
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
- bch_btree_iter_stack_init(b, &iter, NULL);
+ bch_btree_iter_init(b, &iter, NULL);
- btree_mergesort(b, new->set->data, &iter.iter, false, true);
+ btree_mergesort(b, new->set->data, &iter, false, true);
bch_time_stats_update(&state->time, start_time);
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 011f6062c4c0..f79441acd4c1 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -187,8 +187,9 @@ struct bset_tree {
};
struct btree_keys_ops {
- bool (*sort_cmp)(struct btree_iter_set l,
- struct btree_iter_set r);
+ bool (*sort_cmp)(const void *l,
+ const void *r,
+ void *args);
struct bkey *(*sort_fixup)(struct btree_iter *iter,
struct bkey *tmp);
bool (*insert_fixup)(struct btree_keys *b,
@@ -312,23 +313,17 @@ enum {
BTREE_INSERT_STATUS_FRONT_MERGE,
};
+struct btree_iter_set {
+ struct bkey *k, *end;
+};
+
/* Btree key iteration */
struct btree_iter {
- size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
struct btree_keys *b;
#endif
- struct btree_iter_set {
- struct bkey *k, *end;
- } data[];
-};
-
-/* Fixed-size btree_iter that can be allocated on the stack */
-
-struct btree_iter_stack {
- struct btree_iter iter;
- struct btree_iter_set stack_data[MAX_BSETS];
+ MIN_HEAP_PREALLOCATED(struct btree_iter_set, btree_iter_heap, MAX_BSETS) heap;
};
typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
@@ -340,9 +335,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end);
-struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
- struct btree_iter_stack *iter,
- struct bkey *search);
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
+ struct btree_iter *iter,
+ struct bkey *search);
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search);
@@ -357,14 +352,13 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
return search ? __bch_bset_search(b, t, search) : t->data->start;
}
-#define for_each_key_filter(b, k, stack_iter, filter) \
- for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
- ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
- filter));)
+#define for_each_key_filter(b, k, iter, filter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
-#define for_each_key(b, k, stack_iter) \
- for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
- ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
+#define for_each_key(b, k, iter) \
+ for (bch_btree_iter_init((b), (iter), NULL); \
+ ((k) = bch_btree_iter_next(iter));)
/* Sorting */
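
With this change struct btree_iter is itself a preallocated min-heap of {k, end} cursors: bch_btree_iter_next() hands out the smallest key, advances that cursor, and either sifts it back down or drops it once its bset is exhausted, which is a textbook k-way merge. A self-contained user-space sketch of the same mechanism (names and types are illustrative, not the bcache ones):

#include <stdio.h>
#include <stddef.h>

/* Merge several sorted int runs by keeping one cursor per run in a
 * small min-heap ordered by the cursor's current element. */
struct cursor {
	const int *k, *end;
};

struct cursor_heap {
	struct cursor data[8];
	size_t nr;
};

static int cursor_less(const struct cursor *a, const struct cursor *b)
{
	return *a->k <= *b->k;
}

static void swap_cursor(struct cursor *a, struct cursor *b)
{
	struct cursor t = *a; *a = *b; *b = t;
}

static void sift_down(struct cursor_heap *h, size_t i)
{
	for (;;) {
		size_t l = 2 * i + 1, r = l + 1, best = i;

		if (l < h->nr && cursor_less(&h->data[l], &h->data[best]))
			best = l;
		if (r < h->nr && cursor_less(&h->data[r], &h->data[best]))
			best = r;
		if (best == i)
			return;
		swap_cursor(&h->data[i], &h->data[best]);
		i = best;
	}
}

static void push(struct cursor_heap *h, const int *k, const int *end)
{
	size_t i = h->nr++;

	h->data[i] = (struct cursor) { k, end };
	while (i && cursor_less(&h->data[i], &h->data[(i - 1) / 2])) {
		swap_cursor(&h->data[i], &h->data[(i - 1) / 2]);
		i = (i - 1) / 2;
	}
}

static const int *next_key(struct cursor_heap *h)
{
	const int *ret;

	if (!h->nr)
		return NULL;
	ret = h->data[0].k++;
	if (h->data[0].k == h->data[0].end)	/* run exhausted: drop cursor */
		h->data[0] = h->data[--h->nr];
	sift_down(h, 0);
	return ret;
}

int main(void)
{
	int a[] = { 1, 4, 9 }, b[] = { 2, 3, 8 }, c[] = { 5, 6, 7 };
	struct cursor_heap h = { .nr = 0 };
	const int *k;

	push(&h, a, a + 3);
	push(&h, b, b + 3);
	push(&h, c, c + 3);
	while ((k = next_key(&h)))
		printf("%d ", *k);	/* 1 2 3 4 5 6 7 8 9 */
	printf("\n");
	return 0;
}

The <= in cursor_less mirrors new_btree_iter_cmp() above; ties may surface in either order, which is acceptable for heap ordering.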
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 4e6ccf2c8a0b..ed40d8600656 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -149,19 +149,19 @@ void bch_btree_node_read_done(struct btree *b)
{
const char *err = "bad btree header";
struct bset *i = btree_bset_first(b);
- struct btree_iter *iter;
+ struct btree_iter iter;
/*
* c->fill_iter can allocate an iterator with more memory space
* than static MAX_BSETS.
* See the comment around cache_set->fill_iter.
*/
- iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
- iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
- iter->used = 0;
+ iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
+ iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
+ iter.heap.nr = 0;
#ifdef CONFIG_BCACHE_DEBUG
- iter->b = &b->keys;
+ iter.b = &b->keys;
#endif
if (!i->seq)
@@ -199,7 +199,7 @@ void bch_btree_node_read_done(struct btree *b)
if (i != b->keys.set[0].data && !i->keys)
goto err;
- bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
+ bch_btree_iter_push(&iter, i->start, bset_bkey_last(i));
b->written += set_blocks(i, block_bytes(b->c->cache));
}
@@ -211,7 +211,7 @@ void bch_btree_node_read_done(struct btree *b)
if (i->seq == b->keys.set[0].data->seq)
goto err;
- bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
+ bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort);
i = b->keys.set[0].data;
err = "short btree key";
@@ -223,7 +223,7 @@ void bch_btree_node_read_done(struct btree *b)
bch_bset_init_next(&b->keys, write_block(b),
bset_magic(&b->c->cache->sb));
out:
- mempool_free(iter, &b->c->fill_iter);
+ mempool_free(iter.heap.data, &b->c->fill_iter);
return;
err:
set_btree_node_io_error(b);
@@ -1309,9 +1309,11 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
uint8_t stale = 0;
unsigned int keys = 0, good_keys = 0;
struct bkey *k;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
struct bset_tree *t;
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
gc->nodes++;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
@@ -1570,9 +1572,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
static unsigned int btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
unsigned int ret = 0;
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
ret += bkey_u64s(k);
@@ -1611,18 +1615,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
int ret = 0;
bool should_rewrite;
struct bkey *k;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
struct gc_merge_info r[GC_MERGE_NODES];
struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
- bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+ bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
for (i = r; i < r + ARRAY_SIZE(r); i++)
i->b = ERR_PTR(-EINTR);
while (1) {
- k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
- bch_ptr_bad);
+ k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
if (k) {
r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
true, b);
@@ -1917,7 +1921,9 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
int ret = 0;
struct bkey *k, *p = NULL;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
+
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
bch_initial_mark_key(b->c, b->level, k);
@@ -1925,10 +1931,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
bch_initial_mark_key(b->c, b->level + 1, &b->key);
if (b->level) {
- bch_btree_iter_stack_init(&b->keys, &iter, NULL);
+ bch_btree_iter_init(&b->keys, &iter, NULL);
do {
- k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ k = bch_btree_iter_next_filter(&iter, &b->keys,
bch_ptr_bad);
if (k) {
btree_node_prefetch(b, k);
@@ -1956,7 +1962,7 @@ static int bch_btree_check_thread(void *arg)
struct btree_check_info *info = arg;
struct btree_check_state *check_state = info->state;
struct cache_set *c = check_state->c;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
@@ -1964,9 +1970,11 @@ static int bch_btree_check_thread(void *arg)
cur_idx = prev_idx = 0;
ret = 0;
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
/* root node keys are checked before thread created */
- bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ bch_btree_iter_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -1984,7 +1992,7 @@ static int bch_btree_check_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter.iter,
+ k = bch_btree_iter_next_filter(&iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -2057,9 +2065,11 @@ int bch_btree_check(struct cache_set *c)
int ret = 0;
int i;
struct bkey *k = NULL;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
struct btree_check_state check_state;
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
/* check and mark root node keys */
for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
bch_initial_mark_key(c, c->root->level, k);
@@ -2553,11 +2563,12 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
if (b->level) {
struct bkey *k;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
- bch_btree_iter_stack_init(&b->keys, &iter, from);
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+ bch_btree_iter_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
bch_ptr_bad))) {
ret = bcache_btree(map_nodes_recurse, k, b,
op, from, fn, flags);
@@ -2586,12 +2597,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
{
int ret = MAP_CONTINUE;
struct bkey *k;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
- bch_btree_iter_stack_init(&b->keys, &iter, from);
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+ bch_btree_iter_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
- bch_ptr_bad))) {
+ while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
ret = !b->level
? fn(op, b, k)
: bcache_btree(map_keys_recurse, k,
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index d626ffcbecb9..a7221e5dbe81 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -33,15 +33,16 @@ static void sort_key_next(struct btree_iter *iter,
i->k = bkey_next(i->k);
if (i->k == i->end)
- *i = iter->data[--iter->used];
+ *i = iter->heap.data[--iter->heap.nr];
}
-static bool bch_key_sort_cmp(struct btree_iter_set l,
- struct btree_iter_set r)
+static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
{
- int64_t c = bkey_cmp(l.k, r.k);
+ struct btree_iter_set *_l = (struct btree_iter_set *)l;
+ struct btree_iter_set *_r = (struct btree_iter_set *)r;
+ int64_t c = bkey_cmp(_l->k, _r->k);
- return c ? c > 0 : l.k < r.k;
+ return !(c ? c > 0 : _l->k < _r->k);
}
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
@@ -238,7 +239,7 @@ static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
}
const struct btree_keys_ops bch_btree_keys_ops = {
- .sort_cmp = bch_key_sort_cmp,
+ .sort_cmp = new_bch_key_sort_cmp,
.insert_fixup = bch_btree_ptr_insert_fixup,
.key_invalid = bch_btree_ptr_invalid,
.key_bad = bch_btree_ptr_bad,
@@ -255,22 +256,36 @@ const struct btree_keys_ops bch_btree_keys_ops = {
* Necessary for btree_sort_fixup() - if there are multiple keys that compare
* equal in different sets, we have to process them newest to oldest.
*/
-static bool bch_extent_sort_cmp(struct btree_iter_set l,
- struct btree_iter_set r)
+
+static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args)
+{
+ struct btree_iter_set *_l = (struct btree_iter_set *)l;
+ struct btree_iter_set *_r = (struct btree_iter_set *)r;
+ int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));
+
+ return !(c ? c > 0 : _l->k < _r->k);
+}
+
+static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
{
- int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+ struct btree_iter_set *_iter1 = iter1;
+ struct btree_iter_set *_iter2 = iter2;
- return c ? c > 0 : l.k < r.k;
+ swap(*_iter1, *_iter2);
}
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
struct bkey *tmp)
{
- while (iter->used > 1) {
- struct btree_iter_set *top = iter->data, *i = top + 1;
-
- if (iter->used > 2 &&
- bch_extent_sort_cmp(i[0], i[1]))
+ const struct min_heap_callbacks callbacks = {
+ .less = new_bch_extent_sort_cmp,
+ .swp = new_btree_iter_swap,
+ };
+ while (iter->heap.nr > 1) {
+ struct btree_iter_set *top = iter->heap.data, *i = top + 1;
+
+ if (iter->heap.nr > 2 &&
+ !new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
i++;
if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
@@ -278,7 +293,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
if (!KEY_SIZE(i->k)) {
sort_key_next(iter, i);
- heap_sift(iter, i - top, bch_extent_sort_cmp);
+ min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
continue;
}
@@ -288,7 +303,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
else
bch_cut_front(top->k, i->k);
- heap_sift(iter, i - top, bch_extent_sort_cmp);
+ min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
} else {
/* can't happen because of comparison func */
BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
@@ -298,7 +313,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
bch_cut_back(&START_KEY(i->k), tmp);
bch_cut_front(i->k, top->k);
- heap_sift(iter, 0, bch_extent_sort_cmp);
+ min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
return tmp;
} else {
@@ -618,7 +633,7 @@ static bool bch_extent_merge(struct btree_keys *bk,
}
const struct btree_keys_ops bch_extent_keys_ops = {
- .sort_cmp = bch_extent_sort_cmp,
+ .sort_cmp = new_bch_extent_sort_cmp,
.sort_fixup = bch_extent_sort_fixup,
.insert_fixup = bch_extent_insert_fixup,
.key_invalid = bch_extent_invalid,
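
The sort_cmp conversions here preserve the old ordering by simply negating the old "l sorts after r" test, e.g. return !(c ? c > 0 : l.k < r.k), turning a max-heap style comparator into the less callback the min_heap API expects. The tie-break on pointer address, which the comment above ties to newest-to-oldest processing of equal keys, survives the negation unchanged. A tiny check of that equivalence (hypothetical entry type, not bcache code):

#include <stdio.h>

struct entry { long key; const char *tag; unsigned long addr; };

/* old max-heap-style test: "l sorts after r" */
static int old_after(const struct entry *l, const struct entry *r)
{
	long c = l->key - r->key;

	return c ? c > 0 : l->addr < r->addr;
}

/* new min_heap "less": the negation used throughout the bcache conversion */
static int new_less(const struct entry *l, const struct entry *r)
{
	long c = l->key - r->key;

	return !(c ? c > 0 : l->addr < r->addr);
}

int main(void)
{
	/* equal keys, different addresses: both predicates pick the same
	 * entry to be consumed first */
	struct entry a = { 10, "lower-addr", 0x1000 }, b = { 10, "higher-addr", 0x2000 };

	printf("old: %s first\n", old_after(&a, &b) ? b.tag : a.tag);
	printf("new: %s first\n", new_less(&a, &b) ? a.tag : b.tag);
	return 0;
}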
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index ebd500bdf0b2..7f482729c56d 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -182,16 +182,27 @@ err: if (!IS_ERR_OR_NULL(w->private))
closure_sync(&cl);
}
-static bool bucket_cmp(struct bucket *l, struct bucket *r)
+static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *args)
{
- return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+ struct bucket **_l = (struct bucket **)l;
+ struct bucket **_r = (struct bucket **)r;
+
+ return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
+}
+
+static void new_bucket_swap(void *l, void *r, void __always_unused *args)
+{
+ struct bucket **_l = l;
+ struct bucket **_r = r;
+
+ swap(*_l, *_r);
}
static unsigned int bucket_heap_top(struct cache *ca)
{
struct bucket *b;
- return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
+ return (b = min_heap_peek(&ca->heap)[0]) ? GC_SECTORS_USED(b) : 0;
}
void bch_moving_gc(struct cache_set *c)
@@ -199,6 +210,10 @@ void bch_moving_gc(struct cache_set *c)
struct cache *ca = c->cache;
struct bucket *b;
unsigned long sectors_to_move, reserve_sectors;
+ const struct min_heap_callbacks callbacks = {
+ .less = new_bucket_cmp,
+ .swp = new_bucket_swap,
+ };
if (!c->copy_gc_enabled)
return;
@@ -209,7 +224,7 @@ void bch_moving_gc(struct cache_set *c)
reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]);
- ca->heap.used = 0;
+ ca->heap.nr = 0;
for_each_bucket(b, ca) {
if (GC_MARK(b) == GC_MARK_METADATA ||
@@ -218,25 +233,31 @@ void bch_moving_gc(struct cache_set *c)
atomic_read(&b->pin))
continue;
- if (!heap_full(&ca->heap)) {
+ if (!min_heap_full(&ca->heap)) {
sectors_to_move += GC_SECTORS_USED(b);
- heap_add(&ca->heap, b, bucket_cmp);
- } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+ min_heap_push(&ca->heap, &b, &callbacks, NULL);
+ } else if (!new_bucket_cmp(&b, min_heap_peek(&ca->heap), ca)) {
sectors_to_move -= bucket_heap_top(ca);
sectors_to_move += GC_SECTORS_USED(b);
ca->heap.data[0] = b;
- heap_sift(&ca->heap, 0, bucket_cmp);
+ min_heap_sift_down(&ca->heap, 0, &callbacks, NULL);
}
}
while (sectors_to_move > reserve_sectors) {
- heap_pop(&ca->heap, b, bucket_cmp);
+ if (ca->heap.nr) {
+ b = min_heap_peek(&ca->heap)[0];
+ min_heap_pop(&ca->heap, &callbacks, NULL);
+ }
sectors_to_move -= GC_SECTORS_USED(b);
}
- while (heap_pop(&ca->heap, b, bucket_cmp))
+ while (ca->heap.nr) {
+ b = min_heap_peek(&ca->heap)[0];
+ min_heap_pop(&ca->heap, &callbacks, NULL);
SET_GC_MOVE(b, 1);
+ }
mutex_unlock(&c->bucket_lock);
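
bch_moving_gc() now fills the heap with candidate buckets while summing their used sectors, pops the least-used candidates until the total fits under the movinggc reserve, and marks whatever remains for moving. A compact sketch of that trim-to-budget step, with a sorted array standing in for the heap (illustrative values):

#include <stdio.h>

int main(void)
{
	int used[] = { 2, 3, 5, 8, 13 };	/* per-bucket sectors used, ascending */
	int n = 5, i = 0;
	long total = 0, reserve = 20;

	for (int j = 0; j < n; j++)
		total += used[j];		/* 31 sectors selected */

	/* drop the least useful candidates until we fit under the reserve */
	while (total > reserve && i < n)
		total -= used[i++];

	printf("moving %ld sectors from %d bucket(s)\n", total, n - i);
	return 0;
}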
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b5d6ef430b86..e7abfdd77c3b 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1907,8 +1907,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
INIT_LIST_HEAD(&c->btree_cache_freed);
INIT_LIST_HEAD(&c->data_buckets);
- iter_size = sizeof(struct btree_iter) +
- ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
+ iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
sizeof(struct btree_iter_set);
c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 826b14cae4e5..e8f696cb58c0 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -660,7 +660,9 @@ static unsigned int bch_root_usage(struct cache_set *c)
unsigned int bytes = 0;
struct bkey *k;
struct btree *b;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
+
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
goto lock_root;
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index ae380bc3992e..410d8cb49e50 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * random utiility code, for bcache but in theory not specific to bcache
+ * random utility code, for bcache but in theory not specific to bcache
*
* Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
* Copyright 2012 Google, Inc.
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index f61ab1bada6c..539454d8e2d0 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
+#include <linux/min_heap.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
@@ -30,16 +31,10 @@ struct closure;
#endif
-#define DECLARE_HEAP(type, name) \
- struct { \
- size_t size, used; \
- type *data; \
- } name
-
#define init_heap(heap, _size, gfp) \
({ \
size_t _bytes; \
- (heap)->used = 0; \
+ (heap)->nr = 0; \
(heap)->size = (_size); \
_bytes = (heap)->size * sizeof(*(heap)->data); \
(heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
@@ -52,64 +47,6 @@ do { \
(heap)->data = NULL; \
} while (0)
-#define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j])
-
-#define heap_sift(h, i, cmp) \
-do { \
- size_t _r, _j = i; \
- \
- for (; _j * 2 + 1 < (h)->used; _j = _r) { \
- _r = _j * 2 + 1; \
- if (_r + 1 < (h)->used && \
- cmp((h)->data[_r], (h)->data[_r + 1])) \
- _r++; \
- \
- if (cmp((h)->data[_r], (h)->data[_j])) \
- break; \
- heap_swap(h, _r, _j); \
- } \
-} while (0)
-
-#define heap_sift_down(h, i, cmp) \
-do { \
- while (i) { \
- size_t p = (i - 1) / 2; \
- if (cmp((h)->data[i], (h)->data[p])) \
- break; \
- heap_swap(h, i, p); \
- i = p; \
- } \
-} while (0)
-
-#define heap_add(h, d, cmp) \
-({ \
- bool _r = !heap_full(h); \
- if (_r) { \
- size_t _i = (h)->used++; \
- (h)->data[_i] = d; \
- \
- heap_sift_down(h, _i, cmp); \
- heap_sift(h, _i, cmp); \
- } \
- _r; \
-})
-
-#define heap_pop(h, d, cmp) \
-({ \
- bool _r = (h)->used; \
- if (_r) { \
- (d) = (h)->data[0]; \
- (h)->used--; \
- heap_swap(h, 0, (h)->used); \
- heap_sift(h, 0, cmp); \
- } \
- _r; \
-})
-
-#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
-
-#define heap_full(h) ((h)->used == (h)->size)
-
#define DECLARE_FIFO(type, name) \
struct { \
size_t front, back, size, mask; \
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 792e070ccf38..c1d28e365910 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -908,15 +908,16 @@ static int bch_dirty_init_thread(void *arg)
struct dirty_init_thrd_info *info = arg;
struct bch_dirty_init_state *state = info->state;
struct cache_set *c = state->c;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
k = p = NULL;
prev_idx = 0;
- bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+ bch_btree_iter_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -930,7 +931,7 @@ static int bch_dirty_init_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter.iter,
+ k = bch_btree_iter_next_filter(&iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -979,11 +980,13 @@ void bch_sectors_dirty_init(struct bcache_device *d)
int i;
struct btree *b = NULL;
struct bkey *k = NULL;
- struct btree_iter_stack iter;
+ struct btree_iter iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
struct bch_dirty_init_state state;
+ min_heap_init(&iter.heap, NULL, MAX_BSETS);
+
retry_lock:
b = c->root;
rw_lock(0, b, b->level);
diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c
index b6f3d0710a21..7e0009d2f67d 100644
--- a/drivers/md/dm-vdo/repair.c
+++ b/drivers/md/dm-vdo/repair.c
@@ -51,6 +51,8 @@ struct recovery_point {
bool increment_applied;
};
+DEFINE_MIN_HEAP(struct numbered_block_mapping, replay_heap);
+
struct repair_completion {
/* The completion header */
struct vdo_completion completion;
@@ -97,7 +99,7 @@ struct repair_completion {
* order, then original journal order. This permits efficient iteration over the journal
* entries in order.
*/
- struct min_heap replay_heap;
+ struct replay_heap replay_heap;
/* Fields tracking progress through the journal entries. */
struct numbered_block_mapping *current_entry;
struct numbered_block_mapping *current_unfetched_entry;
@@ -135,7 +137,7 @@ struct repair_completion {
* to sort by slot while still ensuring we replay all entries with the same slot in the exact order
* as they appeared in the journal.
*/
-static bool mapping_is_less_than(const void *item1, const void *item2)
+static bool mapping_is_less_than(const void *item1, const void *item2, void __always_unused *args)
{
const struct numbered_block_mapping *mapping1 =
(const struct numbered_block_mapping *) item1;
@@ -154,7 +156,7 @@ static bool mapping_is_less_than(const void *item1, const void *item2)
return 0;
}
-static void swap_mappings(void *item1, void *item2)
+static void swap_mappings(void *item1, void *item2, void __always_unused *args)
{
struct numbered_block_mapping *mapping1 = item1;
struct numbered_block_mapping *mapping2 = item2;
@@ -163,14 +165,13 @@ static void swap_mappings(void *item1, void *item2)
}
static const struct min_heap_callbacks repair_min_heap = {
- .elem_size = sizeof(struct numbered_block_mapping),
.less = mapping_is_less_than,
.swp = swap_mappings,
};
static struct numbered_block_mapping *sort_next_heap_element(struct repair_completion *repair)
{
- struct min_heap *heap = &repair->replay_heap;
+ struct replay_heap *heap = &repair->replay_heap;
struct numbered_block_mapping *last;
if (heap->nr == 0)
@@ -181,8 +182,8 @@ static struct numbered_block_mapping *sort_next_heap_element(struct repair_compl
* restore the heap invariant, and return a pointer to the popped element.
*/
last = &repair->entries[--heap->nr];
- swap_mappings(heap->data, last);
- min_heapify(heap, 0, &repair_min_heap);
+ swap_mappings(heap->data, last, NULL);
+ min_heap_sift_down(heap, 0, &repair_min_heap, NULL);
return last;
}
@@ -1116,12 +1117,12 @@ static void recover_block_map(struct vdo_completion *completion)
* Organize the journal entries into a binary heap so we can iterate over them in sorted
* order incrementally, avoiding an expensive sort call.
*/
- repair->replay_heap = (struct min_heap) {
+ repair->replay_heap = (struct replay_heap) {
.data = repair->entries,
.nr = repair->block_map_entry_count,
.size = repair->block_map_entry_count,
};
- min_heapify_all(&repair->replay_heap, &repair_min_heap);
+ min_heapify_all(&repair->replay_heap, &repair_min_heap, NULL);
vdo_log_info("Replaying %zu recovery entries into block map",
repair->block_map_entry_count);
diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c
index 46e4721e5b4f..274f9ccd072f 100644
--- a/drivers/md/dm-vdo/slab-depot.c
+++ b/drivers/md/dm-vdo/slab-depot.c
@@ -3288,7 +3288,8 @@ int vdo_release_block_reference(struct block_allocator *allocator,
* Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements
* before larger ones.
*/
-static bool slab_status_is_less_than(const void *item1, const void *item2)
+static bool slab_status_is_less_than(const void *item1, const void *item2,
+ void __always_unused *args)
{
const struct slab_status *info1 = item1;
const struct slab_status *info2 = item2;
@@ -3300,7 +3301,7 @@ static bool slab_status_is_less_than(const void *item1, const void *item2)
return info1->slab_number < info2->slab_number;
}
-static void swap_slab_statuses(void *item1, void *item2)
+static void swap_slab_statuses(void *item1, void *item2, void __always_unused *args)
{
struct slab_status *info1 = item1;
struct slab_status *info2 = item2;
@@ -3309,7 +3310,6 @@ static void swap_slab_statuses(void *item1, void *item2)
}
static const struct min_heap_callbacks slab_status_min_heap = {
- .elem_size = sizeof(struct slab_status),
.less = slab_status_is_less_than,
.swp = swap_slab_statuses,
};
@@ -3509,7 +3509,7 @@ static int get_slab_statuses(struct block_allocator *allocator,
static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator *allocator)
{
struct slab_status current_slab_status;
- struct min_heap heap;
+ DEFINE_MIN_HEAP(struct slab_status, heap) heap;
int result;
struct slab_status *slab_statuses;
struct slab_depot *depot = allocator->depot;
@@ -3521,12 +3521,12 @@ static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator
return result;
/* Sort the slabs by cleanliness, then by emptiness hint. */
- heap = (struct min_heap) {
+ heap = (struct heap) {
.data = slab_statuses,
.nr = allocator->slab_count,
.size = allocator->slab_count,
};
- min_heapify_all(&heap, &slab_status_min_heap);
+ min_heapify_all(&heap, &slab_status_min_heap, NULL);
while (heap.nr > 0) {
bool high_priority;
@@ -3534,7 +3534,7 @@ static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator
struct slab_journal *journal;
current_slab_status = slab_statuses[0];
- min_heap_pop(&heap, &slab_status_min_heap);
+ min_heap_pop(&heap, &slab_status_min_heap, NULL);
slab = depot->slabs[current_slab_status.slab_number];
if ((depot->load_type == VDO_SLAB_DEPOT_REBUILD_LOAD) ||
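
Both dm-vdo hunks switch from the old untyped struct min_heap, whose callbacks had to carry .elem_size, to per-type heaps declared with DEFINE_MIN_HEAP(), so the element size is known to the compiler. A rough user-space imitation of the idea follows; the macro shape mirrors the kernel's, but this is a sketch, not the min_heap.h implementation:

#include <stdio.h>

/* Declare a heap type whose data pointer already has the element type,
 * so no elem_size bookkeeping is needed at runtime. */
#define DEFINE_MIN_HEAP(type, name)	\
	struct name {			\
		size_t nr, size;	\
		type *data;		\
	}

struct slab_status {
	unsigned int slab_number;
	int is_clean;
};

DEFINE_MIN_HEAP(struct slab_status, slab_status_heap);

int main(void)
{
	struct slab_status statuses[4] = {
		{ 3, 1 }, { 1, 0 }, { 2, 1 }, { 0, 0 },
	};
	struct slab_status_heap heap = {
		.data = statuses,
		.nr = 4,
		.size = 4,
	};

	/* sizeof(*heap.data) is known statically; the old untyped heap
	 * had to carry this in min_heap_callbacks.elem_size instead */
	printf("element size %zu, %zu elements\n",
	       sizeof(*heap.data), heap.nr);
	return 0;
}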
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index abc478b649b7..97fab2087df8 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -11,6 +11,7 @@
#include "dm-uevent.h"
#include "dm-ima.h"
+#include <linux/bio-integrity.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index c1ea214bfc91..1d0db62f0351 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -15,6 +15,7 @@
#define LVB_SIZE 64
#define NEW_DEV_TIMEOUT 5000
+#define WAIT_DLM_LOCK_TIMEOUT (30 * HZ)
struct dlm_lock_resource {
dlm_lockspace_t *ls;
@@ -56,6 +57,7 @@ struct resync_info {
#define MD_CLUSTER_ALREADY_IN_CLUSTER 6
#define MD_CLUSTER_PENDING_RECV_EVENT 7
#define MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD 8
+#define MD_CLUSTER_WAITING_FOR_SYNC 9
struct md_cluster_info {
struct mddev *mddev; /* the md device which md_cluster_info belongs to */
@@ -91,6 +93,7 @@ struct md_cluster_info {
sector_t sync_hi;
};
+/* For compatibility, add the new msg_type at the end. */
enum msg_type {
METADATA_UPDATED = 0,
RESYNCING,
@@ -100,6 +103,7 @@ enum msg_type {
BITMAP_NEEDS_SYNC,
CHANGE_CAPACITY,
BITMAP_RESIZE,
+ RESYNCING_START,
};
struct cluster_msg {
@@ -130,8 +134,13 @@ static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
0, sync_ast, res, res->bast);
if (ret)
return ret;
- wait_event(res->sync_locking, res->sync_locking_done);
+ ret = wait_event_timeout(res->sync_locking, res->sync_locking_done,
+ WAIT_DLM_LOCK_TIMEOUT);
res->sync_locking_done = false;
+ if (!ret) {
+ pr_err("locking DLM '%s' timeout!\n", res->name);
+ return -EBUSY;
+ }
if (res->lksb.sb_status == 0)
res->mode = mode;
return res->lksb.sb_status;
@@ -455,6 +464,7 @@ static void process_suspend_info(struct mddev *mddev,
clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
remove_suspend_info(mddev, slot);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ clear_bit(MD_CLUSTER_WAITING_FOR_SYNC, &cinfo->state);
md_wakeup_thread(mddev->thread);
return;
}
@@ -525,6 +535,7 @@ static int process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
res = -1;
}
clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
+ set_bit(MD_CLUSTER_WAITING_FOR_SYNC, &cinfo->state);
return res;
}
@@ -593,6 +604,9 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
case CHANGE_CAPACITY:
set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
break;
+ case RESYNCING_START:
+ clear_bit(MD_CLUSTER_WAITING_FOR_SYNC, &mddev->cluster_info->state);
+ break;
case RESYNCING:
set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
process_suspend_info(mddev, le32_to_cpu(msg->slot),
@@ -743,7 +757,7 @@ static void unlock_comm(struct md_cluster_info *cinfo)
*/
static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
{
- int error;
+ int error, unlock_error;
int slot = cinfo->slot_number - 1;
cmsg->slot = cpu_to_le32(slot);
@@ -751,7 +765,7 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
if (error) {
pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error);
- goto failed_message;
+ return error;
}
memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
@@ -781,14 +795,10 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
}
failed_ack:
- error = dlm_unlock_sync(cinfo->message_lockres);
- if (unlikely(error != 0)) {
+ while ((unlock_error = dlm_unlock_sync(cinfo->message_lockres)))
pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n",
- error);
- /* in case the message can't be released due to some reason */
- goto failed_ack;
- }
-failed_message:
+ unlock_error);
+
return error;
}
@@ -1343,6 +1353,23 @@ static void resync_info_get(struct mddev *mddev, sector_t *lo, sector_t *hi)
spin_unlock_irq(&cinfo->suspend_lock);
}
+static int resync_status_get(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ return test_bit(MD_CLUSTER_WAITING_FOR_SYNC, &cinfo->state);
+}
+
+static int resync_start_notify(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg = {0};
+
+ cmsg.type = cpu_to_le32(RESYNCING_START);
+
+ return sendmsg(cinfo, &cmsg, 0);
+}
+
static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
@@ -1577,6 +1604,8 @@ static const struct md_cluster_operations cluster_ops = {
.resync_start = resync_start,
.resync_finish = resync_finish,
.resync_info_update = resync_info_update,
+ .resync_start_notify = resync_start_notify,
+ .resync_status_get = resync_status_get,
.resync_info_get = resync_info_get,
.metadata_update_start = metadata_update_start,
.metadata_update_finish = metadata_update_finish,
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index a78e3021775d..470bf18ffde5 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -14,6 +14,8 @@ struct md_cluster_operations {
int (*leave)(struct mddev *mddev);
int (*slot_number)(struct mddev *mddev);
int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*resync_start_notify)(struct mddev *mddev);
+ int (*resync_status_get)(struct mddev *mddev);
void (*resync_info_get)(struct mddev *mddev, sector_t *lo, sector_t *hi);
int (*metadata_update_start)(struct mddev *mddev);
int (*metadata_update_finish)(struct mddev *mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 64693913ed18..d3a837506a36 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8978,7 +8978,8 @@ void md_do_sync(struct md_thread *thread)
* This will mean we have to start checking from the beginning again.
*
*/
-
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_start_notify(mddev);
do {
int mddev2_minor = -1;
mddev->curr_resync = MD_RESYNC_DELAYED;
@@ -9992,8 +9993,18 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
*/
if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
!(le32_to_cpu(sb->feature_map) &
- MD_FEATURE_RESHAPE_ACTIVE)) {
- rdev2->saved_raid_disk = role;
+ MD_FEATURE_RESHAPE_ACTIVE) &&
+ !md_cluster_ops->resync_status_get(mddev)) {
+ /*
+ * -1 to make raid1_add_disk() set conf->fullsync
+ * to 1. This could avoid skipping sync when the
+ * remote node is down during resyncing.
+ */
+ if ((le32_to_cpu(sb->feature_map)
+ & MD_FEATURE_RECOVERY_OFFSET))
+ rdev2->saved_raid_disk = -1;
+ else
+ rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
pr_info("Activated spare: %pg\n",
rdev2->bdev);
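
Taken together, the md-cluster changes form a small handshake: processing an add-new-disk message sets MD_CLUSTER_WAITING_FOR_SYNC, the node that begins resync broadcasts the new RESYNCING_START message (receivers clear the bit, as does resync completion), and check_sb_changes() above only activates spares while the bit is clear. A toy user-space model of that flag flow (names simplified, purely illustrative):

#include <stdio.h>
#include <stdbool.h>

enum msg { ADD_NEW_DISK, RESYNCING_START };

struct node {
	const char *name;
	bool waiting_for_sync;
};

static void recv(struct node *n, enum msg m)
{
	switch (m) {
	case ADD_NEW_DISK:
		n->waiting_for_sync = true;	/* hold off spare activation */
		break;
	case RESYNCING_START:
		n->waiting_for_sync = false;	/* sender has begun resync */
		break;
	}
}

static void try_activate_spare(const struct node *n)
{
	if (n->waiting_for_sync)
		printf("%s: defer spare activation (waiting for sync)\n", n->name);
	else
		printf("%s: activate spare\n", n->name);
}

int main(void)
{
	struct node peer = { "node2", false };

	recv(&peer, ADD_NEW_DISK);	/* another node hot-added a disk */
	try_activate_spare(&peer);	/* deferred */
	recv(&peer, RESYNCING_START);	/* that node started resyncing */
	try_activate_spare(&peer);	/* now allowed */
	return 0;
}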
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 04a0c2ca1732..7acfe7c9dc8d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -680,6 +680,7 @@ static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
len = r1_bio->sectors;
read_len = raid1_check_read_range(rdev, this_sector, &len);
if (read_len == r1_bio->sectors) {
+ *max_sectors = read_len;
update_read_sectors(conf, disk, this_sector, read_len);
return disk;
}
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index c38a6083f0a7..3aaaf47fa4ee 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -7,6 +7,7 @@
*/
#include <linux/crc32.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
@@ -84,6 +85,9 @@
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031
+#define PCI_VENDOR_ID_ROCKCHIP 0x1d87
+#define PCI_DEVICE_ID_ROCKCHIP_RK3588 0x3588
+
static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
@@ -140,18 +144,6 @@ static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
writel(value, test->base + offset);
}
-static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
- int bar, int offset)
-{
- return readl(test->bar[bar] + offset);
-}
-
-static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
- int bar, u32 offset, u32 value)
-{
- writel(value, test->bar[bar] + offset);
-}
-
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
struct pci_endpoint_test *test = dev_id;
@@ -272,31 +264,60 @@ static const u32 bar_test_pattern[] = {
0xA5A5A5A5,
};
+static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
+ enum pci_barno barno, int offset,
+ void *write_buf, void *read_buf,
+ int size)
+{
+ memset(write_buf, bar_test_pattern[barno], size);
+ memcpy_toio(test->bar[barno] + offset, write_buf, size);
+
+ memcpy_fromio(read_buf, test->bar[barno] + offset, size);
+
+ return memcmp(write_buf, read_buf, size);
+}
+
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
- int j;
- u32 val;
- int size;
+ int j, bar_size, buf_size, iters, remain;
+ void *write_buf __free(kfree) = NULL;
+ void *read_buf __free(kfree) = NULL;
struct pci_dev *pdev = test->pdev;
if (!test->bar[barno])
return false;
- size = pci_resource_len(pdev, barno);
+ bar_size = pci_resource_len(pdev, barno);
if (barno == test->test_reg_bar)
- size = 0x4;
+ bar_size = 0x4;
+
+ /*
+ * Allocate a buffer of max size 1MB, and reuse that buffer while
+ * iterating over the whole BAR size (which might be much larger).
+ */
+ buf_size = min(SZ_1M, bar_size);
- for (j = 0; j < size; j += 4)
- pci_endpoint_test_bar_writel(test, barno, j,
- bar_test_pattern[barno]);
+ write_buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!write_buf)
+ return false;
- for (j = 0; j < size; j += 4) {
- val = pci_endpoint_test_bar_readl(test, barno, j);
- if (val != bar_test_pattern[barno])
+ read_buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!read_buf)
+ return false;
+
+ iters = bar_size / buf_size;
+ for (j = 0; j < iters; j++)
+ if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
+ write_buf, read_buf, buf_size))
+ return false;
+
+ remain = bar_size % buf_size;
+ if (remain)
+ if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * iters,
+ write_buf, read_buf, remain))
return false;
- }
return true;
}
@@ -824,11 +845,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
init_completion(&test->irq_raised);
mutex_init(&test->mutex);
- if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(dev, "Cannot set DMA mask\n");
- return -EINVAL;
- }
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
err = pci_enable_device(pdev);
if (err) {
@@ -980,6 +997,15 @@ static const struct pci_endpoint_test_data j721e_data = {
.irq_type = IRQ_TYPE_MSI,
};
+static const struct pci_endpoint_test_data rk3588_data = {
+ .alignment = SZ_64K,
+ .irq_type = IRQ_TYPE_MSI,
+};
+
+/*
+ * If the controller's Vendor/Device ID are programmable, you may be able to
+ * use one of the existing entries for testing instead of adding a new one.
+ */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
.driver_data = (kernel_ulong_t)&default_data,
@@ -1017,6 +1043,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
.driver_data = (kernel_ulong_t)&j721e_data,
},
+ { PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
+ .driver_data = (kernel_ulong_t)&rk3588_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
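
Instead of one readl()/writel() per dword across a BAR that can be hundreds of megabytes, the rewritten BAR test fills a bounce buffer of at most 1 MiB, copies it out with memcpy_toio(), reads it back with memcpy_fromio(), and memcmp()s, iterating in whole chunks plus a final remainder. A user-space analogue of the chunking arithmetic, with a plain malloc'd region standing in for the BAR:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_MAX (1 << 20)	/* 1 MiB bounce buffer, as in the patch */

static int verify_region(unsigned char *bar, size_t bar_size, unsigned char pattern)
{
	size_t buf_size = bar_size < BUF_MAX ? bar_size : BUF_MAX;
	size_t iters = bar_size / buf_size, remain = bar_size % buf_size;
	unsigned char *wr = malloc(buf_size), *rd = malloc(buf_size);
	int ok = 1;

	if (!wr || !rd) {
		ok = 0;
		goto out;
	}
	memset(wr, pattern, buf_size);

	for (size_t i = 0; i < iters && ok; i++) {
		memcpy(bar + i * buf_size, wr, buf_size);	/* memcpy_toio() */
		memcpy(rd, bar + i * buf_size, buf_size);	/* memcpy_fromio() */
		ok = !memcmp(wr, rd, buf_size);
	}
	if (ok && remain) {
		memcpy(bar + iters * buf_size, wr, remain);
		memcpy(rd, bar + iters * buf_size, remain);
		ok = !memcmp(wr, rd, remain);
	}
out:
	free(wr);
	free(rd);
	return ok;
}

int main(void)
{
	size_t size = 3 * BUF_MAX + 123;	/* not a multiple of the buffer */
	unsigned char *bar = malloc(size);

	if (!bar)
		return 1;
	printf("BAR test %s\n", verify_region(bar, size, 0xA5) ? "ok" : "failed");
	free(bar);
	return 0;
}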
diff --git a/drivers/most/core.c b/drivers/most/core.c
index f13d0e14a48b..10342e8801bf 100644
--- a/drivers/most/core.c
+++ b/drivers/most/core.c
@@ -1286,7 +1286,7 @@ int most_register_interface(struct most_interface *iface)
!iface->poison_channel || (iface->num_channels > MAX_CHANNELS))
return -EINVAL;
- id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&mdev_id, GFP_KERNEL);
if (id < 0) {
dev_err(iface->dev, "Failed to allocate device ID\n");
return id;
@@ -1294,7 +1294,7 @@ int most_register_interface(struct most_interface *iface)
iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
if (!iface->p) {
- ida_simple_remove(&mdev_id, id);
+ ida_free(&mdev_id, id);
return -ENOMEM;
}
@@ -1308,7 +1308,7 @@ int most_register_interface(struct most_interface *iface)
dev_err(iface->dev, "Failed to register interface device\n");
kfree(iface->p);
put_device(iface->dev);
- ida_simple_remove(&mdev_id, id);
+ ida_free(&mdev_id, id);
return -ENOMEM;
}
@@ -1366,7 +1366,7 @@ err_free_resources:
}
kfree(iface->p);
device_unregister(iface->dev);
- ida_simple_remove(&mdev_id, id);
+ ida_free(&mdev_id, id);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(most_register_interface);
@@ -1397,7 +1397,7 @@ void most_deregister_interface(struct most_interface *iface)
device_unregister(&c->dev);
}
- ida_simple_remove(&mdev_id, iface->p->dev_id);
+ ida_free(&mdev_id, iface->p->dev_id);
kfree(iface->p);
device_unregister(iface->dev);
}
diff --git a/drivers/most/most_cdev.c b/drivers/most/most_cdev.c
index 3ed8f461e01e..b9423f82373d 100644
--- a/drivers/most/most_cdev.c
+++ b/drivers/most/most_cdev.c
@@ -100,7 +100,7 @@ static void destroy_cdev(struct comp_channel *c)
static void destroy_channel(struct comp_channel *c)
{
- ida_simple_remove(&comp.minor_id, MINOR(c->devno));
+ ida_free(&comp.minor_id, MINOR(c->devno));
kfifo_free(&c->fifo);
kfree(c);
}
@@ -425,7 +425,7 @@ static int comp_probe(struct most_interface *iface, int channel_id,
if (c)
return -EEXIST;
- current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
+ current_minor = ida_alloc(&comp.minor_id, GFP_KERNEL);
if (current_minor < 0)
return current_minor;
@@ -472,7 +472,7 @@ err_del_cdev_and_free_channel:
err_free_c:
kfree(c);
err_remove_ida:
- ida_simple_remove(&comp.minor_id, current_minor);
+ ida_free(&comp.minor_id, current_minor);
return retval;
}
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 60c7f6f751c7..5e5266e2c2e1 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -1399,4 +1399,5 @@ static void cfi_staa_destroy(struct mtd_info *mtd)
kfree(cfi);
}
+MODULE_DESCRIPTION("MTD chip driver for ST Advanced Architecture Command Set (ID 0x0020)");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 140c69a67e82..ef0aa6890bc0 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -441,4 +441,5 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
EXPORT_SYMBOL(cfi_varsize_frob);
+MODULE_DESCRIPTION("Common Flash Interface Generic utility functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index a9083c888e3b..019f1e92cc41 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -17,13 +17,12 @@ obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
-physmap-objs-y += physmap-core.o
-physmap-objs-$(CONFIG_MTD_PHYSMAP_BT1_ROM) += physmap-bt1-rom.o
-physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
-physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
-physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
-physmap-objs := $(physmap-objs-y)
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
+physmap-y := physmap-core.o
+physmap-$(CONFIG_MTD_PHYSMAP_BT1_ROM) += physmap-bt1-rom.o
+physmap-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
+physmap-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
+physmap-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
diff --git a/drivers/mtd/maps/map_funcs.c b/drivers/mtd/maps/map_funcs.c
index 5b684c170d4e..1a4add9e119a 100644
--- a/drivers/mtd/maps/map_funcs.c
+++ b/drivers/mtd/maps/map_funcs.c
@@ -41,4 +41,5 @@ void simple_map_init(struct map_info *map)
}
EXPORT_SYMBOL(simple_map_init);
+MODULE_DESCRIPTION("Out-of-line map I/O");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 04f84d87c657..ff92c17def83 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -531,11 +531,6 @@ struct cdns_nand_chip {
u8 cs[] __counted_by(nsels);
};
-struct ecc_info {
- int (*calc_ecc_bytes)(int step_size, int strength);
- int max_step_size;
-};
-
static inline struct
cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
{
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index e71ad2fcec23..e1b515304e3c 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -983,7 +983,7 @@ static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
return PTR_ERR(sdr);
/* Only MX28/MX6 GPMI controller can reach EDO timings */
- if (sdr->tRC_min <= 25000 && !GPMI_IS_MX28(this) && !GPMI_IS_MX6(this))
+ if (sdr->tRC_min <= 25000 && !this->devdata->support_edo_timing)
return -ENOTSUPP;
/* Stop here if this call was just a check */
@@ -1142,6 +1142,7 @@ static const struct gpmi_devdata gpmi_devdata_imx28 = {
.type = IS_MX28,
.bch_max_ecc_strength = 20,
.max_chain_delay = 16000,
+ .support_edo_timing = true,
.clks = gpmi_clks_for_mx2x,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};
@@ -1154,6 +1155,7 @@ static const struct gpmi_devdata gpmi_devdata_imx6q = {
.type = IS_MX6Q,
.bch_max_ecc_strength = 40,
.max_chain_delay = 12000,
+ .support_edo_timing = true,
.clks = gpmi_clks_for_mx6,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};
@@ -1162,6 +1164,7 @@ static const struct gpmi_devdata gpmi_devdata_imx6sx = {
.type = IS_MX6SX,
.bch_max_ecc_strength = 62,
.max_chain_delay = 12000,
+ .support_edo_timing = true,
.clks = gpmi_clks_for_mx6,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};
@@ -1174,10 +1177,24 @@ static const struct gpmi_devdata gpmi_devdata_imx7d = {
.type = IS_MX7D,
.bch_max_ecc_strength = 62,
.max_chain_delay = 12000,
+ .support_edo_timing = true,
.clks = gpmi_clks_for_mx7d,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};
+static const char *gpmi_clks_for_mx8qxp[GPMI_CLK_MAX] = {
+ "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb",
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx8qxp = {
+ .type = IS_MX8QXP,
+ .bch_max_ecc_strength = 62,
+ .max_chain_delay = 12000,
+ .support_edo_timing = true,
+ .clks = gpmi_clks_for_mx8qxp,
+ .clks_count = ARRAY_SIZE(gpmi_clks_for_mx8qxp),
+};
+
static int acquire_register_block(struct gpmi_nand_data *this,
const char *res_name)
{
@@ -2721,6 +2738,7 @@ static const struct of_device_id gpmi_nand_id_table[] = {
{ .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, },
{ .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, },
{ .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,},
+ { .compatible = "fsl,imx8qxp-gpmi-nand", .data = &gpmi_devdata_imx8qxp, },
{}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index c3ff56ac62a7..3e9bc985e44a 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -78,6 +78,7 @@ enum gpmi_type {
IS_MX6Q,
IS_MX6SX,
IS_MX7D,
+ IS_MX8QXP,
};
struct gpmi_devdata {
@@ -86,6 +87,7 @@ struct gpmi_devdata {
int max_chain_delay; /* See the SDR EDO mode */
const char * const *clks;
const int clks_count;
+ bool support_edo_timing;
};
/**
@@ -172,8 +174,10 @@ struct gpmi_nand_data {
#define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q)
#define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX)
#define GPMI_IS_MX7D(x) ((x)->devdata->type == IS_MX7D)
+#define GPMI_IS_MX8QXP(x) ((x)->devdata->type == IS_MX8QXP)
#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x) || \
- GPMI_IS_MX7D(x))
+ GPMI_IS_MX7D(x) || GPMI_IS_MX8QXP(x))
+
#define GPMI_IS_MXS(x) (GPMI_IS_MX23(x) || GPMI_IS_MX28(x))
#endif
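
The GPMI change replaces the per-SoC test !GPMI_IS_MX28(this) && !GPMI_IS_MX6(this) with a support_edo_timing flag carried in the per-variant devdata, so adding i.MX8QXP only needs a new descriptor rather than another macro in every capability check. A small sketch of that descriptor-flag pattern (illustrative names and values):

#include <stdio.h>
#include <stdbool.h>

/* Per-variant descriptor: capabilities live next to the variant data,
 * so call sites test a flag instead of enumerating SoC types. */
struct chip_devdata {
	const char *name;
	int bch_max_ecc_strength;
	bool support_edo_timing;
};

static const struct chip_devdata imx23 = { "imx23", 20, false };
static const struct chip_devdata imx8qxp = { "imx8qxp", 62, true };

static int setup_interface(const struct chip_devdata *d, int trc_min_ps)
{
	/* EDO timings need controller support; reject otherwise */
	if (trc_min_ps <= 25000 && !d->support_edo_timing)
		return -1;	/* -ENOTSUPP in the driver */
	return 0;
}

int main(void)
{
	printf("imx23:   %d\n", setup_interface(&imx23, 25000));
	printf("imx8qxp: %d\n", setup_interface(&imx8qxp, 25000));
	return 0;
}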
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index f0ad2308f6d5..78174c463b36 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -295,7 +295,7 @@ static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
dma_addr_t buf_dma;
int ret;
- u32 timeout;
+ unsigned long time_left;
if (dir == DMA_DEV_TO_MEM) {
chan = ebu_host->dma_rx;
@@ -335,8 +335,8 @@ static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
dma_async_issue_pending(chan);
/* Wait DMA to finish the data transfer.*/
- timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
- if (!timeout) {
+ time_left = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
+ if (!time_left) {
dev_err(ebu_host->dev, "I/O Error in DMA RX (status %d)\n",
dmaengine_tx_status(chan, cookie, NULL));
dmaengine_terminate_sync(chan);
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 677fcb03f9be..b9c3adc54c01 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -574,18 +574,22 @@ static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
dma_cap_mask_t mask;
- if (!host->pdata || !host->pdata->dma_filter) {
- dev_err(mtd->dev.parent, "no DMA platform data\n");
- return -ENOENT;
- }
+ host->dma_chan = dma_request_chan(mtd->dev.parent, "rx-tx");
+ if (IS_ERR(host->dma_chan)) {
+ /* fallback to request using platform data */
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
- "nand-mlc");
- if (!host->dma_chan) {
- dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
- return -EBUSY;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter, "nand-mlc");
+
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
}
/*
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 1c5fa855b9f2..ade971e4cc3b 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -721,18 +721,22 @@ static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
dma_cap_mask_t mask;
- if (!host->pdata || !host->pdata->dma_filter) {
- dev_err(mtd->dev.parent, "no DMA platform data\n");
- return -ENOENT;
- }
+ host->dma_chan = dma_request_chan(mtd->dev.parent, "rx-tx");
+ if (IS_ERR(host->dma_chan)) {
+ /* fallback to request using platform data */
+ if (!host->pdata || !host->pdata->dma_filter) {
+ dev_err(mtd->dev.parent, "no DMA platform data\n");
+ return -ENOENT;
+ }
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
- "nand-slc");
- if (!host->dma_chan) {
- dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
- return -EBUSY;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter, "nand-slc");
+
+ if (!host->dma_chan) {
+ dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+ return -EBUSY;
+ }
}
return 0;
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 2a96a87cf79c..9eb5470344d0 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -35,6 +35,7 @@
#define NFC_CMD_RB BIT(20)
#define NFC_CMD_SCRAMBLER_ENABLE BIT(19)
#define NFC_CMD_SCRAMBLER_DISABLE 0
+#define NFC_CMD_SHORTMODE_ENABLE 1
#define NFC_CMD_SHORTMODE_DISABLE 0
#define NFC_CMD_RB_INT BIT(14)
#define NFC_CMD_RB_INT_NO_PIN ((0xb << 10) | BIT(18) | BIT(16))
@@ -78,6 +79,8 @@
#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
#define DMA_ADDR_ALIGN 8
+#define NFC_SHORT_MODE_ECC_SZ 384
+
#define ECC_CHECK_RETURN_FF (-1)
#define NAND_CE0 (0xe << 10)
@@ -125,6 +128,8 @@ struct meson_nfc_nand_chip {
u32 twb;
u32 tadl;
u32 tbers_max;
+ u32 boot_pages;
+ u32 boot_page_step;
u32 bch_mode;
u8 *data_buf;
@@ -298,28 +303,49 @@ static void meson_nfc_cmd_seed(struct meson_nfc *nfc, u32 seed)
nfc->reg_base + NFC_REG_CMD);
}
-static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
- int scrambler)
+static int meson_nfc_is_boot_page(struct nand_chip *nand, int page)
+{
+ const struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
+
+ return (nand->options & NAND_IS_BOOT_MEDIUM) &&
+ !(page % meson_chip->boot_page_step) &&
+ (page < meson_chip->boot_pages);
+}
+
+static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir, int page)
{
+ const struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
- struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
- u32 bch = meson_chip->bch_mode, cmd;
int len = mtd->writesize, pagesize, pages;
+ int scrambler;
+ u32 cmd;
- pagesize = nand->ecc.size;
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ scrambler = NFC_CMD_SCRAMBLER_ENABLE;
+ else
+ scrambler = NFC_CMD_SCRAMBLER_DISABLE;
if (raw) {
len = mtd->writesize + mtd->oobsize;
cmd = len | scrambler | DMA_DIR(dir);
- writel(cmd, nfc->reg_base + NFC_REG_CMD);
- return;
- }
+ } else if (meson_nfc_is_boot_page(nand, page)) {
+ pagesize = NFC_SHORT_MODE_ECC_SZ >> 3;
+ pages = mtd->writesize / 512;
+
+ scrambler = NFC_CMD_SCRAMBLER_ENABLE;
+ cmd = CMDRWGEN(DMA_DIR(dir), scrambler, NFC_ECC_BCH8_1K,
+ NFC_CMD_SHORTMODE_ENABLE, pagesize, pages);
+ } else {
+ pagesize = nand->ecc.size >> 3;
+ pages = len / nand->ecc.size;
- pages = len / nand->ecc.size;
+ cmd = CMDRWGEN(DMA_DIR(dir), scrambler, meson_chip->bch_mode,
+ NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
+ }
- cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
- NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
+ if (scrambler == NFC_CMD_SCRAMBLER_ENABLE)
+ meson_nfc_cmd_seed(nfc, page);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
}
@@ -743,14 +769,7 @@ static int meson_nfc_write_page_sub(struct nand_chip *nand,
if (ret)
return ret;
- if (nand->options & NAND_NEED_SCRAMBLING) {
- meson_nfc_cmd_seed(nfc, page);
- meson_nfc_cmd_access(nand, raw, DIRWRITE,
- NFC_CMD_SCRAMBLER_ENABLE);
- } else {
- meson_nfc_cmd_access(nand, raw, DIRWRITE,
- NFC_CMD_SCRAMBLER_DISABLE);
- }
+ meson_nfc_cmd_access(nand, raw, DIRWRITE, page);
cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
@@ -829,14 +848,7 @@ static int meson_nfc_read_page_sub(struct nand_chip *nand,
if (ret)
return ret;
- if (nand->options & NAND_NEED_SCRAMBLING) {
- meson_nfc_cmd_seed(nfc, page);
- meson_nfc_cmd_access(nand, raw, DIRREAD,
- NFC_CMD_SCRAMBLER_ENABLE);
- } else {
- meson_nfc_cmd_access(nand, raw, DIRREAD,
- NFC_CMD_SCRAMBLER_DISABLE);
- }
+ meson_nfc_cmd_access(nand, raw, DIRREAD, page);
ret = meson_nfc_wait_dma_finish(nfc);
meson_nfc_check_ecc_pages_valid(nfc, nand, raw);
@@ -1431,6 +1443,26 @@ meson_nfc_nand_chip_init(struct device *dev,
if (ret)
return ret;
+ if (nand->options & NAND_IS_BOOT_MEDIUM) {
+ ret = of_property_read_u32(np, "amlogic,boot-pages",
+ &meson_chip->boot_pages);
+ if (ret) {
+ dev_err(dev, "could not retrieve 'amlogic,boot-pages' property: %d",
+ ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "amlogic,boot-page-step",
+ &meson_chip->boot_page_step);
+ if (ret) {
+ dev_err(dev, "could not retrieve 'amlogic,boot-page-step' property: %d",
+ ret);
+ nand_cleanup(nand);
+ return ret;
+ }
+ }
+
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register MTD device: %d\n", ret);
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 003008355b3c..736808150e74 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -20,6 +20,7 @@
#include <linux/irq.h>
#include <linux/completion.h>
#include <linux/of.h>
+#include <linux/bitfield.h>
#define DRIVER_NAME "mxc_nand"
@@ -47,6 +48,8 @@
#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
+#define NFC_V1_V2_ECC_STATUS_RESULT_ERM GENMASK(3, 2)
+
#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0)
#define NFC_V1_V2_CONFIG1_SP_EN (1 << 2)
#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3)
@@ -123,8 +126,7 @@ struct mxc_nand_host;
struct mxc_nand_devtype_data {
void (*preset)(struct mtd_info *);
- int (*read_page)(struct nand_chip *chip, void *buf, void *oob, bool ecc,
- int page);
+ int (*read_page)(struct nand_chip *chip);
void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
void (*send_page)(struct mtd_info *, unsigned int);
@@ -132,7 +134,7 @@ struct mxc_nand_devtype_data {
uint16_t (*get_dev_status)(struct mxc_nand_host *);
int (*check_int)(struct mxc_nand_host *);
void (*irq_control)(struct mxc_nand_host *, int);
- u32 (*get_ecc_status)(struct mxc_nand_host *);
+ u32 (*get_ecc_status)(struct nand_chip *);
const struct mtd_ooblayout_ops *ooblayout;
void (*select_chip)(struct nand_chip *chip, int cs);
int (*setup_interface)(struct nand_chip *chip, int csline,
@@ -175,11 +177,11 @@ struct mxc_nand_host {
int eccsize;
int used_oobsize;
int active_cs;
+ unsigned int ecc_stats_v1;
struct completion op_completion;
- uint8_t *data_buf;
- unsigned int buf_start;
+ void *data_buf;
const struct mxc_nand_devtype_data *devtype_data;
};
@@ -281,63 +283,6 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom, void *buf)
}
}
-/*
- * MXC NANDFC can only perform full page+spare or spare-only read/write. When
- * the upper layers perform a read/write buf operation, the saved column address
- * is used to index into the full page. So usually this function is called with
- * column == 0 (unless no column cycle is needed indicated by column == -1)
- */
-static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
-{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
- /* Write out column address, if necessary */
- if (column != -1) {
- host->devtype_data->send_addr(host, column & 0xff,
- page_addr == -1);
- if (mtd->writesize > 512)
- /* another col addr cycle for 2k page */
- host->devtype_data->send_addr(host,
- (column >> 8) & 0xff,
- false);
- }
-
- /* Write out page address, if necessary */
- if (page_addr != -1) {
- /* paddr_0 - p_addr_7 */
- host->devtype_data->send_addr(host, (page_addr & 0xff), false);
-
- if (mtd->writesize > 512) {
- if (mtd->size >= 0x10000000) {
- /* paddr_8 - paddr_15 */
- host->devtype_data->send_addr(host,
- (page_addr >> 8) & 0xff,
- false);
- host->devtype_data->send_addr(host,
- (page_addr >> 16) & 0xff,
- true);
- } else
- /* paddr_8 - paddr_15 */
- host->devtype_data->send_addr(host,
- (page_addr >> 8) & 0xff, true);
- } else {
- if (nand_chip->options & NAND_ROW_ADDR_3) {
- /* paddr_8 - paddr_15 */
- host->devtype_data->send_addr(host,
- (page_addr >> 8) & 0xff,
- false);
- host->devtype_data->send_addr(host,
- (page_addr >> 16) & 0xff,
- true);
- } else
- /* paddr_8 - paddr_15 */
- host->devtype_data->send_addr(host,
- (page_addr >> 8) & 0xff, true);
- }
- }
-}
-
static int check_int_v3(struct mxc_nand_host *host)
{
uint32_t tmp;
@@ -406,19 +351,81 @@ static void irq_control(struct mxc_nand_host *host, int activate)
}
}
-static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+static u32 get_ecc_status_v1(struct nand_chip *chip)
{
- return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ unsigned int ecc_stats, max_bitflips = 0;
+ int no_subpages, i;
+
+ no_subpages = mtd->writesize >> 9;
+
+ ecc_stats = host->ecc_stats_v1;
+
+ for (i = 0; i < no_subpages; i++) {
+ switch (ecc_stats & 0x3) {
+ case 0:
+ default:
+ break;
+ case 1:
+ mtd->ecc_stats.corrected++;
+ max_bitflips = 1;
+ break;
+ case 2:
+ mtd->ecc_stats.failed++;
+ break;
+ }
+
+ ecc_stats >>= 2;
+ }
+
+ return max_bitflips;
}
-static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+static u32 get_ecc_status_v2_v3(struct nand_chip *chip, unsigned int ecc_stat)
{
- return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ u8 ecc_bit_mask, err_limit;
+ unsigned int max_bitflips = 0;
+ int no_subpages, err;
+
+ ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
+ err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
+
+ no_subpages = mtd->writesize >> 9;
+
+ do {
+ err = ecc_stat & ecc_bit_mask;
+ if (err > err_limit) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += err;
+ max_bitflips = max_t(unsigned int, max_bitflips, err);
+ }
+
+ ecc_stat >>= 4;
+ } while (--no_subpages);
+
+ return max_bitflips;
}
-static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+static u32 get_ecc_status_v2(struct nand_chip *chip)
{
- return readl(NFC_V3_ECC_STATUS_RESULT);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ u32 ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
+
+ return get_ecc_status_v2_v3(chip, ecc_stat);
+}
+
+static u32 get_ecc_status_v3(struct nand_chip *chip)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ u32 ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+
+ return get_ecc_status_v2_v3(chip, ecc_stat);
}
static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
@@ -450,14 +457,14 @@ static int wait_op_done(struct mxc_nand_host *host, int useirq)
return 0;
if (useirq) {
- unsigned long timeout;
+ unsigned long time_left;
reinit_completion(&host->op_completion);
irq_control(host, 1);
- timeout = wait_for_completion_timeout(&host->op_completion, HZ);
- if (!timeout && !host->devtype_data->check_int(host)) {
+ time_left = wait_for_completion_timeout(&host->op_completion, HZ);
+ if (!time_left && !host->devtype_data->check_int(host)) {
dev_dbg(host->dev, "timeout waiting for irq\n");
ret = -ETIMEDOUT;
}
@@ -697,38 +704,21 @@ static void mxc_nand_enable_hwecc_v3(struct nand_chip *chip, bool enable)
writel(config2, NFC_V3_CONFIG2);
}
-/* This functions is used by upper layer to checks if device is ready */
-static int mxc_nand_dev_ready(struct nand_chip *chip)
-{
- /*
- * NFC handles R/B internally. Therefore, this function
- * always returns status as ready.
- */
- return 1;
-}
-
-static int mxc_nand_read_page_v1(struct nand_chip *chip, void *buf, void *oob,
- bool ecc, int page)
+static int mxc_nand_read_page_v1(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
- unsigned int bitflips_corrected = 0;
int no_subpages;
int i;
+ unsigned int ecc_stats = 0;
- host->devtype_data->enable_hwecc(chip, ecc);
-
- host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
- mxc_do_addr_cycle(mtd, 0, page);
-
- if (mtd->writesize > 512)
- host->devtype_data->send_cmd(host, NAND_CMD_READSTART, true);
-
- no_subpages = mtd->writesize >> 9;
+ if (mtd->writesize)
+ no_subpages = mtd->writesize >> 9;
+ else
+ /* READ PARAMETER PAGE is called when mtd->writesize is not yet set */
+ no_subpages = 1;
for (i = 0; i < no_subpages; i++) {
- uint16_t ecc_stats;
-
/* NANDFC buffer 0 is used for page read/write */
writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
@@ -737,135 +727,74 @@ static int mxc_nand_read_page_v1(struct nand_chip *chip, void *buf, void *oob,
/* Wait for operation to complete */
wait_op_done(host, true);
- ecc_stats = get_ecc_status_v1(host);
-
- ecc_stats >>= 2;
-
- if (buf && ecc) {
- switch (ecc_stats & 0x3) {
- case 0:
- default:
- break;
- case 1:
- mtd->ecc_stats.corrected++;
- bitflips_corrected = 1;
- break;
- case 2:
- mtd->ecc_stats.failed++;
- break;
- }
- }
+ ecc_stats |= FIELD_GET(NFC_V1_V2_ECC_STATUS_RESULT_ERM,
+ readw(NFC_V1_V2_ECC_STATUS_RESULT)) << i * 2;
}
- if (buf)
- memcpy32_fromio(buf, host->main_area0, mtd->writesize);
- if (oob)
- copy_spare(mtd, true, oob);
+ host->ecc_stats_v1 = ecc_stats;
- return bitflips_corrected;
+ return 0;
}
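mxc_nand_read_page_v1() above now stores two bits of ECC status per 512-byte subpage (taken from the ERM field of the status register) into host->ecc_stats_v1, and get_ecc_status_v1() decodes them later: 0 means clean, 1 corrected, 2 uncorrectable. A small sketch of the packing, with illustrative helper names:

static unsigned int pack_subpage_code(unsigned int ecc_stats,
				      unsigned int subpage, unsigned int code)
{
	/* mirror of the "ecc_stats |= FIELD_GET(...) << i * 2" line above */
	return ecc_stats | ((code & 0x3) << (subpage * 2));
}

static unsigned int subpage_code(unsigned int ecc_stats, unsigned int subpage)
{
	/* mirror of the "ecc_stats >>= 2" walk in get_ecc_status_v1() */
	return (ecc_stats >> (subpage * 2)) & 0x3;
}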
-static int mxc_nand_read_page_v2_v3(struct nand_chip *chip, void *buf,
- void *oob, bool ecc, int page)
+static int mxc_nand_read_page_v2_v3(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
- unsigned int max_bitflips = 0;
- u32 ecc_stat, err;
- int no_subpages;
- u8 ecc_bit_mask, err_limit;
-
- host->devtype_data->enable_hwecc(chip, ecc);
-
- host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
- mxc_do_addr_cycle(mtd, 0, page);
-
- if (mtd->writesize > 512)
- host->devtype_data->send_cmd(host,
- NAND_CMD_READSTART, true);
host->devtype_data->send_page(mtd, NFC_OUTPUT);
- if (buf)
- memcpy32_fromio(buf, host->main_area0, mtd->writesize);
- if (oob)
- copy_spare(mtd, true, oob);
-
- ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
- err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
-
- no_subpages = mtd->writesize >> 9;
-
- ecc_stat = host->devtype_data->get_ecc_status(host);
-
- do {
- err = ecc_stat & ecc_bit_mask;
- if (err > err_limit) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += err;
- max_bitflips = max_t(unsigned int, max_bitflips, err);
- }
-
- ecc_stat >>= 4;
- } while (--no_subpages);
-
- return max_bitflips;
+ return 0;
}
static int mxc_nand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
- void *oob_buf;
+ int ret;
+
+ host->devtype_data->enable_hwecc(chip, true);
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+
+ host->devtype_data->enable_hwecc(chip, false);
+
+ if (ret)
+ return ret;
if (oob_required)
- oob_buf = chip->oob_poi;
- else
- oob_buf = NULL;
+ copy_spare(mtd, true, chip->oob_poi);
- return host->devtype_data->read_page(chip, buf, oob_buf, 1, page);
+ return host->devtype_data->get_ecc_status(chip);
}
static int mxc_nand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
- struct mxc_nand_host *host = nand_get_controller_data(chip);
- void *oob_buf;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
if (oob_required)
- oob_buf = chip->oob_poi;
- else
- oob_buf = NULL;
+ copy_spare(mtd, true, chip->oob_poi);
- return host->devtype_data->read_page(chip, buf, oob_buf, 0, page);
+ return 0;
}
static int mxc_nand_read_oob(struct nand_chip *chip, int page)
{
- struct mxc_nand_host *host = nand_get_controller_data(chip);
-
- return host->devtype_data->read_page(chip, NULL, chip->oob_poi, 0,
- page);
-}
-
-static int mxc_nand_write_page(struct nand_chip *chip, const uint8_t *buf,
- bool ecc, int page)
-{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int ret;
- host->devtype_data->enable_hwecc(chip, ecc);
-
- host->devtype_data->send_cmd(host, NAND_CMD_SEQIN, false);
- mxc_do_addr_cycle(mtd, 0, page);
-
- memcpy32_toio(host->main_area0, buf, mtd->writesize);
- copy_spare(mtd, false, chip->oob_poi);
+ ret = nand_read_page_op(chip, page, 0, host->data_buf, mtd->writesize);
+ if (ret)
+ return ret;
- host->devtype_data->send_page(mtd, NFC_INPUT);
- host->devtype_data->send_cmd(host, NAND_CMD_PAGEPROG, true);
- mxc_do_addr_cycle(mtd, 0, page);
+ copy_spare(mtd, true, chip->oob_poi);
return 0;
}
@@ -873,83 +802,40 @@ static int mxc_nand_write_page(struct nand_chip *chip, const uint8_t *buf,
static int mxc_nand_write_page_ecc(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
- return mxc_nand_write_page(chip, buf, true, page);
-}
-
-static int mxc_nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
- int oob_required, int page)
-{
- return mxc_nand_write_page(chip, buf, false, page);
-}
-
-static int mxc_nand_write_oob(struct nand_chip *chip, int page)
-{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int ret;
- memset(host->data_buf, 0xff, mtd->writesize);
-
- return mxc_nand_write_page(chip, host->data_buf, false, page);
-}
-
-static u_char mxc_nand_read_byte(struct nand_chip *nand_chip)
-{
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- uint8_t ret;
+ copy_spare(mtd, false, chip->oob_poi);
- /* Check for status request */
- if (host->status_request)
- return host->devtype_data->get_dev_status(host) & 0xFF;
+ host->devtype_data->enable_hwecc(chip, true);
- if (nand_chip->options & NAND_BUSWIDTH_16) {
- /* only take the lower byte of each word */
- ret = *(uint16_t *)(host->data_buf + host->buf_start);
+ ret = nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
- host->buf_start += 2;
- } else {
- ret = *(uint8_t *)(host->data_buf + host->buf_start);
- host->buf_start++;
- }
+ host->devtype_data->enable_hwecc(chip, false);
- dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
-/* Write data of length len to buffer buf. The data to be
- * written on NAND Flash is first copied to RAMbuffer. After the Data Input
- * Operation by the NFC, the data is written to NAND Flash */
-static void mxc_nand_write_buf(struct nand_chip *nand_chip, const u_char *buf,
- int len)
+static int mxc_nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
- struct mtd_info *mtd = nand_to_mtd(nand_chip);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- u16 col = host->buf_start;
- int n = mtd->oobsize + mtd->writesize - col;
-
- n = min(n, len);
+ struct mtd_info *mtd = nand_to_mtd(chip);
- memcpy(host->data_buf + col, buf, n);
+ copy_spare(mtd, false, chip->oob_poi);
- host->buf_start += n;
+ return nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
}
-/* Read the data buffer from the NAND Flash. To read the data from NAND
- * Flash first the data output cycle is initiated by the NFC, which copies
- * the data to RAMbuffer. This data of length len is then copied to buffer buf.
- */
-static void mxc_nand_read_buf(struct nand_chip *nand_chip, u_char *buf,
- int len)
+static int mxc_nand_write_oob(struct nand_chip *chip, int page)
{
- struct mtd_info *mtd = nand_to_mtd(nand_chip);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- u16 col = host->buf_start;
- int n = mtd->oobsize + mtd->writesize - col;
-
- n = min(n, len);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
- memcpy(buf, host->data_buf + col, n);
+ memset(host->data_buf, 0xff, mtd->writesize);
+ copy_spare(mtd, false, chip->oob_poi);
- host->buf_start += n;
+ return nand_prog_page_op(chip, page, 0, host->data_buf, mtd->writesize);
}
/* This function is used by upper layer for select and
@@ -1328,107 +1214,6 @@ static void preset_v3(struct mtd_info *mtd)
writel(0, NFC_V3_DELAY_LINE);
}
-/* Used by the upper layer to write command to NAND Flash for
- * different operations to be carried out on NAND Flash */
-static void mxc_nand_command(struct nand_chip *nand_chip, unsigned command,
- int column, int page_addr)
-{
- struct mtd_info *mtd = nand_to_mtd(nand_chip);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
- dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
- command, column, page_addr);
-
- /* Reset command state information */
- host->status_request = false;
-
- /* Command pre-processing step */
- switch (command) {
- case NAND_CMD_RESET:
- host->devtype_data->preset(mtd);
- host->devtype_data->send_cmd(host, command, false);
- break;
-
- case NAND_CMD_STATUS:
- host->buf_start = 0;
- host->status_request = true;
-
- host->devtype_data->send_cmd(host, command, true);
- WARN_ONCE(column != -1 || page_addr != -1,
- "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
- command, column, page_addr);
- mxc_do_addr_cycle(mtd, column, page_addr);
- break;
-
- case NAND_CMD_READID:
- host->devtype_data->send_cmd(host, command, true);
- mxc_do_addr_cycle(mtd, column, page_addr);
- host->devtype_data->send_read_id(host);
- host->buf_start = 0;
- break;
-
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- host->devtype_data->send_cmd(host, command, false);
- WARN_ONCE(column != -1,
- "Unexpected column value (cmd=%u, col=%d)\n",
- command, column);
- mxc_do_addr_cycle(mtd, column, page_addr);
-
- break;
- case NAND_CMD_PARAM:
- host->devtype_data->send_cmd(host, command, false);
- mxc_do_addr_cycle(mtd, column, page_addr);
- host->devtype_data->send_page(mtd, NFC_OUTPUT);
- memcpy32_fromio(host->data_buf, host->main_area0, 512);
- host->buf_start = 0;
- break;
- default:
- WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
- command);
- break;
- }
-}
-
-static int mxc_nand_set_features(struct nand_chip *chip, int addr,
- u8 *subfeature_param)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct mxc_nand_host *host = nand_get_controller_data(chip);
- int i;
-
- host->buf_start = 0;
-
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->legacy.write_byte(chip, subfeature_param[i]);
-
- memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
- host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
- mxc_do_addr_cycle(mtd, addr, -1);
- host->devtype_data->send_page(mtd, NFC_INPUT);
-
- return 0;
-}
-
-static int mxc_nand_get_features(struct nand_chip *chip, int addr,
- u8 *subfeature_param)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct mxc_nand_host *host = nand_get_controller_data(chip);
- int i;
-
- host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
- mxc_do_addr_cycle(mtd, addr, -1);
- host->devtype_data->send_page(mtd, NFC_OUTPUT);
- memcpy32_fromio(host->data_buf, host->main_area0, 512);
- host->buf_start = 0;
-
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- *subfeature_param++ = chip->legacy.read_byte(chip);
-
- return 0;
-}
-
/*
* The generic flash bbt descriptors overlap with our ecc
* hardware, so define some i.MX specific ones.
@@ -1617,10 +1402,10 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
chip->ecc.bytes = host->devtype_data->eccbytes;
host->eccsize = host->devtype_data->eccsize;
chip->ecc.size = 512;
- mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
chip->ecc.read_page = mxc_nand_read_page;
chip->ecc.read_page_raw = mxc_nand_read_page_raw;
chip->ecc.read_oob = mxc_nand_read_oob;
@@ -1630,6 +1415,8 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
break;
default:
@@ -1685,9 +1472,217 @@ static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr,
return host->devtype_data->setup_interface(chip, chipnr, conf);
}
+static void memff16_toio(void *buf, int n)
+{
+ u16 __iomem *t = buf;
+ int i;
+
+ for (i = 0; i < (n >> 1); i++)
+ __raw_writew(0xffff, t++);
+}
+
+static void copy_page_to_sram(struct mtd_info *mtd, const void *buf, int buf_len)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(this);
+ unsigned int no_subpages = mtd->writesize / 512;
+ int oob_per_subpage, i;
+
+ oob_per_subpage = (mtd->oobsize / no_subpages) & ~1;
+
+ /*
+ * During a page write the i.MX NAND controller will read 512b from
+ * main_area0 SRAM, then oob_per_subpage bytes from spare0 SRAM, then
+ * 512b from main_area1 SRAM and so on until the full page is written.
+ * For software ECC we want to have a 1:1 mapping between the raw page
+ * data on the NAND chip and the view of the NAND core. This is
+ * necessary to make the NAND_CMD_RNDOUT read the data it expects.
+ * To accomplish this we have to write the data in the order the controller
+ * reads it. This is reversed in copy_page_from_sram() below.
+ *
+ * buf_len can either be the full page including the OOB or user data only.
+ * When it's user data only make sure that we fill up the rest of the
+ * SRAM with 0xff.
+ */
+ for (i = 0; i < no_subpages; i++) {
+ int now = min(buf_len, 512);
+
+ if (now)
+ memcpy16_toio(host->main_area0 + i * 512, buf, now);
+
+ if (now < 512)
+ memff16_toio(host->main_area0 + i * 512 + now, 512 - now);
+
+ buf += 512;
+ buf_len -= now;
+
+ now = min(buf_len, oob_per_subpage);
+ if (now)
+ memcpy16_toio(host->spare0 + i * host->devtype_data->spare_len,
+ buf, now);
+
+ if (now < oob_per_subpage)
+ memff16_toio(host->spare0 + i * host->devtype_data->spare_len + now,
+ oob_per_subpage - now);
+
+ buf += oob_per_subpage;
+ buf_len -= now;
+ }
+}
+
+static void copy_page_from_sram(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(this);
+ void *buf = host->data_buf;
+ unsigned int no_subpages = mtd->writesize / 512;
+ int oob_per_subpage, i;
+
+ /* mtd->writesize is not set during ident scanning */
+ if (!no_subpages)
+ no_subpages = 1;
+
+ oob_per_subpage = (mtd->oobsize / no_subpages) & ~1;
+
+ for (i = 0; i < no_subpages; i++) {
+ memcpy16_fromio(buf, host->main_area0 + i * 512, 512);
+ buf += 512;
+
+ memcpy16_fromio(buf, host->spare0 + i * host->devtype_data->spare_len,
+ oob_per_subpage);
+ buf += oob_per_subpage;
+ }
+}
+
+static int mxcnd_do_exec_op(struct nand_chip *chip,
+ const struct nand_subop *op)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, j, buf_len;
+ void *buf_read = NULL;
+ const void *buf_write = NULL;
+ const struct nand_op_instr *instr;
+ bool readid = false;
+ bool statusreq = false;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_WAITRDY_INSTR:
+ /* NFC handles R/B internally, nothing to do here */
+ break;
+ case NAND_OP_CMD_INSTR:
+ host->devtype_data->send_cmd(host, instr->ctx.cmd.opcode, true);
+
+ if (instr->ctx.cmd.opcode == NAND_CMD_READID)
+ readid = true;
+ if (instr->ctx.cmd.opcode == NAND_CMD_STATUS)
+ statusreq = true;
+
+ break;
+ case NAND_OP_ADDR_INSTR:
+ for (j = 0; j < instr->ctx.addr.naddrs; j++) {
+ bool islast = j == instr->ctx.addr.naddrs - 1;
+ host->devtype_data->send_addr(host, instr->ctx.addr.addrs[j], islast);
+ }
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ buf_write = instr->ctx.data.buf.out;
+ buf_len = instr->ctx.data.len;
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
+ memcpy32_toio(host->main_area0, buf_write, buf_len);
+ else
+ copy_page_to_sram(mtd, buf_write, buf_len);
+
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+
+ buf_read = instr->ctx.data.buf.in;
+ buf_len = instr->ctx.data.len;
+
+ if (readid) {
+ host->devtype_data->send_read_id(host);
+ readid = false;
+
+ memcpy32_fromio(host->data_buf, host->main_area0, buf_len * 2);
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ u8 *bufr = buf_read;
+ u16 *bufw = host->data_buf;
+ for (j = 0; j < buf_len; j++)
+ bufr[j] = bufw[j];
+ } else {
+ memcpy(buf_read, host->data_buf, buf_len);
+ }
+ break;
+ }
+
+ if (statusreq) {
+ *(u8 *)buf_read = host->devtype_data->get_dev_status(host);
+ statusreq = false;
+ break;
+ }
+
+ host->devtype_data->read_page(chip);
+
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ if (IS_ALIGNED(buf_len, 4)) {
+ memcpy32_fromio(buf_read, host->main_area0, buf_len);
+ } else {
+ memcpy32_fromio(host->data_buf, host->main_area0, mtd->writesize);
+ memcpy(buf_read, host->data_buf, buf_len);
+ }
+ } else {
+ copy_page_from_sram(mtd);
+ memcpy(buf_read, host->data_buf, buf_len);
+ }
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+#define MAX_DATA_SIZE (4096 + 512)
+
+static const struct nand_op_parser mxcnd_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(mxcnd_do_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, MAX_DATA_SIZE)),
+ NAND_OP_PARSER_PATTERN(mxcnd_do_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(mxcnd_do_exec_op,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
+
+static int mxcnd_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &mxcnd_op_parser,
+ op, check_only);
+}
+
static const struct nand_controller_ops mxcnd_controller_ops = {
.attach_chip = mxcnd_attach_chip,
.setup_interface = mxcnd_setup_interface,
+ .exec_op = mxcnd_exec_op,
};
static int mxcnd_probe(struct platform_device *pdev)
@@ -1720,13 +1715,6 @@ static int mxcnd_probe(struct platform_device *pdev)
nand_set_controller_data(this, host);
nand_set_flash_node(this, pdev->dev.of_node);
- this->legacy.dev_ready = mxc_nand_dev_ready;
- this->legacy.cmdfunc = mxc_nand_command;
- this->legacy.read_byte = mxc_nand_read_byte;
- this->legacy.write_buf = mxc_nand_write_buf;
- this->legacy.read_buf = mxc_nand_read_buf;
- this->legacy.set_features = mxc_nand_set_features;
- this->legacy.get_features = mxc_nand_get_features;
host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk))
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 3dfc7e1e5241..3f9e9c572854 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -121,7 +121,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26, 0x03),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -131,7 +131,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF4GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -141,7 +141,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF1G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -150,7 +150,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -158,8 +158,17 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF2G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF4G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -167,6 +176,15 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX35LF4G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -199,7 +217,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF4G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -208,8 +226,18 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF4G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF4GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -229,7 +257,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF2G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -238,8 +266,18 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35UF2G24AD-Z4I8",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF2GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -249,7 +287,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF2GE4AC",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -269,7 +307,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF1G24AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -279,7 +317,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF1GE4AD",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -289,7 +327,7 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF1GE4AC",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
diff --git a/drivers/mtd/parsers/brcm_u-boot.c b/drivers/mtd/parsers/brcm_u-boot.c
index 7c338dc7b8f3..984f98923446 100644
--- a/drivers/mtd/parsers/brcm_u-boot.c
+++ b/drivers/mtd/parsers/brcm_u-boot.c
@@ -81,4 +81,5 @@ static struct mtd_part_parser brcm_u_boot_mtd_parser = {
};
module_mtd_part_parser(brcm_u_boot_mtd_parser);
+MODULE_DESCRIPTION("Broadcom's U-Boot partition parser");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c
index b34856def816..504e5fa2b45b 100644
--- a/drivers/mtd/parsers/cmdlinepart.c
+++ b/drivers/mtd/parsers/cmdlinepart.c
@@ -44,14 +44,6 @@
#include <linux/module.h>
#include <linux/err.h>
-/* debug macro */
-#if 0
-#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
-#else
-#define dbg(x)
-#endif
-
-
/* special size referring to all the remaining space in a partition */
#define SIZE_REMAINING ULLONG_MAX
#define OFFSET_CONTINUOUS ULLONG_MAX
@@ -199,9 +191,9 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
- dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
+ pr_debug("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
this_part, parts[this_part].name, parts[this_part].offset,
- parts[this_part].size, parts[this_part].mask_flags));
+ parts[this_part].size, parts[this_part].mask_flags);
/* return (updated) pointer to extra_mem memory */
if (extra_mem_ptr)
@@ -267,7 +259,7 @@ static int mtdpart_setup_real(char *s)
}
mtd_id_len = p - mtd_id;
- dbg(("parsing <%s>\n", p+1));
+ pr_debug("parsing <%s>\n", p+1);
/*
* parse one mtd. have it reserve memory for the
@@ -304,8 +296,8 @@ static int mtdpart_setup_real(char *s)
this_mtd->next = partitions;
partitions = this_mtd;
- dbg(("mtdid=<%s> num_parts=<%d>\n",
- this_mtd->mtd_id, this_mtd->num_parts));
+ pr_debug("mtdid=<%s> num_parts=<%d>\n",
+ this_mtd->mtd_id, this_mtd->num_parts);
/* EOS - we're done */
diff --git a/drivers/mtd/parsers/tplink_safeloader.c b/drivers/mtd/parsers/tplink_safeloader.c
index 1c689dafca2a..e358a029dc70 100644
--- a/drivers/mtd/parsers/tplink_safeloader.c
+++ b/drivers/mtd/parsers/tplink_safeloader.c
@@ -149,4 +149,5 @@ static struct mtd_part_parser mtd_parser_tplink_safeloader = {
};
module_mtd_part_parser(mtd_parser_tplink_safeloader);
+MODULE_DESCRIPTION("TP-Link Safeloader partitions parser");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 5e68468b72fc..5dd9c35f6b6f 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -13,7 +13,6 @@ spi-nor-objs += micron-st.o
spi-nor-objs += spansion.o
spi-nor-objs += sst.o
spi-nor-objs += winbond.o
-spi-nor-objs += xilinx.o
spi-nor-objs += xmc.o
spi-nor-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 028514c6996f..e0c4efc424f4 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1463,14 +1463,6 @@ static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size
spi_nor_unprep(nor);
}
-static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
-{
- if (!nor->params->convert_addr)
- return addr;
-
- return nor->params->convert_addr(nor, addr);
-}
-
/*
* Initiate the erasure of a single sector
*/
@@ -1478,8 +1470,6 @@ int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
int i;
- addr = spi_nor_convert_addr(nor, addr);
-
if (nor->spimem) {
struct spi_mem_op op =
SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
@@ -1986,7 +1976,6 @@ static const struct spi_nor_manufacturer *manufacturers[] = {
&spi_nor_spansion,
&spi_nor_sst,
&spi_nor_winbond,
- &spi_nor_xilinx,
&spi_nor_xmc,
};
@@ -2065,8 +2054,6 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
while (len) {
loff_t addr = from;
- addr = spi_nor_convert_addr(nor, addr);
-
ret = spi_nor_read_data(nor, addr, len, buf);
if (ret == 0) {
/* We shouldn't see 0-length reads */
@@ -2099,7 +2086,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- size_t page_offset, page_remain, i;
+ size_t i;
ssize_t ret;
u32 page_size = nor->params->page_size;
@@ -2112,23 +2099,9 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
for (i = 0; i < len; ) {
ssize_t written;
loff_t addr = to + i;
-
- /*
- * If page_size is a power of two, the offset can be quickly
- * calculated with an AND operation. On the other cases we
- * need to do a modulus operation (more expensive).
- */
- if (is_power_of_2(page_size)) {
- page_offset = addr & (page_size - 1);
- } else {
- u64 aux = addr;
-
- page_offset = do_div(aux, page_size);
- }
+ size_t page_offset = addr & (page_size - 1);
/* the size of data remaining on the first page */
- page_remain = min_t(size_t, page_size - page_offset, len - i);
-
- addr = spi_nor_convert_addr(nor, addr);
+ size_t page_remain = min_t(size_t, page_size - page_offset, len - i);
ret = spi_nor_lock_device(nor);
if (ret)
@@ -2581,8 +2554,51 @@ static int spi_nor_select_erase(struct spi_nor *nor)
return 0;
}
-static int spi_nor_default_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
+static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
+{
+ if (nor->params->addr_nbytes) {
+ nor->addr_nbytes = nor->params->addr_nbytes;
+ } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
+ /*
+ * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
+ * in this protocol an odd addr_nbytes cannot be used because
+ * then the address phase would only span a cycle and a half.
+ * Half a cycle would be left over. We would then have to start
+ * the dummy phase in the middle of a cycle and so too the data
+ * phase, and we will end the transaction with half a cycle left
+ * over.
+ *
+ * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
+ * avoid this situation.
+ */
+ nor->addr_nbytes = 4;
+ } else if (nor->info->addr_nbytes) {
+ nor->addr_nbytes = nor->info->addr_nbytes;
+ } else {
+ nor->addr_nbytes = 3;
+ }
+
+ if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_nbytes = 4;
+ }
+
+ if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
+ dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
+ nor->addr_nbytes);
+ return -EINVAL;
+ }
+
+ /* Set 4byte opcodes when possible. */
+ if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor,
+ const struct spi_nor_hwcaps *hwcaps)
{
struct spi_nor_flash_parameter *params = nor->params;
u32 ignored_mask, shared_mask;
@@ -2639,64 +2655,6 @@ static int spi_nor_default_setup(struct spi_nor *nor,
return err;
}
- return 0;
-}
-
-static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
-{
- if (nor->params->addr_nbytes) {
- nor->addr_nbytes = nor->params->addr_nbytes;
- } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
- /*
- * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
- * in this protocol an odd addr_nbytes cannot be used because
- * then the address phase would only span a cycle and a half.
- * Half a cycle would be left over. We would then have to start
- * the dummy phase in the middle of a cycle and so too the data
- * phase, and we will end the transaction with half a cycle left
- * over.
- *
- * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
- * avoid this situation.
- */
- nor->addr_nbytes = 4;
- } else if (nor->info->addr_nbytes) {
- nor->addr_nbytes = nor->info->addr_nbytes;
- } else {
- nor->addr_nbytes = 3;
- }
-
- if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_nbytes = 4;
- }
-
- if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
- dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
- nor->addr_nbytes);
- return -EINVAL;
- }
-
- /* Set 4byte opcodes when possible. */
- if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
-
- return 0;
-}
-
-static int spi_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- int ret;
-
- if (nor->params->setup)
- ret = nor->params->setup(nor, hwcaps);
- else
- ret = spi_nor_default_setup(nor, hwcaps);
- if (ret)
- return ret;
-
return spi_nor_set_addr_nbytes(nor);
}
@@ -2965,15 +2923,10 @@ static void spi_nor_init_default_params(struct spi_nor *nor)
params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;
params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS;
- if (!(info->flags & SPI_NOR_NO_FR)) {
- /* Default to Fast Read for DT and non-DT platform devices. */
+ /* Default to Fast Read for non-DT and enable it if requested by DT. */
+ if (!np || of_property_read_bool(np, "m25p,fast-read"))
params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
- /* Mask out Fast Read if not requested at DT instantiation. */
- if (np && !of_property_read_bool(np, "m25p,fast-read"))
- params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
- }
-
/* (Fast) Read settings. */
params->hwcaps.mask |= SNOR_HWCAPS_READ;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
@@ -3055,7 +3008,14 @@ static int spi_nor_init_params(struct spi_nor *nor)
spi_nor_init_params_deprecated(nor);
}
- return spi_nor_late_init_params(nor);
+ ret = spi_nor_late_init_params(nor);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!is_power_of_2(nor->params->page_size)))
+ return -EINVAL;
+
+ return 0;
}
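With the Xilinx S3AN support gone, every remaining flash has a power-of-two page size, which the WARN_ON() above now enforces; that is what lets spi_nor_write() replace the do_div()-based offset computation with a plain mask. A worked example with assumed numbers (256-byte page, addr = 0x123):

#include <linux/types.h>

static u32 example_page_offset(u32 addr)
{
	const u32 page_size = 256;	/* power of two, as enforced above */

	/* for addr = 0x123 this yields 0x23 (35); 256 - 35 = 221 bytes remain */
	return addr & (page_size - 1);
}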
/** spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
@@ -3338,32 +3298,28 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
if (name)
info = spi_nor_match_name(nor, name);
- /* Try to auto-detect if chip name wasn't specified or not found */
- if (!info)
- return spi_nor_detect(nor);
-
/*
- * If caller has specified name of flash model that can normally be
- * detected using JEDEC, let's verify it.
+ * Auto-detect if chip name wasn't specified or not found, or the chip
+ * has an ID. If the chip supposedly has an ID, we also do an
+ * auto-detection to compare it later.
*/
- if (name && info->id) {
+ if (!info || info->id) {
const struct flash_info *jinfo;
jinfo = spi_nor_detect(nor);
- if (IS_ERR(jinfo)) {
+ if (IS_ERR(jinfo))
return jinfo;
- } else if (jinfo != info) {
- /*
- * JEDEC knows better, so overwrite platform ID. We
- * can't trust partitions any longer, but we'll let
- * mtd apply them anyway, since some partitions may be
- * marked read-only, and we don't want to loose that
- * information, even if it's not 100% accurate.
- */
+
+ /*
+ * If caller has specified name of flash model that can normally
+ * be detected using JEDEC, let's verify it.
+ */
+ if (info && jinfo != info)
dev_warn(nor->dev, "found %s, expected %s\n",
jinfo->name, info->name);
- info = jinfo;
- }
+
+ /* If info was set before, JEDEC knows better. */
+ info = jinfo;
}
return info;
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 442786685515..1516b6d0dc37 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -366,13 +366,6 @@ struct spi_nor_otp {
* @set_octal_dtr: enables or disables SPI NOR octal DTR mode.
* @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
- * @convert_addr: converts an absolute address into something the flash
- * will understand. Particularly useful when pagesize is
- * not a power-of-2.
- * @setup: (optional) configures the SPI NOR memory. Useful for
- * SPI NOR flashes that have peculiarities to the SPI NOR
- * standard e.g. different opcodes, specific address
- * calculation, page size, etc.
* @ready: (optional) flashes might use a different mechanism
* than reading the status register to indicate they
* are ready for a new command
@@ -403,8 +396,6 @@ struct spi_nor_flash_parameter {
int (*set_octal_dtr)(struct spi_nor *nor, bool enable);
int (*quad_enable)(struct spi_nor *nor);
int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
- u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
- int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
int (*ready)(struct spi_nor *nor);
const struct spi_nor_locking_ops *locking_ops;
@@ -479,7 +470,6 @@ struct spi_nor_id {
* Usually these will power-up in a write-protected
* state.
* SPI_NOR_NO_ERASE: no erase command needed.
- * SPI_NOR_NO_FR: can't do fastread.
* SPI_NOR_QUAD_PP: flash supports Quad Input Page Program.
* SPI_NOR_RWW: flash supports reads while write.
*
@@ -528,7 +518,6 @@ struct flash_info {
#define SPI_NOR_BP3_SR_BIT6 BIT(4)
#define SPI_NOR_SWP_IS_VOLATILE BIT(5)
#define SPI_NOR_NO_ERASE BIT(6)
-#define SPI_NOR_NO_FR BIT(7)
#define SPI_NOR_QUAD_PP BIT(8)
#define SPI_NOR_RWW BIT(9)
@@ -603,7 +592,6 @@ extern const struct spi_nor_manufacturer spi_nor_st;
extern const struct spi_nor_manufacturer spi_nor_spansion;
extern const struct spi_nor_manufacturer spi_nor_sst;
extern const struct spi_nor_manufacturer spi_nor_winbond;
-extern const struct spi_nor_manufacturer spi_nor_xilinx;
extern const struct spi_nor_manufacturer spi_nor_xmc;
extern const struct attribute_group *spi_nor_sysfs_groups[];
diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
index 5f321e24ae7d..add37104d673 100644
--- a/drivers/mtd/spi-nor/everspin.c
+++ b/drivers/mtd/spi-nor/everspin.c
@@ -14,28 +14,39 @@ static const struct flash_info everspin_nor_parts[] = {
.size = SZ_16K,
.sector_size = SZ_16K,
.addr_nbytes = 2,
- .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+ .flags = SPI_NOR_NO_ERASE,
}, {
.name = "mr25h256",
.size = SZ_32K,
.sector_size = SZ_32K,
.addr_nbytes = 2,
- .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+ .flags = SPI_NOR_NO_ERASE,
}, {
.name = "mr25h10",
.size = SZ_128K,
.sector_size = SZ_128K,
- .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+ .flags = SPI_NOR_NO_ERASE,
}, {
.name = "mr25h40",
.size = SZ_512K,
.sector_size = SZ_512K,
- .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+ .flags = SPI_NOR_NO_ERASE,
}
};
+static void everspin_nor_default_init(struct spi_nor *nor)
+{
+ /* Everspin FRAMs don't support the fast read opcode. */
+ nor->params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+}
+
+static const struct spi_nor_fixups everspin_nor_fixups = {
+ .default_init = everspin_nor_default_init,
+};
+
const struct spi_nor_manufacturer spi_nor_everspin = {
.name = "everspin",
.parts = everspin_nor_parts,
.nparts = ARRAY_SIZE(everspin_nor_parts),
+ .fixups = &everspin_nor_fixups,
};
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 142fb27b2ea9..e065e4fd42a3 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -105,7 +105,9 @@ static const struct flash_info winbond_nor_parts[] = {
}, {
.id = SNOR_ID(0xef, 0x40, 0x18),
.name = "w25q128",
+ .size = SZ_16M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
.id = SNOR_ID(0xef, 0x40, 0x19),
.name = "w25q256",
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
deleted file mode 100644
index f99118c691b0..000000000000
--- a/drivers/mtd/spi-nor/xilinx.c
+++ /dev/null
@@ -1,169 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/mtd/spi-nor.h>
-
-#include "core.h"
-
-#define XILINX_OP_SE 0x50 /* Sector erase */
-#define XILINX_OP_PP 0x82 /* Page program */
-#define XILINX_OP_RDSR 0xd7 /* Read status register */
-
-#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
-#define XSR_RDY BIT(7) /* Ready */
-
-#define XILINX_RDSR_OP(buf) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(XILINX_OP_RDSR, 0), \
- SPI_MEM_OP_NO_ADDR, \
- SPI_MEM_OP_NO_DUMMY, \
- SPI_MEM_OP_DATA_IN(1, buf, 0))
-
-#define S3AN_FLASH(_id, _name, _n_sectors, _page_size) \
- .id = _id, \
- .name = _name, \
- .size = 8 * (_page_size) * (_n_sectors), \
- .sector_size = (8 * (_page_size)), \
- .page_size = (_page_size), \
- .flags = SPI_NOR_NO_FR
-
-/* Xilinx S3AN share MFR with Atmel SPI NOR */
-static const struct flash_info xilinx_nor_parts[] = {
- /* Xilinx S3AN Internal Flash */
- { S3AN_FLASH(SNOR_ID(0x1f, 0x22, 0x00), "3S50AN", 64, 264) },
- { S3AN_FLASH(SNOR_ID(0x1f, 0x24, 0x00), "3S200AN", 256, 264) },
- { S3AN_FLASH(SNOR_ID(0x1f, 0x24, 0x00), "3S400AN", 256, 264) },
- { S3AN_FLASH(SNOR_ID(0x1f, 0x25, 0x00), "3S700AN", 512, 264) },
- { S3AN_FLASH(SNOR_ID(0x1f, 0x26, 0x00), "3S1400AN", 512, 528) },
-};
-
-/*
- * This code converts an address to the Default Address Mode, that has non
- * power of two page sizes. We must support this mode because it is the default
- * mode supported by Xilinx tools, it can access the whole flash area and
- * changing over to the Power-of-two mode is irreversible and corrupts the
- * original data.
- * Addr can safely be unsigned int, the biggest S3AN device is smaller than
- * 4 MiB.
- */
-static u32 s3an_nor_convert_addr(struct spi_nor *nor, u32 addr)
-{
- u32 page_size = nor->params->page_size;
- u32 offset, page;
-
- offset = addr % page_size;
- page = addr / page_size;
- page <<= (page_size > 512) ? 10 : 9;
-
- return page | offset;
-}
-
-/**
- * xilinx_nor_read_sr() - Read the Status Register on S3AN flashes.
- * @nor: pointer to 'struct spi_nor'.
- * @sr: pointer to a DMA-able buffer where the value of the
- * Status Register will be written.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int xilinx_nor_read_sr(struct spi_nor *nor, u8 *sr)
-{
- int ret;
-
- if (nor->spimem) {
- struct spi_mem_op op = XILINX_RDSR_OP(sr);
-
- spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
-
- ret = spi_mem_exec_op(nor->spimem, &op);
- } else {
- ret = spi_nor_controller_ops_read_reg(nor, XILINX_OP_RDSR, sr,
- 1);
- }
-
- if (ret)
- dev_dbg(nor->dev, "error %d reading SR\n", ret);
-
- return ret;
-}
-
-/**
- * xilinx_nor_sr_ready() - Query the Status Register of the S3AN flash to see
- * if the flash is ready for new commands.
- * @nor: pointer to 'struct spi_nor'.
- *
- * Return: 1 if ready, 0 if not ready, -errno on errors.
- */
-static int xilinx_nor_sr_ready(struct spi_nor *nor)
-{
- int ret;
-
- ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- return !!(nor->bouncebuf[0] & XSR_RDY);
-}
-
-static int xilinx_nor_setup(struct spi_nor *nor,
- const struct spi_nor_hwcaps *hwcaps)
-{
- u32 page_size;
- int ret;
-
- ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
- if (ret)
- return ret;
-
- nor->erase_opcode = XILINX_OP_SE;
- nor->program_opcode = XILINX_OP_PP;
- nor->read_opcode = SPINOR_OP_READ;
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-
- /*
- * This flashes have a page size of 264 or 528 bytes (known as
- * Default addressing mode). It can be changed to a more standard
- * Power of two mode where the page size is 256/512. This comes
- * with a price: there is 3% less of space, the data is corrupted
- * and the page size cannot be changed back to default addressing
- * mode.
- *
- * The current addressing mode can be read from the XRDSR register
- * and should not be changed, because is a destructive operation.
- */
- if (nor->bouncebuf[0] & XSR_PAGESIZE) {
- /* Flash in Power of 2 mode */
- page_size = (nor->params->page_size == 264) ? 256 : 512;
- nor->params->page_size = page_size;
- nor->mtd.writebufsize = page_size;
- nor->params->size = nor->info->size;
- nor->mtd.erasesize = 8 * page_size;
- } else {
- /* Flash in Default addressing mode */
- nor->params->convert_addr = s3an_nor_convert_addr;
- nor->mtd.erasesize = nor->info->sector_size;
- }
-
- return 0;
-}
-
-static int xilinx_nor_late_init(struct spi_nor *nor)
-{
- nor->params->setup = xilinx_nor_setup;
- nor->params->ready = xilinx_nor_sr_ready;
-
- return 0;
-}
-
-static const struct spi_nor_fixups xilinx_nor_fixups = {
- .late_init = xilinx_nor_late_init,
-};
-
-const struct spi_nor_manufacturer spi_nor_xilinx = {
- .name = "xilinx",
- .parts = xilinx_nor_parts,
- .nparts = ARRAY_SIZE(xilinx_nor_parts),
- .fixups = &xilinx_nor_fixups,
-};
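For reference, a standalone sketch of the Default Address Mode conversion that the deleted s3an_nor_convert_addr() implemented; the helper name and the address value below are illustrative, not part of the driver:

static u32 example_s3an_default_mode_addr(void)
{
	u32 addr = 1000;		/* linear byte address            */
	u32 page_size = 264;		/* Default Address Mode page size */
	u32 offset = addr % page_size;	/* 1000 % 264 = 208               */
	u32 page = addr / page_size;	/* 1000 / 264 = 3                 */

	/* 264-byte pages occupy 512-byte slots, 528-byte pages 1024-byte slots */
	return (page << 9) | offset;	/* (3 << 9) | 208 = 0x6d0         */
}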
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index 5de0378f90db..7dae831ee8b6 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -1,19 +1,19 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
+obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o mtd_test.o
-mtd_oobtest-objs := oobtest.o mtd_test.o
-mtd_pagetest-objs := pagetest.o mtd_test.o
-mtd_readtest-objs := readtest.o mtd_test.o
-mtd_speedtest-objs := speedtest.o mtd_test.o
-mtd_stresstest-objs := stresstest.o mtd_test.o
-mtd_subpagetest-objs := subpagetest.o mtd_test.o
-mtd_torturetest-objs := torturetest.o mtd_test.o
-mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
+mtd_oobtest-objs := oobtest.o
+mtd_pagetest-objs := pagetest.o
+mtd_readtest-objs := readtest.o
+mtd_speedtest-objs := speedtest.o
+mtd_stresstest-objs := stresstest.o
+mtd_subpagetest-objs := subpagetest.o
+mtd_torturetest-objs := torturetest.o
+mtd_nandbiterrs-objs := nandbiterrs.o
diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
index c84250beffdc..f391e0300cdc 100644
--- a/drivers/mtd/tests/mtd_test.c
+++ b/drivers/mtd/tests/mtd_test.c
@@ -25,6 +25,7 @@ int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
return 0;
}
+EXPORT_SYMBOL_GPL(mtdtest_erase_eraseblock);
static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
{
@@ -57,6 +58,7 @@ int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
return 0;
}
+EXPORT_SYMBOL_GPL(mtdtest_scan_for_bad_eraseblocks);
int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
unsigned int eb, int ebcnt)
@@ -75,6 +77,7 @@ int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
return 0;
}
+EXPORT_SYMBOL_GPL(mtdtest_erase_good_eraseblocks);
int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
{
@@ -92,6 +95,7 @@ int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
return err;
}
+EXPORT_SYMBOL_GPL(mtdtest_read);
int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
const void *buf)
@@ -107,3 +111,8 @@ int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
return err;
}
+EXPORT_SYMBOL_GPL(mtdtest_write);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD function test helpers");
+MODULE_AUTHOR("Akinobu Mita");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index ed0796aff722..d92470960b38 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -621,8 +621,14 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
- return -ENODATA;
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+ /*
+ * Tell the core that there is no valid temperature value to
+ * return, but it need not worry about this.
+ */
+ *temperature = THERMAL_TEMP_INVALID;
+ return 0;
+ }
ret = iwl_mvm_get_temp(mvm, &temp);
if (ret)
diff --git a/drivers/net/wireless/ti/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index 1da6ab664e41..af5ec7f12231 100644
--- a/drivers/net/wireless/ti/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
@@ -229,7 +229,7 @@ struct acx_rx_msdu_lifetime {
* === ==========
* 31:12 Reserved - Always equal to 0.
* 11 Association - When set, the WiLink receives all association
- * related frames (association request/response, reassocation
+ * related frames (association request/response, reassociation
* request/response, and disassociation). When clear, these frames
* are discarded.
* 10 Auth/De auth - When set, the WiLink receives all authentication
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index d6bbcc7b5b90..31946387badf 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -1565,7 +1565,7 @@ static struct class_interface switchtec_interface = {
static int __init switchtec_ntb_init(void)
{
- switchtec_interface.class = switchtec_class;
+ switchtec_interface.class = &switchtec_class;
return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index e79c06d65bb7..423dcd190906 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -751,7 +751,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
u64 logsize, mapsize, datasize;
u64 available = size;
- arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
+ arena = kzalloc(sizeof(*arena), GFP_KERNEL);
if (!arena)
return NULL;
arena->nd_btt = btt->nd_btt;
@@ -978,7 +978,7 @@ static int btt_arena_write_layout(struct arena_info *arena)
if (ret)
return ret;
- super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
+ super = kzalloc(sizeof(*super), GFP_NOIO);
if (!super)
return -ENOMEM;
@@ -1716,6 +1716,7 @@ static void __exit nd_btt_exit(void)
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
+MODULE_DESCRIPTION("NVDIMM Block Translation Table");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 2023a661bbb0..eaa796629c27 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -540,6 +540,7 @@ static __exit void libnvdimm_exit(void)
nvdimm_devs_exit();
}
+MODULE_DESCRIPTION("NVDIMM (Non-Volatile Memory Device) core");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 4cd18be9d0e9..008b9aae74ff 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -69,5 +69,6 @@ static struct platform_driver e820_pmem_driver = {
module_platform_driver(e820_pmem_driver);
MODULE_ALIAS("platform:e820_pmem*");
+MODULE_DESCRIPTION("NVDIMM support for e820 type-12 memory");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index 1f8c667c6f1e..35c8fbbba10e 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -123,4 +123,5 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
return 0;
};
EXPORT_SYMBOL_GPL(async_pmem_flush);
+MODULE_DESCRIPTION("Virtio Persistent Memory Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index d3fca0ab6290..5134a8d08bf9 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -111,5 +111,6 @@ static struct platform_driver of_pmem_region_driver = {
module_platform_driver(of_pmem_region_driver);
MODULE_DEVICE_TABLE(of, of_pmem_region_match);
+MODULE_DESCRIPTION("NVDIMM Device Tree support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 1dd74c969d5a..1ae8b2351654 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -766,4 +766,5 @@ static struct nd_device_driver nd_pmem_driver = {
module_nd_driver(nd_pmem_driver);
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
+MODULE_DESCRIPTION("NVDIMM Persistent Memory Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 8b69427a4476..f1d58e70933f 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -3,6 +3,7 @@
* Copyright (c) 2011-2014, Intel Corporation.
* Copyright (c) 2017-2021 Christoph Hellwig.
*/
+#include <linux/bio-integrity.h>
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring/cmd.h>
@@ -111,13 +112,6 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
return req;
}
-static void nvme_unmap_bio(struct bio *bio)
-{
- if (bio_integrity(bio))
- bio_integrity_unmap_free_user(bio);
- blk_rq_unmap_user(bio);
-}
-
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
@@ -164,7 +158,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
out_unmap:
if (bio)
- nvme_unmap_bio(bio);
+ blk_rq_unmap_user(bio);
out:
blk_mq_free_request(req);
return ret;
@@ -202,7 +196,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (bio)
- nvme_unmap_bio(bio);
+ blk_rq_unmap_user(bio);
blk_mq_free_request(req);
if (effects)
@@ -413,7 +407,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
if (pdu->bio)
- nvme_unmap_bio(pdu->bio);
+ blk_rq_unmap_user(pdu->bio);
io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
@@ -439,7 +433,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
*/
if (blk_rq_is_poll(req)) {
if (pdu->bio)
- nvme_unmap_bio(pdu->bio);
+ blk_rq_unmap_user(pdu->bio);
io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
} else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 8f3c6c080d88..55c853686051 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -177,10 +177,7 @@ static void pci_clip_resource_to_region(struct pci_bus *bus,
static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
resource_size_t min, unsigned long type_mask,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data,
struct pci_bus_region *region)
{
@@ -251,10 +248,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
resource_size_t min, unsigned long type_mask,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index e534c02ee34f..4d2c188f5835 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -215,14 +215,6 @@ config PCIE_MT7621
help
This selects a driver for the MediaTek MT7621 PCIe Controller.
-config PCIE_MICROCHIP_HOST
- tristate "Microchip AXI PCIe controller"
- depends on PCI_MSI && OF
- select PCI_HOST_COMMON
- help
- Say Y here if you want kernel to support the Microchip AXI PCIe
- Host Bridge driver.
-
config PCI_HYPERV_INTERFACE
tristate "Microsoft Hyper-V PCI Interface"
depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
@@ -356,4 +348,5 @@ config PCIE_XILINX_CPM
source "drivers/pci/controller/cadence/Kconfig"
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
+source "drivers/pci/controller/plda/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index f2b19e6174af..038ccbd9e3ba 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
-obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
@@ -44,6 +43,7 @@ obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
+obj-y += plda/
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 8afacc90c63b..4c38181acffa 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -311,16 +311,30 @@ config PCIE_RCAR_GEN4_EP
SoCs. To compile this driver as a module, choose M here: the module
will be called pcie-rcar-gen4.ko. This uses the DesignWare core.
+config PCIE_ROCKCHIP_DW
+ bool
+
config PCIE_ROCKCHIP_DW_HOST
- bool "Rockchip DesignWare PCIe controller"
- select PCIE_DW
- select PCIE_DW_HOST
+ bool "Rockchip DesignWare PCIe controller (host mode)"
depends on PCI_MSI
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
+ select PCIE_DW_HOST
+ select PCIE_ROCKCHIP_DW
+ help
+ Enables support for the DesignWare PCIe controller in the
+ Rockchip SoC (except RK3399) to work in host mode.
+
+config PCIE_ROCKCHIP_DW_EP
+ bool "Rockchip DesignWare PCIe controller (endpoint mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_ROCKCHIP_DW
help
Enables support for the DesignWare PCIe controller in the
- Rockchip SoC except RK3399.
+ Rockchip SoC (except RK3399) to work in endpoint mode.
config PCI_EXYNOS
tristate "Samsung Exynos PCIe controller"
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index bac103faa523..ec215b3d6191 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
-obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index d2d17d37d3e0..4fe3b0cb72ec 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -13,11 +13,11 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -113,9 +113,9 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
-static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+ return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
@@ -474,7 +474,7 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
return ret;
}
- dw_pcie_ep_init_notify(ep);
+ pci_epc_init_notify(ep->epc);
return 0;
}
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index a33fa98a252e..fa45da28a218 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -54,43 +54,11 @@
struct exynos_pcie {
struct dw_pcie pci;
void __iomem *elbi_base;
- struct clk *clk;
- struct clk *bus_clk;
+ struct clk_bulk_data *clks;
struct phy *phy;
struct regulator_bulk_data supplies[2];
};
-static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
-{
- struct device *dev = ep->pci.dev;
- int ret;
-
- ret = clk_prepare_enable(ep->clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie rc clock");
- return ret;
- }
-
- ret = clk_prepare_enable(ep->bus_clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie bus clock");
- goto err_bus_clk;
- }
-
- return 0;
-
-err_bus_clk:
- clk_disable_unprepare(ep->clk);
-
- return ret;
-}
-
-static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
-{
- clk_disable_unprepare(ep->bus_clk);
- clk_disable_unprepare(ep->clk);
-}
-
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
writel(val, base + reg);
@@ -332,17 +300,9 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->elbi_base))
return PTR_ERR(ep->elbi_base);
- ep->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ep->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ep->clk);
- }
-
- ep->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(ep->bus_clk)) {
- dev_err(dev, "Failed to get pcie bus clock\n");
- return PTR_ERR(ep->bus_clk);
- }
+ ret = devm_clk_bulk_get_all_enable(dev, &ep->clks);
+ if (ret < 0)
+ return ret;
ep->supplies[0].supply = "vdd18";
ep->supplies[1].supply = "vdd10";
@@ -351,10 +311,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = exynos_pcie_init_clk_resources(ep);
- if (ret)
- return ret;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
if (ret)
return ret;
@@ -369,7 +325,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
fail_probe:
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
return ret;
@@ -383,7 +338,6 @@ static void exynos_pcie_remove(struct platform_device *pdev)
exynos_pcie_assert_core_reset(ep);
phy_power_off(ep->phy);
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
}
@@ -437,5 +391,6 @@ static struct platform_driver exynos_pcie_driver = {
},
};
module_platform_driver(exynos_pcie_driver);
+MODULE_DESCRIPTION("Samsung Exynos PCIe host controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 917c69edee1d..964d67756eb2 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -11,14 +11,13 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -107,8 +106,7 @@ struct imx6_pcie_drvdata {
struct imx6_pcie {
struct dw_pcie *pci;
- int reset_gpio;
- bool gpio_active_high;
+ struct gpio_desc *reset_gpiod;
bool link_is_up;
struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
struct regmap *iomuxc_gpr;
@@ -721,9 +719,7 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
}
/* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio))
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
+ gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 1);
}
static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
@@ -771,10 +767,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
}
/* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ if (imx6_pcie->reset_gpiod) {
msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
+ gpiod_set_value_cansleep(imx6_pcie->reset_gpiod, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
}
@@ -1131,7 +1126,7 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
return ret;
}
- dw_pcie_ep_init_notify(ep);
+ pci_epc_init_notify(ep->epc);
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
@@ -1285,22 +1280,11 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pci->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
- imx6_pcie->gpio_active_high = of_property_read_bool(node,
- "reset-gpio-active-high");
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high ?
- GPIOF_OUT_INIT_HIGH :
- GPIOF_OUT_INIT_LOW,
- "PCIe reset");
- if (ret) {
- dev_err(dev, "unable to get reset gpio\n");
- return ret;
- }
- } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
- return imx6_pcie->reset_gpio;
- }
+ imx6_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(imx6_pcie->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->reset_gpiod),
+ "unable to get reset gpio\n");
+ gpiod_set_consumer_name(imx6_pcie->reset_gpiod, "PCIe reset");
if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index d3a7d14ee685..52c6420ae200 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -34,6 +34,11 @@
#define PCIE_DEVICEID_SHIFT 16
/* Application registers */
+#define PID 0x000
+#define RTL GENMASK(15, 11)
+#define RTL_SHIFT 11
+#define AM6_PCI_PG1_RTL_VER 0x15
+
#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)
@@ -104,6 +109,8 @@
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+#define PCI_DEVICE_ID_TI_AM654X 0xb00c
+
struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
@@ -245,8 +252,68 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.irq_unmask = ks_pcie_msi_unmask,
};
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
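The kerneldoc above notes that the dbi_cs2 update crosses a clock domain, so the bit is read back until it reflects the new state. As a hedged alternative sketch only (not what this patch does), the same handshake could be bounded with read_poll_timeout() from <linux/iopoll.h>, reusing the driver's own register accessor; the timeout value is illustrative:

static int example_wait_dbi_cs2(struct keystone_pcie *ks_pcie, bool set)
{
	u32 val;

	/* Poll CMD_STATUS until DBI_CS2 matches the requested state */
	return read_poll_timeout(ks_pcie_app_readl, val,
				 set ? (val & DBI_CS2) : !(val & DBI_CS2),
				 0, 1000, false, ks_pcie, CMD_STATUS);
}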
+
static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
}
@@ -340,59 +407,22 @@ static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val |= DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2));
-}
-
-/**
- * ks_pcie_clear_dbi_mode() - Disable DBI mode
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val &= ~DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2);
-}
-
-static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
- u64 start, end;
+ struct resource_entry *entry;
struct resource *mem;
+ u64 start, end;
int i;
- mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ mem = entry->res;
start = mem->start;
end = mem->end;
@@ -403,7 +433,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
ks_pcie_clear_dbi_mode(ks_pcie);
if (ks_pcie->is_am6)
- return;
+ return 0;
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -420,6 +450,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val |= OB_XLAT_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ return 0;
}
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
@@ -445,44 +477,10 @@ static struct pci_ops ks_child_pcie_ops = {
.write = pci_generic_config_write,
};
-/**
- * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
- * @bus: A pointer to the PCI bus structure.
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
-{
- struct dw_pcie_rp *pp = bus->sysdata;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- if (!pci_is_root_bus(bus))
- return 0;
-
- /* Configure and set up BAR0 */
- ks_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_pcie_clear_dbi_mode(ks_pcie);
-
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-
- return 0;
-}
-
static struct pci_ops ks_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
- .add_bus = ks_pcie_v3_65_add_bus,
};
/**
@@ -525,7 +523,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
+ struct keystone_pcie *ks_pcie;
+ struct device *bridge_dev;
struct pci_dev *bridge;
+ u32 val;
+
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
@@ -537,6 +539,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ 0, },
};
+ static const struct pci_device_id am6_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { 0, },
+ };
if (pci_is_root_bus(bus))
bridge = dev;
@@ -558,10 +565,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
+ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
pcie_set_readrq(dev, 256);
}
}
+
+ /*
+ * Memory transactions fail with PCI controller in AM654 PG1.0
+ * when MRRS is set to more than 128 bytes. Force the MRRS to
+ * 128 bytes in all downstream devices.
+ */
+ if (pci_match_id(am6_pci_devids, bridge)) {
+ bridge_dev = pci_get_host_bridge_device(dev);
+		if (!bridge_dev || !bridge_dev->parent)
+ return;
+
+ ks_pcie = dev_get_drvdata(bridge_dev->parent);
+ if (!ks_pcie)
+ return;
+
+ val = ks_pcie_app_readl(ks_pcie, PID);
+ val &= RTL;
+ val >>= RTL_SHIFT;
+ if (val != AM6_PCI_PG1_RTL_VER)
+ return;
+
+ if (pcie_get_readrq(dev) > 128) {
+ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+ pcie_set_readrq(dev, 128);
+ }
+ }
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
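The AM654 PG1.0 check added above reads the PID register and isolates the RTL field with an explicit mask and shift. An equivalent hedged sketch using FIELD_GET() from <linux/bitfield.h> (the EXAMPLE_ macro names are illustrative, mirroring the definitions introduced earlier in the hunk):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_PID_RTL			GENMASK(15, 11)	/* silicon revision field */
#define EXAMPLE_AM6_PCI_PG1_RTL_VER	0x15

static bool example_is_am654_pg1(u32 pid)
{
	return FIELD_GET(EXAMPLE_PID_RTL, pid) == EXAMPLE_AM6_PCI_PG1_RTL_VER;
}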
@@ -814,7 +847,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
ks_pcie_stop_link(pci);
- ks_pcie_setup_rc_app_regs(ks_pcie);
+ ret = ks_pcie_setup_rc_app_regs(ks_pcie);
+ if (ret)
+ return ret;
+
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -1293,7 +1329,7 @@ static int ks_pcie_probe(struct platform_device *pdev)
goto err_ep_init;
}
- dw_pcie_ep_init_notify(&pci->ep);
+ pci_epc_init_notify(pci->ep.epc);
break;
default:
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 7dde6d5fa4d8..a4a800699f89 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -104,7 +104,7 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
dev_dbg(pci->dev, "Link up\n");
} else if (val & PEX_PF0_PME_MES_DR_LDD) {
dev_dbg(pci->dev, "Link down\n");
- pci_epc_linkdown(pci->ep.epc);
+ dw_pcie_ep_linkdown(&pci->ep);
} else if (val & PEX_PF0_PME_MES_DR_HRD) {
dev_dbg(pci->dev, "Hot reset\n");
}
@@ -286,7 +286,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
return ret;
}
- dw_pcie_ep_init_notify(&pci->ep);
+ pci_epc_init_notify(pci->ep.epc);
return ls_pcie_ep_interrupt_init(pcie, pdev);
}
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 6477c83262c2..db9482a113e9 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index 6dfdda59f328..643115f74092 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = {
.write = pci_generic_config_write,
};
-static void al_pcie_config_prepare(struct al_pcie *pcie)
+static int al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
struct dw_pcie_rp *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
+ struct resource_entry *ft;
u32 cfg_control_offset;
+ struct resource *bus;
u8 subordinate_bus;
u8 secondary_bus;
u32 cfg_control;
u32 reg;
- struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!ft)
+ return -ENODEV;
+
+ bus = ft->res;
target_bus_cfg = &pcie->target_bus_cfg;
ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
@@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+
+ return 0;
}
static int al_pcie_host_init(struct dw_pcie_rp *pp)
@@ -305,7 +313,9 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
if (rc)
return rc;
- al_pcie_config_prepare(pcie);
+ rc = al_pcie_config_prepare(pcie);
+ if (rc)
+ return rc;
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index a4630b92489b..f8e7283dacd4 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -94,7 +94,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
regmap_write(artpec6_pcie->regmap, offset, val);
}
-static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
struct dw_pcie_rp *pp = &pci->pp;
@@ -102,13 +102,13 @@ static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
switch (artpec6_pcie->mode) {
case DW_PCIE_RC_TYPE:
- return pci_addr - pp->cfg0_base;
+ return cpu_addr - pp->cfg0_base;
case DW_PCIE_EP_TYPE:
- return pci_addr - ep->phys_base;
+ return cpu_addr - ep->phys_base;
default:
dev_err(pci->dev, "UNKNOWN device type\n");
}
- return pci_addr;
+ return cpu_addr;
}
static int artpec6_pcie_establish_link(struct dw_pcie *pci)
@@ -452,7 +452,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
return ret;
}
- dw_pcie_ep_init_notify(&pci->ep);
+ pci_epc_init_notify(pci->ep.epc);
break;
default:
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 47391d7d3a73..43ba5c6738df 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -16,30 +16,6 @@
#include <linux/pci-epf.h>
/**
- * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
- * @ep: DWC EP device
- */
-void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_linkup(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
-
-/**
- * dw_pcie_ep_init_notify - Notify EPF drivers about EPC initialization complete
- * @ep: DWC EP device
- */
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_init_notify(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
-
-/**
* dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
* the endpoint function
* @ep: DWC EP device
@@ -161,7 +137,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
if (!ep->bar_to_atu[bar])
free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
else
- free_win = ep->bar_to_atu[bar];
+ free_win = ep->bar_to_atu[bar] - 1;
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
@@ -175,15 +151,18 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
return ret;
}
- ep->bar_to_atu[bar] = free_win;
+ /*
+ * Always increment free_win before assignment, since value 0 is used to identify
+ * unallocated mapping.
+ */
+ ep->bar_to_atu[bar] = free_win + 1;
set_bit(free_win, ep->ib_window_map);
return 0;
}
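The comment added above reserves the stored value 0 for "no inbound window allocated", so the real iATU index is always kept offset by one. A minimal illustration of that convention (helper names are hypothetical):

static inline void example_store_window(u32 *slot, u32 win)
{
	*slot = win + 1;	/* 0 stays reserved for "unallocated" */
}

static inline bool example_window_allocated(u32 slot)
{
	return slot != 0;
}

static inline u32 example_window_index(u32 slot)
{
	return slot - 1;	/* only meaningful when allocated */
}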
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
- phys_addr_t phys_addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
+ struct dw_pcie_ob_atu_cfg *atu)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 free_win;
@@ -195,13 +174,13 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return -EINVAL;
}
- ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ atu->index = free_win;
+ ret = dw_pcie_prog_outbound_atu(pci, atu);
if (ret)
return ret;
set_bit(free_win, ep->ob_window_map);
- ep->outbound_addr[free_win] = phys_addr;
+ ep->outbound_addr[free_win] = atu->cpu_addr;
return 0;
}
@@ -212,7 +191,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
- u32 atu_index = ep->bar_to_atu[bar];
+ u32 atu_index = ep->bar_to_atu[bar] - 1;
+
+ if (!ep->bar_to_atu[bar])
+ return;
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
@@ -233,6 +215,13 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
int ret, type;
u32 reg;
+ /*
+ * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
+ * 1 and 2 to form a 64-bit BAR.
+ */
+ if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+ return -EINVAL;
+
reg = PCI_BASE_ADDRESS_0 + (4 * bar);
if (!(flags & PCI_BASE_ADDRESS_SPACE))
@@ -301,8 +290,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-
- ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+
+ atu.func_no = func_no;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.cpu_addr = addr;
+ atu.pci_addr = pci_addr;
+ atu.size = size;
+ ret = dw_pcie_ep_outbound_atu(ep, &atu);
if (ret) {
dev_err(pci->dev, "Failed to enable address\n");
return ret;
@@ -632,7 +627,6 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
dw_pcie_edma_remove(pci);
- ep->epc->init_complete = false;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
@@ -674,6 +668,34 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
return 0;
}
+static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
+{
+ unsigned int offset;
+ unsigned int nbars;
+ u32 reg, i;
+
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ /*
+		 * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
+ * size in the range from 1 MB to 512 GB. Advertise support
+ * for 1 MB BAR size only.
+ */
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ }
+
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
/**
* dw_pcie_ep_init_registers - Initialize DWC EP specific registers
* @ep: DWC EP device
@@ -688,13 +710,11 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
struct dw_pcie_ep_func *ep_func;
struct device *dev = pci->dev;
struct pci_epc *epc = ep->epc;
- unsigned int offset, ptm_cap_base;
- unsigned int nbars;
+ u32 ptm_cap_base, reg;
u8 hdr_type;
u8 func_no;
- int i, ret;
void *addr;
- u32 reg;
+ int ret;
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -757,25 +777,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
if (ep->ops->init)
ep->ops->init(ep);
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
- dw_pcie_dbi_ro_wr_en(pci);
-
- if (offset) {
- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
-
- /*
- * PCIe r6.0, sec 7.8.6.2 require us to support at least one
- * size in the range from 1 MB to 512 GB. Advertise support
- * for 1 MB BAR size only.
- */
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
- }
-
/*
* PTM responder capability can be disabled only after disabling
* PTM root capability.
@@ -792,8 +795,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
dw_pcie_dbi_ro_wr_dis(pci);
}
- dw_pcie_setup(pci);
- dw_pcie_dbi_ro_wr_dis(pci);
+ dw_pcie_ep_init_non_sticky_registers(pci);
return 0;
@@ -805,6 +807,43 @@ err_remove_edma:
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+/**
+ * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
+ * @ep: DWC EP device
+ *
+ * Non-sticky registers are also initialized before sending the notification to
+ * the EPF drivers. This is needed since the registers need to be initialized
+ * before the link comes back again.
+ */
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+
+ /*
+ * Initialize the non-sticky DWC registers as they would've reset post
+ * Link Down. This is specifically needed for drivers not supporting
+ * PERST# as they have no way to reinitialize the registers before the
+ * link comes back again.
+ */
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ pci_epc_linkdown(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
+
+/**
* dw_pcie_ep_init - Initialize the endpoint device
* @ep: DWC EP device
*
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d15a5c2d5b48..a0822d5371bc 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -398,6 +398,32 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
return 0;
}
+static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *win;
+ struct resource *res;
+
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (win) {
+ res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return;
+
+ /*
+ * Allocate MSG TLP region of size 'region_align' at the end of
+ * the host bridge window.
+ */
+ res->start = win->res->end - pci->region_align + 1;
+ res->end = win->res->end;
+ res->name = "msg";
+ res->flags = win->res->flags | IORESOURCE_BUSY;
+
+ if (!devm_request_resource(pci->dev, win->res, res))
+ pp->msg_res = res;
+ }
+}
+
int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -484,6 +510,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
+ /*
+ * Allocate the resource for MSG TLP before programming the iATU
+ * outbound window in dw_pcie_setup_rc(). Since the allocation depends
+ * on the value of 'region_align', this has to be done after
+ * dw_pcie_iatu_detect().
+ *
+ * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
+ * make use of the generic MSG TLP implementation.
+ */
+ if (pp->use_atu_msg)
+ dw_pcie_host_request_msg_tlp_res(pp);
+
ret = dw_pcie_edma_detect(pci);
if (ret)
goto err_free_msi;
@@ -554,6 +592,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int type, ret;
u32 busdev;
@@ -576,8 +615,12 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
else
type = PCIE_ATU_TYPE_CFG1;
- ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
- pp->cfg0_size);
+ atu.type = type;
+ atu.cpu_addr = pp->cfg0_base;
+ atu.pci_addr = busdev;
+ atu.size = pp->cfg0_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return NULL;
@@ -589,6 +632,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_read(bus, devfn, where, size, val);
@@ -596,9 +640,12 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.cpu_addr = pp->io_base;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -611,6 +658,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_write(bus, devfn, where, size, val);
@@ -618,9 +666,12 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.cpu_addr = pp->io_base;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -655,6 +706,7 @@ static struct pci_ops dw_pcie_ops = {
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
struct resource_entry *entry;
int i, ret;
@@ -682,10 +734,19 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pci->num_ob_windows <= ++i)
break;
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
- entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.cpu_addr = entry->res->start;
+ atu.pci_addr = entry->res->start - entry->offset;
+
+ /* Adjust iATU size if MSG TLP region was allocated before */
+ if (pp->msg_res && pp->msg_res->parent == entry->res)
+ atu.size = resource_size(entry->res) -
+ resource_size(pp->msg_res);
+ else
+ atu.size = resource_size(entry->res);
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set MEM range %pr\n",
entry->res);
@@ -695,10 +756,13 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pp->io_size) {
if (pci->num_ob_windows > ++i) {
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
- pp->io_base,
- pp->io_bus_addr,
- pp->io_size);
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.cpu_addr = pp->io_base;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set IO range %pr\n",
entry->res);
@@ -713,6 +777,8 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
pci->num_ob_windows);
+ pp->msg_atu_index = i;
+
i = 0;
resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
if (resource_type(entry->res) != IORESOURCE_MEM)
@@ -818,11 +884,47 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
+static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
+{
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ void __iomem *mem;
+ int ret;
+
+ if (pci->num_ob_windows <= pci->pp.msg_atu_index)
+ return -ENOSPC;
+
+ if (!pci->pp.msg_res)
+ return -ENOSPC;
+
+ atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
+ atu.routing = PCIE_MSG_TYPE_R_BC;
+ atu.type = PCIE_ATU_TYPE_MSG;
+ atu.size = resource_size(pci->pp.msg_res);
+ atu.index = pci->pp.msg_atu_index;
+
+ atu.cpu_addr = pci->pp.msg_res->start;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return ret;
+
+ mem = ioremap(atu.cpu_addr, pci->region_align);
+ if (!mem)
+ return -ENOMEM;
+
+ /* A dummy write is converted to a Msg TLP */
+ writel(0, mem);
+
+ iounmap(mem);
+
+ return 0;
+}
+
int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- int ret;
+ int ret = 0;
/*
* If L1SS is supported, then do not put the link into L2 as some
@@ -834,10 +936,13 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
return 0;
- if (!pci->pp.ops->pme_turn_off)
- return 0;
+ if (pci->pp.ops->pme_turn_off)
+ pci->pp.ops->pme_turn_off(&pci->pp);
+ else
+ ret = dw_pcie_pme_turn_off(pci);
- pci->pp.ops->pme_turn_off(&pci->pp);
+ if (ret)
+ return ret;
ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
PCIE_PME_TO_L2_TIMEOUT_US/10,
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 8490c5d6ff9f..771b9d9be077 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -154,7 +154,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
dw_pcie_ep_deinit(&pci->ep);
}
- dw_pcie_ep_init_notify(&pci->ep);
+ pci_epc_init_notify(pci->ep.epc);
break;
default:
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 250cf7f40b85..1b5aba1f0c92 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -465,56 +465,61 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
return val | PCIE_ATU_TD;
}
-static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu)
{
+ u64 cpu_addr = atu->cpu_addr;
u32 retries, val;
u64 limit_addr;
if (pci->ops && pci->ops->cpu_addr_fixup)
cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
- limit_addr = cpu_addr + size - 1;
+ limit_addr = cpu_addr + atu->size - 1;
if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
!IS_ALIGNED(cpu_addr, pci->region_align) ||
- !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
return -EINVAL;
}
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
lower_32_bits(cpu_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
upper_32_bits(cpu_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
lower_32_bits(limit_addr));
if (dw_pcie_ver_is_ge(pci, 460A))
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,
upper_32_bits(limit_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(atu->pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(atu->pci_addr));
- val = type | PCIE_ATU_FUNC_NUM(func_no);
+ val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
dw_pcie_ver_is_ge(pci, 460A))
val |= PCIE_ATU_INCREASE_REGION_SIZE;
if (dw_pcie_ver_is(pci, 490A))
val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+ val = PCIE_ATU_ENABLE;
+ if (atu->type == PCIE_ATU_TYPE_MSG) {
+		/* Only data-less messages are supported for now */
+ val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
+ }
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
+ val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
@@ -526,21 +531,6 @@ static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
return -ETIMEDOUT;
}
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
- cpu_addr, pci_addr, size);
-}
-
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
-}
-
static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
@@ -655,7 +645,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
if (dw_pcie_link_up(pci))
break;
- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ msleep(LINK_WAIT_SLEEP_MS);
}
if (retries >= LINK_WAIT_MAX_RETRIES) {
@@ -880,30 +870,40 @@ static struct dw_edma_plat_ops dw_pcie_edma_ops = {
.irq_vector = dw_pcie_edma_irq_vector,
};
-static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+static void dw_pcie_edma_init_data(struct dw_pcie *pci)
+{
+ pci->edma.dev = pci->dev;
+
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
+
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+}
+
+static int dw_pcie_edma_find_mf(struct dw_pcie *pci)
{
u32 val;
/*
+ * Bail out finding the mapping format if it is already set by the glue
+ * driver. Also ensure that the edma.reg_base is pointing to a valid
+ * memory region.
+ */
+ if (pci->edma.mf != EDMA_MF_EDMA_LEGACY)
+ return pci->edma.reg_base ? 0 : -ENODEV;
+
+ /*
* Indirect eDMA CSRs access has been completely removed since v5.40a
* thus no space is now reserved for the eDMA channels viewport and
* former DMA CTRL register is no longer fixed to FFs.
- *
- * Note that Renesas R-Car S4-8's PCIe controllers for unknown reason
- * have zeros in the eDMA CTRL register even though the HW-manual
- * explicitly states there must FFs if the unrolled mapping is enabled.
- * For such cases the low-level drivers are supposed to manually
- * activate the unrolled mapping to bypass the auto-detection procedure.
*/
- if (dw_pcie_ver_is_ge(pci, 540A) || dw_pcie_cap_is(pci, EDMA_UNROLL))
+ if (dw_pcie_ver_is_ge(pci, 540A))
val = 0xFFFFFFFF;
else
val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
if (val == 0xFFFFFFFF && pci->edma.reg_base) {
pci->edma.mf = EDMA_MF_EDMA_UNROLL;
-
- val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
} else if (val != 0xFFFFFFFF) {
pci->edma.mf = EDMA_MF_EDMA_LEGACY;
@@ -912,15 +912,25 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return -ENODEV;
}
- pci->edma.dev = pci->dev;
+ return 0;
+}
- if (!pci->edma.ops)
- pci->edma.ops = &dw_pcie_edma_ops;
+static int dw_pcie_edma_find_channels(struct dw_pcie *pci)
+{
+ u32 val;
- pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+ /*
+ * Autodetect the read/write channels count only for non-HDMA platforms.
+	 * HDMA platforms with native CSR mapping don't support autodetect,
+ * so the glue drivers should've passed the valid count already. If not,
+ * the below sanity check will catch it.
+ */
+ if (pci->edma.mf != EDMA_MF_HDMA_NATIVE) {
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
- pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
- pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ }
/* Sanity check the channels count if the mapping was incorrect */
if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
@@ -930,6 +940,19 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return 0;
}
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ int ret;
+
+ dw_pcie_edma_init_data(pci);
+
+ ret = dw_pcie_edma_find_mf(pci);
+ if (ret)
+ return ret;
+
+ return dw_pcie_edma_find_channels(pci);
+}
+
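A minimal sketch, outside the patch, of how a glue driver can bypass this auto-detection entirely (the CSR offset below is an assumed example value):

/*
 * Illustrative sketch, not part of the patch: a glue driver presets the eDMA
 * mapping format and CSR base so dw_pcie_edma_find_mf() only validates
 * edma.reg_base instead of auto-detecting. SZ_2K is an assumed offset.
 */
static void example_preset_edma_mapping(struct dw_pcie *pci)
{
	pci->edma.mf = EDMA_MF_EDMA_UNROLL;
	pci->edma.reg_base = pci->dbi_base + SZ_2K;	/* assumed CSR offset */
}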
static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index f8e5431a207b..53c4c8f399c8 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -51,9 +51,8 @@
/* DWC PCIe controller capabilities */
#define DW_PCIE_CAP_REQ_RES 0
-#define DW_PCIE_CAP_EDMA_UNROLL 1
-#define DW_PCIE_CAP_IATU_UNROLL 2
-#define DW_PCIE_CAP_CDM_CHECK 3
+#define DW_PCIE_CAP_IATU_UNROLL 1
+#define DW_PCIE_CAP_CDM_CHECK 2
#define dw_pcie_cap_is(_pci, _cap) \
test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
@@ -63,14 +62,16 @@
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
+#define LINK_WAIT_SLEEP_MS 90
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_FORCE 0x708
+#define PORT_FORCE_DO_DESKEW_FOR_SRIS BIT(23)
+
#define PCIE_PORT_AFR 0x70C
#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
@@ -92,6 +93,9 @@
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+#define PCIE_PORT_LANE_SKEW 0x714
+#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
+
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
@@ -148,11 +152,13 @@
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TYPE_MSG 0x10
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x008
#define PCIE_ATU_UPPER_BASE 0x00C
@@ -299,6 +305,17 @@ enum dw_pcie_ltssm {
DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
};
+struct dw_pcie_ob_atu_cfg {
+ int index;
+ int type;
+ u8 func_no;
+ u8 code;
+ u8 routing;
+ u64 cpu_addr;
+ u64 pci_addr;
+ u64 size;
+};
+
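With the prototype change further below, callers pass a single config struct instead of positional arguments; a minimal usage sketch, with made-up index, addresses, and size:

/*
 * Illustrative sketch, not part of the patch: program one outbound MEM window
 * through the struct-based API. All values are example assumptions.
 */
static int example_map_mem_window(struct dw_pcie *pci)
{
	struct dw_pcie_ob_atu_cfg atu = { 0 };

	atu.index = 0;
	atu.type = PCIE_ATU_TYPE_MEM;
	atu.cpu_addr = 0x40000000;
	atu.pci_addr = 0x0;
	atu.size = SZ_1M;

	return dw_pcie_prog_outbound_atu(pci, &atu);
}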
struct dw_pcie_host_ops {
int (*init)(struct dw_pcie_rp *pp);
void (*deinit)(struct dw_pcie_rp *pp);
@@ -328,6 +345,9 @@ struct dw_pcie_rp {
struct pci_host_bridge *bridge;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+ bool use_atu_msg;
+ int msg_atu_index;
+ struct resource *msg_res;
};
struct dw_pcie_ep_ops {
@@ -433,10 +453,8 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
@@ -668,9 +686,9 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
@@ -688,18 +706,18 @@ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
}
-static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
{
- return 0;
}
-static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
+ return 0;
}
static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index d6842141d384..1170e1107508 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -34,10 +34,16 @@
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
+#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
+#define PCIE_CLIENT_INTR_MASK_MISC 0x24
#define PCIE_SMLH_LINKUP BIT(16)
#define PCIE_RDLH_LINKUP BIT(17)
#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
+#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
#define PCIE_L0S_ENTRY 0x11
#define PCIE_CLIENT_GENERAL_CONTROL 0x0
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
@@ -49,25 +55,30 @@
#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
struct rockchip_pcie {
- struct dw_pcie pci;
- void __iomem *apb_base;
- struct phy *phy;
- struct clk_bulk_data *clks;
- unsigned int clk_cnt;
- struct reset_control *rst;
- struct gpio_desc *rst_gpio;
- struct regulator *vpcie3v3;
- struct irq_domain *irq_domain;
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct regulator *vpcie3v3;
+ struct irq_domain *irq_domain;
+ const struct rockchip_pcie_of_data *data;
};
-static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
- u32 reg)
+struct rockchip_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct pci_epc_features *epc_features;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)
{
return readl_relaxed(rockchip->apb_base + reg);
}
-static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
- u32 val, u32 reg)
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
{
writel_relaxed(val, rockchip->apb_base + reg);
}
@@ -144,16 +155,27 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return 0;
}
+static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
+{
+ return rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+}
+
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
PCIE_CLIENT_GENERAL_CONTROL);
}
+static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CONTROL);
+}
+
static int rockchip_pcie_link_up(struct dw_pcie *pci)
{
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
- u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+ u32 val = rockchip_pcie_get_ltssm(rockchip);
if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
(val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
@@ -186,12 +208,18 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
return 0;
}
+static void rockchip_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ rockchip_pcie_disable_ltssm(rockchip);
+}
+
static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
struct device *dev = rockchip->pci.dev;
- u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
int irq, ret;
irq = of_irq_get_byname(dev->of_node, "legacy");
@@ -205,12 +233,6 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
rockchip);
- /* LTSSM enable control mode */
- rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
-
- rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
-
return 0;
}
@@ -218,6 +240,82 @@ static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
.init = rockchip_pcie_host_init,
};
+static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+};
+
+static int rockchip_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+};
+
+/*
+ * BAR4 on rk3588 exposes the ATU Port Logic Structure to the host regardless of
+ * iATU settings for BAR4. This means that BAR4 cannot be used by an EPF driver,
+ * so mark it as RESERVED. (rockchip_pcie_ep_init() will disable all BARs by
+ * default.) If the host could write to BAR4, the iATU settings for all other
+ * BARs would be overwritten, leaving those BARs no longer working.
+ */
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+};
+
+static const struct pci_epc_features *
+rockchip_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ return rockchip->data->epc_features;
+}
+
+static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {
+ .init = rockchip_pcie_ep_init,
+ .raise_irq = rockchip_pcie_raise_irq,
+ .get_features = rockchip_pcie_get_features,
+};
+
static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->pci.dev;
@@ -225,11 +323,15 @@ static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
rockchip->clk_cnt = ret;
- return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ return 0;
}
static int rockchip_pcie_resource_get(struct platform_device *pdev,
@@ -237,12 +339,14 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
{
rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
if (IS_ERR(rockchip->apb_base))
- return PTR_ERR(rockchip->apb_base);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->apb_base),
+ "failed to map apb registers\n");
rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(rockchip->rst_gpio))
- return PTR_ERR(rockchip->rst_gpio);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst_gpio),
+ "failed to get reset gpio\n");
rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rockchip->rst))
@@ -282,15 +386,127 @@ static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = rockchip_pcie_link_up,
.start_link = rockchip_pcie_start_link,
+ .stop_link = rockchip_pcie_stop_link,
};
+static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_LINK_REQ_RST_NOT_INT) {
+ dev_dbg(dev, "hot reset or link-down reset\n");
+ dw_pcie_ep_linkdown(&pci->ep);
+ }
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ val = rockchip_pcie_get_ltssm(rockchip);
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ dev_dbg(dev, "link up\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
+{
+ struct dw_pcie_rp *pp;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
+ return -ENODEV;
+
+ /* LTSSM enable control mode */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int rockchip_pcie_configure_ep(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_EP))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0) {
+ dev_err(dev, "missing sys IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_ep_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /* LTSSM enable control mode */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
+ rockchip->pci.ep.page_size = SZ_64K;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
+ ret = dw_pcie_ep_init(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&rockchip->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(rockchip->pci.ep.epc);
+
+ /* unmask DLL up/down indicator and hot reset/link-down reset */
+ rockchip_pcie_writel_apb(rockchip, 0x60000, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip;
- struct dw_pcie_rp *pp;
+ const struct rockchip_pcie_of_data *data;
int ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
if (!rockchip)
return -ENOMEM;
@@ -299,9 +515,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->pci.dev = dev;
rockchip->pci.ops = &dw_pcie_ops;
-
- pp = &rockchip->pci.pp;
- pp->ops = &rockchip_pcie_host_ops;
+ rockchip->data = data;
ret = rockchip_pcie_resource_get(pdev, rockchip);
if (ret)
@@ -320,10 +534,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->vpcie3v3 = NULL;
} else {
ret = regulator_enable(rockchip->vpcie3v3);
- if (ret) {
- dev_err(dev, "failed to enable vpcie3v3 regulator\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to enable vpcie3v3 regulator\n");
}
ret = rockchip_pcie_phy_init(rockchip);
@@ -338,10 +551,26 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
goto deinit_phy;
- ret = dw_pcie_host_init(pp);
- if (!ret)
- return 0;
+ switch (data->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = rockchip_pcie_configure_rc(rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ case DW_PCIE_EP_TYPE:
+ ret = rockchip_pcie_configure_ep(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", data->mode);
+ ret = -EINVAL;
+ goto deinit_clk;
+ }
+
+ return 0;
+deinit_clk:
clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
deinit_phy:
rockchip_pcie_phy_deinit(rockchip);
@@ -352,8 +581,33 @@ disable_regulator:
return ret;
}
+static const struct rockchip_pcie_of_data rockchip_pcie_rc_of_data_rk3568 = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3568 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3568,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3588 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3588,
+};
+
static const struct of_device_id rockchip_pcie_of_match[] = {
- { .compatible = "rockchip,rk3568-pcie", },
+ {
+ .compatible = "rockchip,rk3568-pcie",
+ .data = &rockchip_pcie_rc_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3568-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3588-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3588,
+ },
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 98bbc83182b4..278205db60a2 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -442,7 +442,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)
return ret;
}
- dw_pcie_ep_init_notify(&pci->ep);
+ pci_epc_init_notify(pci->ep.epc);
break;
default:
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d5523f302102..0a29136491b8 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -12,12 +12,10 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/pci.h>
@@ -78,16 +76,16 @@ struct kirin_pcie {
void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */
/* DWC PERST# */
- int gpio_id_dwc_perst;
+ struct gpio_desc *id_dwc_perst_gpio;
/* Per-slot PERST# */
int num_slots;
- int gpio_id_reset[MAX_PCI_SLOTS];
+ struct gpio_desc *id_reset_gpio[MAX_PCI_SLOTS];
const char *reset_names[MAX_PCI_SLOTS];
/* Per-slot clkreq */
int n_gpio_clkreq;
- int gpio_id_clkreq[MAX_PCI_SLOTS];
+ struct gpio_desc *id_clkreq_gpio[MAX_PCI_SLOTS];
const char *clkreq_names[MAX_PCI_SLOTS];
};
@@ -381,15 +379,20 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
pcie->n_gpio_clkreq = ret;
for (i = 0; i < pcie->n_gpio_clkreq; i++) {
- pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
- "hisilicon,clken-gpios", i);
- if (pcie->gpio_id_clkreq[i] < 0)
- return pcie->gpio_id_clkreq[i];
+ pcie->id_clkreq_gpio[i] = devm_gpiod_get_index(dev,
+ "hisilicon,clken", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->id_clkreq_gpio[i]))
+ return dev_err_probe(dev, PTR_ERR(pcie->id_clkreq_gpio[i]),
+ "unable to get a valid clken gpio\n");
pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"pcie_clkreq_%d", i);
if (!pcie->clkreq_names[i])
return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_clkreq_gpio[i],
+ pcie->clkreq_names[i]);
}
return 0;
@@ -400,29 +403,33 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
struct device_node *node)
{
struct device *dev = &pdev->dev;
- struct device_node *parent, *child;
int ret, slot, i;
- for_each_available_child_of_node(node, parent) {
- for_each_available_child_of_node(parent, child) {
+ for_each_available_child_of_node_scoped(node, parent) {
+ for_each_available_child_of_node_scoped(parent, child) {
i = pcie->num_slots;
- pcie->gpio_id_reset[i] = of_get_named_gpio(child,
- "reset-gpios", 0);
- if (pcie->gpio_id_reset[i] < 0)
- continue;
+ pcie->id_reset_gpio[i] = devm_fwnode_gpiod_get_index(dev,
+ of_fwnode_handle(child),
+ "reset", 0, GPIOD_OUT_LOW,
+ NULL);
+ if (IS_ERR(pcie->id_reset_gpio[i])) {
+ if (PTR_ERR(pcie->id_reset_gpio[i]) == -ENOENT)
+ continue;
+ return dev_err_probe(dev, PTR_ERR(pcie->id_reset_gpio[i]),
+ "unable to get a valid reset gpio\n");
+ }
pcie->num_slots++;
if (pcie->num_slots > MAX_PCI_SLOTS) {
dev_err(dev, "Too many PCI slots!\n");
- ret = -EINVAL;
- goto put_node;
+ return -EINVAL;
}
ret = of_pci_get_devfn(child);
if (ret < 0) {
dev_err(dev, "failed to parse devfn: %d\n", ret);
- goto put_node;
+ return ret;
}
slot = PCI_SLOT(ret);
@@ -430,19 +437,15 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"pcie_perst_%d",
slot);
- if (!pcie->reset_names[i]) {
- ret = -ENOMEM;
- goto put_node;
- }
+ if (!pcie->reset_names[i])
+ return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_reset_gpio[i],
+ pcie->reset_names[i]);
}
}
return 0;
-
-put_node:
- of_node_put(child);
- of_node_put(parent);
- return ret;
}
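The conversion above replaces the legacy integer-GPIO calls with GPIO descriptors; a minimal sketch of that pattern follows (the "example" con_id and label are assumptions, not names from the patch):

/*
 * Illustrative sketch, not part of the patch: fetch a GPIO as a descriptor
 * and give it a consumer name, as the converted driver does.
 */
static int example_get_reset_gpio(struct device *dev)
{
	struct gpio_desc *desc;

	desc = devm_gpiod_get(dev, "example", GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return dev_err_probe(dev, PTR_ERR(desc),
				     "unable to get a valid example gpio\n");

	gpiod_set_consumer_name(desc, "example_reset");

	return 0;
}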
static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
@@ -463,14 +466,11 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
return PTR_ERR(kirin_pcie->apb);
/* pcie internal PERST# gpio */
- kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
- "reset-gpios", 0);
- if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
- dev_err(dev, "unable to get a valid gpio pin\n");
- return -ENODEV;
- }
+ kirin_pcie->id_dwc_perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(kirin_pcie->id_dwc_perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(kirin_pcie->id_dwc_perst_gpio),
+ "unable to get a valid gpio pin\n");
+ gpiod_set_consumer_name(kirin_pcie->id_dwc_perst_gpio, "pcie_perst_bridge");
ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
if (ret)
@@ -553,7 +553,7 @@ static int kirin_pcie_add_bus(struct pci_bus *bus)
/* Send PERST# to each slot */
for (i = 0; i < kirin_pcie->num_slots; i++) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
+ ret = gpiod_direction_output_raw(kirin_pcie->id_reset_gpio[i], 1);
if (ret) {
dev_err(pci->dev, "PERST# %s error: %d\n",
kirin_pcie->reset_names[i], ret);
@@ -623,44 +623,6 @@ static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
return 0;
}
-static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
- struct device *dev)
-{
- int ret, i;
-
- for (i = 0; i < kirin_pcie->num_slots; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->reset_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
- kirin_pcie->reset_names[i]);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->clkreq_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
- kirin_pcie->clkreq_names[i]);
- if (ret)
- return ret;
-
- ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops kirin_dw_pcie_ops = {
.read_dbi = kirin_pcie_read_dbi,
.write_dbi = kirin_pcie_write_dbi,
@@ -680,7 +642,7 @@ static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
return hi3660_pcie_phy_power_off(kirin_pcie);
for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
- gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);
+ gpiod_direction_output_raw(kirin_pcie->id_clkreq_gpio[i], 1);
phy_power_off(kirin_pcie->phy);
phy_exit(kirin_pcie->phy);
@@ -707,10 +669,6 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
if (IS_ERR(kirin_pcie->phy))
return PTR_ERR(kirin_pcie->phy);
- ret = kirin_pcie_gpio_request(kirin_pcie, dev);
- if (ret)
- return ret;
-
ret = phy_init(kirin_pcie->phy);
if (ret)
goto err;
@@ -723,11 +681,9 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
/* perst assert Endpoint */
usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
- if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
- if (ret)
- goto err;
- }
+ ret = gpiod_direction_output_raw(kirin_pcie->id_dwc_perst_gpio, 1);
+ if (ret)
+ goto err;
usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 2fb8c15e7a91..236229f66c80 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -47,6 +47,7 @@
#define PARF_DBI_BASE_ADDR_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_NO_SNOOP_OVERIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
@@ -86,6 +87,10 @@
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+/* PARF_NO_SNOOP_OVERIDE register fields */
+#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
+
/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP 0x0
@@ -150,6 +155,16 @@ enum qcom_pcie_ep_link_status {
};
/**
+ * struct qcom_pcie_ep_cfg - Per SoC config struct
+ * @hdma_support: HDMA support on this SoC
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
+ */
+struct qcom_pcie_ep_cfg {
+ bool hdma_support;
+ bool override_no_snoop;
+};
+
+/**
* struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
* @pci: Designware PCIe controller struct
* @parf: Qualcomm PCIe specific PARF register base
@@ -167,6 +182,7 @@ enum qcom_pcie_ep_link_status {
* @num_clks: PCIe clocks count
* @perst_en: Flag for PERST enable
* @perst_sep_en: Flag for PERST separation enable
+ * @cfg: PCIe EP config struct
* @link_status: PCIe Link status
* @global_irq: Qualcomm PCIe specific Global IRQ
* @perst_irq: PERST# IRQ
@@ -194,6 +210,7 @@ struct qcom_pcie_ep {
u32 perst_en;
u32 perst_sep_en;
+ const struct qcom_pcie_ep_cfg *cfg;
enum qcom_pcie_ep_link_status link_status;
int global_irq;
int perst_irq;
@@ -482,13 +499,17 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
val &= ~PARF_MSTR_AXI_CLK_EN;
writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
- dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+ pci_epc_init_notify(pcie_ep->pci.ep.epc);
/* Enable LTSSM */
val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
val |= BIT(8);
writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+ if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
+ writel_relaxed(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
+ pcie_ep->parf + PARF_NO_SNOOP_OVERIDE);
+
return 0;
err_disable_resources:
@@ -500,13 +521,8 @@ err_disable_resources:
static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
-
- if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
- dev_dbg(dev, "Link is already disabled\n");
- return;
- }
+ pci_epc_deinit_notify(pci->ep.epc);
dw_pcie_ep_cleanup(&pci->ep);
qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
@@ -640,12 +656,12 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
- pci_epc_linkdown(pci->ep.epc);
+ dw_pcie_ep_linkdown(&pci->ep);
} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
- dev_dbg(dev, "Received BME event. Link is enabled!\n");
+ dev_dbg(dev, "Received Bus Master Enable event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
qcom_pcie_ep_icc_update(pcie_ep);
- pci_epc_bme_notify(pci->ep.epc);
+ pci_epc_bus_master_enable_notify(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
@@ -816,6 +832,14 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
pcie_ep->pci.edma.nr_irqs = 1;
+
+ pcie_ep->cfg = of_device_get_match_data(dev);
+ if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) {
+ pcie_ep->pci.edma.ll_wr_cnt = 8;
+ pcie_ep->pci.edma.ll_rd_cnt = 8;
+ pcie_ep->pci.edma.mf = EDMA_MF_HDMA_NATIVE;
+ }
+
platform_set_drvdata(pdev, pcie_ep);
ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
@@ -874,7 +898,13 @@ static void qcom_pcie_ep_remove(struct platform_device *pdev)
qcom_pcie_disable_resources(pcie_ep);
}
+static const struct qcom_pcie_ep_cfg cfg_1_34_0 = {
+ .hdma_support = true,
+ .override_no_snoop = true,
+};
+
static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},
{ .compatible = "qcom,sdx55-pcie-ep", },
{ .compatible = "qcom,sm8450-pcie-ep", },
{ }
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 14772edcf0d3..0180edf3310e 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -18,10 +18,11 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
@@ -30,6 +31,7 @@
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/units.h>
#include "../../pci.h"
#include "pcie-designware.h"
@@ -51,6 +53,7 @@
#define PARF_SID_OFFSET 0x234
#define PARF_BDF_TRANSLATE_CFG 0x24c
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_NO_SNOOP_OVERIDE 0x3d4
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
#define PARF_BDF_TO_SID_CFG 0x2c00
@@ -118,6 +121,10 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
+/* PARF_NO_SNOOP_OVERIDE register fields */
+#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
+
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
@@ -154,58 +161,56 @@
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
-#define QCOM_PCIE_1_0_0_MAX_CLOCKS 4
struct qcom_pcie_resources_1_0_0 {
- struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *core;
struct regulator *vdda;
};
-#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
#define QCOM_PCIE_2_1_0_MAX_RESETS 6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
struct qcom_pcie_resources_2_1_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
int num_resets;
struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
-#define QCOM_PCIE_2_3_2_MAX_CLOCKS 4
#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
- struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
-#define QCOM_PCIE_2_3_3_MAX_CLOCKS 5
#define QCOM_PCIE_2_3_3_MAX_RESETS 7
struct qcom_pcie_resources_2_3_3 {
- struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};
-#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
#define QCOM_PCIE_2_4_0_MAX_RESETS 12
struct qcom_pcie_resources_2_4_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
int num_resets;
};
-#define QCOM_PCIE_2_7_0_MAX_CLOCKS 15
#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
struct reset_control *rst;
};
-#define QCOM_PCIE_2_9_0_MAX_CLOCKS 5
struct qcom_pcie_resources_2_9_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *rst;
};
@@ -231,8 +236,15 @@ struct qcom_pcie_ops {
int (*config_sid)(struct qcom_pcie *pcie);
};
+ /**
+ * struct qcom_pcie_cfg - Per SoC config struct
+ * @ops: qcom PCIe ops structure
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
+ * snooping
+ */
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
+ bool override_no_snoop;
bool no_l0s;
};
@@ -245,6 +257,7 @@ struct qcom_pcie {
struct phy *phy;
struct gpio_desc *reset;
struct icc_path *icc_mem;
+ struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
struct dentry *debugfs;
bool suspended;
@@ -337,21 +350,11 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "core";
- res->clks[2].id = "phy";
- res->clks[3].id = "aux";
- res->clks[4].id = "ref";
-
- /* iface, core, phy are required */
- ret = devm_clk_bulk_get(dev, 3, res->clks);
- if (ret < 0)
- return ret;
-
- /* aux, ref are optional */
- ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->resets[0].id = "pci";
res->resets[1].id = "axi";
@@ -373,7 +376,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
reset_control_bulk_assert(res->num_resets, res->resets);
writel(1, pcie->parf + PARF_PHY_CTRL);
@@ -425,7 +428,7 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret)
return ret;
@@ -476,20 +479,16 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int ret;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
return PTR_ERR(res->vdda);
- res->clks[0].id = "iface";
- res->clks[1].id = "aux";
- res->clks[2].id = "master_bus";
- res->clks[3].id = "slave_bus";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->core = devm_reset_control_get_exclusive(dev, "core");
return PTR_ERR_OR_ZERO(res->core);
@@ -500,7 +499,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
reset_control_assert(res->core);
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_disable(res->vdda);
}
@@ -517,7 +516,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_reset;
@@ -532,7 +531,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return 0;
err_disable_clks:
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
reset_control_assert(res->core);
@@ -580,14 +579,11 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "cfg";
- res->clks[2].id = "bus_master";
- res->clks[3].id = "bus_slave";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
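The resource getters in this file all move to the same DT-driven clock lookup; a minimal sketch of the pattern in isolation:

/*
 * Illustrative sketch, not part of the patch: devm_clk_bulk_get_all() pulls
 * the clock list from the device tree, so no names or counts are hard-coded.
 */
static int example_get_and_enable_clks(struct device *dev,
				       struct clk_bulk_data **clks)
{
	int num_clks;

	num_clks = devm_clk_bulk_get_all(dev, clks);
	if (num_clks < 0)
		return dev_err_probe(dev, num_clks, "Failed to get clocks\n");

	return clk_bulk_prepare_enable(num_clks, *clks);
}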
@@ -596,7 +592,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -613,7 +609,7 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -661,17 +657,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
int ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "master_bus";
- res->clks[2].id = "slave_bus";
- res->clks[3].id = "iface";
-
- /* qcom,pcie-ipq4019 is defined without "iface" */
- res->num_clks = is_ipq ? 3 : 4;
-
- ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->resets[0].id = "axi_m";
res->resets[1].id = "axi_s";
@@ -742,15 +732,11 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
struct device *dev = pci->dev;
int ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "axi_m";
- res->clks[2].id = "axi_s";
- res->clks[3].id = "ahb";
- res->clks[4].id = "aux";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->rst[0].id = "axi_m";
res->rst[1].id = "axi_s";
@@ -771,7 +757,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
@@ -801,7 +787,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
*/
usleep_range(2000, 2500);
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_resets;
@@ -862,8 +848,6 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- unsigned int num_clks, num_opt_clks;
- unsigned int idx;
int ret;
res->rst = devm_reset_control_array_get_exclusive(dev);
@@ -877,36 +861,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- idx = 0;
- res->clks[idx++].id = "aux";
- res->clks[idx++].id = "cfg";
- res->clks[idx++].id = "bus_master";
- res->clks[idx++].id = "bus_slave";
- res->clks[idx++].id = "slave_q2a";
-
- num_clks = idx;
-
- ret = devm_clk_bulk_get(dev, num_clks, res->clks);
- if (ret < 0)
- return ret;
-
- res->clks[idx++].id = "tbu";
- res->clks[idx++].id = "ddrss_sf_tbu";
- res->clks[idx++].id = "aggre0";
- res->clks[idx++].id = "aggre1";
- res->clks[idx++].id = "noc_aggr";
- res->clks[idx++].id = "noc_aggr_4";
- res->clks[idx++].id = "noc_aggr_south_sf";
- res->clks[idx++].id = "cnoc_qx";
- res->clks[idx++].id = "sleep";
- res->clks[idx++].id = "cnoc_sf_axi";
-
- num_opt_clks = idx - num_clks;
- res->num_clks = idx;
-
- ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
@@ -986,6 +945,12 @@ err_disable_regulators:
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
+ const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
+
+ if (pcie_cfg->override_no_snoop)
+ writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
+ pcie->parf + PARF_NO_SNOOP_OVERIDE);
+
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
@@ -1101,17 +1066,12 @@ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "axi_m";
- res->clks[2].id = "axi_s";
- res->clks[3].id = "axi_bridge";
- res->clks[4].id = "rchng";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(res->rst))
@@ -1124,7 +1084,7 @@ static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
@@ -1153,7 +1113,7 @@ static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
usleep_range(2000, 2500);
- return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ return clk_bulk_prepare_enable(res->num_clks, res->clks);
}
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
@@ -1366,6 +1326,11 @@ static const struct qcom_pcie_cfg cfg_1_9_0 = {
.ops = &ops_1_9_0,
};
+static const struct qcom_pcie_cfg cfg_1_34_0 = {
+ .ops = &ops_1_9_0,
+ .override_no_snoop = true,
+};
+
static const struct qcom_pcie_cfg cfg_2_1_0 = {
.ops = &ops_2_1_0,
};
@@ -1409,6 +1374,9 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
if (IS_ERR(pcie->icc_mem))
return PTR_ERR(pcie->icc_mem);
+ pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
+ if (IS_ERR(pcie->icc_cpu))
+ return PTR_ERR(pcie->icc_cpu);
/*
* Some Qualcomm platforms require interconnect bandwidth constraints
* to be set before enabling interconnect clocks.
@@ -1418,23 +1386,35 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
*/
ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Since the CPU-PCIe path is only used for activities like host controller
+ * register access and endpoint Config/BAR space access, the HW team has
+ * recommended a minimal bandwidth of 1 KBps just to keep the path active.
+ */
+ ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
ret);
+ icc_set_bw(pcie->icc_mem, 0, 0);
return ret;
}
return 0;
}
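A minimal sketch of the new CPU-PCIe keep-alive vote in isolation, assuming the "cpu-pcie" path name used above:

/*
 * Illustrative sketch, not part of the patch: request the CPU-PCIe
 * interconnect path and apply the 1 KBps keep-alive vote described above.
 */
static int example_keep_cpu_pcie_path_alive(struct device *dev)
{
	struct icc_path *path;

	path = devm_of_icc_get(dev, "cpu-pcie");
	if (IS_ERR(path))
		return PTR_ERR(path);

	return icc_set_bw(path, 0, kBps_to_icc(1));
}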
-static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
+static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
+ u32 offset, status, width, speed;
struct dw_pcie *pci = pcie->pci;
- u32 offset, status;
- int speed, width;
- int ret;
-
- if (!pcie->icc_mem)
- return;
+ unsigned long freq_kbps;
+ struct dev_pm_opp *opp;
+ int ret, freq_mbps;
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
@@ -1446,10 +1426,28 @@ static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
- ret = icc_set_bw(pcie->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
- if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
- ret);
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0,
+ width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ }
+ } else {
+ freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
+ if (freq_mbps < 0)
+ return;
+
+ freq_kbps = freq_mbps * KILO;
+ opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
+ true);
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_set_opp(pci->dev, opp);
+ if (ret)
+ dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
+ freq_kbps * width, ret);
+ dev_pm_opp_put(opp);
+ }
}
}
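The OPP branch above reduces to an exact-frequency lookup plus a set/put pair; a minimal sketch with an assumed frequency value:

/*
 * Illustrative sketch, not part of the patch: exact-frequency OPP lookup as
 * done in qcom_pcie_icc_opp_update(). The frequency is an assumed example.
 */
static int example_set_link_opp(struct device *dev)
{
	unsigned long freq = 2000 * KILO;	/* assumed example value */
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}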
@@ -1493,7 +1491,9 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
+ unsigned long max_freq = ULONG_MAX;
struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
struct qcom_pcie *pcie;
struct dw_pcie_rp *pp;
struct resource *res;
@@ -1561,9 +1561,43 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- ret = qcom_pcie_icc_init(pcie);
- if (ret)
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(dev, ret, "Failed to add OPP table\n");
goto err_pm_runtime_put;
+ }
+
+ /*
+ * Before the PCIe link is initialized, vote for the highest OPP in the
+ * OPP table, so that the maximum voltage corner is selected and the link
+ * can come up at the maximum supported speed. At the end of probe(), the
+ * OPP will be updated using qcom_pcie_icc_opp_update().
+ */
+ if (!ret) {
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err_probe(pci->dev, ret,
+ "Unable to find max freq OPP\n");
+ goto err_pm_runtime_put;
+ } else {
+ ret = dev_pm_opp_set_opp(dev, opp);
+ }
+
+ dev_pm_opp_put(opp);
+ if (ret) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to set OPP for freq %lu\n",
+ max_freq);
+ goto err_pm_runtime_put;
+ }
+ } else {
+ /* Skip ICC init if OPP is supported as it is handled by OPP */
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
@@ -1583,7 +1617,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_phy_exit;
}
- qcom_pcie_icc_update(pcie);
+ qcom_pcie_icc_opp_update(pcie);
if (pcie->mhi)
qcom_pcie_init_debugfs(pcie);
@@ -1602,16 +1636,20 @@ err_pm_runtime_put:
static int qcom_pcie_suspend_noirq(struct device *dev)
{
struct qcom_pcie *pcie = dev_get_drvdata(dev);
- int ret;
+ int ret = 0;
/*
* Set minimum bandwidth required to keep data path functional during
* suspend.
*/
- ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
- if (ret) {
- dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
- return ret;
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(dev,
+ "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
}
/*
@@ -1634,7 +1672,21 @@ static int qcom_pcie_suspend_noirq(struct device *dev)
pcie->suspended = true;
}
- return 0;
+ /*
+ * Only disable the CPU-PCIe interconnect path if the suspend is not S2RAM,
+ * because on some platforms DBI access can happen very late during S2RAM
+ * and an inactive CPU-PCIe interconnect path may lead to a NoC error.
+ */
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_disable(pcie->icc_cpu);
+ if (ret)
+ dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
+
+ if (!pcie->icc_mem)
+ dev_pm_opp_set_opp(pcie->pci->dev, NULL);
+ }
+ return ret;
}
static int qcom_pcie_resume_noirq(struct device *dev)
@@ -1642,6 +1694,14 @@ static int qcom_pcie_resume_noirq(struct device *dev)
struct qcom_pcie *pcie = dev_get_drvdata(dev);
int ret;
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_enable(pcie->icc_cpu);
+ if (ret) {
+ dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
+ return ret;
+ }
+ }
+
if (pcie->suspended) {
ret = qcom_pcie_host_init(&pcie->pci->pp);
if (ret)
@@ -1650,7 +1710,7 @@ static int qcom_pcie_resume_noirq(struct device *dev)
pcie->suspended = false;
}
- qcom_pcie_icc_update(pcie);
+ qcom_pcie_icc_opp_update(pcie);
return 0;
}
@@ -1667,7 +1727,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
- { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
+ { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index cfeccc2f9ee1..f0f3ebd1a033 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -2,11 +2,17 @@
/*
* PCIe controller driver for Renesas R-Car Gen4 Series SoCs
* Copyright (C) 2022-2023 Renesas Electronics Corporation
+ *
+ * The r8a779g0 (R-Car V4H) controller requires a specific firmware to be
+ * provided to initialize the PHY; otherwise, the PCIe controller will not
+ * work.
*/
#include <linux/delay.h>
+#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
@@ -20,9 +26,10 @@
/* Renesas-specific */
/* PCIe Mode Setting Register 0 */
#define PCIEMSR0 0x0000
-#define BIFUR_MOD_SET_ON BIT(0)
+#define APP_SRIS_MODE BIT(6)
#define DEVICE_TYPE_EP 0
#define DEVICE_TYPE_RC BIT(4)
+#define BIFUR_MOD_SET_ON BIT(0)
/* PCIe Interrupt Status 0 */
#define PCIEINTSTS0 0x0084
@@ -37,47 +44,49 @@
#define PCIEDMAINTSTSEN 0x0314
#define PCIEDMAINTSTSEN_INIT GENMASK(15, 0)
+/* Port Logic Registers 89 */
+#define PRTLGC89 0x0b70
+
+/* Port Logic Registers 90 */
+#define PRTLGC90 0x0b74
+
/* PCIe Reset Control Register 1 */
#define PCIERSTCTRL1 0x0014
#define APP_HOLD_PHY_RST BIT(16)
#define APP_LTSSM_ENABLE BIT(0)
+/* PCIe Power Management Control */
+#define PCIEPWRMNGCTRL 0x0070
+#define APP_CLK_REQ_N BIT(11)
+#define APP_CLK_PM_EN BIT(10)
+
#define RCAR_NUM_SPEED_CHANGE_RETRIES 10
#define RCAR_MAX_LINK_SPEED 4
#define RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET 0x1000
#define RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET 0x800
+#define RCAR_GEN4_PCIE_FIRMWARE_NAME "rcar_gen4_pcie.bin"
+#define RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR 0xc000
+MODULE_FIRMWARE(RCAR_GEN4_PCIE_FIRMWARE_NAME);
+
+struct rcar_gen4_pcie;
+struct rcar_gen4_pcie_drvdata {
+ void (*additional_common_init)(struct rcar_gen4_pcie *rcar);
+ int (*ltssm_control)(struct rcar_gen4_pcie *rcar, bool enable);
+ enum dw_pcie_device_mode mode;
+};
+
struct rcar_gen4_pcie {
struct dw_pcie dw;
void __iomem *base;
+ void __iomem *phy_base;
struct platform_device *pdev;
- enum dw_pcie_device_mode mode;
+ const struct rcar_gen4_pcie_drvdata *drvdata;
};
#define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw)
/* Common */
-static void rcar_gen4_pcie_ltssm_enable(struct rcar_gen4_pcie *rcar,
- bool enable)
-{
- u32 val;
-
- val = readl(rcar->base + PCIERSTCTRL1);
- if (enable) {
- val |= APP_LTSSM_ENABLE;
- val &= ~APP_HOLD_PHY_RST;
- } else {
- /*
- * Since the datasheet of R-Car doesn't mention how to assert
- * the APP_HOLD_PHY_RST, don't assert it again. Otherwise,
- * hang-up issue happened in the dw_edma_core_off() when
- * the controller didn't detect a PCI device.
- */
- val &= ~APP_LTSSM_ENABLE;
- }
- writel(val, rcar->base + PCIERSTCTRL1);
-}
-
static int rcar_gen4_pcie_link_up(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
@@ -123,9 +132,13 @@ static int rcar_gen4_pcie_speed_change(struct dw_pcie *dw)
static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
- int i, changes;
+ int i, changes, ret;
- rcar_gen4_pcie_ltssm_enable(rcar, true);
+ if (rcar->drvdata->ltssm_control) {
+ ret = rcar->drvdata->ltssm_control(rcar, true);
+ if (ret)
+ return ret;
+ }
/*
* Require direct speed change with retrying here if the link_gen is
@@ -137,7 +150,7 @@ static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
* Since dw_pcie_setup_rc() sets it once, PCIe Gen2 will be trained.
* So, this needs remaining times for up to PCIe Gen4 if RC mode.
*/
- if (changes && rcar->mode == DW_PCIE_RC_TYPE)
+ if (changes && rcar->drvdata->mode == DW_PCIE_RC_TYPE)
changes--;
for (i = 0; i < changes; i++) {
@@ -153,7 +166,8 @@ static void rcar_gen4_pcie_stop_link(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
- rcar_gen4_pcie_ltssm_enable(rcar, false);
+ if (rcar->drvdata->ltssm_control)
+ rcar->drvdata->ltssm_control(rcar, false);
}
static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
@@ -172,9 +186,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
val = readl(rcar->base + PCIEMSR0);
- if (rcar->mode == DW_PCIE_RC_TYPE) {
+ if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
val |= DEVICE_TYPE_RC;
- } else if (rcar->mode == DW_PCIE_EP_TYPE) {
+ } else if (rcar->drvdata->mode == DW_PCIE_EP_TYPE) {
val |= DEVICE_TYPE_EP;
} else {
ret = -EINVAL;
@@ -190,6 +204,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
if (ret)
goto err_unprepare;
+ if (rcar->drvdata->additional_common_init)
+ rcar->drvdata->additional_common_init(rcar);
+
return 0;
err_unprepare:
@@ -231,6 +248,10 @@ static void rcar_gen4_pcie_unprepare(struct rcar_gen4_pcie *rcar)
static int rcar_gen4_pcie_get_resources(struct rcar_gen4_pcie *rcar)
{
+ rcar->phy_base = devm_platform_ioremap_resource_byname(rcar->pdev, "phy");
+ if (IS_ERR(rcar->phy_base))
+ return PTR_ERR(rcar->phy_base);
+
/* Renesas-specific registers */
rcar->base = devm_platform_ioremap_resource_byname(rcar->pdev, "app");
@@ -255,7 +276,7 @@ static struct rcar_gen4_pcie *rcar_gen4_pcie_alloc(struct platform_device *pdev)
rcar->dw.ops = &dw_pcie_ops;
rcar->dw.dev = dev;
rcar->pdev = pdev;
- dw_pcie_cap_set(&rcar->dw, EDMA_UNROLL);
+ rcar->dw.edma.mf = EDMA_MF_EDMA_UNROLL;
dw_pcie_cap_set(&rcar->dw, REQ_RES);
platform_set_drvdata(pdev, rcar);
@@ -437,7 +458,7 @@ static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
rcar_gen4_pcie_ep_deinit(rcar);
}
- dw_pcie_ep_init_notify(ep);
+ pci_epc_init_notify(ep->epc);
return ret;
}
@@ -451,9 +472,11 @@ static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
/* Common */
static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)
{
- rcar->mode = (uintptr_t)of_device_get_match_data(&rcar->pdev->dev);
+ rcar->drvdata = of_device_get_match_data(&rcar->pdev->dev);
+ if (!rcar->drvdata)
+ return -EINVAL;
- switch (rcar->mode) {
+ switch (rcar->drvdata->mode) {
case DW_PCIE_RC_TYPE:
return rcar_gen4_add_dw_pcie_rp(rcar);
case DW_PCIE_EP_TYPE:
@@ -494,7 +517,7 @@ err_unprepare:
static void rcar_gen4_remove_dw_pcie(struct rcar_gen4_pcie *rcar)
{
- switch (rcar->mode) {
+ switch (rcar->drvdata->mode) {
case DW_PCIE_RC_TYPE:
rcar_gen4_remove_dw_pcie_rp(rcar);
break;
@@ -514,14 +537,227 @@ static void rcar_gen4_pcie_remove(struct platform_device *pdev)
rcar_gen4_pcie_unprepare(rcar);
}
+static int r8a779f0_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ u32 val;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ if (enable) {
+ val |= APP_LTSSM_ENABLE;
+ val &= ~APP_HOLD_PHY_RST;
+ } else {
+	/*
+	 * The R-Car datasheet doesn't describe how to assert APP_HOLD_PHY_RST,
+	 * so don't assert it again here. Doing so caused a hang-up in
+	 * dw_edma_core_off() when the controller didn't detect a PCI device.
+	 */
+ val &= ~APP_LTSSM_ENABLE;
+ }
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_additional_common_init(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_LANE_SKEW);
+ val &= ~PORT_LANE_SKEW_INSERT_MASK;
+ if (dw->num_lanes < 4)
+ val |= BIT(6);
+ dw_pcie_writel_dbi(dw, PCIE_PORT_LANE_SKEW, val);
+
+ val = readl(rcar->base + PCIEPWRMNGCTRL);
+ val |= APP_CLK_REQ_N | APP_CLK_PM_EN;
+ writel(val, rcar->base + PCIEPWRMNGCTRL);
+}
+
+static void rcar_gen4_pcie_phy_reg_update_bits(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(rcar->phy_base + offset);
+ tmp &= ~mask;
+ tmp |= val;
+ writel(tmp, rcar->phy_base + offset);
+}
+
+/*
+ * The SoC datasheet suggests checking the port logic register bits during
+ * firmware writes. If the read returns a non-zero value, this function
+ * returns -EAGAIN to indicate that the write must be retried; if the read
+ * returns zero, it returns 0 to indicate success.
+ */
+static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask)
+{
+ struct dw_pcie *dw = &rcar->dw;
+
+ if (dw_pcie_readl_dbi(dw, offset) & mask)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
+{
+	/* The check_addr values are magic numbers taken from the datasheet */
+ const u32 check_addr[] = { 0x00101018, 0x00101118, 0x00101021, 0x00101121};
+ struct dw_pcie *dw = &rcar->dw;
+ const struct firmware *fw;
+ unsigned int i, timeout;
+ u32 data;
+ int ret;
+
+ ret = request_firmware(&fw, RCAR_GEN4_PCIE_FIRMWARE_NAME, dw->dev);
+ if (ret) {
+ dev_err(dw->dev, "Failed to load firmware (%s): %d\n",
+ RCAR_GEN4_PCIE_FIRMWARE_NAME, ret);
+ return ret;
+ }
+
+ for (i = 0; i < (fw->size / 2); i++) {
+ data = fw->data[(i * 2) + 1] << 8 | fw->data[i * 2];
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i);
+ dw_pcie_writel_dbi(dw, PRTLGC90, data);
+ if (!rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30)))
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(17), BIT(17));
+
+ for (i = 0; i < ARRAY_SIZE(check_addr); i++) {
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, check_addr[i]);
+ ret = rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30));
+ ret |= rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC90, BIT(0));
+ if (!ret)
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+exit:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+ int ret;
+
+ if (!enable) {
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+ }
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_FORCE);
+ val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+ dw_pcie_writel_dbi(dw, PCIE_PORT_FORCE, val);
+
+ val = readl(rcar->base + PCIEMSR0);
+ val |= APP_SRIS_MODE;
+ writel(val, rcar->base + PCIEMSR0);
+
+	/*
+	 * The R-Car Gen4 datasheet doesn't name the PHY registers, but the
+	 * initialization procedure does list these offsets, so the driver
+	 * uses them as magic numbers.
+	 */
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(28), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(20), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(12), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(4), 0);
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(19), BIT(19));
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_HOLD_PHY_RST;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = rcar_gen4_pcie_download_phy_firmware(rcar);
+ if (ret)
+ return ret;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val |= APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie_ep = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie_ep = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id rcar_gen4_pcie_of_match[] = {
{
+ .compatible = "renesas,r8a779f0-pcie",
+ .data = &drvdata_r8a779f0_pcie,
+ },
+ {
+ .compatible = "renesas,r8a779f0-pcie-ep",
+ .data = &drvdata_r8a779f0_pcie_ep,
+ },
+ {
.compatible = "renesas,rcar-gen4-pcie",
- .data = (void *)DW_PCIE_RC_TYPE,
+ .data = &drvdata_rcar_gen4_pcie,
},
{
.compatible = "renesas,rcar-gen4-pcie-ep",
- .data = (void *)DW_PCIE_EP_TYPE,
+ .data = &drvdata_rcar_gen4_pcie_ep,
},
{},
};
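The hunks above replace the bare DW_PCIE_*_TYPE match data with per-SoC rcar_gen4_pcie_drvdata structures, so probe looks up the drvdata once and calls optional hooks only when a given SoC provides them. A minimal sketch of that pattern follows; the example_* names are hypothetical and the struct layouts are abridged, so this is an illustration of the dispatch, not the driver's exact code.

/* Abridged sketch of the drvdata-hook dispatch; example_* names are
 * hypothetical and the struct layouts are simplified.
 */
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_pcie;

struct example_pcie_drvdata {
	/* optional per-SoC hooks; a NULL hook is simply skipped */
	void (*additional_common_init)(struct example_pcie *pcie);
	int (*ltssm_control)(struct example_pcie *pcie, bool enable);
	int mode;
};

struct example_pcie {
	const struct example_pcie_drvdata *drvdata;
};

static int example_start_link(struct example_pcie *pcie)
{
	/* only SoCs that provide the hook get LTSSM handling here */
	if (pcie->drvdata->ltssm_control)
		return pcie->drvdata->ltssm_control(pcie, true);

	return 0;
}

static int example_probe(struct platform_device *pdev, struct example_pcie *pcie)
{
	/* the OF match data is now a pointer to per-SoC drvdata */
	pcie->drvdata = of_device_get_match_data(&pdev->dev);
	if (!pcie->drvdata)
		return -EINVAL;

	if (pcie->drvdata->additional_common_init)
		pcie->drvdata->additional_common_init(pcie);

	return example_start_link(pcie);
}

Keeping the mode inside the drvdata also lets new SoC variants (such as the r8a779f0 entries added above) supply their own LTSSM or init hooks without touching the common probe path.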
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 93f5433c5c55..4bf7b433417a 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
@@ -21,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -308,10 +306,6 @@ static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
return readl_relaxed(pcie->appl_base + reg);
}
-struct tegra_pcie_soc {
- enum dw_pcie_device_mode mode;
-};
-
static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
@@ -1715,6 +1709,7 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
if (ret)
dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+ pci_epc_deinit_notify(pcie->pci.ep.epc);
dw_pcie_ep_cleanup(&pcie->pci.ep);
reset_control_assert(pcie->core_rst);
@@ -1903,7 +1898,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
goto fail_init_complete;
}
- dw_pcie_ep_init_notify(ep);
+ pci_epc_init_notify(ep->epc);
/* Program the private control to allow sending LTR upstream */
if (pcie->of_data->has_ltr_req_fix) {
@@ -2015,6 +2010,7 @@ static const struct pci_epc_features tegra_pcie_epc_features = {
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .type = BAR_RESERVED, },
.bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index a2b844268e28..d6e73811216e 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -410,7 +410,7 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
return ret;
}
- dw_pcie_ep_init_notify(&priv->pci.ep);
+ pci_epc_init_notify(priv->pci.ep.epc);
return 0;
}
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index d7b7350f02dd..5af22bee913b 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -190,7 +190,7 @@ static void ls_g4_pcie_reset(struct work_struct *work)
ls_g4_pcie_enable_interrupt(pcie);
}
-static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
+static const struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
.interrupt_init = ls_g4_pcie_interrupt_init,
};
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index 6082b8afbc31..e63abb887ee3 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -151,7 +151,7 @@ struct mobiveil_rp_ops {
struct mobiveil_root_port {
void __iomem *config_axi_slave_base; /* endpoint config base */
struct resource *ob_io_res;
- struct mobiveil_rp_ops *ops;
+ const struct mobiveil_rp_ops *ops;
int irq;
raw_spinlock_t intx_mask_lock;
struct irq_domain *intx_domain;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 71ecd7ddcc8a..8b3e1a079cf3 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -23,7 +23,6 @@
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include "../pci.h"
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index 45b71806182d..cf5f59a745b3 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -73,10 +73,6 @@ int pci_host_common_probe(struct platform_device *pdev)
if (IS_ERR(cfg))
return PTR_ERR(cfg);
- /* Do not reassign resources if probe only */
- if (!pci_has_flag(PCI_PROBE_ONLY))
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
bridge->msi_domain = true;
@@ -96,4 +92,5 @@ void pci_host_common_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(pci_host_common_remove);
+MODULE_DESCRIPTION("Generic PCI host common driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 41cb6a057f6e..5f06f94db7b1 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -86,4 +86,5 @@ static struct platform_driver gen_pci_driver = {
};
module_platform_driver(gen_pci_driver);
+MODULE_DESCRIPTION("Generic PCI host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 5992280e8110..cdd5be16021d 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1130,8 +1130,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
PCI_CAPABILITY_LIST) {
/* ROM BARs are unimplemented */
*val = 0;
- } else if (where >= PCI_INTERRUPT_LINE && where + size <=
- PCI_INTERRUPT_PIN) {
+ } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
+ (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
/*
* Interrupt Line and Interrupt PIN are hard-wired to zero
* because this front-end only supports message-signaled
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 8b34ccff073a..bc630ab8a283 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -163,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+static void loongson_pci_msi_quirk(struct pci_dev *dev)
+{
+ u16 val, class = dev->class >> 8;
+
+ if (class != PCI_CLASS_BRIDGE_HOST)
+ return;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk);
+
static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
{
struct pci_config_window *cfg;
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 6ad5427490b5..16336a525c16 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -290,4 +290,5 @@ static void __exit altera_msi_exit(void)
subsys_initcall(altera_msi_init);
MODULE_DEVICE_TABLE(of, altera_msi_of_match);
module_exit(altera_msi_exit);
+MODULE_DESCRIPTION("Altera PCIe MSI support driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index a9536dc4bf96..ef73baefaeb9 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -826,4 +826,5 @@ static struct platform_driver altera_pcie_driver = {
MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
module_platform_driver(altera_pcie_driver);
+MODULE_DESCRIPTION("Altera PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index f7a248393a8f..fefab2758a06 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -839,4 +839,5 @@ static struct platform_driver apple_pcie_driver = {
};
module_platform_driver(apple_pcie_driver);
+MODULE_DESCRIPTION("Apple PCIe host bridge driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 975b3024fb08..b7e8e24f6a40 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -1091,4 +1091,5 @@ static struct platform_driver mtk_pcie_driver = {
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 48372013f26d..7fc0d7709b7f 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1252,4 +1252,5 @@ static struct platform_driver mtk_pcie_driver = {
},
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index d97b956e6e57..9b4754a45515 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -549,4 +549,5 @@ static struct platform_driver mt7621_pcie_driver = {
};
builtin_platform_driver(mt7621_pcie_driver);
+MODULE_DESCRIPTION("MediaTek MT7621 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 996077ab7cfd..c01efc6ea64f 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -78,7 +78,11 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
writel(L1IATN, pcie_base + PMCTLR);
ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
val & L1FAEG, 10, 1000);
- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ if (ret) {
+ dev_warn_ratelimited(pcie_dev,
+ "Timeout waiting for L1 link state, ret=%d\n",
+ ret);
+ }
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
}
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 300b9dc85ecc..cbec71114825 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -322,8 +322,11 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+ msleep(PCIE_T_RRS_READY_MS);
+
/* 500ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
status, PCIE_LINK_UP(status), 20,
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index 0ef2e622d36e..c07d7129f1c7 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -121,7 +121,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->is_rc) {
rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(rockchip->ep_gpio))
return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
"failed to get ep GPIO\n");
diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig
new file mode 100644
index 000000000000..c0e14146d7e4
--- /dev/null
+++ b/drivers/pci/controller/plda/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "PLDA-based PCIe controllers"
+ depends on PCI
+
+config PCIE_PLDA_HOST
+ bool
+
+config PCIE_MICROCHIP_HOST
+ tristate "Microchip AXI PCIe controller"
+ depends on PCI_MSI && OF
+ select PCI_HOST_COMMON
+ select PCIE_PLDA_HOST
+ help
+	  Say Y here if you want the kernel to support the Microchip AXI PCIe
+	  Host Bridge driver.
+
+config PCIE_STARFIVE_HOST
+ tristate "StarFive PCIe host controller"
+ depends on PCI_MSI && OF
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ select PCIE_PLDA_HOST
+ help
+ Say Y here if you want to support the StarFive PCIe controller in
+	  host mode. The StarFive PCIe controller uses the PLDA PCIe core.
+
+	  If you choose to build this driver as a module, it will be dynamically
+	  linked and the module will be called pcie-starfive.ko.
+
+endmenu
diff --git a/drivers/pci/controller/plda/Makefile b/drivers/pci/controller/plda/Makefile
new file mode 100644
index 000000000000..0ac6851bed48
--- /dev/null
+++ b/drivers/pci/controller/plda/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_PLDA_HOST) += pcie-plda-host.o
+obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
+obj-$(CONFIG_PCIE_STARFIVE_HOST) += pcie-starfive.o
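The rename below turns pcie-microchip-host.c into a consumer of the shared PLDA core: the SoC driver embeds struct plda_pcie_rp in its private struct and recovers it with container_of() in callbacks that only receive the generic root-port pointer. A minimal sketch of that embedding pattern follows, with abridged, hypothetical struct layouts.

/* Abridged sketch of the container_of() embedding used by the refactor;
 * the struct layouts are hypothetical, only the pattern matches the driver.
 */
#include <linux/container_of.h>
#include <linux/io.h>

struct plda_rp_example {
	void __iomem *bridge_addr;	/* generic PLDA bridge registers */
};

struct mc_example {
	struct plda_rp_example plda;	/* embedded by value, not a pointer */
	void __iomem *axi_base_addr;	/* Microchip-specific register block */
};

static u32 example_get_events(struct plda_rp_example *port)
{
	struct mc_example *mc = container_of(port, struct mc_example, plda);

	/* SoC-specific event registers hang off the private struct */
	return readl_relaxed(mc->axi_base_addr);
}

Because the PLDA callbacks see only the generic pointer, this is how mc_get_events() and the event irq_chip handlers in the diff below reach the Microchip-only register block.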
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
index 137fb8570ba2..48f60a04b740 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -18,10 +18,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-#include "../pci.h"
-
-/* Number of MSI IRQs */
-#define MC_MAX_NUM_MSI_IRQS 32
+#include "../../pci.h"
+#include "pcie-plda.h"
/* PCIe Bridge Phy and Controller Phy offsets */
#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
@@ -30,84 +28,6 @@
#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)
-/* PCIe Bridge Phy Regs */
-#define PCIE_PCI_IRQ_DW0 0xa8
-#define MSIX_CAP_MASK BIT(31)
-#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
-#define NUM_MSI_MSGS_SHIFT 4
-
-#define IMASK_LOCAL 0x180
-#define DMA_END_ENGINE_0_MASK 0x00000000u
-#define DMA_END_ENGINE_0_SHIFT 0
-#define DMA_END_ENGINE_1_MASK 0x00000000u
-#define DMA_END_ENGINE_1_SHIFT 1
-#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
-#define DMA_ERROR_ENGINE_0_SHIFT 8
-#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
-#define DMA_ERROR_ENGINE_1_SHIFT 9
-#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
-#define A_ATR_EVT_POST_ERR_SHIFT 16
-#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
-#define A_ATR_EVT_FETCH_ERR_SHIFT 17
-#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
-#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
-#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define A_ATR_EVT_DOORBELL_SHIFT 19
-#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
-#define P_ATR_EVT_POST_ERR_SHIFT 20
-#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
-#define P_ATR_EVT_FETCH_ERR_SHIFT 21
-#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
-#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
-#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define P_ATR_EVT_DOORBELL_SHIFT 23
-#define PM_MSI_INT_INTA_MASK 0x01000000u
-#define PM_MSI_INT_INTA_SHIFT 24
-#define PM_MSI_INT_INTB_MASK 0x02000000u
-#define PM_MSI_INT_INTB_SHIFT 25
-#define PM_MSI_INT_INTC_MASK 0x04000000u
-#define PM_MSI_INT_INTC_SHIFT 26
-#define PM_MSI_INT_INTD_MASK 0x08000000u
-#define PM_MSI_INT_INTD_SHIFT 27
-#define PM_MSI_INT_INTX_MASK 0x0f000000u
-#define PM_MSI_INT_INTX_SHIFT 24
-#define PM_MSI_INT_MSI_MASK 0x10000000u
-#define PM_MSI_INT_MSI_SHIFT 28
-#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
-#define PM_MSI_INT_AER_EVT_SHIFT 29
-#define PM_MSI_INT_EVENTS_MASK 0x40000000u
-#define PM_MSI_INT_EVENTS_SHIFT 30
-#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
-#define PM_MSI_INT_SYS_ERR_SHIFT 31
-#define NUM_LOCAL_EVENTS 15
-#define ISTATUS_LOCAL 0x184
-#define IMASK_HOST 0x188
-#define ISTATUS_HOST 0x18c
-#define IMSI_ADDR 0x190
-#define ISTATUS_MSI 0x194
-
-/* PCIe Master table init defines */
-#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
-#define ATR0_PCIE_ATR_SIZE 0x25
-#define ATR0_PCIE_ATR_SIZE_SHIFT 1
-#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
-#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
-
-/* PCIe AXI slave table init defines */
-#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
-#define ATR_SIZE_SHIFT 1
-#define ATR_IMPL_ENABLE 1
-#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
-#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
-#define PCIE_TX_RX_INTERFACE 0x00000000u
-#define PCIE_CONFIG_INTERFACE 0x00000001u
-
-#define ATR_ENTRY_SIZE 32
-
/* PCIe Controller Phy Regs */
#define SEC_ERROR_EVENT_CNT 0x20
#define DED_ERROR_EVENT_CNT 0x24
@@ -179,20 +99,21 @@
#define EVENT_LOCAL_DMA_END_ENGINE_1 12
#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
-#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
-#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
-#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
-#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
-#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
-#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
-#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
-#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
-#define EVENT_LOCAL_PM_MSI_INT_INTX 23
-#define EVENT_LOCAL_PM_MSI_INT_MSI 24
-#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
-#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
-#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
-#define NUM_EVENTS 28
+#define NUM_MC_EVENTS 15
+#define EVENT_LOCAL_A_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_AXI_POST_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_AXI_FETCH_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_AXI_DISCARD_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_AXI_DOORBELL)
+#define EVENT_LOCAL_P_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_PCIE_POST_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_PCIE_FETCH_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_PCIE_DISCARD_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_PCIE_DOORBELL)
+#define EVENT_LOCAL_PM_MSI_INT_INTX (NUM_MC_EVENTS + PLDA_INTX)
+#define EVENT_LOCAL_PM_MSI_INT_MSI (NUM_MC_EVENTS + PLDA_MSI)
+#define EVENT_LOCAL_PM_MSI_INT_AER_EVT (NUM_MC_EVENTS + PLDA_AER_EVENT)
+#define EVENT_LOCAL_PM_MSI_INT_EVENTS (NUM_MC_EVENTS + PLDA_MISC_EVENTS)
+#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR (NUM_MC_EVENTS + PLDA_SYS_ERR)
+#define NUM_EVENTS (NUM_MC_EVENTS + PLDA_INT_EVENT_NUM)
#define PCIE_EVENT_CAUSE(x, s) \
[EVENT_PCIE_ ## x] = { __stringify(x), s }
@@ -255,22 +176,10 @@ struct event_map {
u32 event_bit;
};
-struct mc_msi {
- struct mutex lock; /* Protect used bitmap */
- struct irq_domain *msi_domain;
- struct irq_domain *dev_domain;
- u32 num_vectors;
- u64 vector_phy;
- DECLARE_BITMAP(used, MC_MAX_NUM_MSI_IRQS);
-};
struct mc_pcie {
+ struct plda_pcie_rp plda;
void __iomem *axi_base_addr;
- struct device *dev;
- struct irq_domain *intx_domain;
- struct irq_domain *event_domain;
- raw_spinlock_t lock;
- struct mc_msi msi;
};
struct cause {
@@ -388,7 +297,7 @@ static struct mc_pcie *port;
static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
{
- struct mc_msi *msi = &port->msi;
+ struct plda_msi *msi = &port->plda.msi;
u16 reg;
u8 queue_size;
@@ -409,246 +318,6 @@ static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
}
-static void mc_handle_msi(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- struct mc_msi *msi = &port->msi;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_MSI_MASK) {
- writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
- status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
- for_each_set_bit(bit, &status, msi->num_vectors) {
- ret = generic_handle_domain_irq(msi->dev_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_msi_bottom_irq_ack(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 bitpos = data->hwirq;
-
- writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
-}
-
-static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- phys_addr_t addr = port->msi.vector_phy;
-
- msg->address_lo = lower_32_bits(addr);
- msg->address_hi = upper_32_bits(addr);
- msg->data = data->hwirq;
-
- dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
- (int)data->hwirq, msg->address_hi, msg->address_lo);
-}
-
-static int mc_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
-static struct irq_chip mc_msi_bottom_irq_chip = {
- .name = "Microchip MSI",
- .irq_ack = mc_msi_bottom_irq_ack,
- .irq_compose_msi_msg = mc_compose_msi_msg,
- .irq_set_affinity = mc_msi_set_affinity,
-};
-
-static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *args)
-{
- struct mc_pcie *port = domain->host_data;
- struct mc_msi *msi = &port->msi;
- unsigned long bit;
-
- mutex_lock(&msi->lock);
- bit = find_first_zero_bit(msi->used, msi->num_vectors);
- if (bit >= msi->num_vectors) {
- mutex_unlock(&msi->lock);
- return -ENOSPC;
- }
-
- set_bit(bit, msi->used);
-
- irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
- domain->host_data, handle_edge_irq, NULL, NULL);
-
- mutex_unlock(&msi->lock);
-
- return 0;
-}
-
-static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct mc_pcie *port = irq_data_get_irq_chip_data(d);
- struct mc_msi *msi = &port->msi;
-
- mutex_lock(&msi->lock);
-
- if (test_bit(d->hwirq, msi->used))
- __clear_bit(d->hwirq, msi->used);
- else
- dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
-
- mutex_unlock(&msi->lock);
-}
-
-static const struct irq_domain_ops msi_domain_ops = {
- .alloc = mc_irq_msi_domain_alloc,
- .free = mc_irq_msi_domain_free,
-};
-
-static struct irq_chip mc_msi_irq_chip = {
- .name = "Microchip PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info mc_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &mc_msi_irq_chip,
-};
-
-static int mc_allocate_msi_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
- struct mc_msi *msi = &port->msi;
-
- mutex_init(&port->msi.lock);
-
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
- &msi_domain_ops, port);
- if (!msi->dev_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void mc_handle_intx(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_INTX_MASK) {
- status &= PM_MSI_INT_INTX_MASK;
- status >>= PM_MSI_INT_INTX_SHIFT;
- for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- ret = generic_handle_domain_irq(port->intx_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_ack_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
-
- writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
-}
-
-static void mc_mask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val &= ~mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void mc_unmask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val |= mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static struct irq_chip mc_intx_irq_chip = {
- .name = "Microchip PCIe INTx",
- .irq_ack = mc_ack_intx_irq,
- .irq_mask = mc_mask_intx_irq,
- .irq_unmask = mc_unmask_intx_irq,
-};
-
-static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops intx_domain_ops = {
- .map = mc_pcie_intx_map,
-};
-
static inline u32 reg_to_event(u32 reg, struct event_map field)
{
return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
@@ -706,21 +375,22 @@ static u32 local_events(struct mc_pcie *port)
return val;
}
-static u32 get_events(struct mc_pcie *port)
+static u32 mc_get_events(struct plda_pcie_rp *port)
{
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
u32 events = 0;
- events |= pcie_events(port);
- events |= sec_errors(port);
- events |= ded_errors(port);
- events |= local_events(port);
+ events |= pcie_events(mc_port);
+ events |= sec_errors(mc_port);
+ events |= ded_errors(mc_port);
+ events |= local_events(mc_port);
return events;
}
static irqreturn_t mc_event_handler(int irq, void *dev_id)
{
- struct mc_pcie *port = dev_id;
+ struct plda_pcie_rp *port = dev_id;
struct device *dev = port->dev;
struct irq_data *data;
@@ -734,31 +404,15 @@ static irqreturn_t mc_event_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mc_handle_event(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- unsigned long events;
- u32 bit;
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- chained_irq_enter(chip, desc);
-
- events = get_events(port);
-
- for_each_set_bit(bit, &events, NUM_EVENTS)
- generic_handle_domain_irq(port->event_domain, bit);
-
- chained_irq_exit(chip, desc);
-}
-
static void mc_ack_event_irq(struct irq_data *data)
{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
u32 event = data->hwirq;
void __iomem *addr;
u32 mask;
- addr = port->axi_base_addr + event_descs[event].base +
+ addr = mc_port->axi_base_addr + event_descs[event].base +
event_descs[event].offset;
mask = event_descs[event].mask;
mask |= event_descs[event].enb_mask;
@@ -768,13 +422,14 @@ static void mc_ack_event_irq(struct irq_data *data)
static void mc_mask_event_irq(struct irq_data *data)
{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
u32 event = data->hwirq;
void __iomem *addr;
u32 mask;
u32 val;
- addr = port->axi_base_addr + event_descs[event].base +
+ addr = mc_port->axi_base_addr + event_descs[event].base +
event_descs[event].mask_offset;
mask = event_descs[event].mask;
if (event_descs[event].enb_mask) {
@@ -798,13 +453,14 @@ static void mc_mask_event_irq(struct irq_data *data)
static void mc_unmask_event_irq(struct irq_data *data)
{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
u32 event = data->hwirq;
void __iomem *addr;
u32 mask;
u32 val;
- addr = port->axi_base_addr + event_descs[event].base +
+ addr = mc_port->axi_base_addr + event_descs[event].base +
event_descs[event].mask_offset;
mask = event_descs[event].mask;
@@ -834,19 +490,6 @@ static struct irq_chip mc_event_irq_chip = {
.irq_unmask = mc_unmask_event_irq,
};
-static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops event_domain_ops = {
- .map = mc_pcie_event_map,
-};
-
static inline void mc_pcie_deinit_clk(void *data)
{
struct clk *clk = data;
@@ -892,105 +535,22 @@ static int mc_pcie_init_clks(struct device *dev)
return 0;
}
-static int mc_pcie_init_irq_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct device_node *node = dev->of_node;
- struct device_node *pcie_intc_node;
-
- /* Setup INTx */
- pcie_intc_node = of_get_next_child(node, NULL);
- if (!pcie_intc_node) {
- dev_err(dev, "failed to find PCIe Intc node\n");
- return -EINVAL;
- }
-
- port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
- &event_domain_ops, port);
- if (!port->event_domain) {
- dev_err(dev, "failed to get event domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
-
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
- if (!port->intx_domain) {
- dev_err(dev, "failed to get an INTx IRQ domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
-
- of_node_put(pcie_intc_node);
- raw_spin_lock_init(&port->lock);
-
- return mc_allocate_msi_domains(port);
-}
-
-static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
- phys_addr_t axi_addr, phys_addr_t pci_addr,
- size_t size)
+static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq,
+ int event)
{
- u32 atr_sz = ilog2(size) - 1;
- u32 val;
-
- if (index == 0)
- val = PCIE_CONFIG_INTERFACE;
- else
- val = PCIE_TX_RX_INTERFACE;
-
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_PARAM);
-
- val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
- ATR_IMPL_ENABLE;
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRCADDR_PARAM);
-
- val = upper_32_bits(axi_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRC_ADDR);
-
- val = lower_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
-
- val = upper_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
-
- val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
- writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+ return devm_request_irq(plda->dev, event_irq, mc_event_handler,
+ 0, event_cause[event].sym, plda);
}
-static int mc_pcie_setup_windows(struct platform_device *pdev,
- struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
- struct resource_entry *entry;
- u64 pci_addr;
- u32 index = 1;
-
- resource_list_for_each_entry(entry, &bridge->windows) {
- if (resource_type(entry->res) == IORESOURCE_MEM) {
- pci_addr = entry->res->start - entry->offset;
- mc_pcie_setup_window(bridge_base_addr, index,
- entry->res->start, pci_addr,
- resource_size(entry->res));
- index++;
- }
- }
+static const struct plda_event_ops mc_event_ops = {
+ .get_events = mc_get_events,
+};
- return 0;
-}
+static const struct plda_event mc_event = {
+ .request_event_irq = mc_request_event_irq,
+ .intx_event = EVENT_LOCAL_PM_MSI_INT_INTX,
+ .msi_event = EVENT_LOCAL_PM_MSI_INT_MSI,
+};
static inline void mc_clear_secs(struct mc_pcie *port)
{
@@ -1052,85 +612,34 @@ static void mc_disable_interrupts(struct mc_pcie *port)
writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
}
-static int mc_init_interrupts(struct platform_device *pdev, struct mc_pcie *port)
-{
- struct device *dev = &pdev->dev;
- int irq;
- int i, intx_irq, msi_irq, event_irq;
- int ret;
-
- ret = mc_pcie_init_irq_domains(port);
- if (ret) {
- dev_err(dev, "failed creating IRQ domains\n");
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
-
- for (i = 0; i < NUM_EVENTS; i++) {
- event_irq = irq_create_mapping(port->event_domain, i);
- if (!event_irq) {
- dev_err(dev, "failed to map hwirq %d\n", i);
- return -ENXIO;
- }
-
- ret = devm_request_irq(dev, event_irq, mc_event_handler,
- 0, event_cause[i].sym, port);
- if (ret) {
- dev_err(dev, "failed to request IRQ %d\n", event_irq);
- return ret;
- }
- }
-
- intx_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_INTX);
- if (!intx_irq) {
- dev_err(dev, "failed to map INTx interrupt\n");
- return -ENXIO;
- }
-
- /* Plug the INTx chained handler */
- irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);
-
- msi_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_MSI);
- if (!msi_irq)
- return -ENXIO;
-
- /* Plug the MSI chained handler */
- irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);
-
- /* Plug the main event chained handler */
- irq_set_chained_handler_and_data(irq, mc_handle_event, port);
-
- return 0;
-}
-
static int mc_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
int ret;
/* Configure address translation table 0 for PCIe config space */
- mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
- cfg->res.start,
- resource_size(&cfg->res));
+ plda_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
+ cfg->res.start,
+ resource_size(&cfg->res));
/* Need some fixups in config space */
mc_pcie_enable_msi(port, cfg->win);
/* Configure non-config space outbound ranges */
- ret = mc_pcie_setup_windows(pdev, port);
+ ret = plda_pcie_setup_iomems(bridge, &port->plda);
if (ret)
return ret;
+ port->plda.event_ops = &mc_event_ops;
+ port->plda.event_irq_chip = &mc_event_irq_chip;
+ port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
+
/* Address translation is up; safe to enable interrupts */
- ret = mc_init_interrupts(pdev, port);
+ ret = plda_init_interrupts(pdev, &port->plda, &mc_event);
if (ret)
return ret;
@@ -1141,6 +650,7 @@ static int mc_host_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
void __iomem *bridge_base_addr;
+ struct plda_pcie_rp *plda;
int ret;
u32 val;
@@ -1148,7 +658,8 @@ static int mc_host_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
- port->dev = dev;
+ plda = &port->plda;
+ plda->dev = dev;
port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(port->axi_base_addr))
@@ -1157,6 +668,8 @@ static int mc_host_probe(struct platform_device *pdev)
mc_disable_interrupts(port);
bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
+ plda->bridge_addr = bridge_base_addr;
+ plda->num_events = NUM_EVENTS;
/* Allow enabling MSI by disabling MSI-X */
val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
@@ -1168,10 +681,10 @@ static int mc_host_probe(struct platform_device *pdev)
val &= NUM_MSI_MSGS_MASK;
val >>= NUM_MSI_MSGS_SHIFT;
- port->msi.num_vectors = 1 << val;
+ plda->msi.num_vectors = 1 << val;
/* Pick vector address from design */
- port->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);
+ plda->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);
ret = mc_pcie_init_clks(dev);
if (ret) {
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
new file mode 100644
index 000000000000..a18923d7cea6
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLDA PCIe XpressRich host controller driver
+ *
+ * Copyright (C) 2023 Microchip Co. Ltd
+ * StarFive Co. Ltd
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/pci_regs.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-plda.h"
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct plda_pcie_rp *pcie = bus->sysdata;
+
+ return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_map_bus);
+
+static void plda_handle_msi(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ struct plda_msi *msi = &port->msi;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_MSI_MASK) {
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK,
+ bridge_base_addr + ISTATUS_LOCAL);
+ status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
+ for_each_set_bit(bit, &status, msi->num_vectors) {
+ ret = generic_handle_domain_irq(msi->dev_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 bitpos = data->hwirq;
+
+ writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
+}
+
+static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = port->msi.vector_phy;
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int plda_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip plda_msi_bottom_irq_chip = {
+ .name = "PLDA MSI",
+ .irq_ack = plda_msi_bottom_irq_ack,
+ .irq_compose_msi_msg = plda_compose_msi_msg,
+ .irq_set_affinity = plda_msi_set_affinity,
+};
+
+static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct plda_pcie_rp *port = domain->host_data;
+ struct plda_msi *msi = &port->msi;
+ unsigned long bit;
+
+ mutex_lock(&msi->lock);
+ bit = find_first_zero_bit(msi->used, msi->num_vectors);
+ if (bit >= msi->num_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq, NULL, NULL);
+
+ mutex_unlock(&msi->lock);
+
+ return 0;
+}
+
+static void plda_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
+ struct plda_msi *msi = &port->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (test_bit(d->hwirq, msi->used))
+ __clear_bit(d->hwirq, msi->used);
+ else
+ dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = plda_irq_msi_domain_alloc,
+ .free = plda_irq_msi_domain_free,
+};
+
+static struct irq_chip plda_msi_irq_chip = {
+ .name = "PLDA PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info plda_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &plda_msi_irq_chip,
+};
+
+static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct plda_msi *msi = &port->msi;
+
+ mutex_init(&port->msi.lock);
+
+ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
+ &msi_domain_ops, port);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &plda_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void plda_handle_intx(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_INTX_MASK) {
+ status &= PM_MSI_INT_INTX_MASK;
+ status >>= PM_MSI_INT_INTX_SHIFT;
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+ ret = generic_handle_domain_irq(port->intx_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_ack_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+
+ writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void plda_unmask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip plda_intx_irq_chip = {
+ .name = "PLDA PCIe INTx",
+ .irq_ack = plda_ack_intx_irq,
+ .irq_mask = plda_mask_intx_irq,
+ .irq_unmask = plda_unmask_intx_irq,
+};
+
+static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = plda_pcie_intx_map,
+};
+
+static u32 plda_get_events(struct plda_pcie_rp *port)
+{
+ u32 events, val, origin;
+
+ origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);
+
+ /* MSI event and sys events */
+ val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
+ events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);
+
+ /* INTx events */
+ if (origin & PM_MSI_INT_INTX_MASK)
+ events |= BIT(PM_MSI_INT_INTX_SHIFT);
+
+	/* the remaining events map one-to-one to the register bits */
+ events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);
+
+ return events;
+}
+
+static irqreturn_t plda_event_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static void plda_handle_event(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ unsigned long events;
+ u32 bit;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ events = port->event_ops->get_events(port);
+
+ events &= port->events_bitmap;
+ for_each_set_bit(bit, &events, port->num_events)
+ generic_handle_domain_irq(port->event_domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+static u32 plda_hwirq_to_mask(int hwirq)
+{
+ u32 mask;
+
+	/* hwirqs 0 - 23 map one-to-one to the register bits */
+ if (hwirq < EVENT_PM_MSI_INT_INTX)
+ mask = BIT(hwirq);
+ else if (hwirq == EVENT_PM_MSI_INT_INTX)
+ mask = PM_MSI_INT_INTX_MASK;
+ else
+ mask = BIT(hwirq + PCI_NUM_INTX - 1);
+
+ return mask;
+}
+
+static void plda_ack_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+
+ writel_relaxed(plda_hwirq_to_mask(data->hwirq),
+ port->bridge_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static void plda_unmask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip plda_event_irq_chip = {
+ .name = "PLDA PCIe EVENT",
+ .irq_ack = plda_ack_event_irq,
+ .irq_mask = plda_mask_event_irq,
+ .irq_unmask = plda_unmask_event_irq,
+};
+
+static const struct plda_event_ops plda_event_ops = {
+ .get_events = plda_get_events,
+};
+
+static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct plda_pcie_rp *port = (void *)domain->host_data;
+
+ irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops plda_event_domain_ops = {
+ .map = plda_pcie_event_map,
+};
+
+static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "failed to find PCIe Intc node\n");
+ return -EINVAL;
+ }
+
+ port->event_domain = irq_domain_add_linear(pcie_intc_node,
+ port->num_events,
+ &plda_event_domain_ops,
+ port);
+ if (!port->event_domain) {
+ dev_err(dev, "failed to get event domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return plda_allocate_msi_domains(port);
+}
+
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event)
+{
+ struct device *dev = &pdev->dev;
+ int event_irq, ret;
+ u32 i;
+
+ if (!port->event_ops)
+ port->event_ops = &plda_event_ops;
+
+ if (!port->event_irq_chip)
+ port->event_irq_chip = &plda_event_irq_chip;
+
+ ret = plda_pcie_init_irq_domains(port);
+ if (ret) {
+ dev_err(dev, "failed creating IRQ domains\n");
+ return ret;
+ }
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return -ENODEV;
+
+ for_each_set_bit(i, &port->events_bitmap, port->num_events) {
+ event_irq = irq_create_mapping(port->event_domain, i);
+ if (!event_irq) {
+ dev_err(dev, "failed to map hwirq %d\n", i);
+ return -ENXIO;
+ }
+
+ if (event->request_event_irq)
+ ret = event->request_event_irq(port, event_irq, i);
+ else
+ ret = devm_request_irq(dev, event_irq,
+ plda_event_handler,
+ 0, NULL, port);
+
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %d\n", event_irq);
+ return ret;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->event_domain,
+ event->intx_event);
+ if (!port->intx_irq) {
+ dev_err(dev, "failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ /* Plug the INTx chained handler */
+ irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);
+
+ port->msi_irq = irq_create_mapping(port->event_domain,
+ event->msi_event);
+ if (!port->msi_irq)
+ return -ENXIO;
+
+ /* Plug the MSI chained handler */
+ irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);
+
+ /* Plug the main event chained handler */
+ irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_init_interrupts);
+
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size)
+{
+ u32 atr_sz = ilog2(size) - 1;
+ u32 val;
+
+ if (index == 0)
+ val = PCIE_CONFIG_INTERFACE;
+ else
+ val = PCIE_TX_RX_INTERFACE;
+
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_PARAM);
+
+ val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
+ ATR_IMPL_ENABLE;
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRCADDR_PARAM);
+
+ val = upper_32_bits(axi_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRC_ADDR);
+
+ val = lower_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
+
+ val = upper_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+
+ val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
+ writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+
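+/*
+ * Illustrative sketch (values are hypothetical, not part of this file): a
+ * 512 MB window translating AXI address 0x40000000 1:1 to PCI address
+ * 0x40000000 would be set up with
+ *
+ *	plda_pcie_setup_window(bridge_base_addr, 1, 0x40000000, 0x40000000,
+ *			       SZ_512M);
+ *
+ * which programs atr_sz = ilog2(SZ_512M) - 1 = 28 into the AXI slave source
+ * address parameter register.
+ */
+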
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ struct resource_entry *entry;
+ u64 pci_addr;
+ u32 index = 1;
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ pci_addr = entry->res->start - entry->offset;
+ plda_pcie_setup_window(bridge_base_addr, index,
+ entry->res->start, pci_addr,
+ resource_size(entry->res));
+ index++;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);
+
+static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
+{
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);
+
+ irq_domain_remove(pcie->msi.msi_domain);
+ irq_domain_remove(pcie->msi.dev_domain);
+
+ irq_domain_remove(pcie->intx_domain);
+ irq_domain_remove(pcie->event_domain);
+}
+
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event)
+{
+ struct device *dev = port->dev;
+ struct pci_host_bridge *bridge;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *cfg_res;
+ int ret;
+
+ port->bridge_addr =
+ devm_platform_ioremap_resource_byname(pdev, "apb");
+
+ if (IS_ERR(port->bridge_addr))
+ return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
+ "failed to map reg memory\n");
+
+ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!cfg_res)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get config memory\n");
+
+ port->config_base = devm_ioremap_resource(dev, cfg_res);
+ if (IS_ERR(port->config_base))
+ return dev_err_probe(dev, PTR_ERR(port->config_base),
+ "failed to map config memory\n");
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to alloc bridge\n");
+
+ if (port->host_ops && port->host_ops->host_init) {
+ ret = port->host_ops->host_init(port);
+ if (ret)
+ return ret;
+ }
+
+ port->bridge = bridge;
+ plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
+ resource_size(cfg_res));
+ plda_pcie_setup_iomems(bridge, port);
+ plda_set_default_msi(&port->msi);
+ ret = plda_init_interrupts(pdev, port, plda_event);
+ if (ret)
+ goto err_host;
+
+ /* Set default bus ops */
+ bridge->ops = ops;
+ bridge->sysdata = port;
+
+ ret = pci_host_probe(bridge);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to probe pci host\n");
+ goto err_probe;
+ }
+
+ return ret;
+
+err_probe:
+ plda_pcie_irq_domain_deinit(port);
+err_host:
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_init);
+
+void plda_pcie_host_deinit(struct plda_pcie_rp *port)
+{
+ pci_stop_root_bus(port->bridge->bus);
+ pci_remove_root_bus(port->bridge->bus);
+
+ plda_pcie_irq_domain_deinit(port);
+
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
new file mode 100644
index 000000000000..0e7dc0d8e5ba
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PLDA PCIe host controller driver
+ */
+
+#ifndef _PCIE_PLDA_H
+#define _PCIE_PLDA_H
+
+/* Number of MSI IRQs */
+#define PLDA_MAX_NUM_MSI_IRQS 32
+
+/* PCIe Bridge Phy Regs */
+#define GEN_SETTINGS 0x80
+#define RP_ENABLE 1
+#define PCIE_PCI_IDS_DW1 0x9c
+#define IDS_CLASS_CODE_SHIFT 16
+#define REVISION_ID_MASK GENMASK(7, 0)
+#define CLASS_CODE_ID_MASK GENMASK(31, 8)
+#define PCIE_PCI_IRQ_DW0 0xa8
+#define MSIX_CAP_MASK BIT(31)
+#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
+#define NUM_MSI_MSGS_SHIFT 4
+#define PCI_MISC 0xb4
+#define PHY_FUNCTION_DIS BIT(15)
+#define PCIE_WINROM 0xfc
+#define PREF_MEM_WIN_64_SUPPORT BIT(3)
+
+#define IMASK_LOCAL 0x180
+#define DMA_END_ENGINE_0_MASK 0x00000000u
+#define DMA_END_ENGINE_0_SHIFT 0
+#define DMA_END_ENGINE_1_MASK 0x00000000u
+#define DMA_END_ENGINE_1_SHIFT 1
+#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
+#define DMA_ERROR_ENGINE_0_SHIFT 8
+#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
+#define DMA_ERROR_ENGINE_1_SHIFT 9
+#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
+#define A_ATR_EVT_POST_ERR_SHIFT 16
+#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
+#define A_ATR_EVT_FETCH_ERR_SHIFT 17
+#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
+#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
+#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define A_ATR_EVT_DOORBELL_SHIFT 19
+#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
+#define P_ATR_EVT_POST_ERR_SHIFT 20
+#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
+#define P_ATR_EVT_FETCH_ERR_SHIFT 21
+#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
+#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
+#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define P_ATR_EVT_DOORBELL_SHIFT 23
+#define PM_MSI_INT_INTA_MASK 0x01000000u
+#define PM_MSI_INT_INTA_SHIFT 24
+#define PM_MSI_INT_INTB_MASK 0x02000000u
+#define PM_MSI_INT_INTB_SHIFT 25
+#define PM_MSI_INT_INTC_MASK 0x04000000u
+#define PM_MSI_INT_INTC_SHIFT 26
+#define PM_MSI_INT_INTD_MASK 0x08000000u
+#define PM_MSI_INT_INTD_SHIFT 27
+#define PM_MSI_INT_INTX_MASK 0x0f000000u
+#define PM_MSI_INT_INTX_SHIFT 24
+#define PM_MSI_INT_MSI_MASK 0x10000000u
+#define PM_MSI_INT_MSI_SHIFT 28
+#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
+#define PM_MSI_INT_AER_EVT_SHIFT 29
+#define PM_MSI_INT_EVENTS_MASK 0x40000000u
+#define PM_MSI_INT_EVENTS_SHIFT 30
+#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
+#define PM_MSI_INT_SYS_ERR_SHIFT 31
+#define SYS_AND_MSI_MASK GENMASK(31, 28)
+#define NUM_LOCAL_EVENTS 15
+#define ISTATUS_LOCAL 0x184
+#define IMASK_HOST 0x188
+#define ISTATUS_HOST 0x18c
+#define IMSI_ADDR 0x190
+#define ISTATUS_MSI 0x194
+#define PMSG_SUPPORT_RX 0x3f0
+#define PMSG_LTR_SUPPORT BIT(2)
+
+/* PCIe Master table init defines */
+#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
+#define ATR0_PCIE_ATR_SIZE 0x25
+#define ATR0_PCIE_ATR_SIZE_SHIFT 1
+#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
+#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
+
+/* PCIe AXI slave table init defines */
+#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
+#define ATR_SIZE_SHIFT 1
+#define ATR_IMPL_ENABLE 1
+#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
+#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
+#define PCIE_TX_RX_INTERFACE 0x00000000u
+#define PCIE_CONFIG_INTERFACE 0x00000001u
+
+#define CONFIG_SPACE_ADDR_OFFSET 0x1000u
+
+#define ATR_ENTRY_SIZE 32
+
+enum plda_int_event {
+ PLDA_AXI_POST_ERR,
+ PLDA_AXI_FETCH_ERR,
+ PLDA_AXI_DISCARD_ERR,
+ PLDA_AXI_DOORBELL,
+ PLDA_PCIE_POST_ERR,
+ PLDA_PCIE_FETCH_ERR,
+ PLDA_PCIE_DISCARD_ERR,
+ PLDA_PCIE_DOORBELL,
+ PLDA_INTX,
+ PLDA_MSI,
+ PLDA_AER_EVENT,
+ PLDA_MISC_EVENTS,
+ PLDA_SYS_ERR,
+ PLDA_INT_EVENT_NUM
+};
+
+#define PLDA_NUM_DMA_EVENTS 16
+
+#define EVENT_PM_MSI_INT_INTX (PLDA_NUM_DMA_EVENTS + PLDA_INTX)
+#define EVENT_PM_MSI_INT_MSI (PLDA_NUM_DMA_EVENTS + PLDA_MSI)
+#define PLDA_MAX_EVENT_NUM (PLDA_NUM_DMA_EVENTS + PLDA_INT_EVENT_NUM)
+
+/*
+ * PLDA interrupt register
+ *
+ * 31 27 23 15 7 0
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * |12|11|10|9| intx |7|6|5|4|3|2|1|0| DMA error | DMA end |
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * event bit
+ * 0-7 (0-7) DMA interrupt end : reserved for vendor implement
+ * 8-15 (8-15) DMA error : reserved for vendor implement
+ * 16 (16) AXI post error (PLDA_AXI_POST_ERR)
+ * 17 (17) AXI fetch error (PLDA_AXI_FETCH_ERR)
+ * 18 (18) AXI discard error (PLDA_AXI_DISCARD_ERR)
+ * 19 (19) AXI doorbell (PLDA_AXI_DOORBELL)
+ * 20 (20) PCIe post error (PLDA_PCIE_POST_ERR)
+ * 21 (21) PCIe fetch error (PLDA_PCIE_FETCH_ERR)
+ * 22 (22) PCIe discard error (PLDA_PCIE_DISCARD_ERR)
+ * 23 (23) PCIe doorbell (PLDA_PCIE_DOORBELL)
+ * 24 (27-24) INTx interrupts (PLDA_INTX)
+ * 25 (28): MSI interrupt (PLDA_MSI)
+ * 26 (29): AER event (PLDA_AER_EVENT)
+ * 27 (30): PM/LTR/Hotplug (PLDA_MISC_EVENTS)
+ * 28 (31): System error (PLDA_SYS_ERR)
+ */
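+
+/*
+ * Worked example (illustration only): EVENT_PM_MSI_INT_INTX =
+ * PLDA_NUM_DMA_EVENTS + PLDA_INTX = 16 + 8 = 24, i.e. the INTx entry in the
+ * "event" column above, while EVENT_PM_MSI_INT_MSI = 16 + 9 = 25 corresponds
+ * to register bit 28 per the same layout.
+ */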
+
+struct plda_pcie_rp;
+
+struct plda_event_ops {
+ u32 (*get_events)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_pcie_host_ops {
+ int (*host_init)(struct plda_pcie_rp *pcie);
+ void (*host_deinit)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_msi {
+ struct mutex lock; /* Protect used bitmap */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ u32 num_vectors;
+ u64 vector_phy;
+ DECLARE_BITMAP(used, PLDA_MAX_NUM_MSI_IRQS);
+};
+
+struct plda_pcie_rp {
+ struct device *dev;
+ struct pci_host_bridge *bridge;
+ struct irq_domain *intx_domain;
+ struct irq_domain *event_domain;
+ raw_spinlock_t lock;
+ struct plda_msi msi;
+ const struct plda_event_ops *event_ops;
+ const struct irq_chip *event_irq_chip;
+ const struct plda_pcie_host_ops *host_ops;
+ void __iomem *bridge_addr;
+ void __iomem *config_base;
+ unsigned long events_bitmap;
+ int irq;
+ int msi_irq;
+ int intx_irq;
+ int num_events;
+};
+
+struct plda_event {
+ int (*request_event_irq)(struct plda_pcie_rp *pcie,
+ int event_irq, int event);
+ int intx_event;
+ int msi_event;
+};
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event);
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size);
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port);
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event);
+void plda_pcie_host_deinit(struct plda_pcie_rp *pcie);
+
+static inline void plda_set_default_msi(struct plda_msi *msi)
+{
+ msi->vector_phy = IMSI_ADDR;
+ msi->num_vectors = PLDA_MAX_NUM_MSI_IRQS;
+}
+
+static inline void plda_pcie_enable_root_port(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + GEN_SETTINGS);
+ value |= RP_ENABLE;
+ writel_relaxed(value, plda->bridge_addr + GEN_SETTINGS);
+}
+
+static inline void plda_pcie_set_standard_class(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ /* set class code and reserve revision id */
+ value = readl_relaxed(plda->bridge_addr + PCIE_PCI_IDS_DW1);
+ value &= REVISION_ID_MASK;
+ value |= (PCI_CLASS_BRIDGE_PCI << IDS_CLASS_CODE_SHIFT);
+ writel_relaxed(value, plda->bridge_addr + PCIE_PCI_IDS_DW1);
+}
+
+static inline void plda_pcie_set_pref_win_64bit(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCIE_WINROM);
+ value |= PREF_MEM_WIN_64_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PCIE_WINROM);
+}
+
+static inline void plda_pcie_disable_ltr(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PMSG_SUPPORT_RX);
+ value &= ~PMSG_LTR_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PMSG_SUPPORT_RX);
+}
+
+static inline void plda_pcie_disable_func(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCI_MISC);
+ value |= PHY_FUNCTION_DIS;
+ writel_relaxed(value, plda->bridge_addr + PCI_MISC);
+}
+
+static inline void plda_pcie_write_rc_bar(struct plda_pcie_rp *plda, u64 val)
+{
+ void __iomem *addr = plda->bridge_addr + CONFIG_SPACE_ADDR_OFFSET;
+
+ writel_relaxed(lower_32_bits(val), addr + PCI_BASE_ADDRESS_0);
+ writel_relaxed(upper_32_bits(val), addr + PCI_BASE_ADDRESS_1);
+}
+#endif /* _PCIE_PLDA_H */
diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
new file mode 100644
index 000000000000..c9933ecf6833
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-starfive.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for StarFive JH7110 SoC.
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "../../pci.h"
+
+#include "pcie-plda.h"
+
+#define PCIE_FUNC_NUM 4
+
+/* system control */
+#define STG_SYSCON_PCIE0_BASE 0x48
+#define STG_SYSCON_PCIE1_BASE 0x1f8
+
+#define STG_SYSCON_AR_OFFSET 0x78
+#define STG_SYSCON_AXI4_SLVL_AR_MASK GENMASK(22, 8)
+#define STG_SYSCON_AXI4_SLVL_PHY_AR(x) FIELD_PREP(GENMASK(20, 17), x)
+#define STG_SYSCON_AW_OFFSET 0x7c
+#define STG_SYSCON_AXI4_SLVL_AW_MASK GENMASK(14, 0)
+#define STG_SYSCON_AXI4_SLVL_PHY_AW(x) FIELD_PREP(GENMASK(12, 9), x)
+#define STG_SYSCON_CLKREQ BIT(22)
+#define STG_SYSCON_CKREF_SRC_MASK GENMASK(19, 18)
+#define STG_SYSCON_RP_NEP_OFFSET 0xe8
+#define STG_SYSCON_K_RP_NEP BIT(8)
+#define STG_SYSCON_LNKSTA_OFFSET 0x170
+#define DATA_LINK_ACTIVE BIT(5)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+struct starfive_jh7110_pcie {
+ struct plda_pcie_rp plda;
+ struct reset_control *resets;
+ struct clk_bulk_data *clks;
+ struct regmap *reg_syscon;
+ struct gpio_desc *power_gpio;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
+
+ unsigned int stg_pcie_base;
+ int num_clks;
+};
+
+/*
+ * JH7110 PCIe port BAR0/1 can be configured as 64-bit prefetchable memory
+ * space. PCIe read and write requests targeting BAR0/1 are routed to the
+ * so-called 'Bridge Configuration space' described in the PLDA IP datasheet,
+ * which contains the bridge internal registers, such as the interrupt, DMA
+ * and ATU registers.
+ * JH7110 accesses the Bridge Configuration space through the local bus, and
+ * the bridge internal registers must not be reachable via DMA from EP
+ * devices. Thus, BAR0/1 are left unimplemented and are hidden here.
+ */
+static bool starfive_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ if (pci_is_root_bus(bus) && !devfn &&
+ (offset == PCI_BASE_ADDRESS_0 || offset == PCI_BASE_ADDRESS_1))
+ return true;
+
+ return false;
+}
+
+static int starfive_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where)) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_parse_dt(struct starfive_jh7110_pcie *pcie,
+ struct device *dev)
+{
+ int domain_nr;
+
+ pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
+ if (pcie->num_clks < 0)
+ return dev_err_probe(dev, pcie->num_clks,
+ "failed to get pcie clocks\n");
+
+ pcie->resets = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(pcie->resets))
+ return dev_err_probe(dev, PTR_ERR(pcie->resets),
+				       "failed to get pcie resets\n");
+
+ pcie->reg_syscon =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "starfive,stg-syscon");
+
+ if (IS_ERR(pcie->reg_syscon))
+ return dev_err_probe(dev, PTR_ERR(pcie->reg_syscon),
+ "failed to parse starfive,stg-syscon\n");
+
+ pcie->phy = devm_phy_optional_get(dev, NULL);
+ if (IS_ERR(pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(pcie->phy),
+ "failed to get pcie phy\n");
+
+	/*
+	 * The PCIe domain numbers are statically assigned in the JH7110 device
+	 * tree. Since the STG system controller defines different register
+	 * bases for PCIe RP0 and RP1, the domain number is used to identify
+	 * which controller is doing the hardware initialization.
+	 */
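+	/*
+	 * For illustration: of_get_pci_domain_nr() below reads the standard
+	 * "linux,pci-domain" DT property, so the JH7110 device tree is
+	 * expected to carry e.g. linux,pci-domain = <0> for RP0 and <1> for
+	 * RP1.
+	 */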
+ domain_nr = of_get_pci_domain_nr(dev->of_node);
+
+ if (domain_nr < 0 || domain_nr > 1)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get valid pcie domain\n");
+
+ if (domain_nr == 0)
+ pcie->stg_pcie_base = STG_SYSCON_PCIE0_BASE;
+ else
+ pcie->stg_pcie_base = STG_SYSCON_PCIE1_BASE;
+
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "perst",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->reset_gpio),
+ "failed to get perst-gpio\n");
+
+ pcie->power_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->power_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->power_gpio),
+ "failed to get power-gpio\n");
+
+ return 0;
+}
+
+static struct pci_ops starfive_pcie_ops = {
+ .map_bus = plda_pcie_map_bus,
+ .read = starfive_pcie_config_read,
+ .write = starfive_pcie_config_write,
+};
+
+static int starfive_pcie_clk_rst_init(struct starfive_jh7110_pcie *pcie)
+{
+ struct device *dev = pcie->plda.dev;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ ret = reset_control_deassert(pcie->resets);
+ if (ret) {
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ dev_err_probe(dev, ret, "failed to deassert resets\n");
+ }
+
+ return ret;
+}
+
+static void starfive_pcie_clk_rst_deinit(struct starfive_jh7110_pcie *pcie)
+{
+ reset_control_assert(pcie->resets);
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+}
+
+static bool starfive_pcie_link_up(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ int ret;
+ u32 stg_reg_val;
+
+ ret = regmap_read(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_LNKSTA_OFFSET,
+ &stg_reg_val);
+ if (ret) {
+ dev_err(pcie->plda.dev, "failed to read link status\n");
+ return false;
+ }
+
+ return !!(stg_reg_val & DATA_LINK_ACTIVE);
+}
+
+static int starfive_pcie_host_wait_for_link(struct starfive_jh7110_pcie *pcie)
+{
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (starfive_pcie_link_up(&pcie->plda)) {
+ dev_info(pcie->plda.dev, "port link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int starfive_pcie_enable_phy(struct device *dev,
+ struct starfive_jh7110_pcie *pcie)
+{
+ int ret;
+
+ if (!pcie->phy)
+ return 0;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to initialize pcie phy\n");
+
+ ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to set pcie mode\n");
+ goto err_phy_on;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to power on pcie phy\n");
+ goto err_phy_on;
+ }
+
+ return 0;
+
+err_phy_on:
+ phy_exit(pcie->phy);
+ return ret;
+}
+
+static void starfive_pcie_disable_phy(struct starfive_jh7110_pcie *pcie)
+{
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+}
+
+static void starfive_pcie_host_deinit(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+
+ starfive_pcie_clk_rst_deinit(pcie);
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 0);
+ starfive_pcie_disable_phy(pcie);
+}
+
+static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ struct device *dev = plda->dev;
+ int ret;
+ int i;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_RP_NEP_OFFSET,
+ STG_SYSCON_K_RP_NEP, STG_SYSCON_K_RP_NEP);
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CKREF_SRC_MASK,
+ FIELD_PREP(STG_SYSCON_CKREF_SRC_MASK, 2));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CLKREQ, STG_SYSCON_CLKREQ);
+
+ ret = starfive_pcie_clk_rst_init(pcie);
+ if (ret)
+ return ret;
+
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 1);
+
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+
+ /* Disable physical functions except #0 */
+ for (i = 1; i < PCIE_FUNC_NUM; i++) {
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AR(i));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AW(i));
+
+ plda_pcie_disable_func(plda);
+ }
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK, 0);
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK, 0);
+
+ plda_pcie_enable_root_port(plda);
+ plda_pcie_write_rc_bar(plda, 0);
+
+ /* PCIe PCI Standard Configuration Identification Settings. */
+ plda_pcie_set_standard_class(plda);
+
+	/*
+	 * LTR message reception is enabled by default in the "PCIe Message
+	 * Reception" register, but the forwarding ID and address are left
+	 * uninitialized. Unless LTR message forwarding is disabled here, or a
+	 * valid forwarding address is configured, the kernel will hang.
+	 * As a workaround, disable LTR message forwarding before using this
+	 * feature.
+	 */
+ plda_pcie_disable_ltr(plda);
+
+	/*
+	 * Enable 64-bit addressing for the prefetchable memory window in
+	 * JH7110. The 64-bit prefetchable address translations configured in
+	 * the ATU only take effect once the register setting below is
+	 * enabled.
+	 */
+ plda_pcie_set_pref_win_64bit(plda);
+
+	/*
+	 * Ensure that PERST has been asserted for at least 100 ms; the sleep
+	 * value is T_PVPERL from PCIe CEM spec r2.0 (Table 2-4).
+	 */
+ msleep(100);
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+
+ /*
+ * With a Downstream Port (<=5GT/s), software must wait a minimum
+ * of 100ms following exit from a conventional reset before
+ * sending a configuration request to the device.
+ */
+ msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS);
+
+ if (starfive_pcie_host_wait_for_link(pcie))
+ dev_info(dev, "port link down\n");
+
+ return 0;
+}
+
+static const struct plda_pcie_host_ops sf_host_ops = {
+ .host_init = starfive_pcie_host_init,
+ .host_deinit = starfive_pcie_host_deinit,
+};
+
+static const struct plda_event stf_pcie_event = {
+ .intx_event = EVENT_PM_MSI_INT_INTX,
+ .msi_event = EVENT_PM_MSI_INT_MSI
+};
+
+static int starfive_pcie_probe(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie;
+ struct device *dev = &pdev->dev;
+ struct plda_pcie_rp *plda;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ plda = &pcie->plda;
+ plda->dev = dev;
+
+ ret = starfive_pcie_parse_dt(pcie, dev);
+ if (ret)
+ return ret;
+
+ plda->host_ops = &sf_host_ops;
+ plda->num_events = PLDA_MAX_EVENT_NUM;
+ /* mask doorbell event */
+ plda->events_bitmap = GENMASK(PLDA_INT_EVENT_NUM - 1, 0)
+ & ~BIT(PLDA_AXI_DOORBELL)
+ & ~BIT(PLDA_PCIE_DOORBELL);
+ plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS;
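+	/*
+	 * With the plda_int_event enum from pcie-plda.h, the mask above works
+	 * out to events_bitmap = 0x1f770000, i.e. events 16-28 with the two
+	 * doorbell bits (19 and 23) cleared.
+	 */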
+ ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops,
+ &stf_pcie_event);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ platform_set_drvdata(pdev, pcie);
+
+ return 0;
+}
+
+static void starfive_pcie_remove(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie = platform_get_drvdata(pdev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ plda_pcie_host_deinit(&pcie->plda);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static int starfive_pcie_suspend_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ starfive_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int starfive_pcie_resume_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ starfive_pcie_disable_phy(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops starfive_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(starfive_pcie_suspend_noirq,
+ starfive_pcie_resume_noirq)
+};
+
+static const struct of_device_id starfive_pcie_of_match[] = {
+ { .compatible = "starfive,jh7110-pcie", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, starfive_pcie_of_match);
+
+static struct platform_driver starfive_pcie_driver = {
+ .driver = {
+ .name = "pcie-starfive",
+ .of_match_table = of_match_ptr(starfive_pcie_of_match),
+ .pm = pm_sleep_ptr(&starfive_pcie_pm_ops),
+ },
+ .probe = starfive_pcie_probe,
+ .remove_new = starfive_pcie_remove,
+};
+module_platform_driver(starfive_pcie_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 PCIe host driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 87b7856f375a..a726de0af011 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -925,6 +925,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
dev_set_msi_domain(&vmd->bus->dev,
dev_get_msi_domain(&vmd->dev->dev));
+ WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
+ "domain"), "Can't create symlink to domain\n");
+
vmd_acpi_begin();
pci_scan_child_bus(vmd->bus);
@@ -964,9 +967,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_bus_add_devices(vmd->bus);
vmd_acpi_end();
-
- WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
- "domain"), "Can't create symlink to domain\n");
return 0;
}
@@ -1042,8 +1042,8 @@ static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
+ sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_detach_resources(vmd);
@@ -1128,5 +1128,6 @@ static struct pci_driver vmd_drv = {
module_pci_driver(vmd_drv);
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Volume Management Device driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 2c562b9eaf80..3780a9f9ec00 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -4,14 +4,249 @@
#include "pci.h"
/*
- * PCI iomap devres
+ * On the state of PCI's devres implementation:
+ *
+ * The older devres API for PCI has two significant problems:
+ *
+ * 1. It is very strongly tied to the statically allocated mapping table in
+ * struct pcim_iomap_devres below. This is mostly solved in the sense of the
+ * pcim_ functions in this file providing things like ranged mapping by
+ * bypassing this table, whereas the functions that were present in the old
+ * API still enter the mapping addresses into the table for users of the old
+ * API.
+ *
+ * 2. The region-request-functions in pci.c do become managed IF the device has
+ * been enabled with pcim_enable_device() instead of pci_enable_device().
+ * This resulted in the API becoming inconsistent: Some functions have an
+ *    obviously managed counterpart (e.g., pci_iomap() <-> pcim_iomap()),
+ *    whereas others have no managed counterpart and are never managed, and
+ *    some have no counterpart but are nevertheless _sometimes_ managed
+ *    (e.g., pci_request_region()).
+ *
+ * Consequently, in the new API, region requests performed by the pcim_
+ * functions are automatically cleaned up through the devres callback
+ * pcim_addr_resource_release().
+ *
+ * Users of pcim_enable_device() + pci_*region*() are redirected in
+ * pci.c to the managed functions here in this file. This isn't exactly
+ * perfect, but the only alternative way would be to port ALL drivers
+ * using said combination to pcim_ functions.
+ *
+ * TODO:
+ * Remove the legacy table entirely once all calls to pcim_iomap_table() in
+ * the kernel have been removed.
*/
-#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
+/*
+ * Legacy struct storing addresses to whole mapped BARs.
+ */
struct pcim_iomap_devres {
- void __iomem *table[PCIM_IOMAP_MAX];
+ void __iomem *table[PCI_STD_NUM_BARS];
+};
+
+/* Used to restore the old INTx state on driver detach. */
+struct pcim_intx_devres {
+ int orig_intx;
+};
+
+enum pcim_addr_devres_type {
+ /* Default initializer. */
+ PCIM_ADDR_DEVRES_TYPE_INVALID,
+
+ /* A requested region spanning an entire BAR. */
+ PCIM_ADDR_DEVRES_TYPE_REGION,
+
+ /*
+ * A requested region spanning an entire BAR, and a mapping for
+ * the entire BAR.
+ */
+ PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING,
+
+ /*
+ * A mapping within a BAR, either spanning the whole BAR or just a
+ * range. Without a requested region.
+ */
+ PCIM_ADDR_DEVRES_TYPE_MAPPING,
+};
+
+/*
+ * This struct envelops IO or MEM addresses, i.e., mappings and region
+ * requests, because those are very frequently requested and released
+ * together.
+ */
+struct pcim_addr_devres {
+ enum pcim_addr_devres_type type;
+ void __iomem *baseaddr;
+ unsigned long offset;
+ unsigned long len;
+ int bar;
};
+static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
+{
+ memset(res, 0, sizeof(*res));
+ res->bar = -1;
+}
+
+/*
+ * The following functions, __pcim_*_region*, exist as counterparts to the
+ * versions from pci.c - which, unfortunately, can be in "hybrid mode", i.e.,
+ * sometimes managed, sometimes not.
+ *
+ * To separate the APIs cleanly, we define our own, simplified versions here.
+ */
+
+/**
+ * __pcim_request_region_range - Request a ranged region
+ * @pdev: PCI device the region belongs to
+ * @bar: BAR the range is within
+ * @offset: offset from the BAR's start address
+ * @maxlen: length in bytes, beginning at @offset
+ * @name: name associated with the request
+ * @req_flags: flags for the request, e.g., for kernel-exclusive requests
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request a range within a device's PCI BAR. Sanity check the input.
+ */
+static int __pcim_request_region_range(struct pci_dev *pdev, int bar,
+ unsigned long offset,
+ unsigned long maxlen,
+ const char *name, int req_flags)
+{
+ resource_size_t start = pci_resource_start(pdev, bar);
+ resource_size_t len = pci_resource_len(pdev, bar);
+ unsigned long dev_flags = pci_resource_flags(pdev, bar);
+
+ if (start == 0 || len == 0) /* Unused BAR. */
+ return 0;
+ if (len <= offset)
+ return -EINVAL;
+
+ start += offset;
+ len -= offset;
+
+ if (len > maxlen && maxlen != 0)
+ len = maxlen;
+
+ if (dev_flags & IORESOURCE_IO) {
+ if (!request_region(start, len, name))
+ return -EBUSY;
+ } else if (dev_flags & IORESOURCE_MEM) {
+ if (!__request_mem_region(start, len, name, req_flags))
+ return -EBUSY;
+ } else {
+ /* That's not a device we can request anything on. */
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __pcim_release_region_range(struct pci_dev *pdev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(pdev, bar);
+ resource_size_t len = pci_resource_len(pdev, bar);
+ unsigned long flags = pci_resource_flags(pdev, bar);
+
+ if (len <= offset || start == 0)
+ return;
+
+	if (len == 0 || maxlen == 0) /* This is an unused BAR. Do nothing. */
+ return;
+
+ start += offset;
+ len -= offset;
+
+ if (len > maxlen)
+ len = maxlen;
+
+ if (flags & IORESOURCE_IO)
+ release_region(start, len);
+ else if (flags & IORESOURCE_MEM)
+ release_mem_region(start, len);
+}
+
+static int __pcim_request_region(struct pci_dev *pdev, int bar,
+ const char *name, int flags)
+{
+ unsigned long offset = 0;
+ unsigned long len = pci_resource_len(pdev, bar);
+
+ return __pcim_request_region_range(pdev, bar, offset, len, name, flags);
+}
+
+static void __pcim_release_region(struct pci_dev *pdev, int bar)
+{
+ unsigned long offset = 0;
+ unsigned long len = pci_resource_len(pdev, bar);
+
+ __pcim_release_region_range(pdev, bar, offset, len);
+}
+
+static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcim_addr_devres *res = resource_raw;
+
+ switch (res->type) {
+ case PCIM_ADDR_DEVRES_TYPE_REGION:
+ __pcim_release_region(pdev, res->bar);
+ break;
+ case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
+ pci_iounmap(pdev, res->baseaddr);
+ __pcim_release_region(pdev, res->bar);
+ break;
+ case PCIM_ADDR_DEVRES_TYPE_MAPPING:
+ pci_iounmap(pdev, res->baseaddr);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct pcim_addr_devres *pcim_addr_devres_alloc(struct pci_dev *pdev)
+{
+ struct pcim_addr_devres *res;
+
+ res = devres_alloc_node(pcim_addr_resource_release, sizeof(*res),
+ GFP_KERNEL, dev_to_node(&pdev->dev));
+ if (res)
+ pcim_addr_devres_clear(res);
+ return res;
+}
+
+/* Just for consistency and readability. */
+static inline void pcim_addr_devres_free(struct pcim_addr_devres *res)
+{
+ devres_free(res);
+}
+
+/*
+ * Used by devres to identify a pcim_addr_devres.
+ */
+static int pcim_addr_resources_match(struct device *dev,
+ void *a_raw, void *b_raw)
+{
+ struct pcim_addr_devres *a, *b;
+
+ a = a_raw;
+ b = b_raw;
+
+ if (a->type != b->type)
+ return 0;
+
+ switch (a->type) {
+ case PCIM_ADDR_DEVRES_TYPE_REGION:
+ case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
+ return a->bar == b->bar;
+ case PCIM_ADDR_DEVRES_TYPE_MAPPING:
+ return a->baseaddr == b->baseaddr;
+ default:
+ return 0;
+ }
+}
static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
{
@@ -92,8 +327,8 @@ EXPORT_SYMBOL(devm_pci_remap_cfgspace);
*
* All operations are managed and will be undone on driver detach.
*
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example::
+ * Returns a pointer to the remapped memory or an IOMEM_ERR_PTR() encoded error
+ * code on failure. Usage example::
*
* res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* base = devm_pci_remap_cfg_resource(&pdev->dev, res);
@@ -140,96 +375,147 @@ void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
}
EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
+static void __pcim_clear_mwi(void *pdev_raw)
+{
+ struct pci_dev *pdev = pdev_raw;
+
+ pci_clear_mwi(pdev);
+}
+
/**
* pcim_set_mwi - a device-managed pci_set_mwi()
- * @dev: the PCI device for which MWI is enabled
+ * @pdev: the PCI device for which MWI is enabled
*
* Managed pci_set_mwi().
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-int pcim_set_mwi(struct pci_dev *dev)
+int pcim_set_mwi(struct pci_dev *pdev)
{
- struct pci_devres *dr;
+ int ret;
- dr = find_pci_dr(dev);
- if (!dr)
- return -ENOMEM;
+ ret = devm_add_action(&pdev->dev, __pcim_clear_mwi, pdev);
+ if (ret != 0)
+ return ret;
+
+ ret = pci_set_mwi(pdev);
+ if (ret != 0)
+ devm_remove_action(&pdev->dev, __pcim_clear_mwi, pdev);
- dr->mwi = 1;
- return pci_set_mwi(dev);
+ return ret;
}
EXPORT_SYMBOL(pcim_set_mwi);
+static inline bool mask_contains_bar(int mask, int bar)
+{
+ return mask & BIT(bar);
+}
-static void pcim_release(struct device *gendev, void *res)
+/*
+ * This is a copy of pci_intx() used to bypass the problem of recursive
+ * function calls due to the hybrid nature of pci_intx().
+ */
+static void __pcim_intx(struct pci_dev *pdev, int enable)
{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pci_devres *this = res;
- int i;
+ u16 pci_command, new;
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
- if (this->region_mask & (1 << i))
- pci_release_region(dev, i);
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+
+ if (enable)
+ new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
+ else
+ new = pci_command | PCI_COMMAND_INTX_DISABLE;
- if (this->mwi)
- pci_clear_mwi(dev);
+ if (new != pci_command)
+ pci_write_config_word(pdev, PCI_COMMAND, new);
+}
- if (this->restore_intx)
- pci_intx(dev, this->orig_intx);
+static void pcim_intx_restore(struct device *dev, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcim_intx_devres *res = data;
- if (this->enabled && !this->pinned)
- pci_disable_device(dev);
+ __pcim_intx(pdev, res->orig_intx);
}
-/*
- * TODO: After the last four callers in pci.c are ported, find_pci_dr()
- * needs to be made static again.
- */
-struct pci_devres *find_pci_dr(struct pci_dev *pdev)
+static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
{
- if (pci_is_managed(pdev))
- return devres_find(&pdev->dev, pcim_release, NULL, NULL);
- return NULL;
+ struct pcim_intx_devres *res;
+
+ res = devres_find(dev, pcim_intx_restore, NULL, NULL);
+ if (res)
+ return res;
+
+ res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
+ if (res)
+ devres_add(dev, res);
+
+ return res;
}
-static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
+/**
+ * pcim_intx - managed pci_intx()
+ * @pdev: the PCI device to operate on
+ * @enable: boolean: whether to enable or disable PCI INTx
+ *
+ * Returns: 0 on success, -ENOMEM on error.
+ *
+ * Enable/disable PCI INTx for device @pdev.
+ * Restore the original state on driver detach.
+ */
+int pcim_intx(struct pci_dev *pdev, int enable)
{
- struct pci_devres *dr, *new_dr;
+ struct pcim_intx_devres *res;
- dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
- if (dr)
- return dr;
+ res = get_or_create_intx_devres(&pdev->dev);
+ if (!res)
+ return -ENOMEM;
- new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
- if (!new_dr)
- return NULL;
- return devres_get(&pdev->dev, new_dr, NULL, NULL);
+ res->orig_intx = !enable;
+ __pcim_intx(pdev, enable);
+
+ return 0;
+}
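+
+/*
+ * Usage sketch (illustrative only): a driver that needs INTx disabled for its
+ * whole lifetime can simply do
+ *
+ *	ret = pcim_intx(pdev, 0);
+ *
+ * and the original INTx state is restored automatically on driver detach.
+ */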
+
+static void pcim_disable_device(void *pdev_raw)
+{
+ struct pci_dev *pdev = pdev_raw;
+
+ if (!pdev->pinned)
+ pci_disable_device(pdev);
}
/**
* pcim_enable_device - Managed pci_enable_device()
* @pdev: PCI device to be initialized
*
- * Managed pci_enable_device().
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * Managed pci_enable_device(). Device will automatically be disabled on
+ * driver detach.
*/
int pcim_enable_device(struct pci_dev *pdev)
{
- struct pci_devres *dr;
- int rc;
+ int ret;
+
+ ret = devm_add_action(&pdev->dev, pcim_disable_device, pdev);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * We prefer removing the action in case of an error over
+ * devm_add_action_or_reset() because the latter could theoretically be
+ * disturbed by users having pinned the device too soon.
+ */
+ ret = pci_enable_device(pdev);
+ if (ret != 0) {
+ devm_remove_action(&pdev->dev, pcim_disable_device, pdev);
+ return ret;
+ }
- dr = get_pci_dr(pdev);
- if (unlikely(!dr))
- return -ENOMEM;
- if (dr->enabled)
- return 0;
+ pdev->is_managed = true;
- rc = pci_enable_device(pdev);
- if (!rc) {
- pdev->is_managed = 1;
- dr->enabled = 1;
- }
- return rc;
+ return ret;
}
EXPORT_SYMBOL(pcim_enable_device);
@@ -237,36 +523,32 @@ EXPORT_SYMBOL(pcim_enable_device);
* pcim_pin_device - Pin managed PCI device
* @pdev: PCI device to pin
*
- * Pin managed PCI device @pdev. Pinned device won't be disabled on
- * driver detach. @pdev must have been enabled with
- * pcim_enable_device().
+ * Pin managed PCI device @pdev. Pinned device won't be disabled on driver
+ * detach. @pdev must have been enabled with pcim_enable_device().
*/
void pcim_pin_device(struct pci_dev *pdev)
{
- struct pci_devres *dr;
-
- dr = find_pci_dr(pdev);
- WARN_ON(!dr || !dr->enabled);
- if (dr)
- dr->pinned = 1;
+ pdev->pinned = true;
}
EXPORT_SYMBOL(pcim_pin_device);
static void pcim_iomap_release(struct device *gendev, void *res)
{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pcim_iomap_devres *this = res;
- int i;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (this->table[i])
- pci_iounmap(dev, this->table[i]);
+ /*
+ * Do nothing. This is legacy code.
+ *
+ * Cleanup of the mappings is now done directly through the callbacks
+ * registered when creating them.
+ */
}
/**
- * pcim_iomap_table - access iomap allocation table
+ * pcim_iomap_table - access iomap allocation table (DEPRECATED)
* @pdev: PCI device to access iomap table for
*
+ * Returns:
+ * Const pointer to array of __iomem pointers on success, NULL on failure.
+ *
 * Access iomap allocation table for @pdev. If iomap table doesn't
* exist and @pdev is managed, it will be allocated. All iomaps
* recorded in the iomap table are automatically unmapped on driver
@@ -275,6 +557,11 @@ static void pcim_iomap_release(struct device *gendev, void *res)
* This function might sleep when the table is first allocated but can
* be safely called without context and guaranteed to succeed once
* allocated.
+ *
+ * This function is DEPRECATED. Do not use it in new code. Instead, obtain a
+ * mapping's address directly from one of the pcim_* mapping functions. For
+ * example:
+ * void __iomem \*mappy = pcim_iomap(pdev, bar, length);
*/
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
@@ -293,27 +580,114 @@ void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
}
EXPORT_SYMBOL(pcim_iomap_table);
+/*
+ * Fill the legacy mapping-table, so that drivers using the old API can
+ * still get a BAR's mapping address through pcim_iomap_table().
+ */
+static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev,
+ void __iomem *mapping, int bar)
+{
+ void __iomem **legacy_iomap_table;
+
+ if (bar >= PCI_STD_NUM_BARS)
+ return -EINVAL;
+
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+ if (!legacy_iomap_table)
+ return -ENOMEM;
+
+ /* The legacy mechanism doesn't allow for duplicate mappings. */
+ WARN_ON(legacy_iomap_table[bar]);
+
+ legacy_iomap_table[bar] = mapping;
+
+ return 0;
+}
+
+/*
+ * Remove a mapping. The table only contains whole-BAR mappings, so this will
+ * never interfere with ranged mappings.
+ */
+static void pcim_remove_mapping_from_legacy_table(struct pci_dev *pdev,
+ void __iomem *addr)
+{
+ int bar;
+ void __iomem **legacy_iomap_table;
+
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+ if (!legacy_iomap_table)
+ return;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (legacy_iomap_table[bar] == addr) {
+ legacy_iomap_table[bar] = NULL;
+ return;
+ }
+ }
+}
+
+/*
+ * The same as pcim_remove_mapping_from_legacy_table(), but identifies the
+ * mapping by its BAR index.
+ */
+static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar)
+{
+ void __iomem **legacy_iomap_table;
+
+ if (bar >= PCI_STD_NUM_BARS)
+ return;
+
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+ if (!legacy_iomap_table)
+ return;
+
+ legacy_iomap_table[bar] = NULL;
+}
+
/**
 * pcim_iomap - Managed pci_iomap()
* @pdev: PCI device to iomap for
* @bar: BAR to iomap
* @maxlen: Maximum length of iomap
*
- * Managed pci_iomap(). Map is automatically unmapped on driver
- * detach.
+ * Returns: __iomem pointer on success, NULL on failure.
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver detach. If
+ * desired, unmap manually only with pcim_iounmap().
+ *
+ * This SHOULD only be used once per BAR.
+ *
+ * NOTE:
+ * Contrary to the other pcim_* functions, this function does not return an
+ * IOMEM_ERR_PTR() on failure, but a simple NULL. This is done for backwards
+ * compatibility.
*/
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
- void __iomem **tbl;
-
- BUG_ON(bar >= PCIM_IOMAP_MAX);
+ void __iomem *mapping;
+ struct pcim_addr_devres *res;
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
return NULL;
+ res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
- tbl[bar] = pci_iomap(pdev, bar, maxlen);
- return tbl[bar];
+ mapping = pci_iomap(pdev, bar, maxlen);
+ if (!mapping)
+ goto err_iomap;
+ res->baseaddr = mapping;
+
+ if (pcim_add_mapping_to_legacy_table(pdev, mapping, bar) != 0)
+ goto err_table;
+
+ devres_add(&pdev->dev, res);
+ return mapping;
+
+err_table:
+ pci_iounmap(pdev, mapping);
+err_iomap:
+ pcim_addr_devres_free(res);
+ return NULL;
}
EXPORT_SYMBOL(pcim_iomap);
@@ -322,102 +696,314 @@ EXPORT_SYMBOL(pcim_iomap);
* @pdev: PCI device to iounmap for
* @addr: Address to unmap
*
- * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ * Managed pci_iounmap(). @addr must have been mapped using a pcim_* mapping
+ * function.
*/
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
- void __iomem **tbl;
- int i;
+ struct pcim_addr_devres res_searched;
- pci_iounmap(pdev, addr);
+ pcim_addr_devres_clear(&res_searched);
+ res_searched.type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
+ res_searched.baseaddr = addr;
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- BUG_ON(!tbl);
+ if (devres_release(&pdev->dev, pcim_addr_resource_release,
+ pcim_addr_resources_match, &res_searched) != 0) {
+ /* Doesn't exist. User passed nonsense. */
+ return;
+ }
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (tbl[i] == addr) {
- tbl[i] = NULL;
- return;
- }
- WARN_ON(1);
+ pcim_remove_mapping_from_legacy_table(pdev, addr);
}
EXPORT_SYMBOL(pcim_iounmap);
/**
+ * pcim_iomap_region - Request and iomap a PCI BAR
+ * @pdev: PCI device to map IO resources for
+ * @bar: Index of a BAR to map
+ * @name: Name associated with the request
+ *
+ * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
+ *
+ * Mapping and region will get automatically released on driver detach. If
+ * desired, release manually only with pcim_iounmap_region().
+ */
+static void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ const char *name)
+{
+ int ret;
+ struct pcim_addr_devres *res;
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
+ res->bar = bar;
+
+ ret = __pcim_request_region(pdev, bar, name, 0);
+ if (ret != 0)
+ goto err_region;
+
+ res->baseaddr = pci_iomap(pdev, bar, 0);
+ if (!res->baseaddr) {
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
+ devres_add(&pdev->dev, res);
+ return res->baseaddr;
+
+err_iomap:
+ __pcim_release_region(pdev, bar);
+err_region:
+ pcim_addr_devres_free(res);
+
+ return IOMEM_ERR_PTR(ret);
+}
+
+/**
+ * pcim_iounmap_region - Unmap and release a PCI BAR
+ * @pdev: PCI device to operate on
+ * @bar: Index of BAR to unmap and release
+ *
+ * Unmap a BAR and release its region manually. Only pass BARs that were
+ * previously mapped by pcim_iomap_region().
+ */
+static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
+{
+ struct pcim_addr_devres res_searched;
+
+ pcim_addr_devres_clear(&res_searched);
+ res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
+ res_searched.bar = bar;
+
+ devres_release(&pdev->dev, pcim_addr_resource_release,
+ pcim_addr_resources_match, &res_searched);
+}
+
+/**
* pcim_iomap_regions - Request and iomap PCI BARs
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
- * @name: Name used when requesting regions
+ * @name: Name associated with the requests
+ *
+ * Returns: 0 on success, negative error code on failure.
*
* Request and iomap regions specified by @mask.
*/
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
- void __iomem * const *iomap;
- int i, rc;
+ int ret;
+ int bar;
+ void __iomem *mapping;
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return -ENOMEM;
+ for (bar = 0; bar < DEVICE_COUNT_RESOURCE; bar++) {
+ if (!mask_contains_bar(mask, bar))
+ continue;
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- unsigned long len;
+ mapping = pcim_iomap_region(pdev, bar, name);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ goto err;
+ }
+ ret = pcim_add_mapping_to_legacy_table(pdev, mapping, bar);
+ if (ret != 0)
+ goto err;
+ }
- if (!(mask & (1 << i)))
- continue;
+ return 0;
- rc = -EINVAL;
- len = pci_resource_len(pdev, i);
- if (!len)
- goto err_inval;
+err:
+ while (--bar >= 0) {
+ pcim_iounmap_region(pdev, bar);
+ pcim_remove_bar_from_legacy_table(pdev, bar);
+ }
- rc = pci_request_region(pdev, i, name);
- if (rc)
- goto err_inval;
+ return ret;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
- rc = -ENOMEM;
- if (!pcim_iomap(pdev, i, 0))
- goto err_region;
+static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
+ int request_flags)
+{
+ int ret;
+ struct pcim_addr_devres *res;
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return -ENOMEM;
+ res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
+ res->bar = bar;
+
+ ret = __pcim_request_region(pdev, bar, name, request_flags);
+ if (ret != 0) {
+ pcim_addr_devres_free(res);
+ return ret;
}
+ devres_add(&pdev->dev, res);
return 0;
+}
- err_region:
- pci_release_region(pdev, i);
- err_inval:
- while (--i >= 0) {
- if (!(mask & (1 << i)))
- continue;
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
+/**
+ * pcim_request_region - Request a PCI BAR
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name associated with the request
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
+ */
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
+{
+ return _pcim_request_region(pdev, bar, name, 0);
+}
+
+/**
+ * pcim_request_region_exclusive - Request a PCI BAR exclusively
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name associated with the request
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar exclusively.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
+ */
+int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name)
+{
+ return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE);
+}
+
+/**
+ * pcim_release_region - Release a PCI BAR
+ * @pdev: PCI device to operate on
+ * @bar: Index of BAR to release
+ *
+ * Release a region manually that was previously requested by
+ * pcim_request_region().
+ */
+void pcim_release_region(struct pci_dev *pdev, int bar)
+{
+ struct pcim_addr_devres res_searched;
+
+ pcim_addr_devres_clear(&res_searched);
+ res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION;
+ res_searched.bar = bar;
+
+ devres_release(&pdev->dev, pcim_addr_resource_release,
+ pcim_addr_resources_match, &res_searched);
+}
+
+
+/**
+ * pcim_release_all_regions - Release all regions of a PCI-device
+ * @pdev: the PCI device
+ *
+ * Release all regions previously requested through pcim_request_region()
+ * or pcim_request_all_regions().
+ *
+ * Can be called from any context, i.e., not necessarily as a counterpart to
+ * pcim_request_all_regions().
+ */
+static void pcim_release_all_regions(struct pci_dev *pdev)
+{
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ pcim_release_region(pdev, bar);
+}
+
+/**
+ * pcim_request_all_regions - Request all regions
+ * @pdev: PCI device to map IO resources for
+ * @name: name associated with the request
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * Requested regions will automatically be released at driver detach. If
+ * desired, release individual regions with pcim_release_region() or all of
+ * them at once with pcim_release_all_regions().
+ */
+static int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
+{
+ int ret;
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ ret = pcim_request_region(pdev, bar, name);
+ if (ret != 0)
+ goto err;
}
- return rc;
+ return 0;
+
+err:
+ pcim_release_all_regions(pdev);
+
+ return ret;
}
-EXPORT_SYMBOL(pcim_iomap_regions);
/**
* pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * (DEPRECATED)
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to iomap
- * @name: Name used when requesting regions
+ * @name: Name associated with the requests
+ *
+ * Returns: 0 on success, negative error code on failure.
*
* Request all PCI BARs and iomap regions specified by @mask.
+ *
+ * To release these resources manually, call pcim_release_region() for the
+ * regions and pcim_iounmap() for the mappings.
+ *
+ * This function is DEPRECATED. Don't use it in new code. Instead, use one
+ * of the pcim_* region request functions in combination with a pcim_*
+ * mapping function.
*/
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
const char *name)
{
- int request_mask = ((1 << 6) - 1) & ~mask;
- int rc;
+ int bar;
+ int ret;
+ void __iomem **legacy_iomap_table;
+
+ ret = pcim_request_all_regions(pdev, name);
+ if (ret != 0)
+ return ret;
- rc = pci_request_selected_regions(pdev, request_mask, name);
- if (rc)
- return rc;
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!mask_contains_bar(mask, bar))
+ continue;
+ if (!pcim_iomap(pdev, bar, 0))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ /*
+ * If bar is larger than 0, then pcim_iomap() above has most likely
+ * failed because of -EINVAL. If it equals 0, most likely the table
+ * couldn't be created, indicating -ENOMEM.
+ */
+ ret = bar > 0 ? -EINVAL : -ENOMEM;
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
- rc = pcim_iomap_regions(pdev, mask, name);
- if (rc)
- pci_release_selected_regions(pdev, request_mask);
- return rc;
+ while (--bar >= 0)
+ pcim_iounmap(pdev, legacy_iomap_table[bar]);
+
+ pcim_release_all_regions(pdev);
+
+ return ret;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
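+
+/*
+ * Illustrative replacement for the deprecated helper above (hypothetical
+ * driver name and BAR index): request a single BAR and map it with the
+ * explicitly managed API; both are released automatically on driver detach:
+ *
+ *	ret = pcim_request_region(pdev, 0, "mydrv");
+ *	if (ret)
+ *		return ret;
+ *
+ *	base = pcim_iomap(pdev, 0, 0);
+ *	if (!base)
+ *		return -ENOMEM;
+ */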
@@ -430,19 +1016,58 @@ EXPORT_SYMBOL(pcim_iomap_regions_request_all);
*/
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
- void __iomem * const *iomap;
int i;
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++) {
- if (!(mask & (1 << i)))
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (!mask_contains_bar(mask, i))
continue;
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
+ pcim_iounmap_region(pdev, i);
+ pcim_remove_bar_from_legacy_table(pdev, i);
}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
+
+/**
+ * pcim_iomap_range - Create a ranged __iomem mapping within a PCI BAR
+ * @pdev: PCI device to map IO resources for
+ * @bar: Index of the BAR
+ * @offset: Offset from the beginning of the BAR
+ * @len: Length in bytes for the mapping
+ *
+ * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
+ *
+ * Creates a new IO-Mapping within the specified @bar, ranging from @offset to
+ * @offset + @len.
+ *
+ * The mapping will automatically get unmapped on driver detach. If desired,
+ * release manually only with pcim_iounmap().
+ */
+void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long len)
+{
+ void __iomem *mapping;
+ struct pcim_addr_devres *res;
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ mapping = pci_iomap_range(pdev, bar, offset, len);
+ if (!mapping) {
+ pcim_addr_devres_free(res);
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
+ res->baseaddr = mapping;
+
+ /*
+ * Ranged mappings don't get added to the legacy-table, since the table
+ * only ever keeps track of whole BARs.
+ */
+
+ devres_add(&pdev->dev, res);
+ return mapping;
+}
+EXPORT_SYMBOL(pcim_iomap_range);
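For illustration only, a minimal usage sketch of the new export; the BAR index, offset and length are arbitrary, and SZ_4K comes from <linux/sizes.h>:

	void __iomem *ctl;

	/* Map a hypothetical 4 KiB control window at offset 0x1000 of BAR 2. */
	ctl = pcim_iomap_range(pdev, 2, 0x1000, SZ_4K);
	if (IS_ERR(ctl))
		return PTR_ERR(ctl);
	/* No explicit unmap needed; devres tears the mapping down on detach. */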
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 2c54d80107cf..7d070b1def11 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -137,6 +137,7 @@ static const struct pci_epf_mhi_ep_info sa8775p_info = {
.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
.msi_count = 32,
.mru = 0x8000,
+ .flags = MHI_EPF_USE_DMA,
};
struct pci_epf_mhi {
@@ -716,7 +717,7 @@ static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
epf_mhi->dma_chan_rx = NULL;
}
-static int pci_epf_mhi_core_init(struct pci_epf *epf)
+static int pci_epf_mhi_epc_init(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
@@ -753,9 +754,35 @@ static int pci_epf_mhi_core_init(struct pci_epf *epf)
if (!epf_mhi->epc_features)
return -ENODATA;
+ if (info->flags & MHI_EPF_USE_DMA) {
+ ret = pci_epf_mhi_dma_init(epf_mhi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DMA: %d\n", ret);
+ return ret;
+ }
+ }
+
return 0;
}
+static void pci_epf_mhi_epc_deinit(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+
+ if (mhi_cntrl->mhi_dev) {
+ mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ }
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+}
+
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
@@ -765,14 +792,6 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
struct device *dev = &epf->dev;
int ret;
- if (info->flags & MHI_EPF_USE_DMA) {
- ret = pci_epf_mhi_dma_init(epf_mhi);
- if (ret) {
- dev_err(dev, "Failed to initialize DMA: %d\n", ret);
- return ret;
- }
- }
-
mhi_cntrl->mmio = epf_mhi->mmio;
mhi_cntrl->irq = epf_mhi->irq;
mhi_cntrl->mru = info->mru;
@@ -819,7 +838,7 @@ static int pci_epf_mhi_link_down(struct pci_epf *epf)
return 0;
}
-static int pci_epf_mhi_bme(struct pci_epf *epf)
+static int pci_epf_mhi_bus_master_enable(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
@@ -882,8 +901,8 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
/*
* Forcefully power down the MHI EP stack. Only way to bring the MHI EP
- * stack back to working state after successive bind is by getting BME
- * from host.
+ * stack back to a working state after a subsequent bind is by getting the
+ * Bus Master Enable event from the host.
*/
if (mhi_cntrl->mhi_dev) {
mhi_ep_power_down(mhi_cntrl);
@@ -897,10 +916,11 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
}
static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
- .core_init = pci_epf_mhi_core_init,
+ .epc_init = pci_epf_mhi_epc_init,
+ .epc_deinit = pci_epf_mhi_epc_deinit,
.link_up = pci_epf_mhi_link_up,
.link_down = pci_epf_mhi_link_down,
- .bme = pci_epf_mhi_bme,
+ .bus_master_enable = pci_epf_mhi_bus_master_enable,
};
static int pci_epf_mhi_probe(struct pci_epf *epf,
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 977fb79c1567..7c2ed6eae53a 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -686,25 +686,6 @@ reset_handler:
msecs_to_jiffies(1));
}
-static void pci_epf_test_unbind(struct pci_epf *epf)
-{
- struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- struct pci_epc *epc = epf->epc;
- int bar;
-
- cancel_delayed_work(&epf_test->cmd_handler);
- pci_epf_test_clean_dma_chan(epf_test);
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- if (!epf_test->reg[bar])
- continue;
-
- pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
- &epf->bar[bar]);
- pci_epf_free_space(epf, epf_test->reg[bar], bar,
- PRIMARY_INTERFACE);
- }
-}
-
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
int bar, ret;
@@ -731,23 +712,36 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
return 0;
}
-static int pci_epf_test_core_init(struct pci_epf *epf)
+static void pci_epf_test_clear_bar(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
+ continue;
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[bar]);
+ }
+}
+
+static int pci_epf_test_epc_init(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epf_header *header = epf->header;
- const struct pci_epc_features *epc_features;
+ const struct pci_epc_features *epc_features = epf_test->epc_features;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
bool linkup_notifier = false;
- bool msix_capable = false;
- bool msi_capable = true;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
- if (epc_features) {
- msix_capable = epc_features->msix_capable;
- msi_capable = epc_features->msi_capable;
- }
+ epf_test->dma_supported = true;
+
+ ret = pci_epf_test_init_dma_chan(epf_test);
+ if (ret)
+ epf_test->dma_supported = false;
if (epf->vfunc_no <= 1) {
ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
@@ -761,7 +755,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
if (ret)
return ret;
- if (msi_capable) {
+ if (epc_features->msi_capable) {
ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
epf->msi_interrupts);
if (ret) {
@@ -770,7 +764,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
- if (msix_capable) {
+ if (epc_features->msix_capable) {
ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
epf->msix_interrupts,
epf_test->test_reg_bar,
@@ -788,6 +782,15 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
return 0;
}
+static void pci_epf_test_epc_deinit(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ cancel_delayed_work(&epf_test->cmd_handler);
+ pci_epf_test_clean_dma_chan(epf_test);
+ pci_epf_test_clear_bar(epf);
+}
+
static int pci_epf_test_link_up(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -798,9 +801,20 @@ static int pci_epf_test_link_up(struct pci_epf *epf)
return 0;
}
+static int pci_epf_test_link_down(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+
+ return 0;
+}
+
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
- .core_init = pci_epf_test_core_init,
+ .epc_init = pci_epf_test_epc_init,
+ .epc_deinit = pci_epf_test_epc_deinit,
.link_up = pci_epf_test_link_up,
+ .link_down = pci_epf_test_link_down,
};
static int pci_epf_test_alloc_space(struct pci_epf *epf)
@@ -810,19 +824,15 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
size_t msix_table_size = 0;
size_t test_reg_bar_size;
size_t pba_size = 0;
- bool msix_capable;
void *base;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
enum pci_barno bar;
- const struct pci_epc_features *epc_features;
+ const struct pci_epc_features *epc_features = epf_test->epc_features;
size_t test_reg_size;
- epc_features = epf_test->epc_features;
-
test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
- msix_capable = epc_features->msix_capable;
- if (msix_capable) {
+ if (epc_features->msix_capable) {
msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
epf_test->msix_table_offset = test_reg_bar_size;
/* Align to QWORD or 8 Bytes */
@@ -857,6 +867,20 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
return 0;
}
+static void pci_epf_test_free_space(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
+ continue;
+
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
+ }
+}
+
static int pci_epf_test_bind(struct pci_epf *epf)
{
int ret;
@@ -885,13 +909,20 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
return ret;
- epf_test->dma_supported = true;
+ return 0;
+}
- ret = pci_epf_test_init_dma_chan(epf_test);
- if (ret)
- epf_test->dma_supported = false;
+static void pci_epf_test_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
- return 0;
+ cancel_delayed_work(&epf_test->cmd_handler);
+ if (epc->init_complete) {
+ pci_epf_test_clean_dma_chan(epf_test);
+ pci_epf_test_clear_bar(epf);
+ }
+ pci_epf_test_free_space(epf);
}
static const struct pci_epf_device_id pci_epf_test_ids[] = {
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 8e779eecd62d..874cb097b093 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -799,8 +799,9 @@ err_config_interrupt:
*/
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
- epf_ntb_db_bar_clear(ntb);
epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_config_sspad_bar_clear(ntb);
}
#define EPF_NTB_R(_name) \
@@ -1018,8 +1019,10 @@ static int vpci_scan_bus(void *sysdata)
struct epf_ntb *ndev = sysdata;
vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
- if (vpci_bus)
- pr_err("create pci bus\n");
+ if (!vpci_bus) {
+ pr_err("create pci bus failed\n");
+ return -EINVAL;
+ }
pci_bus_add_devices(vpci_bus);
@@ -1335,13 +1338,19 @@ static int epf_ntb_bind(struct pci_epf *epf)
ret = pci_register_driver(&vntb_pci_driver);
if (ret) {
dev_err(dev, "failure register vntb pci driver\n");
- goto err_bar_alloc;
+ goto err_epc_cleanup;
}
- vpci_scan_bus(ntb);
+ ret = vpci_scan_bus(ntb);
+ if (ret)
+ goto err_unregister;
return 0;
+err_unregister:
+ pci_unregister_driver(&vntb_pci_driver);
+err_epc_cleanup:
+ epf_ntb_epc_cleanup(ntb);
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 3b21e28f9b59..d712c7a866d2 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -23,7 +23,6 @@ struct pci_epf_group {
struct config_group group;
struct config_group primary_epc_group;
struct config_group secondary_epc_group;
- struct config_group *type_group;
struct delayed_work cfs_work;
struct pci_epf *epf;
int index;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 47d27ec7439d..84309dfe0c68 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -14,7 +14,9 @@
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>
-static struct class *pci_epc_class;
+static const struct class pci_epc_class = {
+ .name = "pci_epc",
+};
static void devm_pci_epc_release(struct device *dev, void *res)
{
@@ -60,7 +62,7 @@ struct pci_epc *pci_epc_get(const char *epc_name)
struct device *dev;
struct class_dev_iter iter;
- class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
+ class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
if (strcmp(epc_name, dev_name(dev)))
continue;
@@ -727,9 +729,9 @@ void pci_epc_linkdown(struct pci_epc *epc)
EXPORT_SYMBOL_GPL(pci_epc_linkdown);
/**
- * pci_epc_init_notify() - Notify the EPF device that EPC device's core
- * initialization is completed.
- * @epc: the EPC device whose core initialization is completed
+ * pci_epc_init_notify() - Notify the EPF device that the EPC device's
+ * initialization is completed
+ * @epc: the EPC device whose initialization is completed
*
* Invoke to Notify the EPF device that the EPC device's initialization
* is completed.
@@ -744,8 +746,8 @@ void pci_epc_init_notify(struct pci_epc *epc)
mutex_lock(&epc->list_lock);
list_for_each_entry(epf, &epc->pci_epf, list) {
mutex_lock(&epf->lock);
- if (epf->event_ops && epf->event_ops->core_init)
- epf->event_ops->core_init(epf);
+ if (epf->event_ops && epf->event_ops->epc_init)
+ epf->event_ops->epc_init(epf);
mutex_unlock(&epf->lock);
}
epc->init_complete = true;
@@ -756,7 +758,7 @@ EXPORT_SYMBOL_GPL(pci_epc_init_notify);
/**
* pci_epc_notify_pending_init() - Notify the pending EPC device initialization
* complete to the EPF device
- * @epc: the EPC device whose core initialization is pending to be notified
+ * @epc: the EPC device whose initialization is pending to be notified
* @epf: the EPF device to be notified
*
* Invoke to notify the pending EPC device initialization complete to the EPF
@@ -767,22 +769,20 @@ void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
{
if (epc->init_complete) {
mutex_lock(&epf->lock);
- if (epf->event_ops && epf->event_ops->core_init)
- epf->event_ops->core_init(epf);
+ if (epf->event_ops && epf->event_ops->epc_init)
+ epf->event_ops->epc_init(epf);
mutex_unlock(&epf->lock);
}
}
EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
/**
- * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
- * the BME event from the Root complex
- * @epc: the EPC device that received the BME event
+ * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
+ * @epc: the EPC device whose deinitialization is completed
*
- * Invoke to Notify the EPF device that the EPC device has received the Bus
- * Master Enable (BME) event from the Root complex
+ * Invoke to notify the EPF device that the EPC deinitialization is completed.
*/
-void pci_epc_bme_notify(struct pci_epc *epc)
+void pci_epc_deinit_notify(struct pci_epc *epc)
{
struct pci_epf *epf;
@@ -792,13 +792,41 @@ void pci_epc_bme_notify(struct pci_epc *epc)
mutex_lock(&epc->list_lock);
list_for_each_entry(epf, &epc->pci_epf, list) {
mutex_lock(&epf->lock);
- if (epf->event_ops && epf->event_ops->bme)
- epf->event_ops->bme(epf);
+ if (epf->event_ops && epf->event_ops->epc_deinit)
+ epf->event_ops->epc_deinit(epf);
mutex_unlock(&epf->lock);
}
+ epc->init_complete = false;
mutex_unlock(&epc->list_lock);
}
-EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
+EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);
+
+/**
+ * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
+ * device has received the Bus Master
+ * Enable event from the Root complex
+ * @epc: the EPC device that received the Bus Master Enable event
+ *
+ * Notify the EPF device that the EPC device has generated the Bus Master
+ * Enable event due to the host setting the Bus Master Enable bit in the
+ * Command register.
+ */
+void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
+{
+ struct pci_epf *epf;
+
+ if (IS_ERR_OR_NULL(epc))
+ return;
+
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->bus_master_enable)
+ epf->event_ops->bus_master_enable(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
+}
+EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
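A hedged sketch of how an endpoint controller driver might fan its hardware events out through the renamed notifiers; the foo_* structure, register bits and base pointer are invented, and only the pci_epc_* calls come from this file:

static irqreturn_t foo_epc_irq_handler(int irq, void *data)
{
	struct foo_epc *foo = data;			/* hypothetical controller state */
	u32 status = readl(foo->csr + FOO_IRQ_STATUS);	/* hypothetical status register */

	if (status & FOO_IRQ_LINK_UP)
		pci_epc_linkup(foo->epc);
	if (status & FOO_IRQ_LINK_DOWN)
		pci_epc_linkdown(foo->epc);
	if (status & FOO_IRQ_BUS_MASTER_ENABLE)
		pci_epc_bus_master_enable_notify(foo->epc);

	writel(status, foo->csr + FOO_IRQ_STATUS);	/* write-1-to-clear, by assumption */
	return IRQ_HANDLED;
}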
/**
* pci_epc_destroy() - destroy the EPC device
@@ -867,7 +895,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
INIT_LIST_HEAD(&epc->pci_epf);
device_initialize(&epc->dev);
- epc->dev.class = pci_epc_class;
+ epc->dev.class = &pci_epc_class;
epc->dev.parent = dev;
epc->dev.release = pci_epc_release;
epc->ops = ops;
@@ -927,20 +955,13 @@ EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
static int __init pci_epc_init(void)
{
- pci_epc_class = class_create("pci_epc");
- if (IS_ERR(pci_epc_class)) {
- pr_err("failed to create pci epc class --> %ld\n",
- PTR_ERR(pci_epc_class));
- return PTR_ERR(pci_epc_class);
- }
-
- return 0;
+ return class_register(&pci_epc_class);
}
module_init(pci_epc_init);
static void __exit pci_epc_exit(void)
{
- class_destroy(pci_epc_class);
+ class_unregister(&pci_epc_class);
}
module_exit(pci_epc_exit);
diff --git a/drivers/pci/hotplug/acpiphp_ampere_altra.c b/drivers/pci/hotplug/acpiphp_ampere_altra.c
index 3fddd04851b6..f5c9e741c1d4 100644
--- a/drivers/pci/hotplug/acpiphp_ampere_altra.c
+++ b/drivers/pci/hotplug/acpiphp_ampere_altra.c
@@ -124,4 +124,5 @@ static struct platform_driver altra_led_driver = {
module_platform_driver(altra_led_driver);
MODULE_AUTHOR("D Scott Phillips <scott@os.amperecomputing.com>");
+MODULE_DESCRIPTION("ACPI PCI Hot Plug Extension for Ampere Altra");
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e0a614acee05..273dd8c66f4e 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -46,6 +46,9 @@ extern int pciehp_poll_time;
/**
* struct controller - PCIe hotplug controller
* @pcie: pointer to the controller's PCIe port service device
+ * @dsn: cached copy of Device Serial Number of Function 0 in the hotplug slot
+ * (PCIe r6.2 sec 7.9.3); used to determine whether a hotplugged device
+ * was replaced with a different one during system sleep
* @slot_cap: cached copy of the Slot Capabilities register
* @inband_presence_disabled: In-Band Presence Detect Disable supported by
* controller and disabled per spec recommendation (PCIe r5.0, appendix I
@@ -87,6 +90,7 @@ extern int pciehp_poll_time;
*/
struct controller {
struct pcie_device *pcie;
+ u64 dsn;
u32 slot_cap; /* capabilities and quirks */
unsigned int inband_presence_disabled:1;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index ddd55ad97a58..ff458e692fed 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -284,6 +284,32 @@ static int pciehp_suspend(struct pcie_device *dev)
return 0;
}
+static bool pciehp_device_replaced(struct controller *ctrl)
+{
+ struct pci_dev *pdev __free(pci_dev_put);
+ u32 reg;
+
+ pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
+ if (!pdev)
+ return true;
+
+ if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &reg) ||
+ reg != (pdev->vendor | (pdev->device << 16)) ||
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &reg) ||
+ reg != (pdev->revision | (pdev->class << 8)))
+ return true;
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &reg) ||
+ reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16))))
+ return true;
+
+ if (pci_get_dsn(pdev) != ctrl->dsn)
+ return true;
+
+ return false;
+}
+
static int pciehp_resume_noirq(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
@@ -293,9 +319,23 @@ static int pciehp_resume_noirq(struct pcie_device *dev)
ctrl->cmd_busy = true;
/* clear spurious events from rediscovery of inserted card */
- if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE)
+ if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) {
pcie_clear_hotplug_events(ctrl);
+ /*
+ * If hotplugged device was replaced with a different one
+ * during system sleep, mark the old device disconnected
+ * (to prevent its driver from accessing the new device)
+ * and synthesize a Presence Detect Changed event.
+ */
+ if (pciehp_device_replaced(ctrl)) {
+ ctrl_dbg(ctrl, "device replaced during system sleep\n");
+ pci_walk_bus(ctrl->pcie->port->subordinate,
+ pci_dev_set_disconnected, NULL);
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
+ }
+ }
+
return 0;
}
#endif
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b1d0a1b3917d..061f01f60db4 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -1055,6 +1055,11 @@ struct controller *pcie_init(struct pcie_device *dev)
}
}
+ pdev = pci_get_slot(subordinate, PCI_DEVFN(0, 0));
+ if (pdev)
+ ctrl->dsn = pci_get_dsn(pdev);
+ pci_dev_put(pdev);
+
return ctrl;
}
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index ad12515a4a12..65e50bee1a8c 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -72,6 +72,10 @@ int pciehp_configure_device(struct controller *ctrl)
pci_bus_add_devices(parent);
down_read_nested(&ctrl->reset_lock, ctrl->depth);
+ dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
+ ctrl->dsn = pci_get_dsn(dev);
+ pci_dev_put(dev);
+
out:
pci_unlock_rescan_remove();
return ret;
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 694349be9d0a..573a41869c15 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -40,7 +40,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
bool disable_device)
{
struct pci_dev *pdev = php_slot->pdev;
- int irq = php_slot->irq;
u16 ctrl;
if (php_slot->irq > 0) {
@@ -59,7 +58,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
php_slot->wq = NULL;
}
- if (disable_device || irq > 0) {
+ if (disable_device) {
if (pdev->msix_enabled)
pci_disable_msix(pdev);
else if (pdev->msi_enabled)
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
index c9725428e387..a715a4803c95 100644
--- a/drivers/pci/iomap.c
+++ b/drivers/pci/iomap.c
@@ -23,6 +23,10 @@
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if you initialized with
+ * pcim_enable_device().
* */
void __iomem *pci_iomap_range(struct pci_dev *dev,
int bar,
@@ -63,6 +67,10 @@ EXPORT_SYMBOL(pci_iomap_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if you initialized with
+ * pcim_enable_device().
* */
void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
int bar,
@@ -106,6 +114,10 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if you initialized with
+ * pcim_enable_device(). If you need automatic cleanup, use pcim_iomap().
* */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
@@ -127,6 +139,10 @@ EXPORT_SYMBOL(pci_iomap);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if you initialized with
+ * pcim_enable_device().
* */
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index 03d2dd25790d..569125726b3e 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -148,17 +148,35 @@ static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *d
arg->hwirq = desc->msi_index;
}
+static __always_inline void cond_mask_parent(struct irq_data *data)
+{
+ struct msi_domain_info *info = data->domain->host_data;
+
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ irq_chip_mask_parent(data);
+}
+
+static __always_inline void cond_unmask_parent(struct irq_data *data)
+{
+ struct msi_domain_info *info = data->domain->host_data;
+
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ irq_chip_unmask_parent(data);
+}
+
static void pci_irq_mask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
pci_msi_mask(desc, BIT(data->irq - desc->irq));
+ cond_mask_parent(data);
}
static void pci_irq_unmask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
+ cond_unmask_parent(data);
pci_msi_unmask(desc, BIT(data->irq - desc->irq));
}
@@ -195,10 +213,12 @@ static const struct msi_domain_template pci_msi_template = {
static void pci_irq_mask_msix(struct irq_data *data)
{
pci_msix_mask(irq_data_get_msi_desc(data));
+ cond_mask_parent(data);
}
static void pci_irq_unmask_msix(struct irq_data *data)
{
+ cond_unmask_parent(data);
pci_msix_unmask(irq_data_get_msi_desc(data));
}
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index b908fe1ae951..dacea3fc5128 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -240,27 +240,61 @@ int of_get_pci_domain_nr(struct device_node *node)
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
/**
- * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
- * is present and valid
+ * of_pci_preserve_config - Return true if the boot configuration needs to
+ * be preserved
+ * @node: Device tree node.
+ *
+ * Look for the "linux,pci-probe-only" property for a given PCI controller's
+ * node and return true if found. Also look in the chosen node if the
+ * property is not found in the given controller's node. Having this
+ * property ensures that the kernel doesn't reconfigure the BARs and bridge
+ * windows that the platform firmware has already set up.
+ *
+ * Return: true if the property exists; false otherwise.
*/
-void of_pci_check_probe_only(void)
+bool of_pci_preserve_config(struct device_node *node)
{
- u32 val;
+ u32 val = 0;
int ret;
- ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
+ if (!node) {
+ pr_warn("device node is NULL, trying with of_chosen\n");
+ node = of_chosen;
+ }
+
+retry:
+ ret = of_property_read_u32(node, "linux,pci-probe-only", &val);
if (ret) {
- if (ret == -ENODATA || ret == -EOVERFLOW)
- pr_warn("linux,pci-probe-only without valid value, ignoring\n");
- return;
+ if (ret == -ENODATA || ret == -EOVERFLOW) {
+ pr_warn("Incorrect value for linux,pci-probe-only in %pOF, ignoring\n",
+ node);
+ return false;
+ }
+ if (ret == -EINVAL) {
+ if (node == of_chosen)
+ return false;
+
+ node = of_chosen;
+ goto retry;
+ }
}
if (val)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
+ * is present and valid
+ */
+void of_pci_check_probe_only(void)
+{
+ if (of_pci_preserve_config(of_chosen))
pci_add_flags(PCI_PROBE_ONLY);
else
pci_clear_flags(PCI_PROBE_ONLY);
-
- pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 004575091596..9cc447da9475 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -119,6 +119,28 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
return (phys_addr_t)mcfg_addr;
}
+bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
+{
+ if (ACPI_HANDLE(&host_bridge->dev)) {
+ union acpi_object *obj;
+
+ /*
+ * Evaluate the "PCI Boot Configuration" _DSM Function. If it
+ * exists and returns 0, we must preserve any PCI resource
+ * assignments made by firmware for this host bridge.
+ */
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(&host_bridge->dev),
+ &pci_acpi_dsm_guid,
+ 1, DSM_PCI_PRESERVE_BOOT_CONFIG,
+ NULL, ACPI_TYPE_INTEGER);
+ if (obj && obj->integer.value == 0)
+ return true;
+ ACPI_FREE(obj);
+ }
+
+ return false;
+}
+
/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
u32 revision; /* Not present in _HPP */
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index fbfd78127123..bed9f0755271 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -38,8 +38,8 @@ pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
* arch/x86/platform/intel-mid/pwr.c.
*/
static const struct x86_cpu_id lpss_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, NULL),
{}
};
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
index 45855a5e9fca..da4db4928907 100644
--- a/drivers/pci/pci-pf-stub.c
+++ b/drivers/pci/pci-pf-stub.c
@@ -39,4 +39,5 @@ static struct pci_driver pf_stub_driver = {
};
module_pci_driver(pf_stub_driver);
+MODULE_DESCRIPTION("SR-IOV PF stub driver with no functionality");
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index d1f4c1ce7bd1..9bc478df4e8f 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -92,5 +92,6 @@ static void __exit pci_stub_exit(void)
module_init(pci_stub_init);
module_exit(pci_stub_exit);
+MODULE_DESCRIPTION("VM device assignment stub driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>");
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 35fb1f17a589..e3a49f66982d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -946,30 +946,67 @@ void pci_request_acs(void)
}
static const char *disable_acs_redir_param;
+static const char *config_acs_param;
-/**
- * pci_disable_acs_redir - disable ACS redirect capabilities
- * @dev: the PCI device
- *
- * For only devices specified in the disable_acs_redir parameter.
- */
-static void pci_disable_acs_redir(struct pci_dev *dev)
+struct pci_acs {
+ u16 cap;
+ u16 ctrl;
+ u16 fw_ctrl;
+};
+
+static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
+ const char *p, u16 mask, u16 flags)
{
+ char *delimit;
int ret = 0;
- const char *p;
- int pos;
- u16 ctrl;
- if (!disable_acs_redir_param)
+ if (!p)
return;
- p = disable_acs_redir_param;
while (*p) {
+ if (!mask) {
+ /* Check for ACS flags */
+ delimit = strstr(p, "@");
+ if (delimit) {
+ int end;
+ u32 shift = 0;
+
+ end = delimit - p - 1;
+
+ while (end > -1) {
+ if (*(p + end) == '0') {
+ mask |= 1 << shift;
+ shift++;
+ end--;
+ } else if (*(p + end) == '1') {
+ mask |= 1 << shift;
+ flags |= 1 << shift;
+ shift++;
+ end--;
+ } else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
+ shift++;
+ end--;
+ } else {
+ pci_err(dev, "Invalid ACS flags... Ignoring\n");
+ return;
+ }
+ }
+ p = delimit + 1;
+ } else {
+ pci_err(dev, "ACS Flags missing\n");
+ return;
+ }
+ }
+
+ if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
+ PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
+ pci_err(dev, "Invalid ACS flags specified\n");
+ return;
+ }
+
ret = pci_dev_str_match(dev, p, &p);
if (ret < 0) {
- pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
- disable_acs_redir_param);
-
+ pr_info_once("PCI: Can't parse ACS command line parameter\n");
break;
} else if (ret == 1) {
/* Found a match */
@@ -989,56 +1026,38 @@ static void pci_disable_acs_redir(struct pci_dev *dev)
if (!pci_dev_specific_disable_acs_redir(dev))
return;
- pos = dev->acs_cap;
- if (!pos) {
- pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
- return;
- }
-
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+ pci_dbg(dev, "ACS mask = %#06x\n", mask);
+ pci_dbg(dev, "ACS flags = %#06x\n", flags);
- /* P2P Request & Completion Redirect */
- ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
+ /* If mask is 0 then we copy the bit from the firmware setting. */
+ caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask);
+ caps->ctrl |= flags;
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
-
- pci_info(dev, "disabled ACS redirect\n");
+ pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}
/**
* pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
+ * @caps: default ACS controls
*/
-static void pci_std_enable_acs(struct pci_dev *dev)
+static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
{
- int pos;
- u16 cap;
- u16 ctrl;
-
- pos = dev->acs_cap;
- if (!pos)
- return;
-
- pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
-
/* Source Validation */
- ctrl |= (cap & PCI_ACS_SV);
+ caps->ctrl |= (caps->cap & PCI_ACS_SV);
/* P2P Request Redirect */
- ctrl |= (cap & PCI_ACS_RR);
+ caps->ctrl |= (caps->cap & PCI_ACS_RR);
/* P2P Completion Redirect */
- ctrl |= (cap & PCI_ACS_CR);
+ caps->ctrl |= (caps->cap & PCI_ACS_CR);
/* Upstream Forwarding */
- ctrl |= (cap & PCI_ACS_UF);
+ caps->ctrl |= (caps->cap & PCI_ACS_UF);
/* Enable Translation Blocking for external devices and noats */
if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
- ctrl |= (cap & PCI_ACS_TB);
-
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+ caps->ctrl |= (caps->cap & PCI_ACS_TB);
}
/**
@@ -1047,23 +1066,33 @@ static void pci_std_enable_acs(struct pci_dev *dev)
*/
static void pci_enable_acs(struct pci_dev *dev)
{
- if (!pci_acs_enable)
- goto disable_acs_redir;
+ struct pci_acs caps;
+ int pos;
- if (!pci_dev_specific_enable_acs(dev))
- goto disable_acs_redir;
+ pos = dev->acs_cap;
+ if (!pos)
+ return;
- pci_std_enable_acs(dev);
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
+ caps.fw_ctrl = caps.ctrl;
+
+ /* If an iommu is present we start with kernel default caps */
+ if (pci_acs_enable) {
+ if (pci_dev_specific_enable_acs(dev))
+ pci_std_enable_acs(dev, &caps);
+ }
-disable_acs_redir:
/*
- * Note: pci_disable_acs_redir() must be called even if ACS was not
- * enabled by the kernel because it may have been enabled by
- * platform firmware. So if we are told to disable it, we should
- * always disable it after setting the kernel's default
- * preferences.
+ * Always apply caps from the command line, even if there is no iommu.
+ * Trust that the admin has a reason to change the ACS settings.
*/
- pci_disable_acs_redir(dev);
+ __pci_config_acs(dev, &caps, disable_acs_redir_param,
+ PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
+ ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
+ __pci_config_acs(dev, &caps, config_acs_param, 0, 0);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
}
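To make the effect of the two merge statements in __pci_config_acs() concrete, here is a worked trace with invented register values (not taken from any real device):

	u16 fw_ctrl = PCI_ACS_SV | PCI_ACS_RR;			/* as left by firmware */
	u16 ctrl    = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_UF;	/* after kernel defaults */
	u16 mask    = PCI_ACS_RR | PCI_ACS_CR;			/* bits named on the command line */
	u16 flags   = PCI_ACS_CR;				/* bits requested as '1' */

	ctrl = (ctrl & ~mask) | (fw_ctrl & mask);	/* masked bits fall back to the firmware value */
	ctrl |= flags;					/* then the requested bits are set */
	/* Result: PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF */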
/**
@@ -2218,12 +2247,6 @@ void pci_disable_enabled_device(struct pci_dev *dev)
*/
void pci_disable_device(struct pci_dev *dev)
{
- struct pci_devres *dr;
-
- dr = find_pci_dr(dev);
- if (dr)
- dr->enabled = 0;
-
dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
"disabling already-disabled device");
@@ -3872,7 +3895,15 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
*/
void pci_release_region(struct pci_dev *pdev, int bar)
{
- struct pci_devres *dr;
+ /*
+ * This is done for backwards compatibility, because the old PCI devres
+ * API had a mode in which the function became managed if it had been
+ * enabled with pcim_enable_device() instead of pci_enable_device().
+ */
+ if (pci_is_managed(pdev)) {
+ pcim_release_region(pdev, bar);
+ return;
+ }
if (pci_resource_len(pdev, bar) == 0)
return;
@@ -3882,10 +3913,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
release_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
-
- dr = find_pci_dr(pdev);
- if (dr)
- dr->region_mask &= ~(1 << bar);
}
EXPORT_SYMBOL(pci_release_region);
@@ -3896,6 +3923,8 @@ EXPORT_SYMBOL(pci_release_region);
* @res_name: Name to be associated with resource.
* @exclusive: whether the region access is exclusive or not
*
+ * Returns: 0 on success, negative error code on failure.
+ *
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
@@ -3911,7 +3940,12 @@ EXPORT_SYMBOL(pci_release_region);
static int __pci_request_region(struct pci_dev *pdev, int bar,
const char *res_name, int exclusive)
{
- struct pci_devres *dr;
+ if (pci_is_managed(pdev)) {
+ if (exclusive == IORESOURCE_EXCLUSIVE)
+ return pcim_request_region_exclusive(pdev, bar, res_name);
+
+ return pcim_request_region(pdev, bar, res_name);
+ }
if (pci_resource_len(pdev, bar) == 0)
return 0;
@@ -3927,10 +3961,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
goto err_out;
}
- dr = find_pci_dr(pdev);
- if (dr)
- dr->region_mask |= 1 << bar;
-
return 0;
err_out:
@@ -3945,6 +3975,8 @@ err_out:
* @bar: BAR to be reserved
* @res_name: Name to be associated with resource
*
+ * Returns: 0 on success, negative error code on failure.
+ *
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
@@ -3952,6 +3984,11 @@ err_out:
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
@@ -4002,6 +4039,13 @@ err_out:
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
* @res_name: Name to be associated with resource
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *res_name)
@@ -4010,6 +4054,19 @@ int pci_request_selected_regions(struct pci_dev *pdev, int bars,
}
EXPORT_SYMBOL(pci_request_selected_regions);
+/**
+ * pci_request_selected_regions_exclusive - Request regions exclusively
+ * @pdev: PCI device to request regions from
+ * @bars: bit mask of BARs to request
+ * @res_name: name to be associated with the requests
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
+ */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
const char *res_name)
{
@@ -4027,7 +4084,6 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
* successful call to pci_request_regions(). Call this function only
* after all use of the PCI regions has ceased.
*/
-
void pci_release_regions(struct pci_dev *pdev)
{
pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
@@ -4046,6 +4102,11 @@ EXPORT_SYMBOL(pci_release_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
@@ -4059,6 +4120,8 @@ EXPORT_SYMBOL(pci_request_regions);
* @pdev: PCI device whose resources are to be reserved
* @res_name: Name to be associated with resource.
*
+ * Returns: 0 on success, negative error code on failure.
+ *
* Mark all PCI regions associated with PCI device @pdev as being reserved
* by owner @res_name. Do not access any address inside the PCI regions
* unless this call returns successfully.
@@ -4068,6 +4131,11 @@ EXPORT_SYMBOL(pci_request_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning message is also
* printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
@@ -4399,11 +4467,22 @@ void pci_disable_parity(struct pci_dev *dev)
* @enable: boolean: whether to enable or disable PCI INTx
*
* Enables/disables PCI INTx for device @pdev
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
*/
void pci_intx(struct pci_dev *pdev, int enable)
{
u16 pci_command, new;
+ /* Preserve the "hybrid" behavior for backwards compatibility */
+ if (pci_is_managed(pdev)) {
+ WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
+ return;
+ }
+
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
if (enable)
@@ -4411,17 +4490,8 @@ void pci_intx(struct pci_dev *pdev, int enable)
else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- if (new != pci_command) {
- struct pci_devres *dr;
-
+ if (new != pci_command)
pci_write_config_word(pdev, PCI_COMMAND, new);
-
- dr = find_pci_dr(pdev);
- if (dr && !dr->restore_intx) {
- dr->restore_intx = 1;
- dr->orig_intx = !enable;
- }
- }
}
EXPORT_SYMBOL_GPL(pci_intx);
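A short, hedged sketch of the managed alternative named in the NOTE above, inside a hypothetical pcim_enable_device()-based probe path:

	/* Explicitly use the managed API instead of relying on the hybrid pci_intx(). */
	ret = pcim_intx(pdev, 0);	/* disable INTx; devres restores the original state on unbind */
	if (ret)
		return ret;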
@@ -4753,7 +4823,7 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
*/
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
{
- struct pci_dev *child;
+ struct pci_dev *child __free(pci_dev_put) = NULL;
int delay;
if (pci_dev_is_disconnected(dev))
@@ -4782,8 +4852,8 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
return 0;
}
- child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
- bus_list);
+ child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
+ struct pci_dev, bus_list));
up_read(&pci_bus_sem);
/*
@@ -4883,6 +4953,9 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
*/
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
+ if (!dev->block_cfg_access)
+ pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
+ __builtin_return_address(0));
pcibios_reset_secondary_bus(dev);
return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
@@ -5441,10 +5514,12 @@ static void pci_bus_lock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ pci_dev_lock(bus->self);
list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5456,8 +5531,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
}
/* Return 1 on successful lock, 0 on contention */
@@ -5465,15 +5542,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ if (!pci_dev_trylock(bus->self))
+ return 0;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
- if (!pci_bus_trylock(dev->subordinate)) {
- pci_dev_unlock(dev);
+ if (!pci_bus_trylock(dev->subordinate))
goto unlock;
- }
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5481,8 +5558,10 @@ unlock:
list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
return 0;
}
@@ -5514,9 +5593,10 @@ static void pci_slot_lock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5542,14 +5622,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
if (!pci_bus_trylock(dev->subordinate)) {
pci_dev_unlock(dev);
goto unlock;
}
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5560,7 +5639,8 @@ unlock:
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
return 0;
}
@@ -6019,24 +6099,7 @@ int pcie_link_speed_mbps(struct pci_dev *pdev)
if (err)
return err;
- switch (to_pcie_link_speed(lnksta)) {
- case PCIE_SPEED_2_5GT:
- return 2500;
- case PCIE_SPEED_5_0GT:
- return 5000;
- case PCIE_SPEED_8_0GT:
- return 8000;
- case PCIE_SPEED_16_0GT:
- return 16000;
- case PCIE_SPEED_32_0GT:
- return 32000;
- case PCIE_SPEED_64_0GT:
- return 64000;
- default:
- break;
- }
-
- return -EINVAL;
+ return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
}
EXPORT_SYMBOL(pcie_link_speed_mbps);
@@ -6839,6 +6902,8 @@ static int __init pci_setup(char *str)
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
disable_acs_redir_param = str + 18;
+ } else if (!strncmp(str, "config_acs=", 11)) {
+ config_acs_param = str + 11;
} else {
pr_err("PCI: Unknown option `%s'\n", str);
}
@@ -6863,6 +6928,7 @@ static int __init pci_realloc_setup_params(void)
resource_alignment_param = kstrdup(resource_alignment_param,
GFP_KERNEL);
disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+ config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fd44565c4756..79c8398f3938 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -17,11 +17,54 @@
#define PCIE_T_PVPERL_MS 100
/*
+ * End of conventional reset (PERST# de-asserted) to first configuration
+ * request (device able to respond with a "Request Retry Status" completion),
+ * from PCIe r6.0, sec 6.6.1.
+ */
+#define PCIE_T_RRS_READY_MS 100
+
+/*
* PCIe r6.0, sec 5.3.3.2.1 <PME Synchronization>
* Recommends 1ms to 10ms timeout to check L2 ready.
*/
#define PCIE_PME_TO_L2_TIMEOUT_US 10000
+/*
+ * PCIe r6.0, sec 6.6.1 <Conventional Reset>
+ *
+ * - "With a Downstream Port that does not support Link speeds greater
+ * than 5.0 GT/s, software must wait a minimum of 100 ms following exit
+ * from a Conventional Reset before sending a Configuration Request to
+ * the device immediately below that Port."
+ *
+ * - "With a Downstream Port that supports Link speeds greater than
+ * 5.0 GT/s, software must wait a minimum of 100 ms after Link training
+ * completes before sending a Configuration Request to the device
+ * immediately below that Port."
+ */
+#define PCIE_RESET_CONFIG_DEVICE_WAIT_MS 100
+
+/* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
+#define PCIE_MSG_TYPE_R_RC 0
+#define PCIE_MSG_TYPE_R_ADDR 1
+#define PCIE_MSG_TYPE_R_ID 2
+#define PCIE_MSG_TYPE_R_BC 3
+#define PCIE_MSG_TYPE_R_LOCAL 4
+#define PCIE_MSG_TYPE_R_GATHER 5
+
+/* Power Management Messages; PCIe r6.0, sec 2.2.8.2 */
+#define PCIE_MSG_CODE_PME_TURN_OFF 0x19
+
+/* INTx Mechanism Messages; PCIe r6.0, sec 2.2.8.1 */
+#define PCIE_MSG_CODE_ASSERT_INTA 0x20
+#define PCIE_MSG_CODE_ASSERT_INTB 0x21
+#define PCIE_MSG_CODE_ASSERT_INTC 0x22
+#define PCIE_MSG_CODE_ASSERT_INTD 0x23
+#define PCIE_MSG_CODE_DEASSERT_INTA 0x24
+#define PCIE_MSG_CODE_DEASSERT_INTB 0x25
+#define PCIE_MSG_CODE_DEASSERT_INTC 0x26
+#define PCIE_MSG_CODE_DEASSERT_INTD 0x27
+
extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;
@@ -290,6 +333,28 @@ void pci_bus_put(struct pci_bus *bus);
(speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \
0)
+static inline int pcie_dev_speed_mbps(enum pci_bus_speed speed)
+{
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ return 2500;
+ case PCIE_SPEED_5_0GT:
+ return 5000;
+ case PCIE_SPEED_8_0GT:
+ return 8000;
+ case PCIE_SPEED_16_0GT:
+ return 16000;
+ case PCIE_SPEED_32_0GT:
+ return 32000;
+ case PCIE_SPEED_64_0GT:
+ return 64000;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
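A one-line usage sketch of the new helper (pdev is assumed to be a valid PCIe device):

	/* E.g. PCIE_SPEED_8_0GT maps to 8000; unknown speeds yield -EINVAL. */
	int mbps = pcie_dev_speed_mbps(pcie_get_speed_cap(pdev));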
const char *pci_speed_string(enum pci_bus_speed speed);
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
@@ -648,6 +713,7 @@ int of_pci_get_max_link_speed(struct device_node *node);
u32 of_pci_get_slot_power_limit(struct device_node *node,
u8 *slot_power_limit_value,
u8 *slot_power_limit_scale);
+bool of_pci_preserve_config(struct device_node *node);
int pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
@@ -686,6 +752,11 @@ of_pci_get_slot_power_limit(struct device_node *node,
return 0;
}
+static inline bool of_pci_preserve_config(struct device_node *node)
+{
+ return false;
+}
+
static inline int pci_set_of_node(struct pci_dev *dev) { return 0; }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
@@ -732,6 +803,7 @@ static inline void pci_restore_aer_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_ACPI
+bool pci_acpi_preserve_config(struct pci_host_bridge *bridge);
int pci_acpi_program_hp_params(struct pci_dev *dev);
extern const struct attribute_group pci_dev_acpi_attr_group;
void pci_set_acpi_fwnode(struct pci_dev *dev);
@@ -745,6 +817,10 @@ int acpi_pci_wakeup(struct pci_dev *dev, bool enable);
bool acpi_pci_need_resume(struct pci_dev *dev);
pci_power_t acpi_pci_choose_state(struct pci_dev *pdev);
#else
+static inline bool pci_acpi_preserve_config(struct pci_host_bridge *bridge)
+{
+ return false;
+}
static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
return -ENOTTY;
@@ -810,26 +886,12 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
}
#endif
-/*
- * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
- * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
- * there's no need to track it separately. pci_devres is initialized
- * when a device is enabled using managed PCI device enable interface.
- *
- * TODO: Struct pci_devres and find_pci_dr() only need to be here because
- * they're used in pci.c. Port or move these functions to devres.c and
- * then remove them from here.
- */
-struct pci_devres {
- unsigned int enabled:1;
- unsigned int pinned:1;
- unsigned int orig_intx:1;
- unsigned int restore_intx:1;
- unsigned int mwi:1;
- u32 region_mask;
-};
+int pcim_intx(struct pci_dev *dev, int enable);
-struct pci_devres *find_pci_dr(struct pci_dev *pdev);
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
+int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
+ const char *name);
+void pcim_release_region(struct pci_dev *pdev, int bar);
/*
* Config Address for PCI Configuration Mechanism #1
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index ac6293c24976..13b8586924ea 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1497,6 +1497,22 @@ static int aer_probe(struct pcie_device *dev)
return 0;
}
+static int aer_suspend(struct pcie_device *dev)
+{
+ struct aer_rpc *rpc = get_service_data(dev);
+
+ aer_disable_rootport(rpc);
+ return 0;
+}
+
+static int aer_resume(struct pcie_device *dev)
+{
+ struct aer_rpc *rpc = get_service_data(dev);
+
+ aer_enable_rootport(rpc);
+ return 0;
+}
+
/**
* aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP
* @dev: pointer to Root Port, RCEC, or RCiEP
@@ -1561,6 +1577,8 @@ static struct pcie_port_service_driver aerdriver = {
.service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,
+ .suspend = aer_suspend,
+ .resume = aer_resume,
.remove = aer_remove,
};
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index a668820696dc..2b6ef7efa3c1 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -412,13 +412,44 @@ void pci_dpc_init(struct pci_dev *pdev)
}
}
+static void dpc_enable(struct pcie_device *dev)
+{
+ struct pci_dev *pdev = dev->port;
+ int dpc = pdev->dpc_cap;
+ u16 ctl;
+
+ /*
+ * Clear DPC Interrupt Status so we don't get an interrupt for an
+ * old event when setting DPC Interrupt Enable.
+ */
+ pci_write_config_word(pdev, dpc + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_INTERRUPT);
+
+ pci_read_config_word(pdev, dpc + PCI_EXP_DPC_CTL, &ctl);
+ ctl &= ~PCI_EXP_DPC_CTL_EN_MASK;
+ ctl |= PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
+ pci_write_config_word(pdev, dpc + PCI_EXP_DPC_CTL, ctl);
+}
+
+static void dpc_disable(struct pcie_device *dev)
+{
+ struct pci_dev *pdev = dev->port;
+ int dpc = pdev->dpc_cap;
+ u16 ctl;
+
+ /* Disable DPC triggering and DPC interrupts */
+ pci_read_config_word(pdev, dpc + PCI_EXP_DPC_CTL, &ctl);
+ ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
+ pci_write_config_word(pdev, dpc + PCI_EXP_DPC_CTL, ctl);
+}
+
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
static int dpc_probe(struct pcie_device *dev)
{
struct pci_dev *pdev = dev->port;
struct device *device = &dev->device;
int status;
- u16 ctl, cap;
+ u16 cap;
if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
@@ -433,11 +464,7 @@ static int dpc_probe(struct pcie_device *dev)
}
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
-
- pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
- ctl &= ~PCI_EXP_DPC_CTL_EN_MASK;
- ctl |= PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
- pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
+ dpc_enable(dev);
pci_info(pdev, "enabled with IRQ %d\n", dev->irq);
pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
@@ -450,14 +477,21 @@ static int dpc_probe(struct pcie_device *dev)
return status;
}
-static void dpc_remove(struct pcie_device *dev)
+static int dpc_suspend(struct pcie_device *dev)
{
- struct pci_dev *pdev = dev->port;
- u16 ctl;
+ dpc_disable(dev);
+ return 0;
+}
- pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
- ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
- pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
+static int dpc_resume(struct pcie_device *dev)
+{
+ dpc_enable(dev);
+ return 0;
+}
+
+static void dpc_remove(struct pcie_device *dev)
+{
+ dpc_disable(dev);
}
static struct pcie_port_service_driver dpcdriver = {
@@ -465,6 +499,8 @@ static struct pcie_port_service_driver dpcdriver = {
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
+ .suspend = dpc_suspend,
+ .resume = dpc_resume,
.remove = dpc_remove,
};
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index bb65dfe43409..6af5e0425872 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -786,7 +786,7 @@ static const struct pci_error_handlers pcie_portdrv_err_handler = {
static struct pci_driver pcie_portdriver = {
.name = "pcieport",
- .id_table = &port_pci_ids[0],
+ .id_table = port_pci_ids,
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4c367f13acdc..b14b9876c030 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -889,6 +889,17 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
dev_set_msi_domain(&bus->dev, d);
}
+static bool pci_preserve_config(struct pci_host_bridge *host_bridge)
+{
+ if (pci_acpi_preserve_config(host_bridge))
+ return true;
+
+ if (host_bridge->dev.parent && host_bridge->dev.parent->of_node)
+ return of_pci_preserve_config(host_bridge->dev.parent->of_node);
+
+ return false;
+}
+
static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
struct device *parent = bridge->dev.parent;
@@ -983,6 +994,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
+ /* Check if the boot configuration by FW needs to be preserved */
+ bridge->preserve_config = pci_preserve_config(bridge);
+
/* Coalesce contiguous windows */
resource_list_for_each_entry_safe(window, n, &resources) {
if (list_is_last(&window->node, &resources))
@@ -3079,20 +3093,18 @@ int pci_host_probe(struct pci_host_bridge *bridge)
bus = bridge->bus;
+ /* If we must preserve the resource configuration, claim now */
+ if (bridge->preserve_config)
+ pci_bus_claim_resources(bus);
+
/*
- * We insert PCI resources into the iomem_resource and
- * ioport_resource trees in either pci_bus_claim_resources()
- * or pci_bus_assign_resources().
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
*/
- if (pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_claim_resources(bus);
- } else {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ pci_assign_unassigned_root_bus_resources(bus);
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
- }
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 568410e64ce6..a2ce4e08edf5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5099,6 +5099,10 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1760, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1761, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 909e6a7c3cc3..23082bc0ca37 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -14,6 +14,7 @@
* tighter packing. Prefetchable range support.
*/
+#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -21,6 +22,8 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
+#include <linux/limits.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include "pci.h"
@@ -829,11 +832,9 @@ static resource_size_t calculate_memsize(resource_size_t size,
size = min_size;
if (old_size == 1)
old_size = 0;
- if (size < old_size)
- size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align);
- return size;
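+ /*
+ * Apply add_size and children_add_size to the newly calculated size first;
+ * the previously assigned old_size then only acts as a lower bound instead
+ * of having the additional sizes stacked on top of it.
+ */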
+ size = max(size, add_size) + children_add_size;
+ return ALIGN(max(size, old_size), align);
}
resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
@@ -959,7 +960,7 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
for (order = 0; order <= max_order; order++) {
resource_size_t align1 = 1;
- align1 <<= (order + 20);
+ align1 <<= order + __ffs(SZ_1M);
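+ /* __ffs(SZ_1M) is 20, so align1 is still 1MB << order */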
if (!align)
min_align = align1;
@@ -972,6 +973,67 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
}
/**
+ * pbus_upstream_space_available - Check that no upstream resource limits the allocation
+ * @bus: The bus
+ * @mask: Mask the resource flag, then compare it with type
+ * @type: The type of resource from bridge
+ * @size: The size required from the bridge window
+ * @align: Required alignment for the resource
+ *
+ * Checks that @size can fit inside the upstream bridge resources that are
+ * already assigned.
+ *
+ * Return: %true if enough space is available on all assigned upstream
+ * resources.
+ */
+static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mask,
+ unsigned long type, resource_size_t size,
+ resource_size_t align)
+{
+ struct resource_constraint constraint = {
+ .max = RESOURCE_SIZE_MAX,
+ .align = align,
+ };
+ struct pci_bus *downstream = bus;
+ struct resource *r;
+
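+ /* Walk the already-assigned windows of every upstream bridge below the root bus */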
+ while ((bus = bus->parent)) {
+ if (pci_is_root_bus(bus))
+ break;
+
+ pci_bus_for_each_resource(bus, r) {
+ if (!r || !r->parent || (r->flags & mask) != type)
+ continue;
+
+ if (resource_size(r) >= size) {
+ struct resource gap = {};
+
+ if (find_resource_space(r, &gap, size, &constraint) == 0) {
+ gap.flags = type;
+ pci_dbg(bus->self,
+ "Assigned bridge window %pR to %pR free space at %pR\n",
+ r, &bus->busn_res, &gap);
+ return true;
+ }
+ }
+
+ if (bus->self) {
+ pci_info(bus->self,
+ "Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
+ r, &bus->busn_res,
+ (unsigned long long)size,
+ pci_name(downstream->self),
+ &downstream->busn_res);
+ }
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
* pbus_size_mem() - Size the memory window of a given bus
*
* @bus: The bus
@@ -997,7 +1059,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- resource_size_t min_align, align, size, size0, size1;
+ resource_size_t min_align, win_align, align, size, size0, size1;
resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
int order, max_order;
struct resource *b_res = find_bus_resource_of_type(bus,
@@ -1049,7 +1111,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
* resources.
*/
align = pci_resource_alignment(dev, r);
- order = __ffs(align) - 20;
+ order = __ffs(align) - __ffs(SZ_1M);
if (order < 0)
order = 0;
if (order >= ARRAY_SIZE(aligns)) {
@@ -1076,10 +1138,23 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
}
}
+ win_align = window_alignment(bus, b_res->flags);
min_align = calculate_mem_align(aligns, max_order);
- min_align = max(min_align, window_alignment(bus, b_res->flags));
+ min_align = max(min_align, win_align);
size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
add_align = max(min_align, add_align);
+
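+ /*
+ * If the conservatively aligned size0 does not fit into the windows already
+ * assigned upstream, retry the sizing with the minimum window alignment.
+ */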
+ if (bus->self && size0 &&
+ !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
+ size0, add_align)) {
+ min_align = 1ULL << (max_order + __ffs(SZ_1M));
+ min_align = max(min_align, win_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align);
+ add_align = win_align;
+ pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
+ b_res, &bus->busn_res);
+ }
+
size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
calculate_memsize(size, min_size, add_size, children_add_size,
resource_size(b_res), add_align);
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 5a4adf6c04cf..c7e1089ffdaf 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -37,7 +37,9 @@ MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful fo
static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);
-struct class *switchtec_class;
+const struct class switchtec_class = {
+ .name = "switchtec",
+};
EXPORT_SYMBOL_GPL(switchtec_class);
enum mrpc_state {
@@ -1363,7 +1365,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
dev = &stdev->dev;
device_initialize(dev);
- dev->class = switchtec_class;
+ dev->class = &switchtec_class;
dev->parent = &pdev->dev;
dev->groups = switchtec_device_groups;
dev->release = stdev_release;
@@ -1851,11 +1853,9 @@ static int __init switchtec_init(void)
if (rc)
return rc;
- switchtec_class = class_create("switchtec");
- if (IS_ERR(switchtec_class)) {
- rc = PTR_ERR(switchtec_class);
+ rc = class_register(&switchtec_class);
+ if (rc)
goto err_create_class;
- }
rc = pci_register_driver(&switchtec_pci_driver);
if (rc)
@@ -1866,7 +1866,7 @@ static int __init switchtec_init(void)
return 0;
err_pci_register:
- class_destroy(switchtec_class);
+ class_unregister(&switchtec_class);
err_create_class:
unregister_chrdev_region(switchtec_devt, max_devices);
@@ -1878,7 +1878,7 @@ module_init(switchtec_init);
static void __exit switchtec_exit(void)
{
pci_unregister_driver(&switchtec_pci_driver);
- class_destroy(switchtec_class);
+ class_unregister(&switchtec_class);
unregister_chrdev_region(switchtec_devt, max_devices);
ida_destroy(&switchtec_minor_ida);
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
index a5414441834a..5bda3e6d43d8 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.c
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -456,7 +456,6 @@ struct platform_driver bcm63xx_pcmcia_driver = {
.remove_new = bcm63xx_drv_pcmcia_remove,
.driver = {
.name = "bcm63xx_pcmcia",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index a335748bdef5..a947ffb2df55 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -23,6 +23,7 @@
#include "i82092aa.h"
#include "i82365.h"
+MODULE_DESCRIPTION("Driver for Intel I82092AA PCI-PCMCIA bridge");
MODULE_LICENSE("GPL");
/* PCI core routines */
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 891ccea2cccb..86a357837a7b 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1342,5 +1342,6 @@ static void __exit exit_i82365(void)
module_init(init_i82365);
module_exit(exit_i82365);
+MODULE_DESCRIPTION("Driver for Intel 82365 and compatible PC Card controllers");
MODULE_LICENSE("Dual MPL/GPL");
/*====================================================================*/
diff --git a/drivers/pcmcia/max1600.c b/drivers/pcmcia/max1600.c
index 379875a5e7cd..7be9068f6191 100644
--- a/drivers/pcmcia/max1600.c
+++ b/drivers/pcmcia/max1600.c
@@ -119,4 +119,5 @@ int max1600_configure(struct max1600 *m, unsigned int vcc, unsigned int vpp)
}
EXPORT_SYMBOL_GPL(max1600_configure);
+MODULE_DESCRIPTION("MAX1600 PCMCIA power switch library");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index 252893216e50..3a1d2baa466f 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -66,5 +66,6 @@ EXPORT_SYMBOL(pccard_static_ops);
MODULE_AUTHOR("David A. Hinds, Dominik Brodowski");
+MODULE_DESCRIPTION("PCMCIA resource management routines");
MODULE_LICENSE("GPL");
MODULE_ALIAS("rsrc_nonstatic");
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 1365eaa20ff4..020ea86c24ec 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -638,11 +638,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
start = PCIBIOS_MIN_CARDBUS_IO;
end = ~0U;
} else {
- unsigned long avail = root->end - root->start;
+ unsigned long avail = resource_size(root);
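+ /* resource_size() is end - start + 1, so the /8 thresholds below keep their old values */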
int i;
size = BRIDGE_MEM_MAX;
- if (size > avail/8) {
- size = (avail+1)/8;
+ if (size > (avail - 1) / 8) {
+ size = avail / 8;
/* round size down to next power of 2 */
i = 0;
while ((size /= 2) != 0)
@@ -1452,4 +1452,5 @@ static struct pci_driver yenta_cardbus_driver = {
module_pci_driver(yenta_cardbus_driver);
+MODULE_DESCRIPTION("Driver for CardBus yenta-compatible bridges");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 029efe16f8cc..6ecc656abc44 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -249,7 +249,9 @@ PIN_DECL_2(E26, GPIOD3, RGMII3RXD3, RMII3RXER);
FUNC_GROUP_DECL(RGMII3, H24, J22, H22, H23, G22, F22, G23, G24, F23, F26, F25,
E26);
-FUNC_GROUP_DECL(RMII3, H24, J22, H22, H23, G23, F23, F26, F25, E26);
+GROUP_DECL(RMII3, H24, J22, H22, H23, G23, F23, F26, F25, E26);
+GROUP_DECL(NCSI3, J22, H22, H23, G23, F23, F26, F25, E26);
+FUNC_DECL_2(RMII3, RMII3, NCSI3);
#define F24 28
SIG_EXPR_LIST_DECL_SESG(F24, NCTS3, NCTS3, SIG_DESC_SET(SCU410, 28));
@@ -355,7 +357,9 @@ FUNC_GROUP_DECL(NRTS4, B24);
FUNC_GROUP_DECL(RGMII4, F24, E23, E24, E25, D26, D24, C25, C26, C24, B26, B25,
B24);
-FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
+GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
+GROUP_DECL(NCSI4, E23, E24, E25, C25, C24, B26, B25, B24);
+FUNC_DECL_2(RMII4, RMII4, NCSI4);
#define D22 40
SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
@@ -1977,6 +1981,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(MDIO2),
ASPEED_PINCTRL_GROUP(MDIO3),
ASPEED_PINCTRL_GROUP(MDIO4),
+ ASPEED_PINCTRL_GROUP(NCSI3),
+ ASPEED_PINCTRL_GROUP(NCSI4),
ASPEED_PINCTRL_GROUP(NCTS1),
ASPEED_PINCTRL_GROUP(NCTS2),
ASPEED_PINCTRL_GROUP(NCTS3),
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 27fd54795791..184641e221d4 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -34,6 +34,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <dt-bindings/pinctrl/bcm2835.h>
@@ -752,7 +753,7 @@ static void bcm2835_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
int irq = irq_find_mapping(chip->irq.domain, offset);
seq_printf(s, "function %s in %s; irq %d (%s)",
- fname, value ? "hi" : "lo",
+ fname, str_hi_lo(value),
irq, irq_type_names[pc->irq_type[offset]]);
}
@@ -1428,7 +1429,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
}
dev_info(dev, "GPIO_OUT persistence: %s\n",
- persist_gpio_outputs ? "yes" : "no");
+ str_yes_no(persist_gpio_outputs));
return 0;
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm4908.c b/drivers/pinctrl/bcm/pinctrl-bcm4908.c
index cdfa165fc033..f190e0997f1f 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm4908.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm4908.c
@@ -559,5 +559,6 @@ static struct platform_driver bcm4908_pinctrl_driver = {
module_platform_driver(bcm4908_pinctrl_driver);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom BCM4908 pinmux driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_pinctrl_of_match_table);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm63xx.c b/drivers/pinctrl/bcm/pinctrl-bcm63xx.c
index e1285fe2fbc0..59d2ce8462d8 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm63xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm63xx.c
@@ -67,7 +67,6 @@ int bcm63xx_pinctrl_probe(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct bcm63xx_pinctrl *pc;
- struct device_node *node;
int err;
pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
@@ -94,12 +93,11 @@ int bcm63xx_pinctrl_probe(struct platform_device *pdev,
if (IS_ERR(pc->pctl_dev))
return PTR_ERR(pc->pctl_dev);
- for_each_child_of_node(dev->parent->of_node, node) {
+ for_each_child_of_node_scoped(dev->parent->of_node, node) {
if (of_match_node(bcm63xx_gpio_of_match, node)) {
err = bcm63xx_gpio_probe(dev, node, soc, pc);
if (err) {
dev_err(dev, "could not add GPIO chip\n");
- of_node_put(node);
return err;
}
}
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index 9550cc8095c2..c372a2a24be4 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -27,7 +27,7 @@ struct berlin_pinctrl {
struct regmap *regmap;
struct device *dev;
const struct berlin_pinctrl_desc *desc;
- struct berlin_pinctrl_function *functions;
+ struct pinfunction *functions;
unsigned nfunctions;
struct pinctrl_dev *pctrl_dev;
};
@@ -120,12 +120,12 @@ static const char *berlin_pinmux_get_function_name(struct pinctrl_dev *pctrl_dev
static int berlin_pinmux_get_function_groups(struct pinctrl_dev *pctrl_dev,
unsigned function,
const char * const **groups,
- unsigned * const num_groups)
+ unsigned * const ngroups)
{
struct berlin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrl_dev);
*groups = pctrl->functions[function].groups;
- *num_groups = pctrl->functions[function].ngroups;
+ *ngroups = pctrl->functions[function].ngroups;
return 0;
}
@@ -153,7 +153,7 @@ static int berlin_pinmux_set(struct pinctrl_dev *pctrl_dev,
{
struct berlin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrl_dev);
const struct berlin_desc_group *group_desc = pctrl->desc->groups + group;
- struct berlin_pinctrl_function *func = pctrl->functions + function;
+ struct pinfunction *func = pctrl->functions + function;
struct berlin_desc_function *function_desc =
berlin_pinctrl_find_function_by_name(pctrl, group_desc,
func->name);
@@ -180,7 +180,7 @@ static const struct pinmux_ops berlin_pinmux_ops = {
static int berlin_pinctrl_add_function(struct berlin_pinctrl *pctrl,
const char *name)
{
- struct berlin_pinctrl_function *function = pctrl->functions;
+ struct pinfunction *function = pctrl->functions;
while (function->name) {
if (!strcmp(function->name, name)) {
@@ -214,8 +214,7 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
}
/* we will reallocate later */
- pctrl->functions = kcalloc(max_functions,
- sizeof(*pctrl->functions), GFP_KERNEL);
+ pctrl->functions = kcalloc(max_functions, sizeof(*pctrl->functions), GFP_KERNEL);
if (!pctrl->functions)
return -ENOMEM;
@@ -242,8 +241,7 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
desc_function = desc_group->functions;
while (desc_function->name) {
- struct berlin_pinctrl_function
- *function = pctrl->functions;
+ struct pinfunction *function = pctrl->functions;
const char **groups;
bool found = false;
@@ -264,16 +262,15 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
function->groups =
devm_kcalloc(&pdev->dev,
function->ngroups,
- sizeof(char *),
+ sizeof(*function->groups),
GFP_KERNEL);
-
if (!function->groups) {
kfree(pctrl->functions);
return -ENOMEM;
}
}
- groups = function->groups;
+ groups = (const char **)function->groups;
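+ /* pinfunction::groups is const char * const *, so cast away the outer const to fill it in */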
while (*groups)
groups++;
diff --git a/drivers/pinctrl/berlin/berlin.h b/drivers/pinctrl/berlin/berlin.h
index d7787754d1ed..231aab61d415 100644
--- a/drivers/pinctrl/berlin/berlin.h
+++ b/drivers/pinctrl/berlin/berlin.h
@@ -28,12 +28,6 @@ struct berlin_pinctrl_desc {
unsigned ngroups;
};
-struct berlin_pinctrl_function {
- const char *name;
- const char **groups;
- unsigned ngroups;
-};
-
#define BERLIN_PINCTRL_GROUP(_name, _offset, _width, _lsb, ...) \
{ \
.name = _name, \
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index f424a57f0013..314ab93d7691 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1670,13 +1670,23 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
seq_printf(s, "pin %d (%s) ", pin, desc->name);
#ifdef CONFIG_GPIOLIB
+ gdev = NULL;
gpio_num = -1;
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
- if ((pin >= range->pin_base) &&
- (pin < (range->pin_base + range->npins))) {
- gpio_num = range->base + (pin - range->pin_base);
- break;
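+ /*
+ * A range may list its pins explicitly (sparse) or describe a
+ * contiguous span starting at pin_base; handle both forms.
+ */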
+ if (range->pins != NULL) {
+ for (int i = 0; i < range->npins; ++i) {
+ if (range->pins[i] == pin) {
+ gpio_num = range->base + i;
+ break;
+ }
+ }
+ } else if ((pin >= range->pin_base) &&
+ (pin < (range->pin_base + range->npins))) {
+ gpio_num =
+ range->base + (pin - range->pin_base);
}
+ if (gpio_num != -1)
+ break;
}
if (gpio_num >= 0)
/*
@@ -2080,6 +2090,14 @@ out_err:
return ERR_PTR(ret);
}
+static void pinctrl_uninit_controller(struct pinctrl_dev *pctldev, struct pinctrl_desc *pctldesc)
+{
+ pinctrl_free_pindescs(pctldev, pctldesc->pins,
+ pctldesc->npins);
+ mutex_destroy(&pctldev->mutex);
+ kfree(pctldev);
+}
+
static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
{
pctldev->p = create_pinctrl(pctldev->dev, pctldev);
@@ -2160,8 +2178,10 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
return pctldev;
error = pinctrl_enable(pctldev);
- if (error)
+ if (error) {
+ pinctrl_uninit_controller(pctldev, pctldesc);
return ERR_PTR(error);
+ }
return pctldev;
}
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 837fd5bd903d..4e07707d2435 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -206,7 +206,7 @@ struct group_desc {
void *data;
};
-/* Convenience macro to define a generic pin group descriptor */
+/* Convenient macro to define a generic pin group descriptor */
#define PINCTRL_GROUP_DESC(_name, _pins, _num_pins, _data) \
(struct group_desc) { \
.grp = PINCTRL_PINGROUP(_name, _pins, _num_pins), \
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 27bdc548f3a7..3b59d7189004 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -7,6 +7,17 @@ config PINCTRL_IMX
select PINCONF
select REGMAP
+config PINCTRL_IMX_SCMI
+ tristate "i.MX95 pinctrl driver using SCMI protocol interface"
+ depends on ARM_SCMI_PROTOCOL && OF || COMPILE_TEST
+ select PINMUX
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ help
+ The i.MX95 SCMI firmware exposes a pinctrl protocol. This driver
+ uses that SCMI interface to configure the pins.
+
config PINCTRL_IMX_SCU
tristate
depends on IMX_SCU
@@ -184,6 +195,13 @@ config PINCTRL_IMXRT1050
help
Say Y here to enable the imxrt1050 pinctrl driver
+config PINCTRL_IMX91
+ tristate "IMX91 pinctrl driver"
+ depends on ARCH_MXC
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx91 pinctrl driver
+
config PINCTRL_IMX93
tristate "IMX93 pinctrl driver"
depends on ARCH_MXC
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 647dff060477..d27085c2b4c4 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -2,6 +2,7 @@
# Freescale pin control drivers
obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
obj-$(CONFIG_PINCTRL_IMX_SCU) += pinctrl-scu.o
+obj-$(CONFIG_PINCTRL_IMX_SCMI) += pinctrl-imx-scmi.o
obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
obj-$(CONFIG_PINCTRL_IMX1) += pinctrl-imx1.o
obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o
@@ -25,6 +26,7 @@ obj-$(CONFIG_PINCTRL_IMX8QM) += pinctrl-imx8qm.o
obj-$(CONFIG_PINCTRL_IMX8QXP) += pinctrl-imx8qxp.o
obj-$(CONFIG_PINCTRL_IMX8DXL) += pinctrl-imx8dxl.o
obj-$(CONFIG_PINCTRL_IMX8ULP) += pinctrl-imx8ulp.o
+obj-$(CONFIG_PINCTRL_IMX91) += pinctrl-imx91.o
obj-$(CONFIG_PINCTRL_IMX93) += pinctrl-imx93.o
obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
new file mode 100644
index 000000000000..2991047535bc
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Power Interface (SCMI) Protocol based i.MX pinctrl driver
+ *
+ * Copyright 2024 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/scmi_protocol.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "../pinctrl-utils.h"
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+
+#define DRV_NAME "scmi-pinctrl-imx"
+
+struct scmi_pinctrl_imx {
+ struct device *dev;
+ struct scmi_protocol_handle *ph;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pctl_desc;
+ const struct scmi_pinctrl_proto_ops *ops;
+};
+
+/* SCMI pin control types, aligned with SCMI firmware */
+#define IMX_SCMI_NUM_CFG 4
+#define IMX_SCMI_PIN_MUX 192
+#define IMX_SCMI_PIN_CONFIG 193
+#define IMX_SCMI_PIN_DAISY_ID 194
+#define IMX_SCMI_PIN_DAISY_CFG 195
+
+#define IMX_SCMI_NO_PAD_CTL BIT(31)
+#define IMX_SCMI_PAD_SION BIT(30)
+#define IMX_SCMI_IOMUXC_CONFIG_SION BIT(4)
+
+#define IMX_SCMI_PIN_SIZE 24
+
+#define IMX95_DAISY_OFF 0x408
+
+static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct pinctrl_map *new_map;
+ const __be32 *list;
+ unsigned long *configs = NULL;
+ unsigned long cfg[IMX_SCMI_NUM_CFG];
+ int map_num, size, pin_size, pin_id, num_pins;
+ int mux_reg, conf_reg, input_reg, mux_val, conf_val, input_val;
+ int i, j;
+ uint32_t ncfg;
+ static uint32_t daisy_off;
+
+ if (!daisy_off) {
+ if (of_machine_is_compatible("fsl,imx95")) {
+ daisy_off = IMX95_DAISY_OFF;
+ } else {
+ dev_err(pctldev->dev, "platform does not support SCMI pinctrl\n");
+ return -EINVAL;
+ }
+ }
+
+ list = of_get_property(np, "fsl,pins", &size);
+ if (!list) {
+ dev_err(pctldev->dev, "no fsl,pins property in node %pOF\n", np);
+ return -EINVAL;
+ }
+
+ pin_size = IMX_SCMI_PIN_SIZE;
+
+ if (!size || size % pin_size) {
+ dev_err(pctldev->dev, "Invalid fsl,pins or pins property in node %pOF\n", np);
+ return -EINVAL;
+ }
+
+ num_pins = size / pin_size;
+ map_num = num_pins;
+
+ new_map = kmalloc_array(map_num, sizeof(struct pinctrl_map),
+ GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+
+ *map = new_map;
+ *num_maps = map_num;
+
+ /* create config map */
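+ /*
+ * Each fsl,pins entry is six u32 cells (IMX_SCMI_PIN_SIZE bytes):
+ * <mux_reg conf_reg input_reg mux_val input_val conf_val>.
+ */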
+ for (i = 0; i < num_pins; i++) {
+ j = 0;
+ ncfg = IMX_SCMI_NUM_CFG;
+ mux_reg = be32_to_cpu(*list++);
+ conf_reg = be32_to_cpu(*list++);
+ input_reg = be32_to_cpu(*list++);
+ mux_val = be32_to_cpu(*list++);
+ input_val = be32_to_cpu(*list++);
+ conf_val = be32_to_cpu(*list++);
+ if (conf_val & IMX_SCMI_PAD_SION)
+ mux_val |= IMX_SCMI_IOMUXC_CONFIG_SION;
+
+ pin_id = mux_reg / 4;
+
+ cfg[j++] = pinconf_to_config_packed(IMX_SCMI_PIN_MUX, mux_val);
+
+ if (!conf_reg || (conf_val & IMX_SCMI_NO_PAD_CTL))
+ ncfg--;
+ else
+ cfg[j++] = pinconf_to_config_packed(IMX_SCMI_PIN_CONFIG, conf_val);
+
+ if (!input_reg) {
+ ncfg -= 2;
+ } else {
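+ /* Convert the input-select register offset into a daisy register index for the firmware */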
+ cfg[j++] = pinconf_to_config_packed(IMX_SCMI_PIN_DAISY_ID,
+ (input_reg - daisy_off) / 4);
+ cfg[j++] = pinconf_to_config_packed(IMX_SCMI_PIN_DAISY_CFG, input_val);
+ }
+
+ configs = kmemdup(cfg, ncfg * sizeof(unsigned long), GFP_KERNEL);
+
+ new_map[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
+ new_map[i].data.configs.group_or_pin = pin_get_name(pctldev, pin_id);
+ new_map[i].data.configs.configs = configs;
+ new_map[i].data.configs.num_configs = ncfg;
+ }
+
+ return 0;
+}
+
+static void pinctrl_scmi_imx_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned int num_maps)
+{
+ kfree(map);
+}
+
+static const struct pinctrl_ops pinctrl_scmi_imx_pinctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinctrl_scmi_imx_dt_node_to_map,
+ .dt_free_map = pinctrl_scmi_imx_dt_free_map,
+};
+
+static int pinctrl_scmi_imx_func_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ /*
+ * For i.MX SCMI pinctrl, postpone the mux setting
+ * until the config is set, as both can be applied
+ * together in one IPC call
+ */
+ return 0;
+}
+
+static const struct pinmux_ops pinctrl_scmi_imx_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = pinctrl_scmi_imx_func_set_mux,
+};
+
+static int pinctrl_scmi_imx_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ int ret;
+ struct scmi_pinctrl_imx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ u32 config_type, val;
+
+ if (!config)
+ return -EINVAL;
+
+ config_type = pinconf_to_config_param(*config);
+
+ ret = pmx->ops->settings_get_one(pmx->ph, pin, PIN_TYPE, config_type, &val);
+ /* Convert SCMI error code to PINCTRL expected error code */
+ if (ret == -EOPNOTSUPP)
+ return -ENOTSUPP;
+ if (ret)
+ return ret;
+
+ *config = pinconf_to_config_packed(config_type, val);
+
+ dev_dbg(pmx->dev, "pin:%s, conf:0x%x", pin_get_name(pctldev, pin), val);
+
+ return 0;
+}
+
+static int pinctrl_scmi_imx_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct scmi_pinctrl_imx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum scmi_pinctrl_conf_type config_type[IMX_SCMI_NUM_CFG];
+ u32 config_value[IMX_SCMI_NUM_CFG];
+ enum scmi_pinctrl_conf_type *p_config_type = config_type;
+ u32 *p_config_value = config_value;
+ int ret;
+ int i;
+
+ if (!configs || !num_configs)
+ return -EINVAL;
+
+ if (num_configs > IMX_SCMI_NUM_CFG) {
+ dev_err(pmx->dev, "num_configs(%d) too large\n", num_configs);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_configs; i++) {
+ /* cast to avoid build warning */
+ p_config_type[i] =
+ (enum scmi_pinctrl_conf_type)pinconf_to_config_param(configs[i]);
+ p_config_value[i] = pinconf_to_config_argument(configs[i]);
+
+ dev_dbg(pmx->dev, "pin: %u, type: %u, val: 0x%x\n",
+ pin, p_config_type[i], p_config_value[i]);
+ }
+
+ ret = pmx->ops->settings_conf(pmx->ph, pin, PIN_TYPE, num_configs,
+ p_config_type, p_config_value);
+ if (ret)
+ dev_err(pmx->dev, "Error set config %d\n", ret);
+
+ return ret;
+}
+
+static void pinctrl_scmi_imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int pin_id)
+{
+ unsigned long config = pinconf_to_config_packed(IMX_SCMI_PIN_CONFIG, 0);
+ int ret;
+
+ ret = pinctrl_scmi_imx_pinconf_get(pctldev, pin_id, &config);
+ if (ret)
+ config = 0;
+ else
+ config = pinconf_to_config_argument(config);
+
+ seq_printf(s, "0x%lx", config);
+}
+
+static const struct pinconf_ops pinctrl_scmi_imx_pinconf_ops = {
+ .pin_config_get = pinctrl_scmi_imx_pinconf_get,
+ .pin_config_set = pinctrl_scmi_imx_pinconf_set,
+ .pin_config_dbg_show = pinctrl_scmi_imx_pinconf_dbg_show,
+};
+
+static int
+scmi_pinctrl_imx_get_pins(struct scmi_pinctrl_imx *pmx, struct pinctrl_desc *desc)
+{
+ struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ int ret, i;
+
+ npins = pmx->ops->count_get(pmx->ph, PIN_TYPE);
+ pins = devm_kmalloc_array(pmx->dev, npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < npins; i++) {
+ pins[i].number = i;
+ /* no need to free the name, the firmware driver handles it */
+ ret = pmx->ops->name_get(pmx->ph, i, PIN_TYPE, &pins[i].name);
+ if (ret)
+ return dev_err_probe(pmx->dev, ret,
+ "Can't get name for pin %d", i);
+ }
+
+ desc->npins = npins;
+ desc->pins = pins;
+ dev_dbg(pmx->dev, "got pins %u", npins);
+
+ return 0;
+}
+
+static const char * const scmi_pinctrl_imx_allowlist[] = {
+ "fsl,imx95",
+ NULL
+};
+
+static int scmi_pinctrl_imx_probe(struct scmi_device *sdev)
+{
+ struct device *dev = &sdev->dev;
+ const struct scmi_handle *handle = sdev->handle;
+ struct scmi_pinctrl_imx *pmx;
+ struct scmi_protocol_handle *ph;
+ const struct scmi_pinctrl_proto_ops *pinctrl_ops;
+ int ret;
+
+ if (!handle)
+ return -EINVAL;
+
+ if (!of_machine_compatible_match(scmi_pinctrl_imx_allowlist))
+ return -ENODEV;
+
+ pinctrl_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PINCTRL, &ph);
+ if (IS_ERR(pinctrl_ops))
+ return PTR_ERR(pinctrl_ops);
+
+ pmx = devm_kzalloc(dev, sizeof(*pmx), GFP_KERNEL);
+ if (!pmx)
+ return -ENOMEM;
+
+ pmx->ph = ph;
+ pmx->ops = pinctrl_ops;
+
+ pmx->dev = dev;
+ pmx->pctl_desc.name = DRV_NAME;
+ pmx->pctl_desc.owner = THIS_MODULE;
+ pmx->pctl_desc.pctlops = &pinctrl_scmi_imx_pinctrl_ops;
+ pmx->pctl_desc.pmxops = &pinctrl_scmi_imx_pinmux_ops;
+ pmx->pctl_desc.confops = &pinctrl_scmi_imx_pinconf_ops;
+
+ ret = scmi_pinctrl_imx_get_pins(pmx, &pmx->pctl_desc);
+ if (ret)
+ return ret;
+
+ pmx->dev = &sdev->dev;
+
+ ret = devm_pinctrl_register_and_init(dev, &pmx->pctl_desc, pmx,
+ &pmx->pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register pinctrl\n");
+
+ return pinctrl_enable(pmx->pctldev);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_PINCTRL, "pinctrl-imx" },
+ { }
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_pinctrl_imx_driver = {
+ .name = DRV_NAME,
+ .probe = scmi_pinctrl_imx_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_pinctrl_imx_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("i.MX SCMI pin controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 2d3d80921c0d..9c2680df082c 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -266,7 +266,7 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
npins = grp->grp.npins;
dev_dbg(ipctl->dev, "enable function %s group %s\n",
- func->name, grp->grp.name);
+ func->func.name, grp->grp.name);
for (i = 0; i < npins; i++) {
/*
@@ -580,7 +580,6 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
u32 index)
{
struct pinctrl_dev *pctl = ipctl->pctl;
- struct device_node *child;
struct function_desc *func;
struct group_desc *grp;
const char **group_names;
@@ -593,29 +592,27 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
return -EINVAL;
/* Initialise function */
- func->name = np->name;
- func->num_group_names = of_get_child_count(np);
- if (func->num_group_names == 0) {
+ func->func.name = np->name;
+ func->func.ngroups = of_get_child_count(np);
+ if (func->func.ngroups == 0) {
dev_info(ipctl->dev, "no groups defined in %pOF\n", np);
return -EINVAL;
}
- group_names = devm_kcalloc(ipctl->dev, func->num_group_names,
- sizeof(char *), GFP_KERNEL);
+ group_names = devm_kcalloc(ipctl->dev, func->func.ngroups,
+ sizeof(*func->func.groups), GFP_KERNEL);
if (!group_names)
return -ENOMEM;
i = 0;
- for_each_child_of_node(np, child)
+ for_each_child_of_node_scoped(np, child)
group_names[i++] = child->name;
- func->group_names = group_names;
+ func->func.groups = group_names;
i = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
grp = devm_kzalloc(ipctl->dev, sizeof(*grp), GFP_KERNEL);
- if (!grp) {
- of_node_put(child);
+ if (!grp)
return -ENOMEM;
- }
mutex_lock(&ipctl->mutex);
radix_tree_insert(&pctl->pin_group_tree,
@@ -635,21 +632,13 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
*/
static bool imx_pinctrl_dt_is_flat_functions(struct device_node *np)
{
- struct device_node *function_np;
- struct device_node *pinctrl_np;
-
- for_each_child_of_node(np, function_np) {
- if (of_property_read_bool(function_np, "fsl,pins")) {
- of_node_put(function_np);
+ for_each_child_of_node_scoped(np, function_np) {
+ if (of_property_read_bool(function_np, "fsl,pins"))
return true;
- }
- for_each_child_of_node(function_np, pinctrl_np) {
- if (of_property_read_bool(pinctrl_np, "fsl,pins")) {
- of_node_put(pinctrl_np);
- of_node_put(function_np);
+ for_each_child_of_node_scoped(function_np, pinctrl_np) {
+ if (of_property_read_bool(pinctrl_np, "fsl,pins"))
return false;
- }
}
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 90c696046b38..af1ccfc90bff 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -508,7 +508,6 @@ static int imx1_pinctrl_parse_functions(struct device_node *np,
struct imx1_pinctrl_soc_info *info,
u32 index)
{
- struct device_node *child;
struct imx1_pmx_func *func;
struct imx1_pin_group *grp;
int ret;
@@ -531,14 +530,12 @@ static int imx1_pinctrl_parse_functions(struct device_node *np,
if (!func->groups)
return -ENOMEM;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
func->groups[i] = child->name;
grp = &info->groups[grp_index++];
ret = imx1_pinctrl_parse_groups(child, grp, info, i++);
- if (ret == -ENOMEM) {
- of_node_put(child);
+ if (ret == -ENOMEM)
return ret;
- }
}
return 0;
@@ -548,7 +545,6 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev,
struct imx1_pinctrl *pctl, struct imx1_pinctrl_soc_info *info)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *child;
int ret;
u32 nfuncs = 0;
u32 ngroups = 0;
@@ -557,7 +553,7 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev,
if (!np)
return -ENODEV;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
++nfuncs;
ngroups += of_get_child_count(child);
}
@@ -579,12 +575,10 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev,
if (!info->functions || !info->groups)
return -ENOMEM;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = imx1_pinctrl_parse_functions(child, info, ifunc++);
- if (ret == -ENOMEM) {
- of_node_put(child);
+ if (ret == -ENOMEM)
return -ENOMEM;
- }
}
return 0;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx91.c b/drivers/pinctrl/freescale/pinctrl-imx91.c
new file mode 100644
index 000000000000..5421141c586a
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx91.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+enum imx91_pads {
+ IMX91_PAD_DAP_TDI = 0,
+ IMX91_PAD_DAP_TMS_SWDIO = 1,
+ IMX91_PAD_DAP_TCLK_SWCLK = 2,
+ IMX91_PAD_DAP_TDO_TRACESWO = 3,
+ IMX91_PAD_GPIO_IO00 = 4,
+ IMX91_PAD_GPIO_IO01 = 5,
+ IMX91_PAD_GPIO_IO02 = 6,
+ IMX91_PAD_GPIO_IO03 = 7,
+ IMX91_PAD_GPIO_IO04 = 8,
+ IMX91_PAD_GPIO_IO05 = 9,
+ IMX91_PAD_GPIO_IO06 = 10,
+ IMX91_PAD_GPIO_IO07 = 11,
+ IMX91_PAD_GPIO_IO08 = 12,
+ IMX91_PAD_GPIO_IO09 = 13,
+ IMX91_PAD_GPIO_IO10 = 14,
+ IMX91_PAD_GPIO_IO11 = 15,
+ IMX91_PAD_GPIO_IO12 = 16,
+ IMX91_PAD_GPIO_IO13 = 17,
+ IMX91_PAD_GPIO_IO14 = 18,
+ IMX91_PAD_GPIO_IO15 = 19,
+ IMX91_PAD_GPIO_IO16 = 20,
+ IMX91_PAD_GPIO_IO17 = 21,
+ IMX91_PAD_GPIO_IO18 = 22,
+ IMX91_PAD_GPIO_IO19 = 23,
+ IMX91_PAD_GPIO_IO20 = 24,
+ IMX91_PAD_GPIO_IO21 = 25,
+ IMX91_PAD_GPIO_IO22 = 26,
+ IMX91_PAD_GPIO_IO23 = 27,
+ IMX91_PAD_GPIO_IO24 = 28,
+ IMX91_PAD_GPIO_IO25 = 29,
+ IMX91_PAD_GPIO_IO26 = 30,
+ IMX91_PAD_GPIO_IO27 = 31,
+ IMX91_PAD_GPIO_IO28 = 32,
+ IMX91_PAD_GPIO_IO29 = 33,
+ IMX91_PAD_CCM_CLKO1 = 34,
+ IMX91_PAD_CCM_CLKO2 = 35,
+ IMX91_PAD_CCM_CLKO3 = 36,
+ IMX91_PAD_CCM_CLKO4 = 37,
+ IMX91_PAD_ENET1_MDC = 38,
+ IMX91_PAD_ENET1_MDIO = 39,
+ IMX91_PAD_ENET1_TD3 = 40,
+ IMX91_PAD_ENET1_TD2 = 41,
+ IMX91_PAD_ENET1_TD1 = 42,
+ IMX91_PAD_ENET1_TD0 = 43,
+ IMX91_PAD_ENET1_TX_CTL = 44,
+ IMX91_PAD_ENET1_TXC = 45,
+ IMX91_PAD_ENET1_RX_CTL = 46,
+ IMX91_PAD_ENET1_RXC = 47,
+ IMX91_PAD_ENET1_RD0 = 48,
+ IMX91_PAD_ENET1_RD1 = 49,
+ IMX91_PAD_ENET1_RD2 = 50,
+ IMX91_PAD_ENET1_RD3 = 51,
+ IMX91_PAD_ENET2_MDC = 52,
+ IMX91_PAD_ENET2_MDIO = 53,
+ IMX91_PAD_ENET2_TD3 = 54,
+ IMX91_PAD_ENET2_TD2 = 55,
+ IMX91_PAD_ENET2_TD1 = 56,
+ IMX91_PAD_ENET2_TD0 = 57,
+ IMX91_PAD_ENET2_TX_CTL = 58,
+ IMX91_PAD_ENET2_TXC = 59,
+ IMX91_PAD_ENET2_RX_CTL = 60,
+ IMX91_PAD_ENET2_RXC = 61,
+ IMX91_PAD_ENET2_RD0 = 62,
+ IMX91_PAD_ENET2_RD1 = 63,
+ IMX91_PAD_ENET2_RD2 = 64,
+ IMX91_PAD_ENET2_RD3 = 65,
+ IMX91_PAD_SD1_CLK = 66,
+ IMX91_PAD_SD1_CMD = 67,
+ IMX91_PAD_SD1_DATA0 = 68,
+ IMX91_PAD_SD1_DATA1 = 69,
+ IMX91_PAD_SD1_DATA2 = 70,
+ IMX91_PAD_SD1_DATA3 = 71,
+ IMX91_PAD_SD1_DATA4 = 72,
+ IMX91_PAD_SD1_DATA5 = 73,
+ IMX91_PAD_SD1_DATA6 = 74,
+ IMX91_PAD_SD1_DATA7 = 75,
+ IMX91_PAD_SD1_STROBE = 76,
+ IMX91_PAD_SD2_VSELECT = 77,
+ IMX91_PAD_SD3_CLK = 78,
+ IMX91_PAD_SD3_CMD = 79,
+ IMX91_PAD_SD3_DATA0 = 80,
+ IMX91_PAD_SD3_DATA1 = 81,
+ IMX91_PAD_SD3_DATA2 = 82,
+ IMX91_PAD_SD3_DATA3 = 83,
+ IMX91_PAD_SD2_CD_B = 84,
+ IMX91_PAD_SD2_CLK = 85,
+ IMX91_PAD_SD2_CMD = 86,
+ IMX91_PAD_SD2_DATA0 = 87,
+ IMX91_PAD_SD2_DATA1 = 88,
+ IMX91_PAD_SD2_DATA2 = 89,
+ IMX91_PAD_SD2_DATA3 = 90,
+ IMX91_PAD_SD2_RESET_B = 91,
+ IMX91_PAD_I2C1_SCL = 92,
+ IMX91_PAD_I2C1_SDA = 93,
+ IMX91_PAD_I2C2_SCL = 94,
+ IMX91_PAD_I2C2_SDA = 95,
+ IMX91_PAD_UART1_RXD = 96,
+ IMX91_PAD_UART1_TXD = 97,
+ IMX91_PAD_UART2_RXD = 98,
+ IMX91_PAD_UART2_TXD = 99,
+ IMX91_PAD_PDM_CLK = 100,
+ IMX91_PAD_PDM_BIT_STREAM0 = 101,
+ IMX91_PAD_PDM_BIT_STREAM1 = 102,
+ IMX91_PAD_SAI1_TXFS = 103,
+ IMX91_PAD_SAI1_TXC = 104,
+ IMX91_PAD_SAI1_TXD0 = 105,
+ IMX91_PAD_SAI1_RXD0 = 106,
+ IMX91_PAD_WDOG_ANY = 107,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx91_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(IMX91_PAD_DAP_TDI),
+ IMX_PINCTRL_PIN(IMX91_PAD_DAP_TMS_SWDIO),
+ IMX_PINCTRL_PIN(IMX91_PAD_DAP_TCLK_SWCLK),
+ IMX_PINCTRL_PIN(IMX91_PAD_DAP_TDO_TRACESWO),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO00),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO01),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO02),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO03),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO04),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO05),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO06),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO07),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO08),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO09),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO10),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO11),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO12),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO13),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO14),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO15),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO16),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO17),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO18),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO19),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO20),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO21),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO22),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO23),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO24),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO25),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO26),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO27),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO28),
+ IMX_PINCTRL_PIN(IMX91_PAD_GPIO_IO29),
+ IMX_PINCTRL_PIN(IMX91_PAD_CCM_CLKO1),
+ IMX_PINCTRL_PIN(IMX91_PAD_CCM_CLKO2),
+ IMX_PINCTRL_PIN(IMX91_PAD_CCM_CLKO3),
+ IMX_PINCTRL_PIN(IMX91_PAD_CCM_CLKO4),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_MDC),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_MDIO),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_TD3),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_TD2),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_TD1),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_TD0),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_TX_CTL),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_TXC),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_RX_CTL),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_RXC),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_RD0),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_RD1),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_RD2),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET1_RD3),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_MDC),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_MDIO),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_TD3),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_TD2),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_TD1),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_TD0),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_TX_CTL),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_TXC),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_RX_CTL),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_RXC),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_RD0),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_RD1),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_RD2),
+ IMX_PINCTRL_PIN(IMX91_PAD_ENET2_RD3),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_CLK),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_CMD),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_DATA0),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_DATA1),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_DATA2),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_DATA3),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_DATA4),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_DATA5),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_DATA6),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_DATA7),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD1_STROBE),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD2_VSELECT),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD3_CLK),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD3_CMD),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD3_DATA0),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD3_DATA1),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD3_DATA2),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD3_DATA3),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD2_CD_B),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD2_CLK),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD2_CMD),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD2_DATA0),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD2_DATA1),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD2_DATA2),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD2_DATA3),
+ IMX_PINCTRL_PIN(IMX91_PAD_SD2_RESET_B),
+ IMX_PINCTRL_PIN(IMX91_PAD_I2C1_SCL),
+ IMX_PINCTRL_PIN(IMX91_PAD_I2C1_SDA),
+ IMX_PINCTRL_PIN(IMX91_PAD_I2C2_SCL),
+ IMX_PINCTRL_PIN(IMX91_PAD_I2C2_SDA),
+ IMX_PINCTRL_PIN(IMX91_PAD_UART1_RXD),
+ IMX_PINCTRL_PIN(IMX91_PAD_UART1_TXD),
+ IMX_PINCTRL_PIN(IMX91_PAD_UART2_RXD),
+ IMX_PINCTRL_PIN(IMX91_PAD_UART2_TXD),
+ IMX_PINCTRL_PIN(IMX91_PAD_PDM_CLK),
+ IMX_PINCTRL_PIN(IMX91_PAD_PDM_BIT_STREAM0),
+ IMX_PINCTRL_PIN(IMX91_PAD_PDM_BIT_STREAM1),
+ IMX_PINCTRL_PIN(IMX91_PAD_SAI1_TXFS),
+ IMX_PINCTRL_PIN(IMX91_PAD_SAI1_TXC),
+ IMX_PINCTRL_PIN(IMX91_PAD_SAI1_TXD0),
+ IMX_PINCTRL_PIN(IMX91_PAD_SAI1_RXD0),
+ IMX_PINCTRL_PIN(IMX91_PAD_WDOG_ANY),
+};
+
+static const struct imx_pinctrl_soc_info imx91_pinctrl_info = {
+ .pins = imx91_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx91_pinctrl_pads),
+ .flags = ZERO_OFFSET_VALID,
+};
+
+static int imx91_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imx91_pinctrl_info);
+}
+
+static const struct of_device_id imx91_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx91-iomuxc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx91_pinctrl_of_match);
+
+static struct platform_driver imx91_pinctrl_driver = {
+ .driver = {
+ .name = "imx91-pinctrl",
+ .of_match_table = imx91_pinctrl_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx91_pinctrl_probe,
+};
+
+static int __init imx91_pinctrl_init(void)
+{
+ return platform_driver_register(&imx91_pinctrl_driver);
+}
+arch_initcall(imx91_pinctrl_init);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX91 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index e77311f26262..edb242d30609 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -413,8 +413,8 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
int ret;
u32 val;
- child = of_get_next_child(np, NULL);
- if (!child) {
+ val = of_get_child_count(np);
+ if (val == 0) {
dev_err(&pdev->dev, "no group is defined\n");
return -ENOENT;
}
@@ -490,16 +490,14 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
/* Get groups for each function */
idxf = 0;
fn = fnull;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (is_mxs_gpio(child))
continue;
if (of_property_read_u32(child, "reg", &val)) {
ret = mxs_pinctrl_parse_group(pdev, child,
idxg++, NULL);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
continue;
}
@@ -509,19 +507,15 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev,
f->ngroups,
sizeof(*f->groups),
GFP_KERNEL);
- if (!f->groups) {
- of_node_put(child);
+ if (!f->groups)
return -ENOMEM;
- }
fn = child->name;
i = 0;
}
ret = mxs_pinctrl_parse_group(pdev, child, idxg++,
&f->groups[i++]);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
}
return 0;
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index d972584c0519..aad4891223d3 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -56,7 +56,7 @@ static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
dev_dbg(pctldev->dev, "enable function %s group %s\n",
- func->name, grp->grp.name);
+ func->func.name, grp->grp.name);
for (i = 0; i < grp->grp.npins; i++) {
const struct mtk_pin_desc *desc;
@@ -620,12 +620,12 @@ static int mtk_build_functions(struct mtk_pinctrl *hw)
int i, err;
for (i = 0; i < hw->soc->nfuncs ; i++) {
- const struct function_desc *func = hw->soc->funcs + i;
+ const struct function_desc *function = hw->soc->funcs + i;
+ const struct pinfunction *func = &function->func;
err = pinmux_generic_add_function(hw->pctrl, func->name,
- func->group_names,
- func->num_group_names,
- func->data);
+ func->groups, func->ngroups,
+ function->data);
if (err < 0) {
dev_err(hw->dev, "Failed to register function %s\n",
func->name);
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.h b/drivers/pinctrl/mediatek/pinctrl-moore.h
index e0313e7a1fe0..229d19561e22 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.h
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.h
@@ -43,6 +43,12 @@
.data = id##_funcs, \
}
+#define PINCTRL_PIN_FUNCTION(_name_, id) \
+ { \
+ .func = PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups)), \
+ .data = NULL, \
+ }
+
int mtk_moore_pinctrl_probe(struct platform_device *pdev,
const struct mtk_pin_soc *soc);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 3c1148d59eff..2dc101991066 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -823,22 +823,22 @@ static const char *mt7622_uart_groups[] = { "uart0_0_tx_rx",
static const char *mt7622_wdt_groups[] = { "watchdog", };
static const struct function_desc mt7622_functions[] = {
- {"antsel", mt7622_antsel_groups, ARRAY_SIZE(mt7622_antsel_groups)},
- {"emmc", mt7622_emmc_groups, ARRAY_SIZE(mt7622_emmc_groups)},
- {"eth", mt7622_ethernet_groups, ARRAY_SIZE(mt7622_ethernet_groups)},
- {"i2c", mt7622_i2c_groups, ARRAY_SIZE(mt7622_i2c_groups)},
- {"i2s", mt7622_i2s_groups, ARRAY_SIZE(mt7622_i2s_groups)},
- {"ir", mt7622_ir_groups, ARRAY_SIZE(mt7622_ir_groups)},
- {"led", mt7622_led_groups, ARRAY_SIZE(mt7622_led_groups)},
- {"flash", mt7622_flash_groups, ARRAY_SIZE(mt7622_flash_groups)},
- {"pcie", mt7622_pcie_groups, ARRAY_SIZE(mt7622_pcie_groups)},
- {"pmic", mt7622_pmic_bus_groups, ARRAY_SIZE(mt7622_pmic_bus_groups)},
- {"pwm", mt7622_pwm_groups, ARRAY_SIZE(mt7622_pwm_groups)},
- {"sd", mt7622_sd_groups, ARRAY_SIZE(mt7622_sd_groups)},
- {"spi", mt7622_spic_groups, ARRAY_SIZE(mt7622_spic_groups)},
- {"tdm", mt7622_tdm_groups, ARRAY_SIZE(mt7622_tdm_groups)},
- {"uart", mt7622_uart_groups, ARRAY_SIZE(mt7622_uart_groups)},
- {"watchdog", mt7622_wdt_groups, ARRAY_SIZE(mt7622_wdt_groups)},
+ PINCTRL_PIN_FUNCTION("antsel", mt7622_antsel),
+ PINCTRL_PIN_FUNCTION("emmc", mt7622_emmc),
+ PINCTRL_PIN_FUNCTION("eth", mt7622_ethernet),
+ PINCTRL_PIN_FUNCTION("i2c", mt7622_i2c),
+ PINCTRL_PIN_FUNCTION("i2s", mt7622_i2s),
+ PINCTRL_PIN_FUNCTION("ir", mt7622_ir),
+ PINCTRL_PIN_FUNCTION("led", mt7622_led),
+ PINCTRL_PIN_FUNCTION("flash", mt7622_flash),
+ PINCTRL_PIN_FUNCTION("pcie", mt7622_pcie),
+ PINCTRL_PIN_FUNCTION("pmic", mt7622_pmic_bus),
+ PINCTRL_PIN_FUNCTION("pwm", mt7622_pwm),
+ PINCTRL_PIN_FUNCTION("sd", mt7622_sd),
+ PINCTRL_PIN_FUNCTION("spi", mt7622_spic),
+ PINCTRL_PIN_FUNCTION("tdm", mt7622_tdm),
+ PINCTRL_PIN_FUNCTION("uart", mt7622_uart),
+ PINCTRL_PIN_FUNCTION("watchdog", mt7622_wdt),
};
static const struct mtk_eint_hw mt7622_eint_hw = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
index 699977074697..3e59eada2825 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7623.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -1341,27 +1341,27 @@ static const char *mt7623_uart_groups[] = { "uart0_0_txd_rxd",
static const char *mt7623_wdt_groups[] = { "watchdog_0", "watchdog_1", };
static const struct function_desc mt7623_functions[] = {
- {"audck", mt7623_aud_clk_groups, ARRAY_SIZE(mt7623_aud_clk_groups)},
- {"disp", mt7623_disp_pwm_groups, ARRAY_SIZE(mt7623_disp_pwm_groups)},
- {"eth", mt7623_ethernet_groups, ARRAY_SIZE(mt7623_ethernet_groups)},
- {"sdio", mt7623_ext_sdio_groups, ARRAY_SIZE(mt7623_ext_sdio_groups)},
- {"hdmi", mt7623_hdmi_groups, ARRAY_SIZE(mt7623_hdmi_groups)},
- {"i2c", mt7623_i2c_groups, ARRAY_SIZE(mt7623_i2c_groups)},
- {"i2s", mt7623_i2s_groups, ARRAY_SIZE(mt7623_i2s_groups)},
- {"ir", mt7623_ir_groups, ARRAY_SIZE(mt7623_ir_groups)},
- {"lcd", mt7623_lcd_groups, ARRAY_SIZE(mt7623_lcd_groups)},
- {"msdc", mt7623_msdc_groups, ARRAY_SIZE(mt7623_msdc_groups)},
- {"nand", mt7623_nandc_groups, ARRAY_SIZE(mt7623_nandc_groups)},
- {"otg", mt7623_otg_groups, ARRAY_SIZE(mt7623_otg_groups)},
- {"pcie", mt7623_pcie_groups, ARRAY_SIZE(mt7623_pcie_groups)},
- {"pcm", mt7623_pcm_groups, ARRAY_SIZE(mt7623_pcm_groups)},
- {"pwm", mt7623_pwm_groups, ARRAY_SIZE(mt7623_pwm_groups)},
- {"pwrap", mt7623_pwrap_groups, ARRAY_SIZE(mt7623_pwrap_groups)},
- {"rtc", mt7623_rtc_groups, ARRAY_SIZE(mt7623_rtc_groups)},
- {"spi", mt7623_spi_groups, ARRAY_SIZE(mt7623_spi_groups)},
- {"spdif", mt7623_spdif_groups, ARRAY_SIZE(mt7623_spdif_groups)},
- {"uart", mt7623_uart_groups, ARRAY_SIZE(mt7623_uart_groups)},
- {"watchdog", mt7623_wdt_groups, ARRAY_SIZE(mt7623_wdt_groups)},
+ PINCTRL_PIN_FUNCTION("audck", mt7623_aud_clk),
+ PINCTRL_PIN_FUNCTION("disp", mt7623_disp_pwm),
+ PINCTRL_PIN_FUNCTION("eth", mt7623_ethernet),
+ PINCTRL_PIN_FUNCTION("sdio", mt7623_ext_sdio),
+ PINCTRL_PIN_FUNCTION("hdmi", mt7623_hdmi),
+ PINCTRL_PIN_FUNCTION("i2c", mt7623_i2c),
+ PINCTRL_PIN_FUNCTION("i2s", mt7623_i2s),
+ PINCTRL_PIN_FUNCTION("ir", mt7623_ir),
+ PINCTRL_PIN_FUNCTION("lcd", mt7623_lcd),
+ PINCTRL_PIN_FUNCTION("msdc", mt7623_msdc),
+ PINCTRL_PIN_FUNCTION("nand", mt7623_nandc),
+ PINCTRL_PIN_FUNCTION("otg", mt7623_otg),
+ PINCTRL_PIN_FUNCTION("pcie", mt7623_pcie),
+ PINCTRL_PIN_FUNCTION("pcm", mt7623_pcm),
+ PINCTRL_PIN_FUNCTION("pwm", mt7623_pwm),
+ PINCTRL_PIN_FUNCTION("pwrap", mt7623_pwrap),
+ PINCTRL_PIN_FUNCTION("rtc", mt7623_rtc),
+ PINCTRL_PIN_FUNCTION("spi", mt7623_spi),
+ PINCTRL_PIN_FUNCTION("spdif", mt7623_spdif),
+ PINCTRL_PIN_FUNCTION("uart", mt7623_uart),
+ PINCTRL_PIN_FUNCTION("watchdog", mt7623_wdt),
};
static const struct mtk_eint_hw mt7623_eint_hw = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7629.c b/drivers/pinctrl/mediatek/pinctrl-mt7629.c
index 2ce411cb9c6e..98142e8c9801 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7629.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7629.c
@@ -385,16 +385,16 @@ static const char *mt7629_wifi_groups[] = { "wf0_5g", "wf0_2g", };
static const char *mt7629_flash_groups[] = { "snfi", "spi_nor" };
static const struct function_desc mt7629_functions[] = {
- {"eth", mt7629_ethernet_groups, ARRAY_SIZE(mt7629_ethernet_groups)},
- {"i2c", mt7629_i2c_groups, ARRAY_SIZE(mt7629_i2c_groups)},
- {"led", mt7629_led_groups, ARRAY_SIZE(mt7629_led_groups)},
- {"pcie", mt7629_pcie_groups, ARRAY_SIZE(mt7629_pcie_groups)},
- {"pwm", mt7629_pwm_groups, ARRAY_SIZE(mt7629_pwm_groups)},
- {"spi", mt7629_spi_groups, ARRAY_SIZE(mt7629_spi_groups)},
- {"uart", mt7629_uart_groups, ARRAY_SIZE(mt7629_uart_groups)},
- {"watchdog", mt7629_wdt_groups, ARRAY_SIZE(mt7629_wdt_groups)},
- {"wifi", mt7629_wifi_groups, ARRAY_SIZE(mt7629_wifi_groups)},
- {"flash", mt7629_flash_groups, ARRAY_SIZE(mt7629_flash_groups)},
+ PINCTRL_PIN_FUNCTION("eth", mt7629_ethernet),
+ PINCTRL_PIN_FUNCTION("i2c", mt7629_i2c),
+ PINCTRL_PIN_FUNCTION("led", mt7629_led),
+ PINCTRL_PIN_FUNCTION("pcie", mt7629_pcie),
+ PINCTRL_PIN_FUNCTION("pwm", mt7629_pwm),
+ PINCTRL_PIN_FUNCTION("spi", mt7629_spi),
+ PINCTRL_PIN_FUNCTION("uart", mt7629_uart),
+ PINCTRL_PIN_FUNCTION("watchdog", mt7629_wdt),
+ PINCTRL_PIN_FUNCTION("wifi", mt7629_wifi),
+ PINCTRL_PIN_FUNCTION("flash", mt7629_flash),
};
static const struct mtk_eint_hw mt7629_eint_hw = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt76x8.c b/drivers/pinctrl/mediatek/pinctrl-mt76x8.c
index e7d6ad2f62e4..2bc8d4409ca2 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt76x8.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt76x8.c
@@ -37,36 +37,30 @@
static struct mtmips_pmx_func pwm1_grp[] = {
FUNC("sdxc d6", 3, 19, 1),
- FUNC("utif", 2, 19, 1),
- FUNC("gpio", 1, 19, 1),
+ FUNC("pwm1 utif", 2, 19, 1),
FUNC("pwm1", 0, 19, 1),
};
static struct mtmips_pmx_func pwm0_grp[] = {
FUNC("sdxc d7", 3, 18, 1),
- FUNC("utif", 2, 18, 1),
- FUNC("gpio", 1, 18, 1),
+ FUNC("pwm0 utif", 2, 18, 1),
FUNC("pwm0", 0, 18, 1),
};
static struct mtmips_pmx_func uart2_grp[] = {
FUNC("sdxc d5 d4", 3, 20, 2),
- FUNC("pwm", 2, 20, 2),
- FUNC("gpio", 1, 20, 2),
+ FUNC("uart2 pwm", 2, 20, 2),
FUNC("uart2", 0, 20, 2),
};
static struct mtmips_pmx_func uart1_grp[] = {
FUNC("sw_r", 3, 45, 2),
- FUNC("pwm", 2, 45, 2),
- FUNC("gpio", 1, 45, 2),
+ FUNC("uart1 pwm", 2, 45, 2),
FUNC("uart1", 0, 45, 2),
};
static struct mtmips_pmx_func i2c_grp[] = {
- FUNC("-", 3, 4, 2),
FUNC("debug", 2, 4, 2),
- FUNC("gpio", 1, 4, 2),
FUNC("i2c", 0, 4, 2),
};
@@ -76,128 +70,100 @@ static struct mtmips_pmx_func wdt_grp[] = { FUNC("wdt", 0, 38, 1) };
static struct mtmips_pmx_func spi_grp[] = { FUNC("spi", 0, 7, 4) };
static struct mtmips_pmx_func sd_mode_grp[] = {
- FUNC("jtag", 3, 22, 8),
- FUNC("utif", 2, 22, 8),
- FUNC("gpio", 1, 22, 8),
+ FUNC("sdxc jtag", 3, 22, 8),
+ FUNC("sdxc utif", 2, 22, 8),
FUNC("sdxc", 0, 22, 8),
};
static struct mtmips_pmx_func uart0_grp[] = {
- FUNC("-", 3, 12, 2),
- FUNC("-", 2, 12, 2),
- FUNC("gpio", 1, 12, 2),
FUNC("uart0", 0, 12, 2),
};
static struct mtmips_pmx_func i2s_grp[] = {
FUNC("antenna", 3, 0, 4),
FUNC("pcm", 2, 0, 4),
- FUNC("gpio", 1, 0, 4),
FUNC("i2s", 0, 0, 4),
};
static struct mtmips_pmx_func spi_cs1_grp[] = {
- FUNC("-", 3, 6, 1),
- FUNC("refclk", 2, 6, 1),
- FUNC("gpio", 1, 6, 1),
+ FUNC("spi refclk", 2, 6, 1),
FUNC("spi cs1", 0, 6, 1),
};
static struct mtmips_pmx_func spis_grp[] = {
FUNC("pwm_uart2", 3, 14, 4),
- FUNC("utif", 2, 14, 4),
- FUNC("gpio", 1, 14, 4),
+ FUNC("spis utif", 2, 14, 4),
FUNC("spis", 0, 14, 4),
};
static struct mtmips_pmx_func gpio_grp[] = {
FUNC("pcie", 3, 11, 1),
- FUNC("refclk", 2, 11, 1),
- FUNC("gpio", 1, 11, 1),
- FUNC("gpio", 0, 11, 1),
+ FUNC("gpio refclk", 2, 11, 1),
};
static struct mtmips_pmx_func p4led_kn_grp[] = {
- FUNC("jtag", 3, 30, 1),
- FUNC("utif", 2, 30, 1),
- FUNC("gpio", 1, 30, 1),
+ FUNC("p4led_kn jtag", 3, 30, 1),
+ FUNC("p4led_kn utif", 2, 30, 1),
FUNC("p4led_kn", 0, 30, 1),
};
static struct mtmips_pmx_func p3led_kn_grp[] = {
- FUNC("jtag", 3, 31, 1),
- FUNC("utif", 2, 31, 1),
- FUNC("gpio", 1, 31, 1),
+ FUNC("p3led_kn jtag", 3, 31, 1),
+ FUNC("p3led_kn utif", 2, 31, 1),
FUNC("p3led_kn", 0, 31, 1),
};
static struct mtmips_pmx_func p2led_kn_grp[] = {
- FUNC("jtag", 3, 32, 1),
- FUNC("utif", 2, 32, 1),
- FUNC("gpio", 1, 32, 1),
+ FUNC("p2led_kn jtag", 3, 32, 1),
+ FUNC("p2led_kn utif", 2, 32, 1),
FUNC("p2led_kn", 0, 32, 1),
};
static struct mtmips_pmx_func p1led_kn_grp[] = {
- FUNC("jtag", 3, 33, 1),
- FUNC("utif", 2, 33, 1),
- FUNC("gpio", 1, 33, 1),
+ FUNC("p1led_kn jtag", 3, 33, 1),
+ FUNC("p1led_kn utif", 2, 33, 1),
FUNC("p1led_kn", 0, 33, 1),
};
static struct mtmips_pmx_func p0led_kn_grp[] = {
- FUNC("jtag", 3, 34, 1),
- FUNC("rsvd", 2, 34, 1),
- FUNC("gpio", 1, 34, 1),
+ FUNC("p0led_kn jtag", 3, 34, 1),
FUNC("p0led_kn", 0, 34, 1),
};
static struct mtmips_pmx_func wled_kn_grp[] = {
- FUNC("rsvd", 3, 35, 1),
- FUNC("rsvd", 2, 35, 1),
- FUNC("gpio", 1, 35, 1),
FUNC("wled_kn", 0, 35, 1),
};
static struct mtmips_pmx_func p4led_an_grp[] = {
- FUNC("jtag", 3, 39, 1),
- FUNC("utif", 2, 39, 1),
- FUNC("gpio", 1, 39, 1),
+ FUNC("p4led_an jtag", 3, 39, 1),
+ FUNC("p4led_an utif", 2, 39, 1),
FUNC("p4led_an", 0, 39, 1),
};
static struct mtmips_pmx_func p3led_an_grp[] = {
- FUNC("jtag", 3, 40, 1),
- FUNC("utif", 2, 40, 1),
- FUNC("gpio", 1, 40, 1),
+ FUNC("p3led_an jtag", 3, 40, 1),
+ FUNC("p3led_an utif", 2, 40, 1),
FUNC("p3led_an", 0, 40, 1),
};
static struct mtmips_pmx_func p2led_an_grp[] = {
- FUNC("jtag", 3, 41, 1),
- FUNC("utif", 2, 41, 1),
- FUNC("gpio", 1, 41, 1),
+ FUNC("p2led_an jtag", 3, 41, 1),
+ FUNC("p2led_an utif", 2, 41, 1),
FUNC("p2led_an", 0, 41, 1),
};
static struct mtmips_pmx_func p1led_an_grp[] = {
- FUNC("jtag", 3, 42, 1),
- FUNC("utif", 2, 42, 1),
- FUNC("gpio", 1, 42, 1),
+ FUNC("p1led_an jtag", 3, 42, 1),
+ FUNC("p1led_an utif", 2, 42, 1),
FUNC("p1led_an", 0, 42, 1),
};
static struct mtmips_pmx_func p0led_an_grp[] = {
- FUNC("jtag", 3, 43, 1),
- FUNC("rsvd", 2, 43, 1),
- FUNC("gpio", 1, 43, 1),
+ FUNC("p0led_an jtag", 3, 43, 1),
FUNC("p0led_an", 0, 43, 1),
};
static struct mtmips_pmx_func wled_an_grp[] = {
- FUNC("rsvd", 3, 44, 1),
- FUNC("rsvd", 2, 44, 1),
- FUNC("gpio", 1, 44, 1),
FUNC("wled_an", 0, 44, 1),
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7981.c b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
index ef6123765885..83092be5b614 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7981.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
@@ -978,23 +978,23 @@ static const char *mt7981_ethernet_groups[] = { "smi_mdc_mdio", "gbe_ext_mdc_mdi
static const char *mt7981_ant_groups[] = { "ant_sel", };
static const struct function_desc mt7981_functions[] = {
- {"wa_aice", mt7981_wa_aice_groups, ARRAY_SIZE(mt7981_wa_aice_groups)},
- {"dfd", mt7981_dfd_groups, ARRAY_SIZE(mt7981_dfd_groups)},
- {"jtag", mt7981_jtag_groups, ARRAY_SIZE(mt7981_jtag_groups)},
- {"pta", mt7981_pta_groups, ARRAY_SIZE(mt7981_pta_groups)},
- {"pcm", mt7981_pcm_groups, ARRAY_SIZE(mt7981_pcm_groups)},
- {"udi", mt7981_udi_groups, ARRAY_SIZE(mt7981_udi_groups)},
- {"usb", mt7981_usb_groups, ARRAY_SIZE(mt7981_usb_groups)},
- {"ant", mt7981_ant_groups, ARRAY_SIZE(mt7981_ant_groups)},
- {"eth", mt7981_ethernet_groups, ARRAY_SIZE(mt7981_ethernet_groups)},
- {"i2c", mt7981_i2c_groups, ARRAY_SIZE(mt7981_i2c_groups)},
- {"led", mt7981_led_groups, ARRAY_SIZE(mt7981_led_groups)},
- {"pwm", mt7981_pwm_groups, ARRAY_SIZE(mt7981_pwm_groups)},
- {"spi", mt7981_spi_groups, ARRAY_SIZE(mt7981_spi_groups)},
- {"uart", mt7981_uart_groups, ARRAY_SIZE(mt7981_uart_groups)},
- {"watchdog", mt7981_wdt_groups, ARRAY_SIZE(mt7981_wdt_groups)},
- {"flash", mt7981_flash_groups, ARRAY_SIZE(mt7981_flash_groups)},
- {"pcie", mt7981_pcie_groups, ARRAY_SIZE(mt7981_pcie_groups)},
+ PINCTRL_PIN_FUNCTION("wa_aice", mt7981_wa_aice),
+ PINCTRL_PIN_FUNCTION("dfd", mt7981_dfd),
+ PINCTRL_PIN_FUNCTION("jtag", mt7981_jtag),
+ PINCTRL_PIN_FUNCTION("pta", mt7981_pta),
+ PINCTRL_PIN_FUNCTION("pcm", mt7981_pcm),
+ PINCTRL_PIN_FUNCTION("udi", mt7981_udi),
+ PINCTRL_PIN_FUNCTION("usb", mt7981_usb),
+ PINCTRL_PIN_FUNCTION("ant", mt7981_ant),
+ PINCTRL_PIN_FUNCTION("eth", mt7981_ethernet),
+ PINCTRL_PIN_FUNCTION("i2c", mt7981_i2c),
+ PINCTRL_PIN_FUNCTION("led", mt7981_led),
+ PINCTRL_PIN_FUNCTION("pwm", mt7981_pwm),
+ PINCTRL_PIN_FUNCTION("spi", mt7981_spi),
+ PINCTRL_PIN_FUNCTION("uart", mt7981_uart),
+ PINCTRL_PIN_FUNCTION("watchdog", mt7981_wdt),
+ PINCTRL_PIN_FUNCTION("flash", mt7981_flash),
+ PINCTRL_PIN_FUNCTION("pcie", mt7981_pcie),
};
static const struct mtk_eint_hw mt7981_eint_hw = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
index 39e80fa644c1..5816b5fdb7ca 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
@@ -879,18 +879,18 @@ static const char *mt7986_wdt_groups[] = { "watchdog", };
static const char *mt7986_wf_groups[] = { "wf_2g", "wf_5g", "wf_dbdc", };
static const struct function_desc mt7986_functions[] = {
- {"audio", mt7986_audio_groups, ARRAY_SIZE(mt7986_audio_groups)},
- {"emmc", mt7986_emmc_groups, ARRAY_SIZE(mt7986_emmc_groups)},
- {"eth", mt7986_ethernet_groups, ARRAY_SIZE(mt7986_ethernet_groups)},
- {"i2c", mt7986_i2c_groups, ARRAY_SIZE(mt7986_i2c_groups)},
- {"led", mt7986_led_groups, ARRAY_SIZE(mt7986_led_groups)},
- {"flash", mt7986_flash_groups, ARRAY_SIZE(mt7986_flash_groups)},
- {"pcie", mt7986_pcie_groups, ARRAY_SIZE(mt7986_pcie_groups)},
- {"pwm", mt7986_pwm_groups, ARRAY_SIZE(mt7986_pwm_groups)},
- {"spi", mt7986_spi_groups, ARRAY_SIZE(mt7986_spi_groups)},
- {"uart", mt7986_uart_groups, ARRAY_SIZE(mt7986_uart_groups)},
- {"watchdog", mt7986_wdt_groups, ARRAY_SIZE(mt7986_wdt_groups)},
- {"wifi", mt7986_wf_groups, ARRAY_SIZE(mt7986_wf_groups)},
+ PINCTRL_PIN_FUNCTION("audio", mt7986_audio),
+ PINCTRL_PIN_FUNCTION("emmc", mt7986_emmc),
+ PINCTRL_PIN_FUNCTION("eth", mt7986_ethernet),
+ PINCTRL_PIN_FUNCTION("i2c", mt7986_i2c),
+ PINCTRL_PIN_FUNCTION("led", mt7986_led),
+ PINCTRL_PIN_FUNCTION("flash", mt7986_flash),
+ PINCTRL_PIN_FUNCTION("pcie", mt7986_pcie),
+ PINCTRL_PIN_FUNCTION("pwm", mt7986_pwm),
+ PINCTRL_PIN_FUNCTION("spi", mt7986_spi),
+ PINCTRL_PIN_FUNCTION("uart", mt7986_uart),
+ PINCTRL_PIN_FUNCTION("watchdog", mt7986_wdt),
+ PINCTRL_PIN_FUNCTION("wifi", mt7986_wf),
};
static const struct mtk_eint_hw mt7986a_eint_hw = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index d39afc122516..91edb539925a 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -621,7 +621,6 @@ static int mtk_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config,
struct pinctrl_map **map, unsigned *num_maps)
{
- struct device_node *np;
unsigned reserved_maps;
int ret;
@@ -629,12 +628,11 @@ static int mtk_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
*num_maps = 0;
reserved_maps = 0;
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
pinctrl_utils_free_map(pctldev, *map, *num_maps);
- of_node_put(np);
return ret;
}
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index b19bc391705e..e12316c42698 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -536,7 +536,6 @@ static int mtk_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct pinctrl_map **map,
unsigned *num_maps)
{
- struct device_node *np;
unsigned reserved_maps;
int ret;
@@ -544,13 +543,12 @@ static int mtk_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
*num_maps = 0;
reserved_maps = 0;
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps,
num_maps);
if (ret < 0) {
pinctrl_utils_free_map(pctldev, *map, *num_maps);
- of_node_put(np);
return ret;
}
}
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
index 50a87d9618a8..d2ac9ca72a3e 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -936,4 +936,5 @@ static struct platform_driver meson_a1_pinctrl_driver = {
};
module_platform_driver(meson_a1_pinctrl_driver);
+MODULE_DESCRIPTION("Amlogic Meson A1 SoC pinctrl driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
index ae3f8d0da05f..cad411d90727 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg-pmx.c
@@ -117,4 +117,5 @@ const struct pinmux_ops meson_axg_pmx_ops = {
};
EXPORT_SYMBOL_GPL(meson_axg_pmx_ops);
+MODULE_DESCRIPTION("Amlogic Meson AXG second generation pinmux driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 6667c9d0238f..8f4e7154b73f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -1091,4 +1091,5 @@ static struct platform_driver meson_axg_pinctrl_driver = {
};
module_platform_driver(meson_axg_pinctrl_driver);
+MODULE_DESCRIPTION("Amlogic Meson AXG pinctrl driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 2c17891ba6a9..32830269a5b4 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -1426,4 +1426,5 @@ static struct platform_driver meson_g12a_pinctrl_driver = {
};
module_platform_driver(meson_g12a_pinctrl_driver);
+MODULE_DESCRIPTION("Amlogic Meson G12A SoC pinctrl driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index f51fc3939252..2867f397fec6 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -910,4 +910,5 @@ static struct platform_driver meson_gxbb_pinctrl_driver = {
},
};
module_platform_driver(meson_gxbb_pinctrl_driver);
+MODULE_DESCRIPTION("Amlogic Meson GXBB pinctrl driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 51408996255b..a2f25fa02852 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -871,4 +871,5 @@ static struct platform_driver meson_gxl_pinctrl_driver = {
},
};
module_platform_driver(meson_gxl_pinctrl_driver);
+MODULE_DESCRIPTION("Amlogic Meson GXL pinctrl driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-s4.c b/drivers/pinctrl/meson/pinctrl-meson-s4.c
index cea77864b880..60c7d5003e8a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-s4.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-s4.c
@@ -1230,4 +1230,5 @@ static struct platform_driver meson_s4_pinctrl_driver = {
};
module_platform_driver(meson_s4_pinctrl_driver);
+MODULE_DESCRIPTION("Amlogic Meson S4 SoC pinctrl driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 524424ee6c4e..ef002b9dd464 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -767,4 +767,5 @@ int meson_pinctrl_probe(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(meson_pinctrl_probe);
+MODULE_DESCRIPTION("Amlogic Meson SoCs core pinctrl driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
index f767b6923f9f..7f22aa0f8e36 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
@@ -101,4 +101,5 @@ const struct pinmux_ops meson8_pmx_ops = {
.gpio_request_enable = meson8_pmx_request_gpio,
};
EXPORT_SYMBOL_GPL(meson8_pmx_ops);
+MODULE_DESCRIPTION("Amlogic Meson SoCs first generation pinmux driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 80e3ac333136..47f62c89955a 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -811,19 +811,17 @@ static int abx500_dt_node_to_map(struct pinctrl_dev *pctldev,
struct pinctrl_map **map, unsigned *num_maps)
{
unsigned reserved_maps;
- struct device_node *np;
int ret;
reserved_maps = 0;
*map = NULL;
*num_maps = 0;
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
ret = abx500_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
pinctrl_utils_free_map(pctldev, *map, *num_maps);
- of_node_put(np);
return ret;
}
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index cb0f0d5a5e45..fa78d5ecc685 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -804,19 +804,17 @@ static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
unsigned int *num_maps)
{
unsigned int reserved_maps;
- struct device_node *np;
int ret;
reserved_maps = 0;
*map = NULL;
*num_maps = 0;
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
pinctrl_utils_free_map(pctldev, *map, *num_maps);
- of_node_put(np);
return ret;
}
}
diff --git a/drivers/pinctrl/nuvoton/Kconfig b/drivers/pinctrl/nuvoton/Kconfig
index 2abbfcec1fae..7eadaaf48d6e 100644
--- a/drivers/pinctrl/nuvoton/Kconfig
+++ b/drivers/pinctrl/nuvoton/Kconfig
@@ -45,3 +45,22 @@ config PINCTRL_NPCM8XX
Say Y or M here to enable pin controller and GPIO support for
the Nuvoton NPCM8XX SoC. This is strongly recommended when
building a kernel that will run on this chip.
+
+config PINCTRL_MA35
+ bool
+ depends on (ARCH_MA35 || COMPILE_TEST) && OF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select MFD_SYSCON
+
+config PINCTRL_MA35D1
+ bool "Pinctrl and GPIO driver for Nuvoton MA35D1"
+ depends on (ARCH_MA35 || COMPILE_TEST) && OF
+ select PINCTRL_MA35
+ help
+ Say Y here to enable pin controller and GPIO support
+ for the Nuvoton MA35D1 SoC.
diff --git a/drivers/pinctrl/nuvoton/Makefile b/drivers/pinctrl/nuvoton/Makefile
index 08031eab0af6..346c5082bc60 100644
--- a/drivers/pinctrl/nuvoton/Makefile
+++ b/drivers/pinctrl/nuvoton/Makefile
@@ -4,3 +4,5 @@
obj-$(CONFIG_PINCTRL_WPCM450) += pinctrl-wpcm450.o
obj-$(CONFIG_PINCTRL_NPCM7XX) += pinctrl-npcm7xx.o
obj-$(CONFIG_PINCTRL_NPCM8XX) += pinctrl-npcm8xx.o
+obj-$(CONFIG_PINCTRL_MA35) += pinctrl-ma35.o
+obj-$(CONFIG_PINCTRL_MA35D1) += pinctrl-ma35d1.o
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
new file mode 100644
index 000000000000..1fa00a23534a
--- /dev/null
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
@@ -0,0 +1,1187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ *
+ * Author: Shan-Chun Hung <schung@nuvoton.com>
+ * * Jacky Huang <ychuang3@nuvoton.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include "../core.h"
+#include "../pinconf.h"
+#include "pinctrl-ma35.h"
+
+#define MA35_MFP_REG_BASE 0x80
+#define MA35_MFP_REG_SZ_PER_BANK 8
+#define MA35_MFP_BITS_PER_PORT 4
+
+#define MA35_GPIO_BANK_MAX 14
+#define MA35_GPIO_PORT_MAX 16
+
+/* GPIO control registers */
+#define MA35_GP_REG_MODE 0x00
+#define MA35_GP_REG_DINOFF 0x04
+#define MA35_GP_REG_DOUT 0x08
+#define MA35_GP_REG_DATMSK 0x0c
+#define MA35_GP_REG_PIN 0x10
+#define MA35_GP_REG_DBEN 0x14
+#define MA35_GP_REG_INTTYPE 0x18
+#define MA35_GP_REG_INTEN 0x1c
+#define MA35_GP_REG_INTSRC 0x20
+#define MA35_GP_REG_SMTEN 0x24
+#define MA35_GP_REG_SLEWCTL 0x28
+#define MA35_GP_REG_SPW 0x2c
+#define MA35_GP_REG_PUSEL 0x30
+#define MA35_GP_REG_DSL 0x38
+#define MA35_GP_REG_DSH 0x3c
+
+/* GPIO mode control */
+#define MA35_GP_MODE_INPUT 0x0
+#define MA35_GP_MODE_OUTPUT 0x1
+#define MA35_GP_MODE_OPEN_DRAIN 0x2
+#define MA35_GP_MODE_QUASI 0x3
+#define MA35_GP_MODE_MASK(n) GENMASK(n * 2 + 1, n * 2)
+
+#define MA35_GP_SLEWCTL_MASK(n) GENMASK(n * 2 + 1, n * 2)
+
+/* GPIO pull-up and pull-down selection control */
+#define MA35_GP_PUSEL_DISABLE 0x0
+#define MA35_GP_PUSEL_PULL_UP 0x1
+#define MA35_GP_PUSEL_PULL_DOWN 0x2
+#define MA35_GP_PUSEL_MASK(n) GENMASK(n * 2 + 1, n * 2)
+
+/*
+ * The MA35_GP_REG_INTEN bits 0 ~ 15 control low-level or falling edge trigger,
+ * while bits 16 ~ 31 control high-level or rising edge trigger.
+ */
+#define MA35_GP_INTEN_L(n) BIT(n)
+#define MA35_GP_INTEN_H(n) BIT(n + 16)
+#define MA35_GP_INTEN_BOTH(n) (MA35_GP_INTEN_H(n) | MA35_GP_INTEN_L(n))
+
+/*
+ * The MA35_GP_REG_DSL register controls ports 0 to 7, while the MA35_GP_REG_DSH
+ * register controls ports 8 to 15. Each port occupies a width of 4 bits, with 3
+ * bits being effective.
+ */
+#define MA35_GP_DS_REG(n) (n < 8 ? MA35_GP_REG_DSL : MA35_GP_REG_DSH)
+#define MA35_GP_DS_MASK(n) GENMASK((n % 8) * 4 + 3, (n % 8) * 4)
+
+#define MVOLT_1800 0
+#define MVOLT_3300 1
+
+/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
+static const char * const gpio_group_name[] = {
+ "gpioa", "gpiob", "gpioc", "gpiod", "gpioe", "gpiof", "gpiog",
+ "gpioh", "gpioi", "gpioj", "gpiok", "gpiol", "gpiom", "gpion",
+};
+
+static const u32 ds_1800mv_tbl[] = {
+ 2900, 4400, 5800, 7300, 8600, 10100, 11500, 13000,
+};
+
+static const u32 ds_3300mv_tbl[] = {
+ 17100, 25600, 34100, 42800, 48000, 56000, 77000, 82000,
+};
+
+struct ma35_pin_func {
+ const char *name;
+ const char **groups;
+ u32 ngroups;
+};
+
+struct ma35_pin_setting {
+ u32 offset;
+ u32 shift;
+ u32 muxval;
+ unsigned long *configs;
+ unsigned int nconfigs;
+};
+
+struct ma35_pin_group {
+ const char *name;
+ unsigned int npins;
+ unsigned int *pins;
+ struct ma35_pin_setting *settings;
+};
+
+struct ma35_pin_bank {
+ void __iomem *reg_base;
+ struct clk *clk;
+ int irq;
+ u8 bank_num;
+ u8 nr_pins;
+ bool valid;
+ const char *name;
+ struct fwnode_handle *fwnode;
+ struct gpio_chip chip;
+ u32 irqtype;
+ u32 irqinten;
+ struct regmap *regmap;
+ struct device *dev;
+};
+
+struct ma35_pin_ctrl {
+ struct ma35_pin_bank *pin_banks;
+ u32 nr_banks;
+ u32 nr_pins;
+};
+
+struct ma35_pinctrl {
+ struct device *dev;
+ struct ma35_pin_ctrl *ctrl;
+ struct pinctrl_dev *pctl;
+ const struct ma35_pinctrl_soc_info *info;
+ struct regmap *regmap;
+ struct ma35_pin_group *groups;
+ unsigned int ngroups;
+ struct ma35_pin_func *functions;
+ unsigned int nfunctions;
+};
+
+static DEFINE_RAW_SPINLOCK(ma35_lock);
+
+static int ma35_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return npctl->ngroups;
+}
+
+static const char *ma35_get_group_name(struct pinctrl_dev *pctldev, unsigned int selector)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return npctl->groups[selector].name;
+}
+
+static int ma35_get_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins, unsigned int *npins)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+
+ if (selector >= npctl->ngroups)
+ return -EINVAL;
+
+ *pins = npctl->groups[selector].pins;
+ *npins = npctl->groups[selector].npins;
+
+ return 0;
+}
+
+static struct ma35_pin_group *ma35_pinctrl_find_group_by_name(
+ const struct ma35_pinctrl *npctl, const char *name)
+{
+ int i;
+
+ for (i = 0; i < npctl->ngroups; i++) {
+ if (!strcmp(npctl->groups[i].name, name))
+ return &npctl->groups[i];
+ }
+ return NULL;
+}
+
+static int ma35_pinctrl_dt_node_to_map_func(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+ struct ma35_pin_group *grp;
+ struct pinctrl_map *new_map;
+ struct device_node *parent;
+ int map_num = 1;
+ int i;
+
+ /*
+ * first find the group of this node and check if we need to create
+ * config maps for pins
+ */
+ grp = ma35_pinctrl_find_group_by_name(npctl, np->name);
+ if (!grp) {
+ dev_err(npctl->dev, "unable to find group for node %s\n", np->name);
+ return -EINVAL;
+ }
+
+ map_num += grp->npins;
+ new_map = devm_kcalloc(pctldev->dev, map_num, sizeof(*new_map), GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+
+ *map = new_map;
+ *num_maps = map_num;
+ /* create mux map */
+ parent = of_get_parent(np);
+ if (!parent)
+ return -EINVAL;
+
+ new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
+ new_map[0].data.mux.function = parent->name;
+ new_map[0].data.mux.group = np->name;
+ of_node_put(parent);
+
+ new_map++;
+ for (i = 0; i < grp->npins; i++) {
+ new_map[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
+ new_map[i].data.configs.group_or_pin = pin_get_name(pctldev, grp->pins[i]);
+ new_map[i].data.configs.configs = grp->settings[i].configs;
+ new_map[i].data.configs.num_configs = grp->settings[i].nconfigs;
+ }
+ dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
+ (*map)->data.mux.function, (*map)->data.mux.group, map_num);
+
+ return 0;
+}
+
+static const struct pinctrl_ops ma35_pctrl_ops = {
+ .get_groups_count = ma35_get_groups_count,
+ .get_group_name = ma35_get_group_name,
+ .get_group_pins = ma35_get_group_pins,
+ .dt_node_to_map = ma35_pinctrl_dt_node_to_map_func,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int ma35_pinmux_get_func_count(struct pinctrl_dev *pctldev)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return npctl->nfunctions;
+}
+
+static const char *ma35_pinmux_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return npctl->functions[selector].name;
+}
+
+static int ma35_pinmux_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ const char *const **groups,
+ unsigned int *const num_groups)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = npctl->functions[function].groups;
+ *num_groups = npctl->functions[function].ngroups;
+
+ return 0;
+}
+
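+/* Apply the group's mux value to the MFP field of every pin in the group. */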
+static int ma35_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
+ unsigned int group)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+ struct ma35_pin_group *grp = &npctl->groups[group];
+ struct ma35_pin_setting *setting = grp->settings;
+ u32 i, regval;
+
+ dev_dbg(npctl->dev, "enable function %s group %s\n",
+ npctl->functions[selector].name, npctl->groups[group].name);
+
+ for (i = 0; i < grp->npins; i++) {
+ regmap_read(npctl->regmap, setting->offset, &regval);
+ regval &= ~GENMASK(setting->shift + MA35_MFP_BITS_PER_PORT - 1,
+ setting->shift);
+ regval |= setting->muxval << setting->shift;
+ regmap_write(npctl->regmap, setting->offset, regval);
+ setting++;
+ }
+ return 0;
+}
+
+static const struct pinmux_ops ma35_pmx_ops = {
+ .get_functions_count = ma35_pinmux_get_func_count,
+ .get_function_name = ma35_pinmux_get_func_name,
+ .get_function_groups = ma35_pinmux_get_func_groups,
+ .set_mux = ma35_pinmux_set_mux,
+ .strict = true,
+};
+
+static void ma35_gpio_set_mode(void __iomem *reg_mode, unsigned int gpio, u32 mode)
+{
+ u32 regval = readl(reg_mode);
+
+ regval &= ~MA35_GP_MODE_MASK(gpio);
+ regval |= field_prep(MA35_GP_MODE_MASK(gpio), mode);
+
+ writel(regval, reg_mode);
+}
+
+static u32 ma35_gpio_get_mode(void __iomem *reg_mode, unsigned int gpio)
+{
+ u32 regval = readl(reg_mode);
+
+ return field_get(MA35_GP_MODE_MASK(gpio), regval);
+}
+
+static int ma35_gpio_core_direction_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(gc);
+ void __iomem *reg_mode = bank->reg_base + MA35_GP_REG_MODE;
+
+ guard(raw_spinlock_irqsave)(&ma35_lock);
+
+ ma35_gpio_set_mode(reg_mode, gpio, MA35_GP_MODE_INPUT);
+
+ return 0;
+}
+
+static int ma35_gpio_core_direction_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(gc);
+ void __iomem *reg_dout = bank->reg_base + MA35_GP_REG_DOUT;
+ void __iomem *reg_mode = bank->reg_base + MA35_GP_REG_MODE;
+ unsigned int regval;
+
+ guard(raw_spinlock_irqsave)(&ma35_lock);
+
+ regval = readl(reg_dout);
+ if (val)
+ regval |= BIT(gpio);
+ else
+ regval &= ~BIT(gpio);
+ writel(regval, reg_dout);
+
+ ma35_gpio_set_mode(reg_mode, gpio, MA35_GP_MODE_OUTPUT);
+
+ return 0;
+}
+
+static int ma35_gpio_core_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(gc);
+ void __iomem *reg_pin = bank->reg_base + MA35_GP_REG_PIN;
+
+ return !!(readl(reg_pin) & BIT(gpio));
+}
+
+static void ma35_gpio_core_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(gc);
+ void __iomem *reg_dout = bank->reg_base + MA35_GP_REG_DOUT;
+ u32 regval;
+
+ if (val)
+ regval = readl(reg_dout) | BIT(gpio);
+ else
+ regval = readl(reg_dout) & ~BIT(gpio);
+
+ writel(regval, reg_dout);
+}
+
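+/*
+ * On GPIO request, clear the pin's 4-bit multi-function (MFP) field so the
+ * pad is switched back to its GPIO function (mux value 0).
+ */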
+static int ma35_gpio_core_to_request(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(gc);
+ u32 reg_offs, bit_offs, regval;
+
+ if (gpio < 8) {
+ /* The MFP low register controls port 0 ~ 7 */
+ reg_offs = bank->bank_num * MA35_MFP_REG_SZ_PER_BANK;
+ bit_offs = gpio * MA35_MFP_BITS_PER_PORT;
+ } else {
+ /* The MFP high register controls port 8 ~ 15 */
+ reg_offs = bank->bank_num * MA35_MFP_REG_SZ_PER_BANK + 4;
+ bit_offs = (gpio - 8) * MA35_MFP_BITS_PER_PORT;
+ }
+
+ regmap_read(bank->regmap, MA35_MFP_REG_BASE + reg_offs, &regval);
+ regval &= ~GENMASK(bit_offs + MA35_MFP_BITS_PER_PORT - 1, bit_offs);
+ regmap_write(bank->regmap, MA35_MFP_REG_BASE + reg_offs, regval);
+
+ return 0;
+}
+
+static void ma35_irq_gpio_ack(struct irq_data *d)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ void __iomem *reg_intsrc = bank->reg_base + MA35_GP_REG_INTSRC;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ writel(BIT(hwirq), reg_intsrc);
+}
+
+static void ma35_irq_gpio_mask(struct irq_data *d)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ void __iomem *reg_ien = bank->reg_base + MA35_GP_REG_INTEN;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 regval;
+
+ regval = readl(reg_ien);
+
+ regval &= ~MA35_GP_INTEN_BOTH(hwirq);
+
+ writel(regval, reg_ien);
+}
+
+static void ma35_irq_gpio_unmask(struct irq_data *d)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ void __iomem *reg_itype = bank->reg_base + MA35_GP_REG_INTTYPE;
+ void __iomem *reg_ien = bank->reg_base + MA35_GP_REG_INTEN;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 bval, regval;
+
+ bval = bank->irqtype & BIT(hwirq);
+ regval = readl(reg_itype);
+ regval &= ~BIT(hwirq);
+ writel(regval | bval, reg_itype);
+
+ bval = bank->irqinten & MA35_GP_INTEN_BOTH(hwirq);
+ regval = readl(reg_ien);
+ regval &= ~MA35_GP_INTEN_BOTH(hwirq);
+ writel(regval | bval, reg_ien);
+}
+
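+/* Cache the per-pin trigger selection and program the bank's INTTYPE and INTEN registers. */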
+static int ma35_irq_irqtype(struct irq_data *d, unsigned int type)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_set_handler_locked(d, handle_edge_irq);
+ bank->irqtype &= ~BIT(hwirq);
+ bank->irqinten |= MA35_GP_INTEN_BOTH(hwirq);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler_locked(d, handle_edge_irq);
+ bank->irqtype &= ~BIT(hwirq);
+ bank->irqinten |= MA35_GP_INTEN_H(hwirq);
+ bank->irqinten &= ~MA35_GP_INTEN_L(hwirq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(d, handle_edge_irq);
+ bank->irqtype &= ~BIT(hwirq);
+ bank->irqinten |= MA35_GP_INTEN_L(hwirq);
+ bank->irqinten &= ~MA35_GP_INTEN_H(hwirq);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel(bank->irqtype, bank->reg_base + MA35_GP_REG_INTTYPE);
+ writel(bank->irqinten, bank->reg_base + MA35_GP_REG_INTEN);
+
+ return 0;
+}
+
+static struct irq_chip ma35_gpio_irqchip = {
+ .name = "MA35-GPIO-IRQ",
+ .irq_disable = ma35_irq_gpio_mask,
+ .irq_enable = ma35_irq_gpio_unmask,
+ .irq_ack = ma35_irq_gpio_ack,
+ .irq_mask = ma35_irq_gpio_mask,
+ .irq_unmask = ma35_irq_gpio_unmask,
+ .irq_set_type = ma35_irq_irqtype,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
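+/* Chained handler for a bank's parent interrupt: dispatch every pending pin flagged in INTSRC. */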
+static void ma35_irq_demux_intgroup(struct irq_desc *desc)
+{
+ struct ma35_pin_bank *bank = gpiochip_get_data(irq_desc_get_handler_data(desc));
+ struct irq_domain *irqdomain = bank->chip.irq.domain;
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long isr;
+ int offset;
+
+ chained_irq_enter(irqchip, desc);
+
+ isr = readl(bank->reg_base + MA35_GP_REG_INTSRC);
+
+ for_each_set_bit(offset, &isr, bank->nr_pins)
+ generic_handle_irq(irq_find_mapping(irqdomain, offset));
+
+ chained_irq_exit(irqchip, desc);
+}
+
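+/*
+ * Register a gpio_chip for each valid bank; banks that provide an interrupt
+ * get a chained handler that demultiplexes to the per-pin interrupts.
+ */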
+static int ma35_gpiolib_register(struct platform_device *pdev, struct ma35_pinctrl *npctl)
+{
+ struct ma35_pin_ctrl *ctrl = npctl->ctrl;
+ struct ma35_pin_bank *bank = ctrl->pin_banks;
+ int ret;
+ int i;
+
+ for (i = 0; i < ctrl->nr_banks; i++, bank++) {
+ if (!bank->valid) {
+ dev_warn(&pdev->dev, "%pfw: bank is not valid\n", bank->fwnode);
+ continue;
+ }
+ bank->irqtype = 0;
+ bank->irqinten = 0;
+ bank->chip.label = bank->name;
+ bank->chip.of_gpio_n_cells = 2;
+ bank->chip.parent = &pdev->dev;
+ bank->chip.request = ma35_gpio_core_to_request;
+ bank->chip.direction_input = ma35_gpio_core_direction_in;
+ bank->chip.direction_output = ma35_gpio_core_direction_out;
+ bank->chip.get = ma35_gpio_core_get;
+ bank->chip.set = ma35_gpio_core_set;
+ bank->chip.base = -1;
+ bank->chip.ngpio = bank->nr_pins;
+ bank->chip.can_sleep = false;
+
+ if (bank->irq > 0) {
+ struct gpio_irq_chip *girq;
+
+ girq = &bank->chip.irq;
+ gpio_irq_chip_set_chip(girq, &ma35_gpio_irqchip);
+ girq->parent_handler = ma35_irq_demux_intgroup;
+ girq->num_parents = 1;
+
+ girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->parents[0] = bank->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ }
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &bank->chip, bank);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register gpio_chip %s, error code: %d\n",
+ bank->chip.label, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
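+/* Map the bank's registers, look up its interrupt, and get and enable its clock. */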
+static int ma35_get_bank_data(struct ma35_pin_bank *bank)
+{
+ bank->reg_base = fwnode_iomap(bank->fwnode, 0);
+ if (!bank->reg_base)
+ return -ENOMEM;
+
+ bank->irq = fwnode_irq_get(bank->fwnode, 0);
+
+ bank->nr_pins = MA35_GPIO_PORT_MAX;
+
+ bank->clk = of_clk_get(to_of_node(bank->fwnode), 0);
+ if (IS_ERR(bank->clk))
+ return PTR_ERR(bank->clk);
+
+ return clk_prepare_enable(bank->clk);
+}
+
+static int ma35_pinctrl_get_soc_data(struct ma35_pinctrl *pctl, struct platform_device *pdev)
+{
+ struct fwnode_handle *child;
+ struct ma35_pin_ctrl *ctrl;
+ struct ma35_pin_bank *bank;
+ int i, id = 0;
+
+ ctrl = pctl->ctrl;
+ ctrl->nr_banks = MA35_GPIO_BANK_MAX;
+
+ ctrl->pin_banks = devm_kcalloc(&pdev->dev, ctrl->nr_banks,
+ sizeof(*ctrl->pin_banks), GFP_KERNEL);
+ if (!ctrl->pin_banks)
+ return -ENOMEM;
+
+ for (i = 0; i < ctrl->nr_banks; i++) {
+ ctrl->pin_banks[i].bank_num = i;
+ ctrl->pin_banks[i].name = gpio_group_name[i];
+ }
+
+ for_each_gpiochip_node(&pdev->dev, child) {
+ bank = &ctrl->pin_banks[id];
+ bank->fwnode = child;
+ bank->regmap = pctl->regmap;
+ bank->dev = &pdev->dev;
+ if (!ma35_get_bank_data(bank))
+ bank->valid = true;
+ id++;
+ }
+ return 0;
+}
+
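+/* Split a global pin number into its GPIO bank index and the pin's offset within that bank. */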
+static void ma35_gpio_cla_port(unsigned int gpio_num, unsigned int *group,
+ unsigned int *num)
+{
+ *group = gpio_num / MA35_GPIO_PORT_MAX;
+ *num = gpio_num % MA35_GPIO_PORT_MAX;
+}
+
+static int ma35_pinconf_set_pull(struct ma35_pinctrl *npctl, unsigned int pin,
+ int pull_up)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 regval, pull_sel = MA35_GP_PUSEL_DISABLE;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_REG_PUSEL);
+ regval &= ~MA35_GP_PUSEL_MASK(port);
+
+ switch (pull_up) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pull_sel = MA35_GP_PUSEL_PULL_UP;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pull_sel = MA35_GP_PUSEL_PULL_DOWN;
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ pull_sel = MA35_GP_PUSEL_DISABLE;
+ break;
+ }
+
+ regval |= field_prep(MA35_GP_PUSEL_MASK(port), pull_sel);
+ writel(regval, base + MA35_GP_REG_PUSEL);
+
+ return 0;
+}
+
+static int ma35_pinconf_get_output(struct ma35_pinctrl *npctl, unsigned int pin)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 mode;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ mode = ma35_gpio_get_mode(base + MA35_GP_REG_MODE, port);
+ if (mode == MA35_GP_MODE_OUTPUT)
+ return 1;
+
+ return 0;
+}
+
+static int ma35_pinconf_get_pull(struct ma35_pinctrl *npctl, unsigned int pin)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 regval, pull_sel;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_REG_PUSEL);
+
+ pull_sel = field_get(MA35_GP_PUSEL_MASK(port), regval);
+
+ switch (pull_sel) {
+ case MA35_GP_PUSEL_PULL_UP:
+ return PIN_CONFIG_BIAS_PULL_UP;
+
+ case MA35_GP_PUSEL_PULL_DOWN:
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+
+ case MA35_GP_PUSEL_DISABLE:
+ return PIN_CONFIG_BIAS_DISABLE;
+ }
+
+ return PIN_CONFIG_BIAS_DISABLE;
+}
+
+static int ma35_pinconf_set_output(struct ma35_pinctrl *npctl, unsigned int pin, bool out)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ ma35_gpio_set_mode(base + MA35_GP_REG_MODE, port, MA35_GP_MODE_OUTPUT);
+
+ return 0;
+}
+
+static int ma35_pinconf_get_power_source(struct ma35_pinctrl *npctl, unsigned int pin)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 regval;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_REG_SPW);
+
+ if (regval & BIT(port))
+ return MVOLT_3300;
+ else
+ return MVOLT_1800;
+}
+
+static int ma35_pinconf_set_power_source(struct ma35_pinctrl *npctl,
+ unsigned int pin, int arg)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 regval;
+
+ if ((arg != MVOLT_1800) && (arg != MVOLT_3300))
+ return -EINVAL;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_REG_SPW);
+
+ if (arg == MVOLT_1800)
+ regval &= ~BIT(port);
+ else
+ regval |= BIT(port);
+
+ writel(regval, base + MA35_GP_REG_SPW);
+
+ return 0;
+}
+
+static int ma35_pinconf_get_drive_strength(struct ma35_pinctrl *npctl, unsigned int pin,
+ u32 *strength)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 regval, ds_val;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_DS_REG(port));
+ ds_val = field_get(MA35_GP_DS_MASK(port), regval);
+
+ if (ma35_pinconf_get_power_source(npctl, pin) == MVOLT_1800)
+ *strength = ds_1800mv_tbl[ds_val];
+ else
+ *strength = ds_3300mv_tbl[ds_val];
+
+ return 0;
+}
+
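+/*
+ * Translate the requested drive strength into the register code from the
+ * table that matches the pin's current I/O power source.
+ */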
+static int ma35_pinconf_set_drive_strength(struct ma35_pinctrl *npctl, unsigned int pin,
+ int strength)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ int i, ds_val = -1;
+ u32 regval;
+
+ if (ma35_pinconf_get_power_source(npctl, pin) == MVOLT_1800) {
+ for (i = 0; i < ARRAY_SIZE(ds_1800mv_tbl); i++) {
+ if (ds_1800mv_tbl[i] == strength) {
+ ds_val = i;
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(ds_3300mv_tbl); i++) {
+ if (ds_3300mv_tbl[i] == strength) {
+ ds_val = i;
+ break;
+ }
+ }
+ }
+ if (ds_val == -1)
+ return -EINVAL;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_DS_REG(port));
+ regval &= ~MA35_GP_DS_MASK(port);
+ regval |= field_prep(MA35_GP_DS_MASK(port), ds_val);
+
+ writel(regval, base + MA35_GP_DS_REG(port));
+
+ return 0;
+}
+
+static int ma35_pinconf_get_schmitt_enable(struct ma35_pinctrl *npctl, unsigned int pin)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 regval;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_REG_SMTEN);
+
+ return !!(regval & BIT(port));
+}
+
+static int ma35_pinconf_set_schmitt(struct ma35_pinctrl *npctl, unsigned int pin, int enable)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 regval;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_REG_SMTEN);
+
+ if (enable)
+ regval |= BIT(port);
+ else
+ regval &= ~BIT(port);
+
+ writel(regval, base + MA35_GP_REG_SMTEN);
+
+ return 0;
+}
+
+static int ma35_pinconf_get_slew_rate(struct ma35_pinctrl *npctl, unsigned int pin)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 regval;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_REG_SLEWCTL);
+
+ return field_get(MA35_GP_SLEWCTL_MASK(port), regval);
+}
+
+static int ma35_pinconf_set_slew_rate(struct ma35_pinctrl *npctl, unsigned int pin, int rate)
+{
+ unsigned int port, group_num;
+ void __iomem *base;
+ u32 regval;
+
+ ma35_gpio_cla_port(pin, &group_num, &port);
+ base = npctl->ctrl->pin_banks[group_num].reg_base;
+
+ regval = readl(base + MA35_GP_REG_SLEWCTL);
+ regval &= ~MA35_GP_SLEWCTL_MASK(port);
+ regval |= field_prep(MA35_GP_SLEWCTL_MASK(port), rate);
+
+ writel(regval, base + MA35_GP_REG_SLEWCTL);
+
+ return 0;
+}
+
+static int ma35_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *config)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 arg;
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (ma35_pinconf_get_pull(npctl, pin) != param)
+ return -EINVAL;
+ arg = 1;
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = ma35_pinconf_get_drive_strength(npctl, pin, &arg);
+ if (ret)
+ return ret;
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ arg = ma35_pinconf_get_schmitt_enable(npctl, pin);
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ arg = ma35_pinconf_get_slew_rate(npctl, pin);
+ break;
+
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ arg = ma35_pinconf_get_output(npctl, pin);
+ break;
+
+ case PIN_CONFIG_POWER_SOURCE:
+ arg = ma35_pinconf_get_power_source(npctl, pin);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int ma35_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct ma35_pinctrl *npctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ unsigned int arg = 0;
+ int i, ret = 0;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = ma35_pinconf_set_pull(npctl, pin, param);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = ma35_pinconf_set_drive_strength(npctl, pin, arg);
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ ret = ma35_pinconf_set_schmitt(npctl, pin, 1);
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT:
+ ret = ma35_pinconf_set_schmitt(npctl, pin, arg);
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ ret = ma35_pinconf_set_slew_rate(npctl, pin, arg);
+ break;
+
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ ret = ma35_pinconf_set_output(npctl, pin, arg);
+ break;
+
+ case PIN_CONFIG_POWER_SOURCE:
+ ret = ma35_pinconf_set_power_source(npctl, pin, arg);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static const struct pinconf_ops ma35_pinconf_ops = {
+ .pin_config_get = ma35_pinconf_get,
+ .pin_config_set = ma35_pinconf_set,
+ .is_generic = true,
+};
+
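+/*
+ * Parse one group node: read the generic pinconf properties and the
+ * "nuvoton,pins" entries, translating each entry into an MFP register
+ * offset, bit shift and mux value.
+ */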
+static int ma35_pinctrl_parse_groups(struct device_node *np, struct ma35_pin_group *grp,
+ struct ma35_pinctrl *npctl, u32 index)
+{
+ struct ma35_pin_setting *pin;
+ unsigned long *configs;
+ unsigned int nconfigs;
+ int i, j, count, ret;
+ u32 *elems;
+
+ grp->name = np->name;
+
+ ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &nconfigs);
+ if (ret)
+ return ret;
+
+ count = of_property_count_elems_of_size(np, "nuvoton,pins", sizeof(u32));
+ if (!count || count % 3)
+ return -EINVAL;
+
+ elems = devm_kmalloc_array(npctl->dev, count, sizeof(u32), GFP_KERNEL);
+ if (!elems)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, "nuvoton,pins", elems, count);
+ if (ret)
+ return -EINVAL;
+
+ grp->npins = count / 3;
+
+ grp->pins = devm_kcalloc(npctl->dev, grp->npins, sizeof(*grp->pins), GFP_KERNEL);
+ if (!grp->pins)
+ return -ENOMEM;
+
+ grp->settings = devm_kcalloc(npctl->dev, grp->npins, sizeof(*grp->settings), GFP_KERNEL);
+ if (!grp->settings)
+ return -ENOMEM;
+
+ pin = grp->settings;
+
+ for (i = 0, j = 0; i < count; i += 3, j++) {
+ pin->offset = elems[i] * MA35_MFP_REG_SZ_PER_BANK + MA35_MFP_REG_BASE;
+ pin->shift = (elems[i + 1] * MA35_MFP_BITS_PER_PORT) % 32;
+ pin->muxval = elems[i + 2];
+ pin->configs = configs;
+ pin->nconfigs = nconfigs;
+ grp->pins[j] = npctl->info->get_pin_num(pin->offset, pin->shift);
+ pin++;
+ }
+ return 0;
+}
+
+static int ma35_pinctrl_parse_functions(struct device_node *np, struct ma35_pinctrl *npctl,
+ u32 index)
+{
+ struct device_node *child;
+ struct ma35_pin_func *func;
+ struct ma35_pin_group *grp;
+ static u32 grp_index;
+ u32 ret, i = 0;
+
+ dev_dbg(npctl->dev, "parse function(%d): %s\n", index, np->name);
+
+ func = &npctl->functions[index];
+ func->name = np->name;
+ func->ngroups = of_get_child_count(np);
+
+ if (func->ngroups <= 0)
+ return 0;
+
+ func->groups = devm_kcalloc(npctl->dev, func->ngroups, sizeof(char *), GFP_KERNEL);
+ if (!func->groups)
+ return -ENOMEM;
+
+ for_each_child_of_node(np, child) {
+ func->groups[i] = child->name;
+ grp = &npctl->groups[grp_index++];
+ ret = ma35_pinctrl_parse_groups(child, grp, npctl, i++);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+ return 0;
+}
+
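+/*
+ * Walk the non-GPIO child nodes twice: first to count functions and groups,
+ * then to allocate and parse them.
+ */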
+static int ma35_pinctrl_probe_dt(struct platform_device *pdev, struct ma35_pinctrl *npctl)
+{
+ struct fwnode_handle *child;
+ u32 idx = 0;
+ int ret;
+
+ device_for_each_child_node(&pdev->dev, child) {
+ if (fwnode_property_present(child, "gpio-controller"))
+ continue;
+ npctl->nfunctions++;
+ npctl->ngroups += of_get_child_count(to_of_node(child));
+ }
+
+ if (!npctl->nfunctions)
+ return -EINVAL;
+
+ npctl->functions = devm_kcalloc(&pdev->dev, npctl->nfunctions,
+ sizeof(*npctl->functions), GFP_KERNEL);
+ if (!npctl->functions)
+ return -ENOMEM;
+
+ npctl->groups = devm_kcalloc(&pdev->dev, npctl->ngroups,
+ sizeof(*npctl->groups), GFP_KERNEL);
+ if (!npctl->groups)
+ return -ENOMEM;
+
+ device_for_each_child_node(&pdev->dev, child) {
+ if (fwnode_property_present(child, "gpio-controller"))
+ continue;
+
+ ret = ma35_pinctrl_parse_functions(to_of_node(child), npctl, idx++);
+ if (ret) {
+ fwnode_handle_put(child);
+ dev_err(&pdev->dev, "failed to parse function\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+int ma35_pinctrl_probe(struct platform_device *pdev, const struct ma35_pinctrl_soc_info *info)
+{
+ struct pinctrl_desc *ma35_pinctrl_desc;
+ struct device *dev = &pdev->dev;
+ struct ma35_pinctrl *npctl;
+ int ret;
+
+ if (!info || !info->pins || !info->npins) {
+ dev_err(&pdev->dev, "wrong pinctrl info\n");
+ return -EINVAL;
+ }
+
+ npctl = devm_kzalloc(&pdev->dev, sizeof(*npctl), GFP_KERNEL);
+ if (!npctl)
+ return -ENOMEM;
+
+ ma35_pinctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*ma35_pinctrl_desc), GFP_KERNEL);
+ if (!ma35_pinctrl_desc)
+ return -ENOMEM;
+
+ npctl->ctrl = devm_kzalloc(&pdev->dev, sizeof(*npctl->ctrl), GFP_KERNEL);
+ if (!npctl->ctrl)
+ return -ENOMEM;
+
+ ma35_pinctrl_desc->name = dev_name(&pdev->dev);
+ ma35_pinctrl_desc->pins = info->pins;
+ ma35_pinctrl_desc->npins = info->npins;
+ ma35_pinctrl_desc->pctlops = &ma35_pctrl_ops;
+ ma35_pinctrl_desc->pmxops = &ma35_pmx_ops;
+ ma35_pinctrl_desc->confops = &ma35_pinconf_ops;
+ ma35_pinctrl_desc->owner = THIS_MODULE;
+
+ npctl->info = info;
+ npctl->dev = &pdev->dev;
+
+ npctl->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "nuvoton,sys");
+ if (IS_ERR(npctl->regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(npctl->regmap),
+ "No syscfg phandle specified\n");
+
+ ret = ma35_pinctrl_get_soc_data(npctl, pdev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "fail to get soc data\n");
+
+ platform_set_drvdata(pdev, npctl);
+
+ ret = ma35_pinctrl_probe_dt(pdev, npctl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "fail to probe MA35 pinctrl dt\n");
+
+ ret = devm_pinctrl_register_and_init(dev, ma35_pinctrl_desc, npctl, &npctl->pctl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "fail to register MA35 pinctrl\n");
+
+ ret = pinctrl_enable(npctl->pctl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "fail to enable MA35 pinctrl\n");
+
+ return ma35_gpiolib_register(pdev, npctl);
+}
+
+int ma35_pinctrl_suspend(struct device *dev)
+{
+ struct ma35_pinctrl *npctl = dev_get_drvdata(dev);
+
+ return pinctrl_force_sleep(npctl->pctl);
+}
+
+int ma35_pinctrl_resume(struct device *dev)
+{
+ struct ma35_pinctrl *npctl = dev_get_drvdata(dev);
+
+ return pinctrl_force_default(npctl->pctl);
+}
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.h b/drivers/pinctrl/nuvoton/pinctrl-ma35.h
new file mode 100644
index 000000000000..218084100541
--- /dev/null
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ *
+ * Author: Shan-Chun Hung <schung@nuvoton.com>
+ * * Jacky Huang <ychuang3@nuvoton.com>
+ */
+#ifndef __PINCTRL_MA35_H
+#define __PINCTRL_MA35_H
+
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+
+struct ma35_mux_desc {
+ const char *name;
+ u32 muxval;
+};
+
+struct ma35_pin_data {
+ u32 offset;
+ u32 shift;
+ struct ma35_mux_desc *muxes;
+};
+
+struct ma35_pinctrl_soc_info {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ int (*get_pin_num)(int offset, int shift);
+};
+
+#define MA35_PIN(num, n, o, s, ...) { \
+ .number = num, \
+ .name = #n, \
+ .drv_data = &(struct ma35_pin_data) { \
+ .offset = o, \
+ .shift = s, \
+ .muxes = (struct ma35_mux_desc[]) { \
+ __VA_ARGS__, { } }, \
+ }, \
+}
+
+#define MA35_MUX(_val, _name) { \
+ .name = _name, \
+ .muxval = _val, \
+}
+
+int ma35_pinctrl_probe(struct platform_device *pdev, const struct ma35_pinctrl_soc_info *info);
+int ma35_pinctrl_suspend(struct device *dev);
+int ma35_pinctrl_resume(struct device *dev);
+
+#endif /* __PINCTRL_MA35_H */
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35d1.c b/drivers/pinctrl/nuvoton/pinctrl-ma35d1.c
new file mode 100644
index 000000000000..8bb9a5a35954
--- /dev/null
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35d1.c
@@ -0,0 +1,1799 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ *
+ * Author: Shan-Chun Hung <schung@nuvoton.com>
+ * * Jacky Huang <ychuang3@nuvoton.com>
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-ma35.h"
+
+static const struct pinctrl_pin_desc ma35d1_pins[] = {
+ MA35_PIN(0, PA0, 0x80, 0x0,
+ MA35_MUX(0x0, "GPA0"),
+ MA35_MUX(0x2, "UART1_nCTS"),
+ MA35_MUX(0x3, "UART16_RXD"),
+ MA35_MUX(0x6, "NAND_DATA0"),
+ MA35_MUX(0x7, "EBI_AD0"),
+ MA35_MUX(0x9, "EBI_ADR0")),
+ MA35_PIN(1, PA1, 0x80, 0x4,
+ MA35_MUX(0x0, "GPA1"),
+ MA35_MUX(0x2, "UART1_nRTS"),
+ MA35_MUX(0x3, "UART16_TXD"),
+ MA35_MUX(0x6, "NAND_DATA1"),
+ MA35_MUX(0x7, "EBI_AD1"),
+ MA35_MUX(0x9, "EBI_ADR1")),
+ MA35_PIN(2, PA2, 0x80, 0x8,
+ MA35_MUX(0x0, "GPA2"),
+ MA35_MUX(0x2, "UART1_RXD"),
+ MA35_MUX(0x6, "NAND_DATA2"),
+ MA35_MUX(0x7, "EBI_AD2"),
+ MA35_MUX(0x9, "EBI_ADR2")),
+ MA35_PIN(3, PA3, 0x80, 0xc,
+ MA35_MUX(0x0, "GPA3"),
+ MA35_MUX(0x2, "UART1_TXD"),
+ MA35_MUX(0x6, "NAND_DATA3"),
+ MA35_MUX(0x7, "EBI_AD3"),
+ MA35_MUX(0x9, "EBI_ADR3")),
+ MA35_PIN(4, PA4, 0x80, 0x10,
+ MA35_MUX(0x0, "GPA4"),
+ MA35_MUX(0x2, "UART3_nCTS"),
+ MA35_MUX(0x3, "UART2_RXD"),
+ MA35_MUX(0x6, "NAND_DATA4"),
+ MA35_MUX(0x7, "EBI_AD4"),
+ MA35_MUX(0x9, "EBI_ADR4")),
+ MA35_PIN(5, PA5, 0x80, 0x14,
+ MA35_MUX(0x0, "GPA5"),
+ MA35_MUX(0x2, "UART3_nRTS"),
+ MA35_MUX(0x3, "UART2_TXD"),
+ MA35_MUX(0x6, "NAND_DATA5"),
+ MA35_MUX(0x7, "EBI_AD5"),
+ MA35_MUX(0x9, "EBI_ADR5")),
+ MA35_PIN(6, PA6, 0x80, 0x18,
+ MA35_MUX(0x0, "GPA6"),
+ MA35_MUX(0x2, "UART3_RXD"),
+ MA35_MUX(0x6, "NAND_DATA6"),
+ MA35_MUX(0x7, "EBI_AD6"),
+ MA35_MUX(0x9, "EBI_ADR6")),
+ MA35_PIN(7, PA7, 0x80, 0x1c,
+ MA35_MUX(0x0, "GPA7"),
+ MA35_MUX(0x2, "UART3_TXD"),
+ MA35_MUX(0x6, "NAND_DATA7"),
+ MA35_MUX(0x7, "EBI_AD7"),
+ MA35_MUX(0x9, "EBI_ADR7")),
+ MA35_PIN(8, PA8, 0x84, 0x0,
+ MA35_MUX(0x0, "GPA8"),
+ MA35_MUX(0x2, "UART5_nCTS"),
+ MA35_MUX(0x3, "UART4_RXD"),
+ MA35_MUX(0x6, "NAND_RDY0"),
+ MA35_MUX(0x7, "EBI_AD8"),
+ MA35_MUX(0x9, "EBI_ADR8")),
+ MA35_PIN(9, PA9, 0x84, 0x4,
+ MA35_MUX(0x0, "GPA9"),
+ MA35_MUX(0x2, "UART5_nRTS"),
+ MA35_MUX(0x3, "UART4_TXD"),
+ MA35_MUX(0x6, "NAND_nRE"),
+ MA35_MUX(0x7, "EBI_AD9"),
+ MA35_MUX(0x9, "EBI_ADR9")),
+ MA35_PIN(10, PA10, 0x84, 0x8,
+ MA35_MUX(0x0, "GPA10"),
+ MA35_MUX(0x2, "UART5_RXD"),
+ MA35_MUX(0x6, "NAND_nWE"),
+ MA35_MUX(0x7, "EBI_AD10"),
+ MA35_MUX(0x9, "EBI_ADR10")),
+ MA35_PIN(11, PA11, 0x84, 0xc,
+ MA35_MUX(0x0, "GPA11"),
+ MA35_MUX(0x2, "UART5_TXD"),
+ MA35_MUX(0x6, "NAND_CLE"),
+ MA35_MUX(0x7, "EBI_AD11"),
+ MA35_MUX(0x9, "EBI_ADR11")),
+ MA35_PIN(12, PA12, 0x84, 0x10,
+ MA35_MUX(0x0, "GPA12"),
+ MA35_MUX(0x2, "UART7_nCTS"),
+ MA35_MUX(0x3, "UART8_RXD"),
+ MA35_MUX(0x6, "NAND_ALE"),
+ MA35_MUX(0x7, "EBI_AD12"),
+ MA35_MUX(0x9, "EBI_ADR12")),
+ MA35_PIN(13, PA13, 0x84, 0x14,
+ MA35_MUX(0x0, "GPA13"),
+ MA35_MUX(0x2, "UART7_nRTS"),
+ MA35_MUX(0x3, "UART8_TXD"),
+ MA35_MUX(0x6, "NAND_nCS0"),
+ MA35_MUX(0x7, "EBI_AD13"),
+ MA35_MUX(0x9, "EBI_ADR13")),
+ MA35_PIN(14, PA14, 0x84, 0x18,
+ MA35_MUX(0x0, "GPA14"),
+ MA35_MUX(0x2, "UART7_RXD"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x6, "NAND_nWP"),
+ MA35_MUX(0x7, "EBI_AD14"),
+ MA35_MUX(0x9, "EBI_ADR14")),
+ MA35_PIN(15, PA15, 0x84, 0x1c,
+ MA35_MUX(0x0, "GPA15"),
+ MA35_MUX(0x1, "EPWM0_CH2"),
+ MA35_MUX(0x2, "UART9_nCTS"),
+ MA35_MUX(0x3, "UART6_RXD"),
+ MA35_MUX(0x4, "I2C4_SDA"),
+ MA35_MUX(0x5, "CAN2_RXD"),
+ MA35_MUX(0x7, "EBI_ALE"),
+ MA35_MUX(0x9, "QEI0_A"),
+ MA35_MUX(0xb, "TM1"),
+ MA35_MUX(0xe, "RGMII0_PPS"),
+ MA35_MUX(0xf, "RMII0_PPS")),
+ MA35_PIN(16, PB0, 0x88, 0x0,
+ MA35_MUX(0x0, "GPB0"),
+ MA35_MUX(0x8, "EADC0_CH0")),
+ MA35_PIN(17, PB1, 0x88, 0x4,
+ MA35_MUX(0x0, "GPB1"),
+ MA35_MUX(0x8, "EADC0_CH1")),
+ MA35_PIN(18, PB2, 0x88, 0x8,
+ MA35_MUX(0x0, "GPB2"),
+ MA35_MUX(0x8, "EADC0_CH2")),
+ MA35_PIN(19, PB3, 0x88, 0xc,
+ MA35_MUX(0x0, "GPB3"),
+ MA35_MUX(0x8, "EADC0_CH3")),
+ MA35_PIN(20, PB4, 0x88, 0x10,
+ MA35_MUX(0x0, "GPB4"),
+ MA35_MUX(0x8, "EADC0_CH4")),
+ MA35_PIN(21, PB5, 0x88, 0x14,
+ MA35_MUX(0x0, "GPB5"),
+ MA35_MUX(0x8, "EADC0_CH5")),
+ MA35_PIN(22, PB6, 0x88, 0x18,
+ MA35_MUX(0x0, "GPB6"),
+ MA35_MUX(0x8, "EADC0_CH6")),
+ MA35_PIN(23, PB7, 0x88, 0x1c,
+ MA35_MUX(0x0, "GPB7"),
+ MA35_MUX(0x8, "EADC0_CH7")),
+ MA35_PIN(24, PB8, 0x8c, 0x0,
+ MA35_MUX(0x0, "GPB8"),
+ MA35_MUX(0x1, "EPWM2_BRAKE0"),
+ MA35_MUX(0x2, "UART2_nCTS"),
+ MA35_MUX(0x3, "UART1_RXD"),
+ MA35_MUX(0x4, "I2C2_SDA"),
+ MA35_MUX(0x5, "SPI0_SS1"),
+ MA35_MUX(0x6, "SPI0_I2SMCLK"),
+ MA35_MUX(0x8, "ADC0_CH0"),
+ MA35_MUX(0x9, "EBI_nCS0"),
+ MA35_MUX(0xb, "TM4"),
+ MA35_MUX(0xe, "QEI2_INDEX"),
+ MA35_MUX(0xf, "KPI_ROW6")),
+ MA35_PIN(25, PB9, 0x8c, 0x4,
+ MA35_MUX(0x0, "GPB9"),
+ MA35_MUX(0x1, "EPWM2_CH4"),
+ MA35_MUX(0x2, "UART2_nRTS"),
+ MA35_MUX(0x3, "UART1_TXD"),
+ MA35_MUX(0x4, "I2C2_SCL"),
+ MA35_MUX(0x5, "SPI0_CLK"),
+ MA35_MUX(0x6, "I2S0_MCLK"),
+ MA35_MUX(0x7, "CCAP1_HSYNC"),
+ MA35_MUX(0x8, "ADC0_CH1"),
+ MA35_MUX(0x9, "EBI_ALE"),
+ MA35_MUX(0xa, "EBI_AD13"),
+ MA35_MUX(0xb, "TM0_EXT"),
+ MA35_MUX(0xc, "I2S1_MCLK"),
+ MA35_MUX(0xd, "SC0_nCD"),
+ MA35_MUX(0xe, "QEI2_A"),
+ MA35_MUX(0xf, "KPI_ROW7")),
+ MA35_PIN(26, PB10, 0x8c, 0x8,
+ MA35_MUX(0x0, "GPB10"),
+ MA35_MUX(0x1, "EPWM2_CH5"),
+ MA35_MUX(0x2, "UART2_RXD"),
+ MA35_MUX(0x3, "CAN0_RXD"),
+ MA35_MUX(0x5, "SPI0_MOSI"),
+ MA35_MUX(0x6, "EBI_MCLK"),
+ MA35_MUX(0x7, "CCAP1_VSYNC"),
+ MA35_MUX(0x8, "ADC0_CH2"),
+ MA35_MUX(0x9, "EBI_ADR15"),
+ MA35_MUX(0xa, "EBI_AD14"),
+ MA35_MUX(0xb, "TM5"),
+ MA35_MUX(0xc, "I2C1_SDA"),
+ MA35_MUX(0xd, "INT1"),
+ MA35_MUX(0xe, "QEI2_B")),
+ MA35_PIN(27, PB11, 0x8c, 0xc,
+ MA35_MUX(0x0, "GPB11"),
+ MA35_MUX(0x1, "EPWM2_BRAKE1"),
+ MA35_MUX(0x2, "UART2_TXD"),
+ MA35_MUX(0x3, "CAN0_TXD"),
+ MA35_MUX(0x5, "SPI0_MISO"),
+ MA35_MUX(0x6, "I2S1_MCLK"),
+ MA35_MUX(0x7, "CCAP1_SFIELD"),
+ MA35_MUX(0x8, "ADC0_CH3"),
+ MA35_MUX(0x9, "EBI_nCS2"),
+ MA35_MUX(0xa, "EBI_ALE"),
+ MA35_MUX(0xb, "TM5_EXT"),
+ MA35_MUX(0xc, "I2C1_SCL"),
+ MA35_MUX(0xd, "INT2"),
+ MA35_MUX(0xe, "QEI2_INDEX")),
+ MA35_PIN(28, PB12, 0x8c, 0x10,
+ MA35_MUX(0x0, "GPB12"),
+ MA35_MUX(0x1, "EPWM2_CH0"),
+ MA35_MUX(0x2, "UART4_nCTS"),
+ MA35_MUX(0x3, "UART3_RXD"),
+ MA35_MUX(0x4, "I2C3_SDA"),
+ MA35_MUX(0x5, "CAN2_RXD"),
+ MA35_MUX(0x6, "I2S1_LRCK"),
+ MA35_MUX(0x8, "ADC0_CH4"),
+ MA35_MUX(0x9, "EBI_ADR16"),
+ MA35_MUX(0xe, "ECAP2_IC0")),
+ MA35_PIN(29, PB13, 0x8c, 0x14,
+ MA35_MUX(0x0, "GPB13"),
+ MA35_MUX(0x1, "EPWM2_CH1"),
+ MA35_MUX(0x2, "UART4_nRTS"),
+ MA35_MUX(0x3, "UART3_TXD"),
+ MA35_MUX(0x4, "I2C3_SCL"),
+ MA35_MUX(0x5, "CAN2_TXD"),
+ MA35_MUX(0x6, "I2S1_BCLK"),
+ MA35_MUX(0x8, "ADC0_CH5"),
+ MA35_MUX(0x9, "EBI_ADR17"),
+ MA35_MUX(0xe, "ECAP2_IC1")),
+ MA35_PIN(30, PB14, 0x8c, 0x18,
+ MA35_MUX(0x0, "GPB14"),
+ MA35_MUX(0x1, "EPWM2_CH2"),
+ MA35_MUX(0x2, "UART4_RXD"),
+ MA35_MUX(0x3, "CAN1_RXD"),
+ MA35_MUX(0x5, "I2C4_SDA"),
+ MA35_MUX(0x6, "I2S1_DI"),
+ MA35_MUX(0x8, "ADC0_CH6"),
+ MA35_MUX(0x9, "EBI_ADR18"),
+ MA35_MUX(0xe, "ECAP2_IC2")),
+ MA35_PIN(31, PB15, 0x8c, 0x1c,
+ MA35_MUX(0x0, "GPB15"),
+ MA35_MUX(0x1, "EPWM2_CH3"),
+ MA35_MUX(0x2, "UART4_TXD"),
+ MA35_MUX(0x3, "CAN1_TXD"),
+ MA35_MUX(0x5, "I2C4_SCL"),
+ MA35_MUX(0x6, "I2S1_DO"),
+ MA35_MUX(0x8, "ADC0_CH7"),
+ MA35_MUX(0x9, "EBI_ADR19")),
+ MA35_PIN(32, PC0, 0x90, 0x0,
+ MA35_MUX(0x0, "GPC0"),
+ MA35_MUX(0x4, "I2C4_SDA"),
+ MA35_MUX(0x6, "SD0_CMD/eMMC0_CMD")),
+ MA35_PIN(33, PC1, 0x90, 0x4,
+ MA35_MUX(0x0, "GPC1"),
+ MA35_MUX(0x4, "I2C4_SCL"),
+ MA35_MUX(0x6, "SD0_CLK/eMMC0_CLK")),
+ MA35_PIN(34, PC2, 0x90, 0x8,
+ MA35_MUX(0x0, "GPC2"),
+ MA35_MUX(0x3, "CAN0_RXD"),
+ MA35_MUX(0x6, "SD0_DAT0/eMMC0_DAT0")),
+ MA35_PIN(35, PC3, 0x90, 0xc,
+ MA35_MUX(0x0, "GPC3"),
+ MA35_MUX(0x3, "CAN0_TXD"),
+ MA35_MUX(0x6, "SD0_DAT1/eMMC0_DAT1")),
+ MA35_PIN(36, PC4, 0x90, 0x10,
+ MA35_MUX(0x0, "GPC4"),
+ MA35_MUX(0x4, "I2C5_SDA"),
+ MA35_MUX(0x6, "SD0_DAT2/eMMC0_DAT2")),
+ MA35_PIN(37, PC5, 0x90, 0x14,
+ MA35_MUX(0x0, "GPC5"),
+ MA35_MUX(0x4, "I2C5_SCL"),
+ MA35_MUX(0x6, "SD0_DAT3/eMMC0_DAT3")),
+ MA35_PIN(38, PC6, 0x90, 0x18,
+ MA35_MUX(0x0, "GPC6"),
+ MA35_MUX(0x3, "CAN1_RXD"),
+ MA35_MUX(0x6, "SD0_nCD")),
+ MA35_PIN(39, PC7, 0x90, 0x1c,
+ MA35_MUX(0x0, "GPC7"),
+ MA35_MUX(0x3, "CAN1_TXD"),
+ MA35_MUX(0x6, "SD0_WP")),
+ MA35_PIN(40, PC12, 0x94, 0x10,
+ MA35_MUX(0x0, "GPC12"),
+ MA35_MUX(0x2, "UART12_nCTS"),
+ MA35_MUX(0x3, "UART11_RXD"),
+ MA35_MUX(0x6, "LCM_DATA16")),
+ MA35_PIN(41, PC13, 0x94, 0x14,
+ MA35_MUX(0x0, "GPC13"),
+ MA35_MUX(0x2, "UART12_nRTS"),
+ MA35_MUX(0x3, "UART11_TXD"),
+ MA35_MUX(0x6, "LCM_DATA17")),
+ MA35_PIN(42, PC14, 0x94, 0x18,
+ MA35_MUX(0x0, "GPC14"),
+ MA35_MUX(0x2, "UART12_RXD"),
+ MA35_MUX(0x6, "LCM_DATA18")),
+ MA35_PIN(43, PC15, 0x94, 0x1c,
+ MA35_MUX(0x0, "GPC15"),
+ MA35_MUX(0x2, "UART12_TXD"),
+ MA35_MUX(0x6, "LCM_DATA19"),
+ MA35_MUX(0x7, "LCM_MPU_TE"),
+ MA35_MUX(0x8, "LCM_MPU_VSYNC")),
+ MA35_PIN(44, PD0, 0x98, 0x0,
+ MA35_MUX(0x0, "GPD0"),
+ MA35_MUX(0x2, "UART3_nCTS"),
+ MA35_MUX(0x3, "UART4_RXD"),
+ MA35_MUX(0x5, "QSPI0_SS0")),
+ MA35_PIN(45, PD1, 0x98, 0x4,
+ MA35_MUX(0x0, "GPD1"),
+ MA35_MUX(0x2, "UART3_nRTS"),
+ MA35_MUX(0x3, "UART4_TXD"),
+ MA35_MUX(0x5, "QSPI0_CLK")),
+ MA35_PIN(46, PD2, 0x98, 0x8,
+ MA35_MUX(0x0, "GPD2"),
+ MA35_MUX(0x2, "UART3_RXD"),
+ MA35_MUX(0x5, "QSPI0_MOSI0")),
+ MA35_PIN(47, PD3, 0x98, 0xc,
+ MA35_MUX(0x0, "GPD3"),
+ MA35_MUX(0x2, "UART3_TXD"),
+ MA35_MUX(0x5, "QSPI0_MISO0")),
+ MA35_PIN(48, PD4, 0x98, 0x10,
+ MA35_MUX(0x0, "GPD4"),
+ MA35_MUX(0x2, "UART1_nCTS"),
+ MA35_MUX(0x3, "UART2_RXD"),
+ MA35_MUX(0x4, "I2C2_SDA"),
+ MA35_MUX(0x5, "QSPI0_MOSI1")),
+ MA35_PIN(49, PD5, 0x98, 0x14,
+ MA35_MUX(0x0, "GPD5"),
+ MA35_MUX(0x2, "UART1_nRTS"),
+ MA35_MUX(0x3, "UART2_TXD"),
+ MA35_MUX(0x4, "I2C2_SCL"),
+ MA35_MUX(0x5, "QSPI0_MISO1")),
+ MA35_PIN(50, PD6, 0x98, 0x18,
+ MA35_MUX(0x0, "GPD6"),
+ MA35_MUX(0x1, "EPWM0_SYNC_IN"),
+ MA35_MUX(0x2, "UART1_RXD"),
+ MA35_MUX(0x5, "QSPI1_MOSI1"),
+ MA35_MUX(0x6, "I2C0_SDA"),
+ MA35_MUX(0x7, "I2S0_MCLK"),
+ MA35_MUX(0x8, "EPWM0_CH0"),
+ MA35_MUX(0x9, "EBI_AD5"),
+ MA35_MUX(0xa, "SPI3_SS1"),
+ MA35_MUX(0xb, "TRACE_CLK")),
+ MA35_PIN(51, PD7, 0x98, 0x1c,
+ MA35_MUX(0x0, "GPD7"),
+ MA35_MUX(0x1, "EPWM0_SYNC_OUT"),
+ MA35_MUX(0x2, "UART1_TXD"),
+ MA35_MUX(0x5, "QSPI1_MISO1"),
+ MA35_MUX(0x6, "I2C0_SCL"),
+ MA35_MUX(0x7, "I2S1_MCLK"),
+ MA35_MUX(0x8, "EPWM0_CH1"),
+ MA35_MUX(0x9, "EBI_AD6"),
+ MA35_MUX(0xa, "SC1_nCD"),
+ MA35_MUX(0xb, "EADC0_ST")),
+ MA35_PIN(52, PD8, 0x9c, 0x0,
+ MA35_MUX(0x0, "GPD8"),
+ MA35_MUX(0x1, "EPWM0_BRAKE0"),
+ MA35_MUX(0x2, "UART16_nCTS"),
+ MA35_MUX(0x3, "UART15_RXD"),
+ MA35_MUX(0x5, "QSPI1_SS0"),
+ MA35_MUX(0x7, "I2S1_LRCK"),
+ MA35_MUX(0x8, "EPWM0_CH2"),
+ MA35_MUX(0x9, "EBI_AD7"),
+ MA35_MUX(0xa, "SC1_CLK"),
+ MA35_MUX(0xb, "TM0")),
+ MA35_PIN(53, PD9, 0x9c, 0x4,
+ MA35_MUX(0x0, "GPD9"),
+ MA35_MUX(0x1, "EPWM0_BRAKE1"),
+ MA35_MUX(0x2, "UART16_nRTS"),
+ MA35_MUX(0x3, "UART15_TXD"),
+ MA35_MUX(0x5, "QSPI1_CLK"),
+ MA35_MUX(0x7, "I2S1_BCLK"),
+ MA35_MUX(0x8, "EPWM0_CH3"),
+ MA35_MUX(0x9, "EBI_AD8"),
+ MA35_MUX(0xa, "SC1_DAT"),
+ MA35_MUX(0xb, "TM0_EXT")),
+ MA35_PIN(54, PD10, 0x9c, 0x8,
+ MA35_MUX(0x0, "GPD10"),
+ MA35_MUX(0x1, "EPWM1_BRAKE0"),
+ MA35_MUX(0x2, "UART16_RXD"),
+ MA35_MUX(0x5, "QSPI1_MOSI0"),
+ MA35_MUX(0x7, "I2S1_DI"),
+ MA35_MUX(0x8, "EPWM0_CH4"),
+ MA35_MUX(0x9, "EBI_AD9"),
+ MA35_MUX(0xa, "SC1_RST"),
+ MA35_MUX(0xb, "TM2")),
+ MA35_PIN(55, PD11, 0x9c, 0xc,
+ MA35_MUX(0x0, "GPD11"),
+ MA35_MUX(0x1, "EPWM1_BRAKE1"),
+ MA35_MUX(0x2, "UART16_TXD"),
+ MA35_MUX(0x5, "QSPI1_MISO0"),
+ MA35_MUX(0x7, "I2S1_DO"),
+ MA35_MUX(0x8, "EPWM0_CH5"),
+ MA35_MUX(0x9, "EBI_AD10"),
+ MA35_MUX(0xa, "SC1_PWR"),
+ MA35_MUX(0xb, "TM2_EXT")),
+ MA35_PIN(56, PD12, 0x9c, 0x10,
+ MA35_MUX(0x0, "GPD12"),
+ MA35_MUX(0x1, "EPWM0_BRAKE0"),
+ MA35_MUX(0x2, "UART11_TXD"),
+ MA35_MUX(0x3, "UART10_RXD"),
+ MA35_MUX(0x4, "I2C4_SDA"),
+ MA35_MUX(0x6, "TRACE_DATA0"),
+ MA35_MUX(0x7, "EBI_nCS1"),
+ MA35_MUX(0x8, "EBI_AD4"),
+ MA35_MUX(0x9, "QEI0_INDEX"),
+ MA35_MUX(0xb, "TM5"),
+ MA35_MUX(0xc, "I2S1_LRCK"),
+ MA35_MUX(0xd, "INT1")),
+ MA35_PIN(57, PD13, 0x9c, 0x14,
+ MA35_MUX(0x0, "GPD13"),
+ MA35_MUX(0x1, "EPWM0_BRAKE1"),
+ MA35_MUX(0x2, "UART11_RXD"),
+ MA35_MUX(0x3, "UART10_TXD"),
+ MA35_MUX(0x4, "I2C4_SCL"),
+ MA35_MUX(0x6, "TRACE_DATA1"),
+ MA35_MUX(0x7, "EBI_nCS2"),
+ MA35_MUX(0x8, "EBI_AD5"),
+ MA35_MUX(0x9, "ECAP0_IC0"),
+ MA35_MUX(0xb, "TM5_EXT"),
+ MA35_MUX(0xc, "I2S1_BCLK")),
+ MA35_PIN(58, PD14, 0x9c, 0x18,
+ MA35_MUX(0x0, "GPD14"),
+ MA35_MUX(0x1, "EPWM0_SYNC_IN"),
+ MA35_MUX(0x2, "UART11_nCTS"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x6, "TRACE_DATA2"),
+ MA35_MUX(0x7, "EBI_MCLK"),
+ MA35_MUX(0x8, "EBI_AD6"),
+ MA35_MUX(0x9, "ECAP0_IC1"),
+ MA35_MUX(0xb, "TM6"),
+ MA35_MUX(0xc, "I2S1_DI"),
+ MA35_MUX(0xd, "INT3")),
+ MA35_PIN(59, PD15, 0x9c, 0x1c,
+ MA35_MUX(0x0, "GPD15"),
+ MA35_MUX(0x1, "EPWM0_SYNC_OUT"),
+ MA35_MUX(0x2, "UART11_nRTS"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x6, "TRACE_DATA3"),
+ MA35_MUX(0x7, "EBI_ALE"),
+ MA35_MUX(0x8, "EBI_AD7"),
+ MA35_MUX(0x9, "ECAP0_IC2"),
+ MA35_MUX(0xb, "TM6_EXT"),
+ MA35_MUX(0xc, "I2S1_DO")),
+ MA35_PIN(60, PE0, 0xa0, 0x0,
+ MA35_MUX(0x0, "GPE0"),
+ MA35_MUX(0x2, "UART9_nCTS"),
+ MA35_MUX(0x3, "UART8_RXD"),
+ MA35_MUX(0x7, "CCAP1_DATA0"),
+ MA35_MUX(0x8, "RGMII0_MDC"),
+ MA35_MUX(0x9, "RMII0_MDC")),
+ MA35_PIN(61, PE1, 0xa0, 0x4,
+ MA35_MUX(0x0, "GPE1"),
+ MA35_MUX(0x2, "UART9_nRTS"),
+ MA35_MUX(0x3, "UART8_TXD"),
+ MA35_MUX(0x7, "CCAP1_DATA1"),
+ MA35_MUX(0x8, "RGMII0_MDIO"),
+ MA35_MUX(0x9, "RMII0_MDIO")),
+ MA35_PIN(62, PE2, 0xa0, 0x8,
+ MA35_MUX(0x0, "GPE2"),
+ MA35_MUX(0x2, "UART9_RXD"),
+ MA35_MUX(0x7, "CCAP1_DATA2"),
+ MA35_MUX(0x8, "RGMII0_TXCTL"),
+ MA35_MUX(0x9, "RMII0_TXEN")),
+ MA35_PIN(63, PE3, 0xa0, 0xc,
+ MA35_MUX(0x0, "GPE3"),
+ MA35_MUX(0x2, "UART9_TXD"),
+ MA35_MUX(0x7, "CCAP1_DATA3"),
+ MA35_MUX(0x8, "RGMII0_TXD0"),
+ MA35_MUX(0x9, "RMII0_TXD0")),
+ MA35_PIN(64, PE4, 0xa0, 0x10,
+ MA35_MUX(0x0, "GPE4"),
+ MA35_MUX(0x2, "UART4_nCTS"),
+ MA35_MUX(0x3, "UART3_RXD"),
+ MA35_MUX(0x7, "CCAP1_DATA4"),
+ MA35_MUX(0x8, "RGMII0_TXD1"),
+ MA35_MUX(0x9, "RMII0_TXD1")),
+ MA35_PIN(65, PE5, 0xa0, 0x14,
+ MA35_MUX(0x0, "GPE5"),
+ MA35_MUX(0x2, "UART4_nRTS"),
+ MA35_MUX(0x3, "UART3_TXD"),
+ MA35_MUX(0x7, "CCAP1_DATA5"),
+ MA35_MUX(0x8, "RGMII0_RXCLK"),
+ MA35_MUX(0x9, "RMII0_REFCLK")),
+ MA35_PIN(66, PE6, 0xa0, 0x18,
+ MA35_MUX(0x0, "GPE6"),
+ MA35_MUX(0x2, "UART4_RXD"),
+ MA35_MUX(0x7, "CCAP1_DATA6"),
+ MA35_MUX(0x8, "RGMII0_RXCTL"),
+ MA35_MUX(0x9, "RMII0_CRSDV")),
+ MA35_PIN(67, PE7, 0xa0, 0x1c,
+ MA35_MUX(0x0, "GPE7"),
+ MA35_MUX(0x2, "UART4_TXD"),
+ MA35_MUX(0x7, "CCAP1_DATA7"),
+ MA35_MUX(0x8, "RGMII0_RXD0"),
+ MA35_MUX(0x9, "RMII0_RXD0")),
+ MA35_PIN(68, PE8, 0xa4, 0x0,
+ MA35_MUX(0x0, "GPE8"),
+ MA35_MUX(0x2, "UART13_nCTS"),
+ MA35_MUX(0x3, "UART12_RXD"),
+ MA35_MUX(0x7, "CCAP1_SCLK"),
+ MA35_MUX(0x8, "RGMII0_RXD1"),
+ MA35_MUX(0x9, "RMII0_RXD1")),
+ MA35_PIN(69, PE9, 0xa4, 0x4,
+ MA35_MUX(0x0, "GPE9"),
+ MA35_MUX(0x2, "UART13_nRTS"),
+ MA35_MUX(0x3, "UART12_TXD"),
+ MA35_MUX(0x7, "CCAP1_PIXCLK"),
+ MA35_MUX(0x8, "RGMII0_RXD2"),
+ MA35_MUX(0x9, "RMII0_RXERR")),
+ MA35_PIN(70, PE10, 0xa4, 0x8,
+ MA35_MUX(0x0, "GPE10"),
+ MA35_MUX(0x2, "UART15_nCTS"),
+ MA35_MUX(0x3, "UART14_RXD"),
+ MA35_MUX(0x5, "SPI1_SS0"),
+ MA35_MUX(0x7, "CCAP1_HSYNC"),
+ MA35_MUX(0x8, "RGMII0_RXD3")),
+ MA35_PIN(71, PE11, 0xa4, 0xc,
+ MA35_MUX(0x0, "GPE11"),
+ MA35_MUX(0x2, "UART15_nRTS"),
+ MA35_MUX(0x3, "UART14_TXD"),
+ MA35_MUX(0x5, "SPI1_CLK"),
+ MA35_MUX(0x7, "CCAP1_VSYNC"),
+ MA35_MUX(0x8, "RGMII0_TXCLK")),
+ MA35_PIN(72, PE12, 0xa4, 0x10,
+ MA35_MUX(0x0, "GPE12"),
+ MA35_MUX(0x2, "UART15_RXD"),
+ MA35_MUX(0x5, "SPI1_MOSI"),
+ MA35_MUX(0x7, "CCAP1_DATA8"),
+ MA35_MUX(0x8, "RGMII0_TXD2")),
+ MA35_PIN(73, PE13, 0xa4, 0x14,
+ MA35_MUX(0x0, "GPE13"),
+ MA35_MUX(0x2, "UART15_TXD"),
+ MA35_MUX(0x5, "SPI1_MISO"),
+ MA35_MUX(0x7, "CCAP1_DATA9"),
+ MA35_MUX(0x8, "RGMII0_TXD3")),
+ MA35_PIN(74, PE14, 0xa4, 0x18,
+ MA35_MUX(0x0, "GPE14"),
+ MA35_MUX(0x1, "UART0_TXD")),
+ MA35_PIN(75, PE15, 0xa4, 0x1c,
+ MA35_MUX(0x0, "GPE15"),
+ MA35_MUX(0x1, "UART0_RXD")),
+ MA35_PIN(76, PF0, 0xa8, 0x0,
+ MA35_MUX(0x0, "GPF0"),
+ MA35_MUX(0x2, "UART2_nCTS"),
+ MA35_MUX(0x3, "UART1_RXD"),
+ MA35_MUX(0x6, "RGMII0_RXD3"),
+ MA35_MUX(0x8, "RGMII1_MDC"),
+ MA35_MUX(0x9, "RMII1_MDC"),
+ MA35_MUX(0xe, "KPI_COL0")),
+ MA35_PIN(77, PF1, 0xa8, 0x4,
+ MA35_MUX(0x0, "GPF1"),
+ MA35_MUX(0x2, "UART2_nRTS"),
+ MA35_MUX(0x3, "UART1_TXD"),
+ MA35_MUX(0x6, "RGMII0_TXCLK"),
+ MA35_MUX(0x8, "RGMII1_MDIO"),
+ MA35_MUX(0x9, "RMII1_MDIO"),
+ MA35_MUX(0xe, "KPI_COL1")),
+ MA35_PIN(78, PF2, 0xa8, 0x8,
+ MA35_MUX(0x0, "GPF2"),
+ MA35_MUX(0x2, "UART2_RXD"),
+ MA35_MUX(0x6, "RGMII0_TXD2"),
+ MA35_MUX(0x8, "RGMII1_TXCTL"),
+ MA35_MUX(0x9, "RMII1_TXEN"),
+ MA35_MUX(0xe, "KPI_COL2")),
+ MA35_PIN(79, PF3, 0xa8, 0xc,
+ MA35_MUX(0x0, "GPF3"),
+ MA35_MUX(0x2, "UART2_TXD"),
+ MA35_MUX(0x6, "RGMII0_TXD3"),
+ MA35_MUX(0x8, "RGMII1_TXD0"),
+ MA35_MUX(0x9, "RMII1_TXD0"),
+ MA35_MUX(0xe, "KPI_COL3")),
+ MA35_PIN(80, PF4, 0xa8, 0x10,
+ MA35_MUX(0x0, "GPF4"),
+ MA35_MUX(0x2, "UART11_nCTS"),
+ MA35_MUX(0x3, "UART10_RXD"),
+ MA35_MUX(0x4, "I2S0_LRCK"),
+ MA35_MUX(0x5, "SPI1_SS0"),
+ MA35_MUX(0x8, "RGMII1_TXD1"),
+ MA35_MUX(0x9, "RMII1_TXD1"),
+ MA35_MUX(0xd, "CAN2_RXD"),
+ MA35_MUX(0xe, "KPI_ROW0")),
+ MA35_PIN(81, PF5, 0xa8, 0x14,
+ MA35_MUX(0x0, "GPF5"),
+ MA35_MUX(0x2, "UART11_nRTS"),
+ MA35_MUX(0x3, "UART10_TXD"),
+ MA35_MUX(0x4, "I2S0_BCLK"),
+ MA35_MUX(0x5, "SPI1_CLK"),
+ MA35_MUX(0x8, "RGMII1_RXCLK"),
+ MA35_MUX(0x9, "RMII1_REFCLK"),
+ MA35_MUX(0xd, "CAN2_TXD"),
+ MA35_MUX(0xe, "KPI_ROW1")),
+ MA35_PIN(82, PF6, 0xa8, 0x18,
+ MA35_MUX(0x0, "GPF6"),
+ MA35_MUX(0x2, "UART11_RXD"),
+ MA35_MUX(0x4, "I2S0_DI"),
+ MA35_MUX(0x5, "SPI1_MOSI"),
+ MA35_MUX(0x8, "RGMII1_RXCTL"),
+ MA35_MUX(0x9, "RMII1_CRSDV"),
+ MA35_MUX(0xa, "I2C4_SDA"),
+ MA35_MUX(0xd, "SC0_CLK"),
+ MA35_MUX(0xe, "KPI_ROW2")),
+ MA35_PIN(83, PF7, 0xa8, 0x1c,
+ MA35_MUX(0x0, "GPF7"),
+ MA35_MUX(0x2, "UART11_TXD"),
+ MA35_MUX(0x4, "I2S0_DO"),
+ MA35_MUX(0x5, "SPI1_MISO"),
+ MA35_MUX(0x8, "RGMII1_RXD0"),
+ MA35_MUX(0x9, "RMII1_RXD0"),
+ MA35_MUX(0xa, "I2C4_SCL"),
+ MA35_MUX(0xd, "SC0_DAT"),
+ MA35_MUX(0xe, "KPI_ROW3")),
+ MA35_PIN(84, PF8, 0xac, 0x0,
+ MA35_MUX(0x0, "GPF8"),
+ MA35_MUX(0x2, "UART13_RXD"),
+ MA35_MUX(0x4, "I2C5_SDA"),
+ MA35_MUX(0x5, "SPI0_SS0"),
+ MA35_MUX(0x8, "RGMII1_RXD1"),
+ MA35_MUX(0x9, "RMII1_RXD1"),
+ MA35_MUX(0xd, "SC0_RST"),
+ MA35_MUX(0xe, "KPI_COL4")),
+ MA35_PIN(85, PF9, 0xac, 0x4,
+ MA35_MUX(0x0, "GPF9"),
+ MA35_MUX(0x2, "UART13_TXD"),
+ MA35_MUX(0x4, "I2C5_SCL"),
+ MA35_MUX(0x5, "SPI0_SS1"),
+ MA35_MUX(0x8, "RGMII1_RXD2"),
+ MA35_MUX(0x9, "RMII1_RXERR"),
+ MA35_MUX(0xd, "SC0_PWR"),
+ MA35_MUX(0xe, "KPI_COL5")),
+ MA35_PIN(86, PF10, 0xac, 0x8,
+ MA35_MUX(0x0, "GPF10"),
+ MA35_MUX(0x2, "UART13_nCTS"),
+ MA35_MUX(0x5, "I2S0_LRCK"),
+ MA35_MUX(0x6, "SPI1_SS0"),
+ MA35_MUX(0x8, "RGMII1_RXD3"),
+ MA35_MUX(0x9, "SC0_CLK"),
+ MA35_MUX(0xe, "KPI_COL6")),
+ MA35_PIN(87, PF11, 0xac, 0xc,
+ MA35_MUX(0x0, "GPF11"),
+ MA35_MUX(0x2, "UART13_nRTS"),
+ MA35_MUX(0x5, "I2S0_BCLK"),
+ MA35_MUX(0x6, "SPI1_CLK"),
+ MA35_MUX(0x8, "RGMII1_TXCLK"),
+ MA35_MUX(0x9, "SC0_DAT"),
+ MA35_MUX(0xe, "KPI_COL7")),
+ MA35_PIN(88, PF12, 0xac, 0x10,
+ MA35_MUX(0x0, "GPF12"),
+ MA35_MUX(0x5, "I2S0_DI"),
+ MA35_MUX(0x6, "SPI1_MOSI"),
+ MA35_MUX(0x8, "RGMII1_TXD2"),
+ MA35_MUX(0x9, "SC0_RST"),
+ MA35_MUX(0xe, "KPI_ROW4")),
+ MA35_PIN(89, PF13, 0xac, 0x14,
+ MA35_MUX(0x0, "GPF13"),
+ MA35_MUX(0x5, "I2S0_DO"),
+ MA35_MUX(0x6, "SPI1_MISO"),
+ MA35_MUX(0x8, "RGMII1_TXD3"),
+ MA35_MUX(0x9, "SC0_PWR"),
+ MA35_MUX(0xe, "KPI_ROW5")),
+ MA35_PIN(90, PF14, 0xac, 0x18,
+ MA35_MUX(0x0, "GPF14"),
+ MA35_MUX(0x1, "EPWM2_BRAKE0"),
+ MA35_MUX(0x2, "EADC0_ST"),
+ MA35_MUX(0x3, "RGMII1_PPS"),
+ MA35_MUX(0x4, "RMII1_PPS"),
+ MA35_MUX(0x5, "SPI0_I2SMCLK"),
+ MA35_MUX(0x6, "SPI1_I2SMCLK"),
+ MA35_MUX(0x7, "CCAP1_SFIELD"),
+ MA35_MUX(0x8, "RGMII0_PPS"),
+ MA35_MUX(0x9, "RMII0_PPS"),
+ MA35_MUX(0xb, "TM0"),
+ MA35_MUX(0xc, "INT0"),
+ MA35_MUX(0xd, "SPI1_SS1"),
+ MA35_MUX(0xe, "QEI2_INDEX"),
+ MA35_MUX(0xf, "I2S0_MCLK")),
+ MA35_PIN(91, PF15, 0xac, 0x1c,
+ MA35_MUX(0x0, "GPF15"),
+ MA35_MUX(0x1, "HSUSB0_VBUSVLD")),
+ MA35_PIN(92, PG0, 0xb0, 0x0,
+ MA35_MUX(0x0, "GPG0"),
+ MA35_MUX(0x1, "EPWM0_CH0"),
+ MA35_MUX(0x2, "UART7_TXD"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x5, "SPI0_SS0"),
+ MA35_MUX(0x6, "EADC0_ST"),
+ MA35_MUX(0x7, "EBI_AD15"),
+ MA35_MUX(0x9, "I2S1_MCLK"),
+ MA35_MUX(0xa, "QEI0_INDEX"),
+ MA35_MUX(0xb, "TM1"),
+ MA35_MUX(0xc, "CLKO"),
+ MA35_MUX(0xd, "INT0"),
+ MA35_MUX(0xf, "EBI_ADR15")),
+ MA35_PIN(93, PG1, 0xb0, 0x4,
+ MA35_MUX(0x0, "GPG1"),
+ MA35_MUX(0x1, "EPWM0_CH3"),
+ MA35_MUX(0x2, "UART9_nRTS"),
+ MA35_MUX(0x3, "UART6_TXD"),
+ MA35_MUX(0x4, "I2C4_SCL"),
+ MA35_MUX(0x5, "CAN2_TXD"),
+ MA35_MUX(0x7, "EBI_nCS0"),
+ MA35_MUX(0x9, "QEI0_B"),
+ MA35_MUX(0xb, "TM1_EXT"),
+ MA35_MUX(0xe, "RGMII1_PPS"),
+ MA35_MUX(0xf, "RMII1_PPS")),
+ MA35_PIN(94, PG2, 0xb0, 0x8,
+ MA35_MUX(0x0, "GPG2"),
+ MA35_MUX(0x1, "EPWM0_CH4"),
+ MA35_MUX(0x2, "UART9_RXD"),
+ MA35_MUX(0x3, "CAN0_RXD"),
+ MA35_MUX(0x5, "SPI0_SS1"),
+ MA35_MUX(0x7, "EBI_ADR16"),
+ MA35_MUX(0x8, "EBI_nCS2"),
+ MA35_MUX(0xa, "QEI0_A"),
+ MA35_MUX(0xb, "TM3"),
+ MA35_MUX(0xd, "INT1")),
+ MA35_PIN(95, PG3, 0xb0, 0xc,
+ MA35_MUX(0x0, "GPG3"),
+ MA35_MUX(0x1, "EPWM0_CH5"),
+ MA35_MUX(0x2, "UART9_TXD"),
+ MA35_MUX(0x3, "CAN0_TXD"),
+ MA35_MUX(0x5, "SPI0_I2SMCLK"),
+ MA35_MUX(0x7, "EBI_ADR17"),
+ MA35_MUX(0x8, "EBI_nCS1"),
+ MA35_MUX(0x9, "EBI_MCLK"),
+ MA35_MUX(0xa, "QEI0_B"),
+ MA35_MUX(0xb, "TM3_EXT"),
+ MA35_MUX(0xc, "I2S1_MCLK")),
+ MA35_PIN(96, PG4, 0xb0, 0x10,
+ MA35_MUX(0x0, "GPG4"),
+ MA35_MUX(0x1, "EPWM1_CH0"),
+ MA35_MUX(0x2, "UART5_nCTS"),
+ MA35_MUX(0x3, "UART6_RXD"),
+ MA35_MUX(0x5, "SPI3_SS0"),
+ MA35_MUX(0x6, "QEI1_INDEX"),
+ MA35_MUX(0x7, "EBI_ADR18"),
+ MA35_MUX(0x8, "EBI_nCS0"),
+ MA35_MUX(0x9, "I2S1_DO"),
+ MA35_MUX(0xa, "SC1_CLK"),
+ MA35_MUX(0xb, "TM4"),
+ MA35_MUX(0xd, "INT2"),
+ MA35_MUX(0xe, "ECAP1_IC2")),
+ MA35_PIN(97, PG5, 0xb0, 0x14,
+ MA35_MUX(0x0, "GPG5"),
+ MA35_MUX(0x1, "EPWM1_CH1"),
+ MA35_MUX(0x2, "UART5_nRTS"),
+ MA35_MUX(0x3, "UART6_TXD"),
+ MA35_MUX(0x5, "SPI3_CLK"),
+ MA35_MUX(0x6, "ECAP0_IC0"),
+ MA35_MUX(0x7, "EBI_ADR19"),
+ MA35_MUX(0x8, "EBI_ALE"),
+ MA35_MUX(0x9, "I2S1_DI"),
+ MA35_MUX(0xa, "SC1_DAT"),
+ MA35_MUX(0xb, "TM4_EXT")),
+ MA35_PIN(98, PG6, 0xb0, 0x18,
+ MA35_MUX(0x0, "GPG6"),
+ MA35_MUX(0x1, "EPWM1_CH2"),
+ MA35_MUX(0x2, "UART5_RXD"),
+ MA35_MUX(0x3, "CAN1_RXD"),
+ MA35_MUX(0x5, "SPI3_MOSI"),
+ MA35_MUX(0x6, "ECAP0_IC1"),
+ MA35_MUX(0x7, "EBI_nRD"),
+ MA35_MUX(0x9, "I2S1_BCLK"),
+ MA35_MUX(0xa, "SC1_RST"),
+ MA35_MUX(0xb, "TM7"),
+ MA35_MUX(0xd, "INT3")),
+ MA35_PIN(99, PG7, 0xb0, 0x1c,
+ MA35_MUX(0x0, "GPG7"),
+ MA35_MUX(0x1, "EPWM1_CH3"),
+ MA35_MUX(0x2, "UART5_TXD"),
+ MA35_MUX(0x3, "CAN1_TXD"),
+ MA35_MUX(0x5, "SPI3_MISO"),
+ MA35_MUX(0x6, "ECAP0_IC2"),
+ MA35_MUX(0x7, "EBI_nWR"),
+ MA35_MUX(0x9, "I2S1_LRCK"),
+ MA35_MUX(0xa, "SC1_PWR"),
+ MA35_MUX(0xb, "TM7_EXT")),
+ MA35_PIN(100, PG8, 0xb4, 0x0,
+ MA35_MUX(0x0, "GPG8"),
+ MA35_MUX(0x1, "EPWM1_CH4"),
+ MA35_MUX(0x2, "UART12_RXD"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x5, "SPI2_SS0"),
+ MA35_MUX(0x6, "LCM_VSYNC"),
+ MA35_MUX(0x7, "I2C3_SDA"),
+ MA35_MUX(0xc, "EBI_AD7"),
+ MA35_MUX(0xd, "EBI_nCS0")),
+ MA35_PIN(101, PG9, 0xb4, 0x4,
+ MA35_MUX(0x0, "GPG9"),
+ MA35_MUX(0x1, "EPWM1_CH5"),
+ MA35_MUX(0x2, "UART12_TXD"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x5, "SPI2_CLK"),
+ MA35_MUX(0x6, "LCM_HSYNC"),
+ MA35_MUX(0x7, "I2C3_SCL"),
+ MA35_MUX(0xc, "EBI_AD8"),
+ MA35_MUX(0xd, "EBI_nCS1")),
+ MA35_PIN(102, PG10, 0xb4, 0x8,
+ MA35_MUX(0x0, "GPG10"),
+ MA35_MUX(0x2, "UART12_nRTS"),
+ MA35_MUX(0x3, "UART13_TXD"),
+ MA35_MUX(0x5, "SPI2_MOSI"),
+ MA35_MUX(0x6, "LCM_CLK"),
+ MA35_MUX(0xc, "EBI_AD9"),
+ MA35_MUX(0xd, "EBI_nWRH")),
+ MA35_PIN(103, PG11, 0xb4, 0xc,
+ MA35_MUX(0x0, "GPG11"),
+ MA35_MUX(0x3, "JTAG_TDO"),
+ MA35_MUX(0x5, "I2S0_MCLK"),
+ MA35_MUX(0x6, "NAND_RDY1"),
+ MA35_MUX(0x7, "EBI_nWRH"),
+ MA35_MUX(0x8, "EBI_nCS1"),
+ MA35_MUX(0xa, "EBI_AD0")),
+ MA35_PIN(104, PG12, 0xb4, 0x10,
+ MA35_MUX(0x0, "GPG12"),
+ MA35_MUX(0x3, "JTAG_TCK/SW_CLK"),
+ MA35_MUX(0x5, "I2S0_LRCK"),
+ MA35_MUX(0x7, "EBI_nWRL"),
+ MA35_MUX(0xa, "EBI_AD1")),
+ MA35_PIN(105, PG13, 0xb4, 0x14,
+ MA35_MUX(0x0, "GPG13"),
+ MA35_MUX(0x3, "JTAG_TMS/SW_DIO"),
+ MA35_MUX(0x5, "I2S0_BCLK"),
+ MA35_MUX(0x7, "EBI_MCLK"),
+ MA35_MUX(0xa, "EBI_AD2")),
+ MA35_PIN(106, PG14, 0xb4, 0x18,
+ MA35_MUX(0x0, "GPG14"),
+ MA35_MUX(0x3, "JTAG_TDI"),
+ MA35_MUX(0x5, "I2S0_DI"),
+ MA35_MUX(0x6, "NAND_nCS1"),
+ MA35_MUX(0x7, "EBI_ALE"),
+ MA35_MUX(0xa, "EBI_AD3")),
+ MA35_PIN(107, PG15, 0xb4, 0x1c,
+ MA35_MUX(0x0, "GPG15"),
+ MA35_MUX(0x3, "JTAG_nTRST"),
+ MA35_MUX(0x5, "I2S0_DO"),
+ MA35_MUX(0x7, "EBI_nCS0"),
+ MA35_MUX(0xa, "EBI_AD4")),
+ MA35_PIN(108, PH0, 0xb8, 0x0,
+ MA35_MUX(0x0, "GPH0"),
+ MA35_MUX(0x2, "UART8_nCTS"),
+ MA35_MUX(0x3, "UART7_RXD"),
+ MA35_MUX(0x6, "LCM_DATA8")),
+ MA35_PIN(109, PH1, 0xb8, 0x4,
+ MA35_MUX(0x0, "GPH1"),
+ MA35_MUX(0x2, "UART8_nRTS"),
+ MA35_MUX(0x3, "UART7_TXD"),
+ MA35_MUX(0x6, "LCM_DATA9")),
+ MA35_PIN(110, PH2, 0xb8, 0x8,
+ MA35_MUX(0x0, "GPH2"),
+ MA35_MUX(0x2, "UART8_RXD"),
+ MA35_MUX(0x6, "LCM_DATA10")),
+ MA35_PIN(111, PH3, 0xb8, 0xc,
+ MA35_MUX(0x0, "GPH3"),
+ MA35_MUX(0x2, "UART8_TXD"),
+ MA35_MUX(0x6, "LCM_DATA11")),
+ MA35_PIN(112, PH4, 0xb8, 0x10,
+ MA35_MUX(0x0, "GPH4"),
+ MA35_MUX(0x2, "UART10_nCTS"),
+ MA35_MUX(0x3, "UART9_RXD"),
+ MA35_MUX(0x6, "LCM_DATA12")),
+ MA35_PIN(113, PH5, 0xb8, 0x14,
+ MA35_MUX(0x0, "GPH5"),
+ MA35_MUX(0x2, "UART10_nRTS"),
+ MA35_MUX(0x3, "UART9_TXD"),
+ MA35_MUX(0x6, "LCM_DATA13")),
+ MA35_PIN(114, PH6, 0xb8, 0x18,
+ MA35_MUX(0x0, "GPH6"),
+ MA35_MUX(0x2, "UART10_RXD"),
+ MA35_MUX(0x6, "LCM_DATA14")),
+ MA35_PIN(115, PH7, 0xb8, 0x1c,
+ MA35_MUX(0x0, "GPH7"),
+ MA35_MUX(0x2, "UART10_TXD"),
+ MA35_MUX(0x6, "LCM_DATA15")),
+ MA35_PIN(116, PH8, 0xbc, 0x0,
+ MA35_MUX(0x0, "GPH8"),
+ MA35_MUX(0x6, "TAMPER0")),
+ MA35_PIN(117, PH9, 0xbc, 0x4,
+ MA35_MUX(0x0, "GPH9"),
+ MA35_MUX(0x4, "CLK_32KOUT"),
+ MA35_MUX(0x6, "TAMPER1")),
+ MA35_PIN(118, PH12, 0xbc, 0x10,
+ MA35_MUX(0x0, "GPH12"),
+ MA35_MUX(0x2, "UART14_nCTS"),
+ MA35_MUX(0x3, "UART13_RXD"),
+ MA35_MUX(0x6, "LCM_DATA20")),
+ MA35_PIN(119, PH13, 0xbc, 0x14,
+ MA35_MUX(0x0, "GPH13"),
+ MA35_MUX(0x2, "UART14_nRTS"),
+ MA35_MUX(0x3, "UART13_TXD"),
+ MA35_MUX(0x6, "LCM_DATA21")),
+ MA35_PIN(120, PH14, 0xbc, 0x18,
+ MA35_MUX(0x0, "GPH14"),
+ MA35_MUX(0x2, "UART14_RXD"),
+ MA35_MUX(0x6, "LCM_DATA22")),
+ MA35_PIN(121, PH15, 0xbc, 0x1c,
+ MA35_MUX(0x0, "GPH15"),
+ MA35_MUX(0x2, "UART14_TXD"),
+ MA35_MUX(0x6, "LCM_DATA23")),
+ MA35_PIN(122, PI0, 0xc0, 0x0,
+ MA35_MUX(0x0, "GPI0"),
+ MA35_MUX(0x1, "EPWM0_CH0"),
+ MA35_MUX(0x2, "UART12_nCTS"),
+ MA35_MUX(0x3, "UART11_RXD"),
+ MA35_MUX(0x4, "I2C2_SDA"),
+ MA35_MUX(0x5, "SPI3_SS0"),
+ MA35_MUX(0x7, "SC0_nCD"),
+ MA35_MUX(0x8, "EBI_ADR0"),
+ MA35_MUX(0xb, "TM0"),
+ MA35_MUX(0xc, "ECAP1_IC0")),
+ MA35_PIN(123, PI1, 0xc0, 0x4,
+ MA35_MUX(0x0, "GPI1"),
+ MA35_MUX(0x1, "EPWM0_CH1"),
+ MA35_MUX(0x2, "UART12_nRTS"),
+ MA35_MUX(0x3, "UART11_TXD"),
+ MA35_MUX(0x4, "I2C2_SCL"),
+ MA35_MUX(0x5, "SPI3_CLK"),
+ MA35_MUX(0x7, "SC0_CLK"),
+ MA35_MUX(0x8, "EBI_ADR1"),
+ MA35_MUX(0xb, "TM0_EXT"),
+ MA35_MUX(0xc, "ECAP1_IC1")),
+ MA35_PIN(124, PI2, 0xc0, 0x8,
+ MA35_MUX(0x0, "GPI2"),
+ MA35_MUX(0x1, "EPWM0_CH2"),
+ MA35_MUX(0x2, "UART12_RXD"),
+ MA35_MUX(0x3, "CAN0_RXD"),
+ MA35_MUX(0x5, "SPI3_MOSI"),
+ MA35_MUX(0x7, "SC0_DAT"),
+ MA35_MUX(0x8, "EBI_ADR2"),
+ MA35_MUX(0xb, "TM1"),
+ MA35_MUX(0xc, "ECAP1_IC2")),
+ MA35_PIN(125, PI3, 0xc0, 0xc,
+ MA35_MUX(0x0, "GPI3"),
+ MA35_MUX(0x1, "EPWM0_CH3"),
+ MA35_MUX(0x2, "UART12_TXD"),
+ MA35_MUX(0x3, "CAN0_TXD"),
+ MA35_MUX(0x5, "SPI3_MISO"),
+ MA35_MUX(0x7, "SC0_RST"),
+ MA35_MUX(0x8, "EBI_ADR3"),
+ MA35_MUX(0xb, "TM1_EXT")),
+ MA35_PIN(126, PI4, 0xc0, 0x10,
+ MA35_MUX(0x0, "GPI4"),
+ MA35_MUX(0x1, "EPWM0_CH4"),
+ MA35_MUX(0x2, "UART14_nCTS"),
+ MA35_MUX(0x3, "UART13_RXD"),
+ MA35_MUX(0x4, "I2C3_SDA"),
+ MA35_MUX(0x5, "SPI2_SS1"),
+ MA35_MUX(0x6, "I2S1_LRCK"),
+ MA35_MUX(0x8, "EBI_ADR4"),
+ MA35_MUX(0xd, "INT0")),
+ MA35_PIN(127, PI5, 0xc0, 0x14,
+ MA35_MUX(0x0, "GPI5"),
+ MA35_MUX(0x1, "EPWM0_CH5"),
+ MA35_MUX(0x2, "UART14_nRTS"),
+ MA35_MUX(0x3, "UART13_TXD"),
+ MA35_MUX(0x4, "I2C3_SCL"),
+ MA35_MUX(0x6, "I2S1_BCLK"),
+ MA35_MUX(0x8, "EBI_ADR5"),
+ MA35_MUX(0xd, "INT1")),
+ MA35_PIN(128, PI6, 0xc0, 0x18,
+ MA35_MUX(0x0, "GPI6"),
+ MA35_MUX(0x1, "EPWM0_BRAKE0"),
+ MA35_MUX(0x2, "UART14_RXD"),
+ MA35_MUX(0x3, "CAN1_RXD"),
+ MA35_MUX(0x6, "I2S1_DI"),
+ MA35_MUX(0x8, "EBI_ADR6"),
+ MA35_MUX(0xc, "QEI1_INDEX"),
+ MA35_MUX(0xd, "INT2")),
+ MA35_PIN(129, PI7, 0xc0, 0x1c,
+ MA35_MUX(0x0, "GPI7"),
+ MA35_MUX(0x1, "EPWM0_BRAKE1"),
+ MA35_MUX(0x2, "UART14_TXD"),
+ MA35_MUX(0x3, "CAN1_TXD"),
+ MA35_MUX(0x6, "I2S1_DO"),
+ MA35_MUX(0x8, "EBI_ADR7"),
+ MA35_MUX(0xc, "ECAP0_IC0"),
+ MA35_MUX(0xd, "INT3")),
+ MA35_PIN(130, PI8, 0xc4, 0x0,
+ MA35_MUX(0x0, "GPI8"),
+ MA35_MUX(0x2, "UART4_nCTS"),
+ MA35_MUX(0x3, "UART3_RXD"),
+ MA35_MUX(0x6, "LCM_DATA0"),
+ MA35_MUX(0xc, "EBI_AD11")),
+ MA35_PIN(131, PI9, 0xc4, 0x4,
+ MA35_MUX(0x0, "GPI9"),
+ MA35_MUX(0x2, "UART4_nRTS"),
+ MA35_MUX(0x3, "UART3_TXD"),
+ MA35_MUX(0x6, "LCM_DATA1"),
+ MA35_MUX(0xc, "EBI_AD12")),
+ MA35_PIN(132, PI10, 0xc4, 0x8,
+ MA35_MUX(0x0, "GPI10"),
+ MA35_MUX(0x2, "UART4_RXD"),
+ MA35_MUX(0x6, "LCM_DATA2"),
+ MA35_MUX(0xc, "EBI_AD13")),
+	MA35_PIN(133, PI11, 0xc4, 0xc,
+ MA35_MUX(0x0, "GPI11"),
+ MA35_MUX(0x2, "UART4_TXD"),
+ MA35_MUX(0x6, "LCM_DATA3"),
+ MA35_MUX(0xc, "EBI_AD14")),
+ MA35_PIN(134, PI12, 0xc4, 0x10,
+ MA35_MUX(0x0, "GPI12"),
+ MA35_MUX(0x2, "UART6_nCTS"),
+ MA35_MUX(0x3, "UART5_RXD"),
+ MA35_MUX(0x6, "LCM_DATA4")),
+ MA35_PIN(135, PI13, 0xc4, 0x14,
+ MA35_MUX(0x0, "GPI13"),
+ MA35_MUX(0x2, "UART6_nRTS"),
+ MA35_MUX(0x3, "UART5_TXD"),
+ MA35_MUX(0x6, "LCM_DATA5")),
+ MA35_PIN(136, PI14, 0xc4, 0x18,
+ MA35_MUX(0x0, "GPI14"),
+ MA35_MUX(0x2, "UART6_RXD"),
+ MA35_MUX(0x6, "LCM_DATA6")),
+ MA35_PIN(137, PI15, 0xc4, 0x1c,
+ MA35_MUX(0x0, "GPI15"),
+ MA35_MUX(0x2, "UART6_TXD"),
+ MA35_MUX(0x6, "LCM_DATA7")),
+ MA35_PIN(138, PJ0, 0xc8, 0x0,
+ MA35_MUX(0x0, "GPJ0"),
+ MA35_MUX(0x1, "EPWM1_BRAKE0"),
+ MA35_MUX(0x2, "UART8_nCTS"),
+ MA35_MUX(0x3, "UART7_RXD"),
+ MA35_MUX(0x4, "I2C2_SDA"),
+ MA35_MUX(0x5, "SPI2_SS0"),
+ MA35_MUX(0x6, "eMMC1_DAT4"),
+ MA35_MUX(0x7, "I2S0_LRCK"),
+ MA35_MUX(0x8, "SC0_CLK"),
+ MA35_MUX(0x9, "EBI_AD11"),
+ MA35_MUX(0xa, "EBI_ADR16"),
+ MA35_MUX(0xb, "EBI_nCS0"),
+ MA35_MUX(0xc, "EBI_AD7")),
+ MA35_PIN(139, PJ1, 0xc8, 0x4,
+ MA35_MUX(0x0, "GPJ1"),
+ MA35_MUX(0x1, "EPWM1_BRAKE1"),
+ MA35_MUX(0x2, "UART8_nRTS"),
+ MA35_MUX(0x3, "UART7_TXD"),
+ MA35_MUX(0x4, "I2C2_SCL"),
+ MA35_MUX(0x5, "SPI2_CLK"),
+ MA35_MUX(0x6, "eMMC1_DAT5"),
+ MA35_MUX(0x7, "I2S0_BCLK"),
+ MA35_MUX(0x8, "SC0_DAT"),
+ MA35_MUX(0x9, "EBI_AD12"),
+ MA35_MUX(0xa, "EBI_ADR17"),
+ MA35_MUX(0xb, "EBI_nCS1"),
+ MA35_MUX(0xc, "EBI_AD8")),
+ MA35_PIN(140, PJ2, 0xc8, 0x8,
+ MA35_MUX(0x0, "GPJ2"),
+ MA35_MUX(0x1, "EPWM1_CH4"),
+ MA35_MUX(0x2, "UART8_RXD"),
+ MA35_MUX(0x3, "CAN1_RXD"),
+ MA35_MUX(0x5, "SPI2_MOSI"),
+ MA35_MUX(0x6, "eMMC1_DAT6"),
+ MA35_MUX(0x7, "I2S0_DI"),
+ MA35_MUX(0x8, "SC0_RST"),
+ MA35_MUX(0x9, "EBI_AD13"),
+ MA35_MUX(0xa, "EBI_ADR18"),
+ MA35_MUX(0xb, "EBI_nWRH"),
+ MA35_MUX(0xc, "EBI_AD9")),
+ MA35_PIN(141, PJ3, 0xc8, 0xc,
+ MA35_MUX(0x0, "GPJ3"),
+ MA35_MUX(0x1, "EPWM1_CH5"),
+ MA35_MUX(0x2, "UART8_TXD"),
+ MA35_MUX(0x3, "CAN1_TXD"),
+ MA35_MUX(0x5, "SPI2_MISO"),
+ MA35_MUX(0x6, "eMMC1_DAT7"),
+ MA35_MUX(0x7, "I2S0_DO"),
+ MA35_MUX(0x8, "SC0_PWR"),
+ MA35_MUX(0x9, "EBI_AD14"),
+ MA35_MUX(0xa, "EBI_ADR19"),
+ MA35_MUX(0xb, "EBI_nWRL"),
+ MA35_MUX(0xc, "EBI_AD10")),
+ MA35_PIN(142, PJ4, 0xc8, 0x10,
+ MA35_MUX(0x0, "GPJ4"),
+ MA35_MUX(0x4, "I2C3_SDA"),
+ MA35_MUX(0x6, "SD1_WP")),
+ MA35_PIN(143, PJ5, 0xc8, 0x14,
+ MA35_MUX(0x0, "GPJ5"),
+ MA35_MUX(0x4, "I2C3_SCL"),
+ MA35_MUX(0x6, "SD1_nCD")),
+ MA35_PIN(144, PJ6, 0xc8, 0x18,
+ MA35_MUX(0x0, "GPJ6"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x6, "SD1_CMD/eMMC1_CMD")),
+ MA35_PIN(145, PJ7, 0xc8, 0x1c,
+ MA35_MUX(0x0, "GPJ7"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x6, "SD1_CLK/eMMC1_CLK")),
+ MA35_PIN(146, PJ8, 0xcc, 0x0,
+ MA35_MUX(0x0, "GPJ8"),
+ MA35_MUX(0x4, "I2C4_SDA"),
+ MA35_MUX(0x6, "SD1_DAT0/eMMC1_DAT0")),
+ MA35_PIN(147, PJ9, 0xcc, 0x4,
+ MA35_MUX(0x0, "GPJ9"),
+ MA35_MUX(0x4, "I2C4_SCL"),
+ MA35_MUX(0x6, "SD1_DAT1/eMMC1_DAT1")),
+ MA35_PIN(148, PJ10, 0xcc, 0x8,
+ MA35_MUX(0x0, "GPJ10"),
+ MA35_MUX(0x3, "CAN0_RXD"),
+ MA35_MUX(0x6, "SD1_DAT2/eMMC1_DAT2")),
+ MA35_PIN(149, PJ11, 0xcc, 0xc,
+ MA35_MUX(0x0, "GPJ11"),
+ MA35_MUX(0x3, "CAN0_TXD"),
+ MA35_MUX(0x6, "SD1_DAT3/eMMC1_DAT3")),
+ MA35_PIN(150, PJ12, 0xcc, 0x10,
+ MA35_MUX(0x0, "GPJ12"),
+ MA35_MUX(0x1, "EPWM1_CH2"),
+ MA35_MUX(0x2, "UART2_nCTS"),
+ MA35_MUX(0x3, "UART1_RXD"),
+ MA35_MUX(0x4, "I2C5_SDA"),
+ MA35_MUX(0x5, "SPI3_SS0"),
+ MA35_MUX(0x7, "SC1_CLK"),
+ MA35_MUX(0x8, "EBI_ADR12"),
+ MA35_MUX(0xb, "TM2"),
+ MA35_MUX(0xc, "QEI0_INDEX")),
+ MA35_PIN(151, PJ13, 0xcc, 0x14,
+ MA35_MUX(0x0, "GPJ13"),
+ MA35_MUX(0x1, "EPWM1_CH3"),
+ MA35_MUX(0x2, "UART2_nRTS"),
+ MA35_MUX(0x3, "UART1_TXD"),
+ MA35_MUX(0x4, "I2C5_SCL"),
+ MA35_MUX(0x5, "SPI3_MOSI"),
+ MA35_MUX(0x7, "SC1_DAT"),
+ MA35_MUX(0x8, "EBI_ADR13"),
+ MA35_MUX(0xb, "TM2_EXT")),
+ MA35_PIN(152, PJ14, 0xcc, 0x18,
+ MA35_MUX(0x0, "GPJ14"),
+ MA35_MUX(0x1, "EPWM1_CH4"),
+ MA35_MUX(0x2, "UART2_RXD"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x5, "SPI3_MISO"),
+ MA35_MUX(0x7, "SC1_RST"),
+ MA35_MUX(0x8, "EBI_ADR14"),
+ MA35_MUX(0xb, "TM3")),
+ MA35_PIN(153, PJ15, 0xcc, 0x1c,
+ MA35_MUX(0x0, "GPJ15"),
+ MA35_MUX(0x1, "EPWM1_CH5"),
+ MA35_MUX(0x2, "UART2_TXD"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x5, "SPI3_CLK"),
+ MA35_MUX(0x6, "EADC0_ST"),
+ MA35_MUX(0x7, "SC1_PWR"),
+ MA35_MUX(0x8, "EBI_ADR15"),
+ MA35_MUX(0xb, "TM3_EXT"),
+ MA35_MUX(0xd, "INT1")),
+ MA35_PIN(154, PK0, 0xd0, 0x0,
+ MA35_MUX(0x0, "GPK0"),
+ MA35_MUX(0x1, "EPWM0_SYNC_IN"),
+ MA35_MUX(0x2, "UART16_nCTS"),
+ MA35_MUX(0x3, "UART15_RXD"),
+ MA35_MUX(0x4, "I2C4_SDA"),
+ MA35_MUX(0x6, "I2S1_MCLK"),
+ MA35_MUX(0x8, "EBI_ADR8"),
+ MA35_MUX(0xb, "TM7"),
+ MA35_MUX(0xc, "ECAP0_IC1")),
+ MA35_PIN(155, PK1, 0xd0, 0x4,
+ MA35_MUX(0x0, "GPK1"),
+ MA35_MUX(0x1, "EPWM0_SYNC_OUT"),
+ MA35_MUX(0x2, "UART16_nRTS"),
+ MA35_MUX(0x3, "UART15_TXD"),
+ MA35_MUX(0x4, "I2C4_SCL"),
+ MA35_MUX(0x6, "EADC0_ST"),
+ MA35_MUX(0x8, "EBI_ADR9"),
+ MA35_MUX(0xb, "TM7_EXT"),
+ MA35_MUX(0xc, "ECAP0_IC2")),
+ MA35_PIN(156, PK2, 0xd0, 0x8,
+ MA35_MUX(0x0, "GPK2"),
+ MA35_MUX(0x1, "EPWM1_CH0"),
+ MA35_MUX(0x2, "UART16_RXD"),
+ MA35_MUX(0x3, "CAN2_RXD"),
+ MA35_MUX(0x5, "SPI3_I2SMCLK"),
+ MA35_MUX(0x7, "SC0_PWR"),
+ MA35_MUX(0x8, "EBI_ADR10"),
+ MA35_MUX(0xc, "QEI0_A")),
+ MA35_PIN(157, PK3, 0xd0, 0xc,
+ MA35_MUX(0x0, "GPK3"),
+ MA35_MUX(0x1, "EPWM1_CH1"),
+ MA35_MUX(0x2, "UART16_TXD"),
+ MA35_MUX(0x3, "CAN2_TXD"),
+ MA35_MUX(0x5, "SPI3_SS1"),
+ MA35_MUX(0x7, "SC1_nCD"),
+ MA35_MUX(0x8, "EBI_ADR11"),
+ MA35_MUX(0xc, "QEI0_B")),
+ MA35_PIN(158, PK4, 0xd0, 0x10,
+ MA35_MUX(0x0, "GPK4"),
+ MA35_MUX(0x2, "UART12_nCTS"),
+ MA35_MUX(0x3, "UART13_RXD"),
+ MA35_MUX(0x5, "SPI2_MISO"),
+ MA35_MUX(0x6, "LCM_DEN"),
+ MA35_MUX(0xc, "EBI_AD10"),
+ MA35_MUX(0xd, "EBI_nWRL")),
+ MA35_PIN(159, PK5, 0xd0, 0x14,
+ MA35_MUX(0x0, "GPK5"),
+ MA35_MUX(0x1, "EPWM1_CH1"),
+ MA35_MUX(0x2, "UART12_nRTS"),
+ MA35_MUX(0x3, "UART13_TXD"),
+ MA35_MUX(0x4, "I2C4_SCL"),
+ MA35_MUX(0x5, "SPI2_CLK"),
+ MA35_MUX(0x7, "I2S1_DI"),
+ MA35_MUX(0x8, "SC0_DAT"),
+ MA35_MUX(0x9, "EADC0_ST"),
+ MA35_MUX(0xb, "TM8_EXT"),
+ MA35_MUX(0xd, "INT1")),
+ MA35_PIN(160, PK6, 0xd0, 0x18,
+ MA35_MUX(0x0, "GPK6"),
+ MA35_MUX(0x1, "EPWM1_CH2"),
+ MA35_MUX(0x2, "UART12_RXD"),
+ MA35_MUX(0x3, "CAN0_RXD"),
+ MA35_MUX(0x5, "SPI2_MOSI"),
+ MA35_MUX(0x7, "I2S1_BCLK"),
+ MA35_MUX(0x8, "SC0_RST"),
+ MA35_MUX(0xb, "TM6"),
+ MA35_MUX(0xd, "INT2")),
+ MA35_PIN(161, PK7, 0xd0, 0x1c,
+ MA35_MUX(0x0, "GPK7"),
+ MA35_MUX(0x1, "EPWM1_CH3"),
+ MA35_MUX(0x2, "UART12_TXD"),
+ MA35_MUX(0x3, "CAN0_TXD"),
+ MA35_MUX(0x5, "SPI2_MISO"),
+ MA35_MUX(0x7, "I2S1_LRCK"),
+ MA35_MUX(0x8, "SC0_PWR"),
+ MA35_MUX(0x9, "CLKO"),
+ MA35_MUX(0xb, "TM6_EXT"),
+ MA35_MUX(0xd, "INT3")),
+ MA35_PIN(162, PK8, 0xd4, 0x0,
+ MA35_MUX(0x0, "GPK8"),
+ MA35_MUX(0x1, "EPWM1_CH0"),
+ MA35_MUX(0x4, "I2C3_SDA"),
+ MA35_MUX(0x5, "SPI3_CLK"),
+ MA35_MUX(0x7, "EADC0_ST"),
+ MA35_MUX(0x8, "EBI_AD15"),
+ MA35_MUX(0x9, "EBI_MCLK"),
+ MA35_MUX(0xa, "EBI_ADR15"),
+ MA35_MUX(0xb, "TM8"),
+ MA35_MUX(0xc, "QEI1_INDEX")),
+ MA35_PIN(163, PK9, 0xd4, 0x4,
+ MA35_MUX(0x0, "GPK9"),
+ MA35_MUX(0x4, "I2C3_SCL"),
+ MA35_MUX(0x6, "CCAP0_SCLK"),
+ MA35_MUX(0x8, "EBI_AD0"),
+ MA35_MUX(0xa, "EBI_ADR0")),
+ MA35_PIN(164, PK10, 0xd4, 0x8,
+ MA35_MUX(0x0, "GPK10"),
+ MA35_MUX(0x3, "CAN1_RXD"),
+ MA35_MUX(0x6, "CCAP0_PIXCLK"),
+ MA35_MUX(0x8, "EBI_AD1"),
+ MA35_MUX(0xa, "EBI_ADR1")),
+ MA35_PIN(165, PK11, 0xd4, 0xc,
+ MA35_MUX(0x0, "GPK11"),
+ MA35_MUX(0x3, "CAN1_TXD"),
+ MA35_MUX(0x6, "CCAP0_HSYNC"),
+ MA35_MUX(0x8, "EBI_AD2"),
+ MA35_MUX(0xa, "EBI_ADR2")),
+ MA35_PIN(166, PK12, 0xd4, 0x10,
+ MA35_MUX(0x0, "GPK12"),
+ MA35_MUX(0x1, "EPWM2_CH0"),
+ MA35_MUX(0x2, "UART1_nCTS"),
+ MA35_MUX(0x3, "UART13_RXD"),
+ MA35_MUX(0x4, "I2C4_SDA"),
+ MA35_MUX(0x5, "I2S0_LRCK"),
+ MA35_MUX(0x6, "SPI1_SS0"),
+ MA35_MUX(0x8, "SC0_CLK"),
+ MA35_MUX(0xb, "TM10"),
+ MA35_MUX(0xd, "INT2")),
+ MA35_PIN(167, PK13, 0xd4, 0x14,
+ MA35_MUX(0x0, "GPK13"),
+ MA35_MUX(0x1, "EPWM2_CH1"),
+ MA35_MUX(0x2, "UART1_nRTS"),
+ MA35_MUX(0x3, "UART13_TXD"),
+ MA35_MUX(0x4, "I2C4_SCL"),
+ MA35_MUX(0x5, "I2S0_BCLK"),
+ MA35_MUX(0x6, "SPI1_CLK"),
+ MA35_MUX(0x8, "SC0_DAT"),
+ MA35_MUX(0xb, "TM10_EXT")),
+ MA35_PIN(168, PK14, 0xd4, 0x18,
+ MA35_MUX(0x0, "GPK14"),
+ MA35_MUX(0x1, "EPWM2_CH2"),
+ MA35_MUX(0x2, "UART1_RXD"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x5, "I2S0_DI"),
+ MA35_MUX(0x6, "SPI1_MOSI"),
+ MA35_MUX(0x8, "SC0_RST"),
+ MA35_MUX(0xa, "I2C5_SDA"),
+ MA35_MUX(0xb, "TM11"),
+ MA35_MUX(0xd, "INT3")),
+ MA35_PIN(169, PK15, 0xd4, 0x1c,
+ MA35_MUX(0x0, "GPK15"),
+ MA35_MUX(0x1, "EPWM2_CH3"),
+ MA35_MUX(0x2, "UART1_TXD"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x5, "I2S0_DO"),
+ MA35_MUX(0x6, "SPI1_MISO"),
+ MA35_MUX(0x8, "SC0_PWR"),
+ MA35_MUX(0xa, "I2C5_SCL"),
+ MA35_MUX(0xb, "TM11_EXT")),
+ MA35_PIN(170, PL0, 0xd8, 0x0,
+ MA35_MUX(0x0, "GPL0"),
+ MA35_MUX(0x1, "EPWM1_CH0"),
+ MA35_MUX(0x2, "UART11_nCTS"),
+ MA35_MUX(0x3, "UART10_RXD"),
+ MA35_MUX(0x4, "I2C3_SDA"),
+ MA35_MUX(0x5, "SPI2_MOSI"),
+ MA35_MUX(0x6, "QSPI1_MOSI1"),
+ MA35_MUX(0x7, "I2S0_LRCK"),
+ MA35_MUX(0x8, "EBI_AD11"),
+ MA35_MUX(0x9, "SC1_CLK"),
+ MA35_MUX(0xb, "TM5"),
+ MA35_MUX(0xc, "QEI1_A")),
+ MA35_PIN(171, PL1, 0xd8, 0x4,
+ MA35_MUX(0x0, "GPL1"),
+ MA35_MUX(0x1, "EPWM1_CH1"),
+ MA35_MUX(0x2, "UART11_nRTS"),
+ MA35_MUX(0x3, "UART10_TXD"),
+ MA35_MUX(0x4, "I2C3_SCL"),
+ MA35_MUX(0x5, "SPI2_MISO"),
+ MA35_MUX(0x6, "QSPI1_MISO1"),
+ MA35_MUX(0x7, "I2S0_BCLK"),
+ MA35_MUX(0x8, "EBI_AD12"),
+ MA35_MUX(0x9, "SC1_DAT"),
+ MA35_MUX(0xb, "TM5_EXT"),
+ MA35_MUX(0xc, "QEI1_B")),
+ MA35_PIN(172, PL2, 0xd8, 0x8,
+ MA35_MUX(0x0, "GPL2"),
+ MA35_MUX(0x1, "EPWM1_CH2"),
+ MA35_MUX(0x2, "UART11_RXD"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x5, "SPI2_SS0"),
+ MA35_MUX(0x6, "QSPI1_SS1"),
+ MA35_MUX(0x7, "I2S0_DI"),
+ MA35_MUX(0x8, "EBI_AD13"),
+ MA35_MUX(0x9, "SC1_RST"),
+ MA35_MUX(0xb, "TM7"),
+ MA35_MUX(0xc, "QEI1_INDEX")),
+ MA35_PIN(173, PL3, 0xd8, 0xc,
+ MA35_MUX(0x0, "GPL3"),
+ MA35_MUX(0x1, "EPWM1_CH3"),
+ MA35_MUX(0x2, "UART11_TXD"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x5, "SPI2_CLK"),
+ MA35_MUX(0x6, "QSPI1_CLK"),
+ MA35_MUX(0x7, "I2S0_DO"),
+ MA35_MUX(0x8, "EBI_AD14"),
+ MA35_MUX(0x9, "SC1_PWR"),
+ MA35_MUX(0xb, "TM7_EXT"),
+ MA35_MUX(0xc, "ECAP0_IC0")),
+ MA35_PIN(174, PL4, 0xd8, 0x10,
+ MA35_MUX(0x0, "GPL4"),
+ MA35_MUX(0x1, "EPWM1_CH4"),
+ MA35_MUX(0x2, "UART2_nCTS"),
+ MA35_MUX(0x3, "UART1_RXD"),
+ MA35_MUX(0x4, "I2C4_SDA"),
+ MA35_MUX(0x5, "SPI3_MOSI"),
+ MA35_MUX(0x6, "QSPI1_MOSI0"),
+ MA35_MUX(0x7, "I2S0_MCLK"),
+ MA35_MUX(0x8, "EBI_nRD"),
+ MA35_MUX(0x9, "SC1_nCD"),
+ MA35_MUX(0xb, "TM9"),
+ MA35_MUX(0xc, "ECAP0_IC1")),
+ MA35_PIN(175, PL5, 0xd8, 0x14,
+ MA35_MUX(0x0, "GPL5"),
+ MA35_MUX(0x1, "EPWM1_CH5"),
+ MA35_MUX(0x2, "UART2_nRTS"),
+ MA35_MUX(0x3, "UART1_TXD"),
+ MA35_MUX(0x4, "I2C4_SCL"),
+ MA35_MUX(0x5, "SPI3_MISO"),
+ MA35_MUX(0x6, "QSPI1_MISO0"),
+ MA35_MUX(0x7, "I2S1_MCLK"),
+ MA35_MUX(0x8, "EBI_nWR"),
+ MA35_MUX(0x9, "SC0_nCD"),
+ MA35_MUX(0xb, "TM9_EXT"),
+ MA35_MUX(0xc, "ECAP0_IC2")),
+ MA35_PIN(176, PL6, 0xd8, 0x18,
+ MA35_MUX(0x0, "GPL6"),
+ MA35_MUX(0x1, "EPWM0_CH0"),
+ MA35_MUX(0x2, "UART2_RXD"),
+ MA35_MUX(0x3, "CAN0_RXD"),
+ MA35_MUX(0x6, "QSPI1_MOSI1"),
+ MA35_MUX(0x7, "TRACE_CLK"),
+ MA35_MUX(0x8, "EBI_AD5"),
+ MA35_MUX(0xb, "TM3"),
+ MA35_MUX(0xc, "ECAP1_IC0"),
+ MA35_MUX(0xd, "INT0")),
+ MA35_PIN(177, PL7, 0xd8, 0x1c,
+ MA35_MUX(0x0, "GPL7"),
+ MA35_MUX(0x1, "EPWM0_CH1"),
+ MA35_MUX(0x2, "UART2_TXD"),
+ MA35_MUX(0x3, "CAN0_TXD"),
+ MA35_MUX(0x6, "QSPI1_MISO1"),
+ MA35_MUX(0x8, "EBI_AD6"),
+ MA35_MUX(0xb, "TM3_EXT"),
+ MA35_MUX(0xc, "ECAP1_IC1"),
+ MA35_MUX(0xd, "INT1")),
+ MA35_PIN(178, PL8, 0xdc, 0x0,
+ MA35_MUX(0x0, "GPL8"),
+ MA35_MUX(0x1, "EPWM0_CH2"),
+ MA35_MUX(0x2, "UART14_nCTS"),
+ MA35_MUX(0x3, "UART13_RXD"),
+ MA35_MUX(0x4, "I2C5_SDA"),
+ MA35_MUX(0x5, "SPI3_SS0"),
+ MA35_MUX(0x6, "EPWM0_CH4"),
+ MA35_MUX(0x7, "I2S1_LRCK"),
+ MA35_MUX(0x8, "EBI_AD7"),
+ MA35_MUX(0x9, "SC0_CLK"),
+ MA35_MUX(0xb, "TM4"),
+ MA35_MUX(0xc, "ECAP1_IC2"),
+ MA35_MUX(0xd, "INT2")),
+ MA35_PIN(179, PL9, 0xdc, 0x4,
+ MA35_MUX(0x0, "GPL9"),
+ MA35_MUX(0x1, "EPWM0_CH3"),
+ MA35_MUX(0x2, "UART14_nRTS"),
+ MA35_MUX(0x3, "UART13_TXD"),
+ MA35_MUX(0x4, "I2C5_SCL"),
+ MA35_MUX(0x5, "SPI3_CLK"),
+ MA35_MUX(0x6, "EPWM1_CH4"),
+ MA35_MUX(0x7, "I2S1_BCLK"),
+ MA35_MUX(0x8, "EBI_AD8"),
+ MA35_MUX(0x9, "SC0_DAT"),
+ MA35_MUX(0xb, "TM4_EXT"),
+ MA35_MUX(0xc, "QEI0_A"),
+ MA35_MUX(0xd, "INT3")),
+ MA35_PIN(180, PL10, 0xdc, 0x8,
+ MA35_MUX(0x0, "GPL10"),
+ MA35_MUX(0x1, "EPWM0_CH4"),
+ MA35_MUX(0x2, "UART14_RXD"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x5, "SPI3_MOSI"),
+ MA35_MUX(0x6, "EPWM0_CH5"),
+ MA35_MUX(0x7, "I2S1_DI"),
+ MA35_MUX(0x8, "EBI_AD9"),
+ MA35_MUX(0x9, "SC0_RST"),
+ MA35_MUX(0xb, "EBI_nWRH"),
+ MA35_MUX(0xc, "QEI0_B")),
+ MA35_PIN(181, PL11, 0xdc, 0xc,
+ MA35_MUX(0x0, "GPL11"),
+ MA35_MUX(0x1, "EPWM0_CH5"),
+ MA35_MUX(0x2, "UART14_TXD"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x5, "SPI3_MISO"),
+ MA35_MUX(0x6, "EPWM1_CH5"),
+ MA35_MUX(0x7, "I2S1_DO"),
+ MA35_MUX(0x8, "EBI_AD10"),
+ MA35_MUX(0x9, "SC0_PWR"),
+ MA35_MUX(0xb, "EBI_nWRL"),
+ MA35_MUX(0xc, "QEI0_INDEX")),
+ MA35_PIN(182, PL12, 0xdc, 0x10,
+ MA35_MUX(0x0, "GPL12"),
+ MA35_MUX(0x1, "EPWM0_SYNC_IN"),
+ MA35_MUX(0x2, "UART7_nCTS"),
+ MA35_MUX(0x3, "ECAP1_IC0"),
+ MA35_MUX(0x4, "UART14_RXD"),
+ MA35_MUX(0x5, "SPI0_SS0"),
+ MA35_MUX(0x6, "I2S1_LRCK"),
+ MA35_MUX(0x7, "SC1_CLK"),
+ MA35_MUX(0x8, "EBI_AD0"),
+ MA35_MUX(0x9, "HSUSBH_PWREN"),
+ MA35_MUX(0xa, "I2C2_SDA"),
+ MA35_MUX(0xb, "TM0"),
+ MA35_MUX(0xc, "EPWM0_CH2"),
+ MA35_MUX(0xd, "EBI_AD11"),
+ MA35_MUX(0xe, "RGMII0_PPS"),
+ MA35_MUX(0xf, "RMII0_PPS")),
+ MA35_PIN(183, PL13, 0xdc, 0x14,
+ MA35_MUX(0x0, "GPL13"),
+ MA35_MUX(0x1, "EPWM0_SYNC_OUT"),
+ MA35_MUX(0x2, "UART7_nRTS"),
+ MA35_MUX(0x3, "ECAP1_IC1"),
+ MA35_MUX(0x4, "UART14_TXD"),
+ MA35_MUX(0x5, "SPI0_CLK"),
+ MA35_MUX(0x6, "I2S1_BCLK"),
+ MA35_MUX(0x7, "SC1_DAT"),
+ MA35_MUX(0x8, "EBI_AD1"),
+ MA35_MUX(0x9, "HSUSBH_OVC"),
+ MA35_MUX(0xa, "I2C2_SCL"),
+ MA35_MUX(0xb, "TM0_EXT"),
+ MA35_MUX(0xc, "EPWM0_CH3"),
+ MA35_MUX(0xd, "EBI_AD12"),
+ MA35_MUX(0xe, "RGMII1_PPS"),
+ MA35_MUX(0xf, "RMII1_PPS")),
+ MA35_PIN(184, PL14, 0xdc, 0x18,
+ MA35_MUX(0x0, "GPL14"),
+ MA35_MUX(0x1, "EPWM0_CH2"),
+ MA35_MUX(0x2, "UART7_RXD"),
+ MA35_MUX(0x4, "CAN1_RXD"),
+ MA35_MUX(0x5, "SPI0_MOSI"),
+ MA35_MUX(0x6, "I2S1_DI"),
+ MA35_MUX(0x7, "SC1_RST"),
+ MA35_MUX(0x8, "EBI_AD2"),
+ MA35_MUX(0xb, "TM2"),
+ MA35_MUX(0xc, "INT0"),
+ MA35_MUX(0xd, "EBI_AD13")),
+ MA35_PIN(185, PL15, 0xdc, 0x1c,
+ MA35_MUX(0x0, "GPL15"),
+ MA35_MUX(0x1, "EPWM0_CH1"),
+ MA35_MUX(0x2, "UART7_TXD"),
+ MA35_MUX(0x3, "TRACE_CLK"),
+ MA35_MUX(0x4, "CAN1_TXD"),
+ MA35_MUX(0x5, "SPI0_MISO"),
+ MA35_MUX(0x6, "I2S1_DO"),
+ MA35_MUX(0x7, "SC1_PWR"),
+ MA35_MUX(0x8, "EBI_AD3"),
+ MA35_MUX(0xb, "TM2_EXT"),
+ MA35_MUX(0xc, "INT2"),
+ MA35_MUX(0xd, "EBI_AD14")),
+ MA35_PIN(186, PM0, 0xe0, 0x0,
+ MA35_MUX(0x0, "GPM0"),
+ MA35_MUX(0x4, "I2C4_SDA"),
+ MA35_MUX(0x6, "CCAP0_VSYNC"),
+ MA35_MUX(0x8, "EBI_AD3"),
+ MA35_MUX(0xa, "EBI_ADR3")),
+ MA35_PIN(187, PM1, 0xe0, 0x4,
+ MA35_MUX(0x0, "GPM1"),
+ MA35_MUX(0x4, "I2C4_SCL"),
+ MA35_MUX(0x5, "SPI3_I2SMCLK"),
+ MA35_MUX(0x6, "CCAP0_SFIELD"),
+ MA35_MUX(0x8, "EBI_AD4"),
+ MA35_MUX(0xa, "EBI_ADR4")),
+ MA35_PIN(188, PM2, 0xe0, 0x8,
+ MA35_MUX(0x0, "GPM2"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x6, "CCAP0_DATA0"),
+ MA35_MUX(0x8, "EBI_AD5"),
+ MA35_MUX(0xa, "EBI_ADR5")),
+ MA35_PIN(189, PM3, 0xe0, 0xc,
+ MA35_MUX(0x0, "GPM3"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x6, "CCAP0_DATA1"),
+ MA35_MUX(0x8, "EBI_AD6"),
+ MA35_MUX(0xa, "EBI_ADR6")),
+ MA35_PIN(190, PM4, 0xe0, 0x10,
+ MA35_MUX(0x0, "GPM4"),
+ MA35_MUX(0x4, "I2C5_SDA"),
+ MA35_MUX(0x6, "CCAP0_DATA2"),
+ MA35_MUX(0x8, "EBI_AD7"),
+ MA35_MUX(0xa, "EBI_ADR7")),
+ MA35_PIN(191, PM5, 0xe0, 0x14,
+ MA35_MUX(0x0, "GPM5"),
+ MA35_MUX(0x4, "I2C5_SCL"),
+ MA35_MUX(0x6, "CCAP0_DATA3"),
+ MA35_MUX(0x8, "EBI_AD8"),
+ MA35_MUX(0xa, "EBI_ADR8")),
+ MA35_PIN(192, PM6, 0xe0, 0x18,
+ MA35_MUX(0x0, "GPM6"),
+ MA35_MUX(0x3, "CAN0_RXD"),
+ MA35_MUX(0x6, "CCAP0_DATA4"),
+ MA35_MUX(0x8, "EBI_AD9"),
+ MA35_MUX(0xa, "EBI_ADR9")),
+ MA35_PIN(193, PM7, 0xe0, 0x1c,
+ MA35_MUX(0x0, "GPM7"),
+ MA35_MUX(0x3, "CAN0_TXD"),
+ MA35_MUX(0x6, "CCAP0_DATA5"),
+ MA35_MUX(0x8, "EBI_AD10"),
+ MA35_MUX(0xa, "EBI_ADR10")),
+ MA35_PIN(194, PM8, 0xe4, 0x0,
+ MA35_MUX(0x0, "GPM8"),
+ MA35_MUX(0x4, "I2C0_SDA"),
+ MA35_MUX(0x6, "CCAP0_DATA6"),
+ MA35_MUX(0x8, "EBI_AD11"),
+ MA35_MUX(0xa, "EBI_ADR11")),
+ MA35_PIN(195, PM9, 0xe4, 0x4,
+ MA35_MUX(0x0, "GPM9"),
+ MA35_MUX(0x4, "I2C0_SCL"),
+ MA35_MUX(0x6, "CCAP0_DATA7"),
+ MA35_MUX(0x8, "EBI_AD12"),
+ MA35_MUX(0xa, "EBI_ADR12")),
+ MA35_PIN(196, PM10, 0xe4, 0x8,
+ MA35_MUX(0x0, "GPM10"),
+ MA35_MUX(0x1, "EPWM1_CH2"),
+ MA35_MUX(0x3, "CAN2_RXD"),
+ MA35_MUX(0x5, "SPI3_SS0"),
+ MA35_MUX(0x6, "CCAP0_DATA8"),
+ MA35_MUX(0x7, "SPI2_I2SMCLK"),
+ MA35_MUX(0x8, "EBI_AD13"),
+ MA35_MUX(0xa, "EBI_ADR13")),
+ MA35_PIN(197, PM11, 0xe4, 0xc,
+ MA35_MUX(0x0, "GPM11"),
+ MA35_MUX(0x1, "EPWM1_CH3"),
+ MA35_MUX(0x3, "CAN2_TXD"),
+ MA35_MUX(0x5, "SPI3_SS1"),
+ MA35_MUX(0x6, "CCAP0_DATA9"),
+ MA35_MUX(0x7, "SPI2_SS1"),
+ MA35_MUX(0x8, "EBI_AD14"),
+ MA35_MUX(0xa, "EBI_ADR14")),
+ MA35_PIN(198, PM12, 0xe4, 0x10,
+ MA35_MUX(0x0, "GPM12"),
+ MA35_MUX(0x1, "EPWM1_CH4"),
+ MA35_MUX(0x2, "UART10_nCTS"),
+ MA35_MUX(0x3, "TRACE_DATA0"),
+ MA35_MUX(0x4, "UART11_RXD"),
+ MA35_MUX(0x5, "I2C2_SDA"),
+ MA35_MUX(0x7, "SC1_nCD"),
+ MA35_MUX(0x8, "EBI_AD8"),
+ MA35_MUX(0x9, "I2S1_MCLK"),
+ MA35_MUX(0xb, "TM8")),
+ MA35_PIN(199, PM13, 0xe4, 0x14,
+ MA35_MUX(0x0, "GPM13"),
+ MA35_MUX(0x1, "EPWM1_CH5"),
+ MA35_MUX(0x2, "UART10_nRTS"),
+ MA35_MUX(0x3, "TRACE_DATA1"),
+ MA35_MUX(0x4, "UART11_TXD"),
+ MA35_MUX(0x5, "I2C2_SCL"),
+ MA35_MUX(0x8, "EBI_AD9"),
+ MA35_MUX(0x9, "ECAP1_IC0"),
+ MA35_MUX(0xb, "TM8_EXT")),
+ MA35_PIN(200, PM14, 0xe4, 0x18,
+ MA35_MUX(0x0, "GPM14"),
+ MA35_MUX(0x1, "EPWM1_BRAKE0"),
+ MA35_MUX(0x2, "UART10_RXD"),
+ MA35_MUX(0x3, "TRACE_DATA2"),
+ MA35_MUX(0x4, "CAN2_RXD"),
+ MA35_MUX(0x6, "I2C3_SDA"),
+ MA35_MUX(0x8, "EBI_AD10"),
+ MA35_MUX(0x9, "ECAP1_IC1"),
+ MA35_MUX(0xb, "TM10"),
+ MA35_MUX(0xd, "INT1")),
+ MA35_PIN(201, PM15, 0xe4, 0x1c,
+ MA35_MUX(0x0, "GPM15"),
+ MA35_MUX(0x1, "EPWM1_BRAKE1"),
+ MA35_MUX(0x2, "UART10_TXD"),
+ MA35_MUX(0x3, "TRACE_DATA3"),
+ MA35_MUX(0x4, "CAN2_TXD"),
+ MA35_MUX(0x6, "I2C3_SCL"),
+ MA35_MUX(0x8, "EBI_AD11"),
+ MA35_MUX(0x9, "ECAP1_IC2"),
+ MA35_MUX(0xb, "TM10_EXT"),
+ MA35_MUX(0xd, "INT2")),
+ MA35_PIN(202, PN0, 0xe8, 0x0,
+ MA35_MUX(0x0, "GPN0"),
+ MA35_MUX(0x4, "I2C2_SDA"),
+ MA35_MUX(0x6, "CCAP1_DATA0")),
+ MA35_PIN(203, PN1, 0xe8, 0x4,
+ MA35_MUX(0x0, "GPN1"),
+ MA35_MUX(0x4, "I2C2_SCL"),
+ MA35_MUX(0x6, "CCAP1_DATA1")),
+ MA35_PIN(204, PN2, 0xe8, 0x8,
+ MA35_MUX(0x0, "GPN2"),
+ MA35_MUX(0x3, "CAN0_RXD"),
+ MA35_MUX(0x6, "CCAP1_DATA2")),
+ MA35_PIN(205, PN3, 0xe8, 0xc,
+ MA35_MUX(0x0, "GPN3"),
+ MA35_MUX(0x3, "CAN0_TXD"),
+ MA35_MUX(0x6, "CCAP1_DATA3")),
+ MA35_PIN(206, PN4, 0xe8, 0x10,
+ MA35_MUX(0x0, "GPN4"),
+ MA35_MUX(0x4, "I2C1_SDA"),
+ MA35_MUX(0x6, "CCAP1_DATA4")),
+ MA35_PIN(207, PN5, 0xe8, 0x14,
+ MA35_MUX(0x0, "GPN5"),
+ MA35_MUX(0x4, "I2C1_SCL"),
+ MA35_MUX(0x6, "CCAP1_DATA5")),
+ MA35_PIN(208, PN6, 0xe8, 0x18,
+ MA35_MUX(0x0, "GPN6"),
+ MA35_MUX(0x3, "CAN1_RXD"),
+ MA35_MUX(0x6, "CCAP1_DATA6")),
+ MA35_PIN(209, PN7, 0xe8, 0x1c,
+ MA35_MUX(0x0, "GPN7"),
+ MA35_MUX(0x3, "CAN1_TXD"),
+ MA35_MUX(0x6, "CCAP1_DATA7")),
+ MA35_PIN(210, PN10, 0xec, 0x8,
+ MA35_MUX(0x0, "GPN10"),
+ MA35_MUX(0x3, "CAN2_RXD"),
+ MA35_MUX(0x6, "CCAP1_SCLK")),
+ MA35_PIN(211, PN11, 0xec, 0xc,
+ MA35_MUX(0x0, "GPN11"),
+ MA35_MUX(0x3, "CAN2_TXD"),
+ MA35_MUX(0x6, "CCAP1_PIXCLK")),
+ MA35_PIN(212, PN12, 0xec, 0x10,
+ MA35_MUX(0x0, "GPN12"),
+ MA35_MUX(0x2, "UART6_nCTS"),
+ MA35_MUX(0x3, "UART12_RXD"),
+ MA35_MUX(0x4, "I2C5_SDA"),
+ MA35_MUX(0x6, "CCAP1_HSYNC")),
+ MA35_PIN(213, PN13, 0xec, 0x14,
+ MA35_MUX(0x0, "GPN13"),
+ MA35_MUX(0x2, "UART6_nRTS"),
+ MA35_MUX(0x3, "UART12_TXD"),
+ MA35_MUX(0x4, "I2C5_SCL"),
+ MA35_MUX(0x6, "CCAP1_VSYNC")),
+ MA35_PIN(214, PN14, 0xec, 0x18,
+ MA35_MUX(0x0, "GPN14"),
+ MA35_MUX(0x2, "UART6_RXD"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x5, "SPI1_SS1"),
+ MA35_MUX(0x6, "CCAP1_SFIELD"),
+ MA35_MUX(0x7, "SPI1_I2SMCLK")),
+ MA35_PIN(215, PN15, 0xec, 0x1c,
+ MA35_MUX(0x0, "GPN15"),
+ MA35_MUX(0x1, "EPWM2_CH4"),
+ MA35_MUX(0x2, "UART6_TXD"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x5, "I2S0_MCLK"),
+ MA35_MUX(0x6, "SPI1_SS1"),
+ MA35_MUX(0x7, "SPI1_I2SMCLK"),
+ MA35_MUX(0x8, "SC0_nCD"),
+ MA35_MUX(0x9, "EADC0_ST"),
+ MA35_MUX(0xa, "CLKO"),
+ MA35_MUX(0xb, "TM6")),
+ MA35_PIN(216, PN8, 0xec, 0x0,
+ MA35_MUX(0x0, "GPN8"),
+ MA35_MUX(0x1, "EPWM2_CH4"),
+ MA35_MUX(0x4, "I2C0_SDA"),
+ MA35_MUX(0x5, "SPI2_I2SMCLK"),
+ MA35_MUX(0x6, "CCAP1_DATA8")),
+ MA35_PIN(217, PN9, 0xec, 0x4,
+ MA35_MUX(0x0, "GPN9"),
+ MA35_MUX(0x1, "EPWM2_CH5"),
+ MA35_MUX(0x4, "I2C0_SCL"),
+ MA35_MUX(0x5, "SPI1_I2SMCLK"),
+ MA35_MUX(0x6, "CCAP1_DATA9")),
+ MA35_PIN(218, PN10, 0xec, 0x8,
+ MA35_MUX(0x0, "GPN10"),
+ MA35_MUX(0x3, "CAN2_RXD"),
+ MA35_MUX(0x4, "USBHL2_DM"),
+ MA35_MUX(0x6, "CCAP1_SCLK")),
+ MA35_PIN(219, PN11, 0xec, 0xc,
+ MA35_MUX(0x0, "GPN11"),
+ MA35_MUX(0x3, "CAN2_TXD"),
+ MA35_MUX(0x4, "USBHL2_DP"),
+ MA35_MUX(0x6, "CCAP1_PIXCLK")),
+ MA35_PIN(220, PN12, 0xec, 0x10,
+ MA35_MUX(0x0, "GPN12"),
+ MA35_MUX(0x2, "UART6_nCTS"),
+ MA35_MUX(0x3, "UART12_RXD"),
+ MA35_MUX(0x4, "I2C5_SDA"),
+ MA35_MUX(0x6, "CCAP1_HSYNC")),
+ MA35_PIN(221, PN13, 0xec, 0x14,
+ MA35_MUX(0x0, "GPN13"),
+ MA35_MUX(0x2, "UART6_nRTS"),
+ MA35_MUX(0x3, "UART12_TXD"),
+ MA35_MUX(0x4, "I2C5_SCL"),
+ MA35_MUX(0x6, "CCAP1_VSYNC")),
+ MA35_PIN(222, PN14, 0xec, 0x18,
+ MA35_MUX(0x0, "GPN14"),
+ MA35_MUX(0x2, "UART6_RXD"),
+ MA35_MUX(0x3, "CAN3_RXD"),
+ MA35_MUX(0x4, "USBHL3_DM"),
+ MA35_MUX(0x5, "SPI1_SS1"),
+ MA35_MUX(0x6, "CCAP1_SFIELD"),
+ MA35_MUX(0x7, "SPI1_I2SMCLK")),
+ MA35_PIN(223, PN15, 0xec, 0x1c,
+ MA35_MUX(0x0, "GPN15"),
+ MA35_MUX(0x1, "EPWM2_CH4"),
+ MA35_MUX(0x2, "UART6_TXD"),
+ MA35_MUX(0x3, "CAN3_TXD"),
+ MA35_MUX(0x4, "USBHL3_DP"),
+ MA35_MUX(0x5, "I2S0_MCLK"),
+ MA35_MUX(0x6, "SPI1_SS1"),
+ MA35_MUX(0x7, "SPI1_I2SMCLK"),
+ MA35_MUX(0x8, "SC0_nCD"),
+ MA35_MUX(0x9, "EADC0_ST"),
+ MA35_MUX(0xa, "CLKO"),
+ MA35_MUX(0xb, "TM6")),
+};
+
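+/*
+ * Each GPIO port has two multi-function (MFP) registers holding eight
+ * 4-bit mux fields apiece, starting at offset 0x80 for port A, so the pin
+ * number (port * 16 + bit) follows from the register offset and field
+ * shift, e.g. offset 0x8c, shift 0xc -> (0x8c - 0x80) * 2 + 0xc / 4 = 27,
+ * which is PB11 in the ma35d1_pins table above.
+ */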
+static int ma35d1_get_pin_num(int offset, int shift)
+{
+ return (offset - 0x80) * 2 + shift / 4;
+}
+
+static struct ma35_pinctrl_soc_info ma35d1_pinctrl_info = {
+ .pins = ma35d1_pins,
+ .npins = ARRAY_SIZE(ma35d1_pins),
+ .get_pin_num = ma35d1_get_pin_num,
+};
+
+static DEFINE_NOIRQ_DEV_PM_OPS(ma35_pinctrl_pm_ops, ma35_pinctrl_suspend, ma35_pinctrl_resume);
+
+static int ma35d1_pinctrl_probe(struct platform_device *pdev)
+{
+ return ma35_pinctrl_probe(pdev, &ma35d1_pinctrl_info);
+}
+
+static const struct of_device_id ma35d1_pinctrl_of_match[] = {
+ { .compatible = "nuvoton,ma35d1-pinctrl" },
+ { },
+};
+
+static struct platform_driver ma35d1_pinctrl_driver = {
+ .probe = ma35d1_pinctrl_probe,
+ .driver = {
+ .name = "ma35d1-pinctrl",
+ .pm = pm_sleep_ptr(&ma35_pinctrl_pm_ops),
+ .of_match_table = ma35d1_pinctrl_of_match,
+ },
+};
+
+static int __init ma35d1_pinctrl_init(void)
+{
+ return platform_driver_register(&ma35d1_pinctrl_driver);
+}
+arch_initcall(ma35d1_pinctrl_init);
+
+MODULE_AUTHOR("schung@nuvoton.com");
+MODULE_DESCRIPTION("Nuvoton MA35D1 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c
index f0cad2c501f7..df3e5d82da4b 100644
--- a/drivers/pinctrl/nxp/pinctrl-s32cc.c
+++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c
@@ -268,28 +268,23 @@ static int s32_dt_node_to_map(struct pinctrl_dev *pctldev,
unsigned int *num_maps)
{
unsigned int reserved_maps;
- struct device_node *np;
- int ret = 0;
+ int ret;
reserved_maps = 0;
*map = NULL;
*num_maps = 0;
- for_each_available_child_of_node(np_config, np) {
+ for_each_available_child_of_node_scoped(np_config, np) {
ret = s32_dt_group_node_to_map(pctldev, np, map,
&reserved_maps, num_maps,
np_config->name);
if (ret < 0) {
- of_node_put(np);
- break;
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
+ return ret;
}
}
- if (ret)
- pinctrl_utils_free_map(pctldev, *map, *num_maps);
-
- return ret;
-
+ return 0;
}
static const struct pinctrl_ops s32_pctrl_ops = {
@@ -786,7 +781,6 @@ static int s32_pinctrl_parse_functions(struct device_node *np,
struct s32_pinctrl_soc_info *info,
u32 index)
{
- struct device_node *child;
struct pinfunction *func;
struct s32_pin_group *grp;
const char **groups;
@@ -810,14 +804,12 @@ static int s32_pinctrl_parse_functions(struct device_node *np,
if (!groups)
return -ENOMEM;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
groups[i] = child->name;
grp = &info->groups[info->grp_index++];
ret = s32_pinctrl_parse_groups(child, grp, info);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
i++;
}
@@ -831,7 +823,6 @@ static int s32_pinctrl_probe_dt(struct platform_device *pdev,
{
struct s32_pinctrl_soc_info *info = ipctl->info;
struct device_node *np = pdev->dev.of_node;
- struct device_node *child;
struct resource *res;
struct regmap *map;
void __iomem *base;
@@ -889,7 +880,7 @@ static int s32_pinctrl_probe_dt(struct platform_device *pdev,
return -ENOMEM;
info->ngroups = 0;
- for_each_child_of_node(np, child)
+ for_each_child_of_node_scoped(np, child)
info->ngroups += of_get_child_count(child);
info->groups = devm_kcalloc(&pdev->dev, info->ngroups,
@@ -898,12 +889,10 @@ static int s32_pinctrl_probe_dt(struct platform_device *pdev,
return -ENOMEM;
i = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = s32_pinctrl_parse_functions(child, info, i++);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
}
return 0;
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 80de389199bd..a499b8af5c1f 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -382,7 +382,6 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
unsigned int *num_maps, enum pinctrl_map_type type)
{
unsigned int reserved_maps;
- struct device_node *np;
int ret;
reserved_maps = 0;
@@ -394,13 +393,11 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto exit;
- for_each_available_child_of_node(np_config, np) {
+ for_each_available_child_of_node_scoped(np_config, np) {
ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps, type);
- if (ret < 0) {
- of_node_put(np);
+ if (ret < 0)
goto exit;
- }
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index a27c01fcbb47..8b01d312305a 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -632,7 +632,6 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct pinctrl_map **map,
unsigned int *num_maps)
{
- struct device_node *np;
unsigned int reserved_maps;
int ret;
@@ -648,13 +647,11 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = atmel_pctl_dt_subnode_to_map(pctldev, np_config, map,
&reserved_maps, num_maps);
if (ret) {
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
- if (ret < 0) {
- of_node_put(np);
+ if (ret < 0)
break;
- }
}
}
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 5aa9d5c533c6..b3c3f5fb2e2e 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1244,7 +1244,6 @@ static int at91_pinctrl_parse_groups(struct device_node *np,
static int at91_pinctrl_parse_functions(struct device_node *np,
struct at91_pinctrl *info, u32 index)
{
- struct device_node *child;
struct at91_pmx_func *func;
struct at91_pin_group *grp;
int ret;
@@ -1267,14 +1266,12 @@ static int at91_pinctrl_parse_functions(struct device_node *np,
if (!func->groups)
return -ENOMEM;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
func->groups[i] = child->name;
grp = &info->groups[grp_index++];
ret = at91_pinctrl_parse_groups(child, grp, info, i++);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
}
return 0;
@@ -1296,7 +1293,6 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
int i, j, ngpio_chips_enabled = 0;
uint32_t *tmp;
struct device_node *np = dev->of_node;
- struct device_node *child;
if (!np)
return -ENODEV;
@@ -1349,14 +1345,12 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
i = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (of_device_is_compatible(child, gpio_compat))
continue;
ret = at91_pinctrl_parse_functions(child, info, i++);
- if (ret) {
- of_node_put(child);
+ if (ret)
return dev_err_probe(dev, ret, "failed to parse function\n");
- }
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 981c569bd671..9a92707d2525 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
+#include <linux/cleanup.h>
#include <linux/dmi.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
@@ -58,9 +59,14 @@
#define CY8C95X0_PIN_TO_OFFSET(x) (((x) >= 20) ? ((x) + 4) : (x))
-#define CY8C95X0_MUX_REGMAP_TO_PORT(x) ((x) / MUXED_STRIDE)
-#define CY8C95X0_MUX_REGMAP_TO_REG(x) (((x) % MUXED_STRIDE) + CY8C95X0_INTMASK)
-#define CY8C95X0_MUX_REGMAP_TO_OFFSET(x, p) ((x) - CY8C95X0_INTMASK + (p) * MUXED_STRIDE)
+#define MAX_BANK 8
+#define BANK_SZ 8
+#define MAX_LINE (MAX_BANK * BANK_SZ)
+#define MUXED_STRIDE (CY8C95X0_DRV_HIZ - CY8C95X0_INTMASK)
+#define CY8C95X0_GPIO_MASK GENMASK(7, 0)
+#define CY8C95X0_VIRTUAL (CY8C95X0_COMMAND + 1)
+#define CY8C95X0_MUX_REGMAP_TO_OFFSET(x, p) \
+ (CY8C95X0_VIRTUAL + (x) - CY8C95X0_INTMASK + (p) * MUXED_STRIDE)
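+/*
+ * Registers behind the PORTSEL mux are exposed through a flat virtual
+ * address space above the physical map: port p's copy of muxed register x
+ * sits at CY8C95X0_VIRTUAL + (x - CY8C95X0_INTMASK) + p * MUXED_STRIDE,
+ * and a regmap range pages it in through PORTSEL.
+ */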
static const struct i2c_device_id cy8c95x0_id[] = {
{ "cy8c9520", 20, },
@@ -120,18 +126,11 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
{}
};
-#define MAX_BANK 8
-#define BANK_SZ 8
-#define MAX_LINE (MAX_BANK * BANK_SZ)
-#define MUXED_STRIDE 16
-#define CY8C95X0_GPIO_MASK GENMASK(7, 0)
-
/**
* struct cy8c95x0_pinctrl - driver data
* @regmap: Device's regmap. Only direct access registers.
- * @muxed_regmap: Regmap for all muxed registers.
* @irq_lock: IRQ bus lock
- * @i2c_lock: Mutex for the device internal mux register
+ * @i2c_lock: Mutex to hold while using the regmap
* @irq_mask: I/O bits affected by interrupts
* @irq_trig_raise: I/O bits affected by raising voltage level
* @irq_trig_fall: I/O bits affected by falling voltage level
@@ -152,7 +151,6 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
*/
struct cy8c95x0_pinctrl {
struct regmap *regmap;
- struct regmap *muxed_regmap;
struct mutex irq_lock;
struct mutex i2c_lock;
DECLARE_BITMAP(irq_mask, MAX_LINE);
@@ -331,6 +329,9 @@ static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
{
+ if (reg >= CY8C95X0_VIRTUAL)
+ return true;
+
switch (reg) {
case 0x24 ... 0x27:
return false;
@@ -341,6 +342,9 @@ static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
{
+ if (reg >= CY8C95X0_VIRTUAL)
+ return true;
+
switch (reg) {
case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
return false;
@@ -433,115 +437,34 @@ static bool cy8c95x0_quick_path_register(unsigned int reg)
}
}
-static const struct reg_default cy8c95x0_reg_defaults[] = {
- { CY8C95X0_OUTPUT_(0), GENMASK(7, 0) },
- { CY8C95X0_OUTPUT_(1), GENMASK(7, 0) },
- { CY8C95X0_OUTPUT_(2), GENMASK(7, 0) },
- { CY8C95X0_OUTPUT_(3), GENMASK(7, 0) },
- { CY8C95X0_OUTPUT_(4), GENMASK(7, 0) },
- { CY8C95X0_OUTPUT_(5), GENMASK(7, 0) },
- { CY8C95X0_OUTPUT_(6), GENMASK(7, 0) },
- { CY8C95X0_OUTPUT_(7), GENMASK(7, 0) },
- { CY8C95X0_PORTSEL, 0 },
- { CY8C95X0_PWMSEL, 0 },
-};
-
-static int
-cy8c95x0_mux_reg_read(void *context, unsigned int off, unsigned int *val)
-{
- struct cy8c95x0_pinctrl *chip = context;
- u8 port = CY8C95X0_MUX_REGMAP_TO_PORT(off);
- int ret, reg = CY8C95X0_MUX_REGMAP_TO_REG(off);
-
- mutex_lock(&chip->i2c_lock);
- /* Select the correct bank */
- ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
- if (ret < 0)
- goto out;
-
- /*
- * Read the register through direct access regmap. The target range
- * is marked volatile.
- */
- ret = regmap_read(chip->regmap, reg, val);
-out:
- mutex_unlock(&chip->i2c_lock);
-
- return ret;
-}
-
-static int
-cy8c95x0_mux_reg_write(void *context, unsigned int off, unsigned int val)
-{
- struct cy8c95x0_pinctrl *chip = context;
- u8 port = CY8C95X0_MUX_REGMAP_TO_PORT(off);
- int ret, reg = CY8C95X0_MUX_REGMAP_TO_REG(off);
-
- mutex_lock(&chip->i2c_lock);
- /* Select the correct bank */
- ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
- if (ret < 0)
- goto out;
-
- /*
- * Write the register through direct access regmap. The target range
- * is marked volatile.
- */
- ret = regmap_write(chip->regmap, reg, val);
-out:
- mutex_unlock(&chip->i2c_lock);
-
- return ret;
-}
-
-static bool cy8c95x0_mux_accessible_register(struct device *dev, unsigned int off)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- struct cy8c95x0_pinctrl *chip = i2c_get_clientdata(i2c);
- u8 port = CY8C95X0_MUX_REGMAP_TO_PORT(off);
- u8 reg = CY8C95X0_MUX_REGMAP_TO_REG(off);
-
- if (port >= chip->nport)
- return false;
-
- return cy8c95x0_muxed_register(reg);
-}
-
-static struct regmap_bus cy8c95x0_regmap_bus = {
- .reg_read = cy8c95x0_mux_reg_read,
- .reg_write = cy8c95x0_mux_reg_write,
-};
-
-/* Regmap for muxed registers CY8C95X0_INTMASK - CY8C95X0_DRV_HIZ */
-static const struct regmap_config cy8c95x0_muxed_regmap = {
- .name = "muxed",
- .reg_bits = 8,
- .val_bits = 8,
- .cache_type = REGCACHE_FLAT,
- .use_single_read = true,
- .use_single_write = true,
- .max_register = MUXED_STRIDE * BANK_SZ,
- .num_reg_defaults_raw = MUXED_STRIDE * BANK_SZ,
- .readable_reg = cy8c95x0_mux_accessible_register,
- .writeable_reg = cy8c95x0_mux_accessible_register,
+static const struct regmap_range_cfg cy8c95x0_ranges[] = {
+ {
+ .range_min = CY8C95X0_VIRTUAL,
+ .range_max = 0, /* Updated at runtime */
+ .selector_reg = CY8C95X0_PORTSEL,
+ .selector_mask = 0x07,
+ .selector_shift = 0x0,
+ .window_start = CY8C95X0_INTMASK,
+ .window_len = MUXED_STRIDE,
+ }
};
-/* Direct access regmap */
-static const struct regmap_config cy8c95x0_i2c_regmap = {
- .name = "direct",
+static const struct regmap_config cy8c9520_i2c_regmap = {
.reg_bits = 8,
.val_bits = 8,
- .reg_defaults = cy8c95x0_reg_defaults,
- .num_reg_defaults = ARRAY_SIZE(cy8c95x0_reg_defaults),
-
.readable_reg = cy8c95x0_readable_register,
.writeable_reg = cy8c95x0_writeable_register,
.volatile_reg = cy8c95x0_volatile_register,
.precious_reg = cy8c95x0_precious_register,
- .cache_type = REGCACHE_FLAT,
- .max_register = CY8C95X0_COMMAND,
+ .cache_type = REGCACHE_MAPLE,
+ .ranges = NULL, /* Updated at runtime */
+ .num_ranges = 1,
+ .max_register = 0, /* Updated at runtime */
+ .num_reg_defaults_raw = 0, /* Updated at runtime */
+ .use_single_read = true, /* Workaround for regcache bug */
+ .disable_locking = true,
};
static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
@@ -552,48 +475,42 @@ static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip
bool *change, bool async,
bool force)
{
- struct regmap *regmap;
- int ret, off, i, read_val;
+ int ret, off, i;
/* Caller should never modify PORTSEL directly */
if (reg == CY8C95X0_PORTSEL)
return -EINVAL;
- /* Registers behind the PORTSEL mux have their own regmap */
+ /* Registers behind the PORTSEL mux have their own range in regmap */
if (cy8c95x0_muxed_register(reg)) {
- regmap = chip->muxed_regmap;
off = CY8C95X0_MUX_REGMAP_TO_OFFSET(reg, port);
} else {
- regmap = chip->regmap;
/* Quick path direct access registers honor the port argument */
if (cy8c95x0_quick_path_register(reg))
off = reg + port;
else
off = reg;
}
+ guard(mutex)(&chip->i2c_lock);
- ret = regmap_update_bits_base(regmap, off, mask, val, change, async, force);
+ ret = regmap_update_bits_base(chip->regmap, off, mask, val, change, async, force);
if (ret < 0)
return ret;
- /* Update the cache when a WC bit is written */
+ /*
+ * Mimic what the hardware does and update the cache when a WC bit is
+ * written. This allows the registers to be marked non-volatile and
+ * reduces I/O cycles.
+ */
if (cy8c95x0_wc_register(reg) && (mask & val)) {
+ /* Writing a 1 clears set bits in the other drive mode registers */
+ regcache_cache_only(chip->regmap, true);
for (i = CY8C95X0_DRV_PU; i <= CY8C95X0_DRV_HIZ; i++) {
if (i == reg)
continue;
- off = CY8C95X0_MUX_REGMAP_TO_OFFSET(i, port);
-
- ret = regmap_read(regmap, off, &read_val);
- if (ret < 0)
- continue;
- if (!(read_val & mask & val))
- continue;
-
- regcache_cache_only(regmap, true);
- regmap_update_bits(regmap, off, mask & val, 0);
- regcache_cache_only(regmap, false);
+ off = CY8C95X0_MUX_REGMAP_TO_OFFSET(i, port);
+ regmap_clear_bits(chip->regmap, off, mask & val);
}
+ regcache_cache_only(chip->regmap, false);
}
return ret;
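The removed goto/unlock pairs above become unnecessary once locking switches to guard(mutex) from <linux/cleanup.h>, which releases the lock on every exit from the scope. A minimal sketch with made-up names, assuming a chip structure that owns a regmap and a mutex (needs <linux/mutex.h> and <linux/regmap.h>):

struct example_chip {
	struct regmap *regmap;
	struct mutex lock;
};

static int example_update(struct example_chip *chip, unsigned int reg,
			  unsigned int mask, unsigned int val)
{
	guard(mutex)(&chip->lock);	/* released on every return below */

	if (reg == 0x18)		/* hypothetical forbidden register */
		return -EINVAL;

	return regmap_update_bits(chip->regmap, reg, mask, val);
}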
@@ -666,23 +583,23 @@ static int cy8c95x0_regmap_update_bits(struct cy8c95x0_pinctrl *chip, unsigned i
static int cy8c95x0_regmap_read(struct cy8c95x0_pinctrl *chip, unsigned int reg,
unsigned int port, unsigned int *read_val)
{
- struct regmap *regmap;
- int off;
+ int off, ret;
- /* Registers behind the PORTSEL mux have their own regmap */
+ /* Registers behind the PORTSEL mux have their own range in regmap */
if (cy8c95x0_muxed_register(reg)) {
- regmap = chip->muxed_regmap;
off = CY8C95X0_MUX_REGMAP_TO_OFFSET(reg, port);
} else {
- regmap = chip->regmap;
/* Quick path direct access registers honor the port argument */
if (cy8c95x0_quick_path_register(reg))
off = reg + port;
else
off = reg;
}
+ guard(mutex)(&chip->i2c_lock);
- return regmap_read(regmap, off, read_val);
+ ret = regmap_read(chip->regmap, off, read_val);
+
+ return ret;
}
static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
@@ -1511,6 +1428,8 @@ static int cy8c95x0_detect(struct i2c_client *client,
static int cy8c95x0_probe(struct i2c_client *client)
{
struct cy8c95x0_pinctrl *chip;
+ struct regmap_config regmap_conf;
+ struct regmap_range_cfg regmap_range_conf;
struct regulator *reg;
int ret;
@@ -1530,15 +1449,20 @@ static int cy8c95x0_probe(struct i2c_client *client)
chip->tpin = chip->driver_data & CY8C95X0_GPIO_MASK;
chip->nport = DIV_ROUND_UP(CY8C95X0_PIN_TO_OFFSET(chip->tpin), BANK_SZ);
+ memcpy(&regmap_range_conf, &cy8c95x0_ranges[0], sizeof(regmap_range_conf));
+
switch (chip->tpin) {
case 20:
strscpy(chip->name, cy8c95x0_id[0].name, I2C_NAME_SIZE);
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE;
break;
case 40:
strscpy(chip->name, cy8c95x0_id[1].name, I2C_NAME_SIZE);
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE;
break;
case 60:
strscpy(chip->name, cy8c95x0_id[2].name, I2C_NAME_SIZE);
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE;
break;
default:
return -ENODEV;
@@ -1571,22 +1495,18 @@ static int cy8c95x0_probe(struct i2c_client *client)
gpiod_set_consumer_name(chip->gpio_reset, "CY8C95X0 RESET");
}
- /* Generic regmap for direct access registers */
- chip->regmap = devm_regmap_init_i2c(client, &cy8c95x0_i2c_regmap);
+ /* Regmap for direct and paged registers */
+ memcpy(&regmap_conf, &cy8c9520_i2c_regmap, sizeof(regmap_conf));
+ regmap_conf.ranges = &regmap_range_conf;
+ regmap_conf.max_register = regmap_range_conf.range_max;
+ regmap_conf.num_reg_defaults_raw = regmap_range_conf.range_max;
+
+ chip->regmap = devm_regmap_init_i2c(client, &regmap_conf);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
goto err_exit;
}
- /* Port specific regmap behind PORTSEL mux */
- chip->muxed_regmap = devm_regmap_init(&client->dev, &cy8c95x0_regmap_bus,
- chip, &cy8c95x0_muxed_regmap);
- if (IS_ERR(chip->muxed_regmap)) {
- ret = dev_err_probe(&client->dev, PTR_ERR(chip->muxed_regmap),
- "Failed to register muxed regmap\n");
- goto err_exit;
- }
-
bitmap_zero(chip->push_pull, MAX_LINE);
bitmap_zero(chip->shiftmask, MAX_LINE);
bitmap_set(chip->shiftmask, 0, 20);
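The probe hunk above copies a const template config to the stack and patches the variant-specific fields before registering a single regmap. A hedged sketch of the same pattern, reusing the hypothetical example_regmap_cfg and example_ranges names from the earlier sketch:

static int example_probe(struct i2c_client *client)
{
	struct regmap_range_cfg range = example_ranges[0];
	struct regmap_config cfg = example_regmap_cfg;
	struct regmap *map;

	range.range_max = range.range_min + 6 * 8 - 1;	/* per-variant page count */
	cfg.ranges = &range;
	cfg.num_ranges = 1;
	cfg.max_register = range.range_max;

	map = devm_regmap_init_i2c(client, &cfg);	/* regmap copies what it keeps */
	return PTR_ERR_OR_ZERO(map);
}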
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
index 6e1be38865c3..3a9a0f059090 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.c
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -566,8 +566,8 @@ static const struct pinconf_ops eqbr_pinconf_ops = {
.pin_config_config_dbg_show = pinconf_generic_dump_config,
};
-static bool is_func_exist(struct eqbr_pmx_func *funcs, const char *name,
- unsigned int nr_funcs, unsigned int *idx)
+static bool is_func_exist(struct pinfunction *funcs, const char *name,
+ unsigned int nr_funcs, unsigned int *idx)
{
int i;
@@ -584,18 +584,18 @@ static bool is_func_exist(struct eqbr_pmx_func *funcs, const char *name,
return false;
}
-static int funcs_utils(struct device *dev, struct eqbr_pmx_func *funcs,
+static int funcs_utils(struct device *dev, struct pinfunction *funcs,
unsigned int *nr_funcs, funcs_util_ops op)
{
struct device_node *node = dev->of_node;
- struct device_node *np;
struct property *prop;
const char *fn_name;
+ const char **groups;
unsigned int fid;
int i, j;
i = 0;
- for_each_child_of_node(node, np) {
+ for_each_child_of_node_scoped(node, np) {
prop = of_find_property(np, "groups", NULL);
if (!prop)
continue;
@@ -620,20 +620,20 @@ static int funcs_utils(struct device *dev, struct eqbr_pmx_func *funcs,
case OP_COUNT_NR_FUNC_GRPS:
if (is_func_exist(funcs, fn_name, *nr_funcs, &fid))
- funcs[fid].nr_groups++;
+ funcs[fid].ngroups++;
break;
case OP_ADD_FUNC_GRPS:
if (is_func_exist(funcs, fn_name, *nr_funcs, &fid)) {
- for (j = 0; j < funcs[fid].nr_groups; j++)
- if (!funcs[fid].groups[j])
+ groups = (const char **)funcs[fid].groups;
+ for (j = 0; j < funcs[fid].ngroups; j++)
+ if (!groups[j])
break;
- funcs[fid].groups[j] = prop->value;
+ groups[j] = prop->value;
}
break;
default:
- of_node_put(np);
return -EINVAL;
}
i++;
@@ -645,7 +645,7 @@ static int funcs_utils(struct device *dev, struct eqbr_pmx_func *funcs,
static int eqbr_build_functions(struct eqbr_pinctrl_drv_data *drvdata)
{
struct device *dev = drvdata->dev;
- struct eqbr_pmx_func *funcs = NULL;
+ struct pinfunction *funcs = NULL;
unsigned int nr_funcs = 0;
int i, ret;
@@ -666,9 +666,9 @@ static int eqbr_build_functions(struct eqbr_pinctrl_drv_data *drvdata)
return ret;
for (i = 0; i < nr_funcs; i++) {
- if (!funcs[i].nr_groups)
+ if (!funcs[i].ngroups)
continue;
- funcs[i].groups = devm_kcalloc(dev, funcs[i].nr_groups,
+ funcs[i].groups = devm_kcalloc(dev, funcs[i].ngroups,
sizeof(*(funcs[i].groups)),
GFP_KERNEL);
if (!funcs[i].groups)
@@ -688,7 +688,7 @@ static int eqbr_build_functions(struct eqbr_pinctrl_drv_data *drvdata)
ret = pinmux_generic_add_function(drvdata->pctl_dev,
funcs[i].name,
funcs[i].groups,
- funcs[i].nr_groups,
+ funcs[i].ngroups,
drvdata);
if (ret < 0) {
dev_err(dev, "Failed to register function %s\n",
@@ -706,11 +706,10 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
struct device_node *node = dev->of_node;
unsigned int *pins, *pinmux, pin_id, pinmux_id;
struct pingroup group, *grp = &group;
- struct device_node *np;
struct property *prop;
int j, err;
- for_each_child_of_node(node, np) {
+ for_each_child_of_node_scoped(node, np) {
prop = of_find_property(np, "groups", NULL);
if (!prop)
continue;
@@ -718,42 +717,35 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
err = of_property_count_u32_elems(np, "pins");
if (err < 0) {
dev_err(dev, "No pins in the group: %s\n", prop->name);
- of_node_put(np);
return err;
}
grp->npins = err;
grp->name = prop->value;
pins = devm_kcalloc(dev, grp->npins, sizeof(*pins), GFP_KERNEL);
- if (!pins) {
- of_node_put(np);
+ if (!pins)
return -ENOMEM;
- }
+
grp->pins = pins;
pinmux = devm_kcalloc(dev, grp->npins, sizeof(*pinmux), GFP_KERNEL);
- if (!pinmux) {
- of_node_put(np);
+ if (!pinmux)
return -ENOMEM;
- }
for (j = 0; j < grp->npins; j++) {
if (of_property_read_u32_index(np, "pins", j, &pin_id)) {
dev_err(dev, "Group %s: Read intel pins id failed\n",
grp->name);
- of_node_put(np);
return -EINVAL;
}
if (pin_id >= drvdata->pctl_desc.npins) {
dev_err(dev, "Group %s: Invalid pin ID, idx: %d, pin %u\n",
grp->name, j, pin_id);
- of_node_put(np);
return -EINVAL;
}
pins[j] = pin_id;
if (of_property_read_u32_index(np, "pinmux", j, &pinmux_id)) {
dev_err(dev, "Group %s: Read intel pinmux id failed\n",
grp->name);
- of_node_put(np);
return -EINVAL;
}
pinmux[j] = pinmux_id;
@@ -764,7 +756,6 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
pinmux);
if (err < 0) {
dev_err(dev, "Failed to register group %s\n", grp->name);
- of_node_put(np);
return err;
}
memset(&group, 0, sizeof(group));
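The conversions above move to the _scoped child iterator from <linux/of.h>, which drops the child node reference automatically, so the per-error of_node_put() calls disappear. A minimal sketch with illustrative property names:

static int example_count_groups(struct device_node *parent)
{
	int count = 0;

	for_each_child_of_node_scoped(parent, child) {
		if (!of_find_property(child, "groups", NULL))
			continue;
		if (!of_find_property(child, "pins", NULL))
			return -EINVAL;	/* early return: reference still dropped */
		count++;
	}

	return count;
}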
diff --git a/drivers/pinctrl/pinctrl-equilibrium.h b/drivers/pinctrl/pinctrl-equilibrium.h
index 83768cc8b3db..b4d149bde39d 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.h
+++ b/drivers/pinctrl/pinctrl-equilibrium.h
@@ -68,18 +68,6 @@ struct gpio_irq_type {
};
/**
- * struct eqbr_pmx_func: represent a pin function.
- * @name: name of the pin function, used to lookup the function.
- * @groups: one or more names of pin groups that provide this function.
- * @nr_groups: number of groups included in @groups.
- */
-struct eqbr_pmx_func {
- const char *name;
- const char **groups;
- unsigned int nr_groups;
-};
-
-/**
* struct eqbr_pin_bank: represent a pin bank.
* @membase: base address of the pin bank register.
 * @id: bank id, to identify the unique bank.
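The removed eqbr_pmx_func is replaced throughout by the generic struct pinfunction and its PINCTRL_PINFUNCTION() initializer from <linux/pinctrl/pinctrl.h>. A small sketch with made-up group names:

static const char * const example_uart0_groups[] = { "uart0_pins", };

static const struct pinfunction example_functions[] = {
	PINCTRL_PINFUNCTION("uart0", example_uart0_groups,
			    ARRAY_SIZE(example_uart0_groups)),
};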
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index bc6358a686fc..31703737731b 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -94,6 +94,12 @@
.data = (void *)func, \
}
+#define INGENIC_PIN_FUNCTION(_name_, id) \
+ { \
+ .func = PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups)), \
+ .data = NULL, \
+ }
+
enum jz_version {
ID_JZ4730,
ID_JZ4740,
@@ -238,15 +244,15 @@ static const char *jz4730_pwm0_groups[] = { "pwm0", };
static const char *jz4730_pwm1_groups[] = { "pwm1", };
static const struct function_desc jz4730_functions[] = {
- { "mmc", jz4730_mmc_groups, ARRAY_SIZE(jz4730_mmc_groups), },
- { "uart0", jz4730_uart0_groups, ARRAY_SIZE(jz4730_uart0_groups), },
- { "uart1", jz4730_uart1_groups, ARRAY_SIZE(jz4730_uart1_groups), },
- { "uart2", jz4730_uart2_groups, ARRAY_SIZE(jz4730_uart2_groups), },
- { "uart3", jz4730_uart3_groups, ARRAY_SIZE(jz4730_uart3_groups), },
- { "lcd", jz4730_lcd_groups, ARRAY_SIZE(jz4730_lcd_groups), },
- { "nand", jz4730_nand_groups, ARRAY_SIZE(jz4730_nand_groups), },
- { "pwm0", jz4730_pwm0_groups, ARRAY_SIZE(jz4730_pwm0_groups), },
- { "pwm1", jz4730_pwm1_groups, ARRAY_SIZE(jz4730_pwm1_groups), },
+ INGENIC_PIN_FUNCTION("mmc", jz4730_mmc),
+ INGENIC_PIN_FUNCTION("uart0", jz4730_uart0),
+ INGENIC_PIN_FUNCTION("uart1", jz4730_uart1),
+ INGENIC_PIN_FUNCTION("uart2", jz4730_uart2),
+ INGENIC_PIN_FUNCTION("uart3", jz4730_uart3),
+ INGENIC_PIN_FUNCTION("lcd", jz4730_lcd),
+ INGENIC_PIN_FUNCTION("nand", jz4730_nand),
+ INGENIC_PIN_FUNCTION("pwm0", jz4730_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", jz4730_pwm1),
};
static const struct ingenic_chip_info jz4730_chip_info = {
@@ -343,19 +349,19 @@ static const char *jz4740_pwm6_groups[] = { "pwm6", };
static const char *jz4740_pwm7_groups[] = { "pwm7", };
static const struct function_desc jz4740_functions[] = {
- { "mmc", jz4740_mmc_groups, ARRAY_SIZE(jz4740_mmc_groups), },
- { "uart0", jz4740_uart0_groups, ARRAY_SIZE(jz4740_uart0_groups), },
- { "uart1", jz4740_uart1_groups, ARRAY_SIZE(jz4740_uart1_groups), },
- { "lcd", jz4740_lcd_groups, ARRAY_SIZE(jz4740_lcd_groups), },
- { "nand", jz4740_nand_groups, ARRAY_SIZE(jz4740_nand_groups), },
- { "pwm0", jz4740_pwm0_groups, ARRAY_SIZE(jz4740_pwm0_groups), },
- { "pwm1", jz4740_pwm1_groups, ARRAY_SIZE(jz4740_pwm1_groups), },
- { "pwm2", jz4740_pwm2_groups, ARRAY_SIZE(jz4740_pwm2_groups), },
- { "pwm3", jz4740_pwm3_groups, ARRAY_SIZE(jz4740_pwm3_groups), },
- { "pwm4", jz4740_pwm4_groups, ARRAY_SIZE(jz4740_pwm4_groups), },
- { "pwm5", jz4740_pwm5_groups, ARRAY_SIZE(jz4740_pwm5_groups), },
- { "pwm6", jz4740_pwm6_groups, ARRAY_SIZE(jz4740_pwm6_groups), },
- { "pwm7", jz4740_pwm7_groups, ARRAY_SIZE(jz4740_pwm7_groups), },
+ INGENIC_PIN_FUNCTION("mmc", jz4740_mmc),
+ INGENIC_PIN_FUNCTION("uart0", jz4740_uart0),
+ INGENIC_PIN_FUNCTION("uart1", jz4740_uart1),
+ INGENIC_PIN_FUNCTION("lcd", jz4740_lcd),
+ INGENIC_PIN_FUNCTION("nand", jz4740_nand),
+ INGENIC_PIN_FUNCTION("pwm0", jz4740_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", jz4740_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", jz4740_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", jz4740_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", jz4740_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", jz4740_pwm5),
+ INGENIC_PIN_FUNCTION("pwm6", jz4740_pwm6),
+ INGENIC_PIN_FUNCTION("pwm7", jz4740_pwm7),
};
static const struct ingenic_chip_info jz4740_chip_info = {
@@ -447,17 +453,17 @@ static const char *jz4725b_pwm4_groups[] = { "pwm4", };
static const char *jz4725b_pwm5_groups[] = { "pwm5", };
static const struct function_desc jz4725b_functions[] = {
- { "mmc0", jz4725b_mmc0_groups, ARRAY_SIZE(jz4725b_mmc0_groups), },
- { "mmc1", jz4725b_mmc1_groups, ARRAY_SIZE(jz4725b_mmc1_groups), },
- { "uart", jz4725b_uart_groups, ARRAY_SIZE(jz4725b_uart_groups), },
- { "nand", jz4725b_nand_groups, ARRAY_SIZE(jz4725b_nand_groups), },
- { "pwm0", jz4725b_pwm0_groups, ARRAY_SIZE(jz4725b_pwm0_groups), },
- { "pwm1", jz4725b_pwm1_groups, ARRAY_SIZE(jz4725b_pwm1_groups), },
- { "pwm2", jz4725b_pwm2_groups, ARRAY_SIZE(jz4725b_pwm2_groups), },
- { "pwm3", jz4725b_pwm3_groups, ARRAY_SIZE(jz4725b_pwm3_groups), },
- { "pwm4", jz4725b_pwm4_groups, ARRAY_SIZE(jz4725b_pwm4_groups), },
- { "pwm5", jz4725b_pwm5_groups, ARRAY_SIZE(jz4725b_pwm5_groups), },
- { "lcd", jz4725b_lcd_groups, ARRAY_SIZE(jz4725b_lcd_groups), },
+ INGENIC_PIN_FUNCTION("mmc0", jz4725b_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", jz4725b_mmc1),
+ INGENIC_PIN_FUNCTION("uart", jz4725b_uart),
+ INGENIC_PIN_FUNCTION("nand", jz4725b_nand),
+ INGENIC_PIN_FUNCTION("pwm0", jz4725b_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", jz4725b_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", jz4725b_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", jz4725b_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", jz4725b_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", jz4725b_pwm5),
+ INGENIC_PIN_FUNCTION("lcd", jz4725b_lcd),
};
static const struct ingenic_chip_info jz4725b_chip_info = {
@@ -579,22 +585,22 @@ static const char *jz4750_pwm4_groups[] = { "pwm4", };
static const char *jz4750_pwm5_groups[] = { "pwm5", };
static const struct function_desc jz4750_functions[] = {
- { "uart0", jz4750_uart0_groups, ARRAY_SIZE(jz4750_uart0_groups), },
- { "uart1", jz4750_uart1_groups, ARRAY_SIZE(jz4750_uart1_groups), },
- { "uart2", jz4750_uart2_groups, ARRAY_SIZE(jz4750_uart2_groups), },
- { "uart3", jz4750_uart3_groups, ARRAY_SIZE(jz4750_uart3_groups), },
- { "mmc0", jz4750_mmc0_groups, ARRAY_SIZE(jz4750_mmc0_groups), },
- { "mmc1", jz4750_mmc1_groups, ARRAY_SIZE(jz4750_mmc1_groups), },
- { "i2c", jz4750_i2c_groups, ARRAY_SIZE(jz4750_i2c_groups), },
- { "cim", jz4750_cim_groups, ARRAY_SIZE(jz4750_cim_groups), },
- { "lcd", jz4750_lcd_groups, ARRAY_SIZE(jz4750_lcd_groups), },
- { "nand", jz4750_nand_groups, ARRAY_SIZE(jz4750_nand_groups), },
- { "pwm0", jz4750_pwm0_groups, ARRAY_SIZE(jz4750_pwm0_groups), },
- { "pwm1", jz4750_pwm1_groups, ARRAY_SIZE(jz4750_pwm1_groups), },
- { "pwm2", jz4750_pwm2_groups, ARRAY_SIZE(jz4750_pwm2_groups), },
- { "pwm3", jz4750_pwm3_groups, ARRAY_SIZE(jz4750_pwm3_groups), },
- { "pwm4", jz4750_pwm4_groups, ARRAY_SIZE(jz4750_pwm4_groups), },
- { "pwm5", jz4750_pwm5_groups, ARRAY_SIZE(jz4750_pwm5_groups), },
+ INGENIC_PIN_FUNCTION("uart0", jz4750_uart0),
+ INGENIC_PIN_FUNCTION("uart1", jz4750_uart1),
+ INGENIC_PIN_FUNCTION("uart2", jz4750_uart2),
+ INGENIC_PIN_FUNCTION("uart3", jz4750_uart3),
+ INGENIC_PIN_FUNCTION("mmc0", jz4750_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", jz4750_mmc1),
+ INGENIC_PIN_FUNCTION("i2c", jz4750_i2c),
+ INGENIC_PIN_FUNCTION("cim", jz4750_cim),
+ INGENIC_PIN_FUNCTION("lcd", jz4750_lcd),
+ INGENIC_PIN_FUNCTION("nand", jz4750_nand),
+ INGENIC_PIN_FUNCTION("pwm0", jz4750_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", jz4750_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", jz4750_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", jz4750_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", jz4750_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", jz4750_pwm5),
};
static const struct ingenic_chip_info jz4750_chip_info = {
@@ -744,22 +750,22 @@ static const char *jz4755_pwm4_groups[] = { "pwm4", };
static const char *jz4755_pwm5_groups[] = { "pwm5", };
static const struct function_desc jz4755_functions[] = {
- { "uart0", jz4755_uart0_groups, ARRAY_SIZE(jz4755_uart0_groups), },
- { "uart1", jz4755_uart1_groups, ARRAY_SIZE(jz4755_uart1_groups), },
- { "uart2", jz4755_uart2_groups, ARRAY_SIZE(jz4755_uart2_groups), },
- { "ssi", jz4755_ssi_groups, ARRAY_SIZE(jz4755_ssi_groups), },
- { "mmc0", jz4755_mmc0_groups, ARRAY_SIZE(jz4755_mmc0_groups), },
- { "mmc1", jz4755_mmc1_groups, ARRAY_SIZE(jz4755_mmc1_groups), },
- { "i2c", jz4755_i2c_groups, ARRAY_SIZE(jz4755_i2c_groups), },
- { "cim", jz4755_cim_groups, ARRAY_SIZE(jz4755_cim_groups), },
- { "lcd", jz4755_lcd_groups, ARRAY_SIZE(jz4755_lcd_groups), },
- { "nand", jz4755_nand_groups, ARRAY_SIZE(jz4755_nand_groups), },
- { "pwm0", jz4755_pwm0_groups, ARRAY_SIZE(jz4755_pwm0_groups), },
- { "pwm1", jz4755_pwm1_groups, ARRAY_SIZE(jz4755_pwm1_groups), },
- { "pwm2", jz4755_pwm2_groups, ARRAY_SIZE(jz4755_pwm2_groups), },
- { "pwm3", jz4755_pwm3_groups, ARRAY_SIZE(jz4755_pwm3_groups), },
- { "pwm4", jz4755_pwm4_groups, ARRAY_SIZE(jz4755_pwm4_groups), },
- { "pwm5", jz4755_pwm5_groups, ARRAY_SIZE(jz4755_pwm5_groups), },
+ INGENIC_PIN_FUNCTION("uart0", jz4755_uart0),
+ INGENIC_PIN_FUNCTION("uart1", jz4755_uart1),
+ INGENIC_PIN_FUNCTION("uart2", jz4755_uart2),
+ INGENIC_PIN_FUNCTION("ssi", jz4755_ssi),
+ INGENIC_PIN_FUNCTION("mmc0", jz4755_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", jz4755_mmc1),
+ INGENIC_PIN_FUNCTION("i2c", jz4755_i2c),
+ INGENIC_PIN_FUNCTION("cim", jz4755_cim),
+ INGENIC_PIN_FUNCTION("lcd", jz4755_lcd),
+ INGENIC_PIN_FUNCTION("nand", jz4755_nand),
+ INGENIC_PIN_FUNCTION("pwm0", jz4755_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", jz4755_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", jz4755_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", jz4755_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", jz4755_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", jz4755_pwm5),
};
static const struct ingenic_chip_info jz4755_chip_info = {
@@ -1079,35 +1085,35 @@ static const char *jz4760_pwm7_groups[] = { "pwm7", };
static const char *jz4760_otg_groups[] = { "otg-vbus", };
static const struct function_desc jz4760_functions[] = {
- { "uart0", jz4760_uart0_groups, ARRAY_SIZE(jz4760_uart0_groups), },
- { "uart1", jz4760_uart1_groups, ARRAY_SIZE(jz4760_uart1_groups), },
- { "uart2", jz4760_uart2_groups, ARRAY_SIZE(jz4760_uart2_groups), },
- { "uart3", jz4760_uart3_groups, ARRAY_SIZE(jz4760_uart3_groups), },
- { "ssi0", jz4760_ssi0_groups, ARRAY_SIZE(jz4760_ssi0_groups), },
- { "ssi1", jz4760_ssi1_groups, ARRAY_SIZE(jz4760_ssi1_groups), },
- { "mmc0", jz4760_mmc0_groups, ARRAY_SIZE(jz4760_mmc0_groups), },
- { "mmc1", jz4760_mmc1_groups, ARRAY_SIZE(jz4760_mmc1_groups), },
- { "mmc2", jz4760_mmc2_groups, ARRAY_SIZE(jz4760_mmc2_groups), },
- { "nemc", jz4760_nemc_groups, ARRAY_SIZE(jz4760_nemc_groups), },
- { "nemc-cs1", jz4760_cs1_groups, ARRAY_SIZE(jz4760_cs1_groups), },
- { "nemc-cs2", jz4760_cs2_groups, ARRAY_SIZE(jz4760_cs2_groups), },
- { "nemc-cs3", jz4760_cs3_groups, ARRAY_SIZE(jz4760_cs3_groups), },
- { "nemc-cs4", jz4760_cs4_groups, ARRAY_SIZE(jz4760_cs4_groups), },
- { "nemc-cs5", jz4760_cs5_groups, ARRAY_SIZE(jz4760_cs5_groups), },
- { "nemc-cs6", jz4760_cs6_groups, ARRAY_SIZE(jz4760_cs6_groups), },
- { "i2c0", jz4760_i2c0_groups, ARRAY_SIZE(jz4760_i2c0_groups), },
- { "i2c1", jz4760_i2c1_groups, ARRAY_SIZE(jz4760_i2c1_groups), },
- { "cim", jz4760_cim_groups, ARRAY_SIZE(jz4760_cim_groups), },
- { "lcd", jz4760_lcd_groups, ARRAY_SIZE(jz4760_lcd_groups), },
- { "pwm0", jz4760_pwm0_groups, ARRAY_SIZE(jz4760_pwm0_groups), },
- { "pwm1", jz4760_pwm1_groups, ARRAY_SIZE(jz4760_pwm1_groups), },
- { "pwm2", jz4760_pwm2_groups, ARRAY_SIZE(jz4760_pwm2_groups), },
- { "pwm3", jz4760_pwm3_groups, ARRAY_SIZE(jz4760_pwm3_groups), },
- { "pwm4", jz4760_pwm4_groups, ARRAY_SIZE(jz4760_pwm4_groups), },
- { "pwm5", jz4760_pwm5_groups, ARRAY_SIZE(jz4760_pwm5_groups), },
- { "pwm6", jz4760_pwm6_groups, ARRAY_SIZE(jz4760_pwm6_groups), },
- { "pwm7", jz4760_pwm7_groups, ARRAY_SIZE(jz4760_pwm7_groups), },
- { "otg", jz4760_otg_groups, ARRAY_SIZE(jz4760_otg_groups), },
+ INGENIC_PIN_FUNCTION("uart0", jz4760_uart0),
+ INGENIC_PIN_FUNCTION("uart1", jz4760_uart1),
+ INGENIC_PIN_FUNCTION("uart2", jz4760_uart2),
+ INGENIC_PIN_FUNCTION("uart3", jz4760_uart3),
+ INGENIC_PIN_FUNCTION("ssi0", jz4760_ssi0),
+ INGENIC_PIN_FUNCTION("ssi1", jz4760_ssi1),
+ INGENIC_PIN_FUNCTION("mmc0", jz4760_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", jz4760_mmc1),
+ INGENIC_PIN_FUNCTION("mmc2", jz4760_mmc2),
+ INGENIC_PIN_FUNCTION("nemc", jz4760_nemc),
+ INGENIC_PIN_FUNCTION("nemc-cs1", jz4760_cs1),
+ INGENIC_PIN_FUNCTION("nemc-cs2", jz4760_cs2),
+ INGENIC_PIN_FUNCTION("nemc-cs3", jz4760_cs3),
+ INGENIC_PIN_FUNCTION("nemc-cs4", jz4760_cs4),
+ INGENIC_PIN_FUNCTION("nemc-cs5", jz4760_cs5),
+ INGENIC_PIN_FUNCTION("nemc-cs6", jz4760_cs6),
+ INGENIC_PIN_FUNCTION("i2c0", jz4760_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", jz4760_i2c1),
+ INGENIC_PIN_FUNCTION("cim", jz4760_cim),
+ INGENIC_PIN_FUNCTION("lcd", jz4760_lcd),
+ INGENIC_PIN_FUNCTION("pwm0", jz4760_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", jz4760_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", jz4760_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", jz4760_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", jz4760_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", jz4760_pwm5),
+ INGENIC_PIN_FUNCTION("pwm6", jz4760_pwm6),
+ INGENIC_PIN_FUNCTION("pwm7", jz4760_pwm7),
+ INGENIC_PIN_FUNCTION("otg", jz4760_otg),
};
static const struct ingenic_chip_info jz4760_chip_info = {
@@ -1417,37 +1423,37 @@ static const char *jz4770_pwm7_groups[] = { "pwm7", };
static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", };
static const struct function_desc jz4770_functions[] = {
- { "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
- { "uart1", jz4770_uart1_groups, ARRAY_SIZE(jz4770_uart1_groups), },
- { "uart2", jz4770_uart2_groups, ARRAY_SIZE(jz4770_uart2_groups), },
- { "uart3", jz4770_uart3_groups, ARRAY_SIZE(jz4770_uart3_groups), },
- { "ssi0", jz4770_ssi0_groups, ARRAY_SIZE(jz4770_ssi0_groups), },
- { "ssi1", jz4770_ssi1_groups, ARRAY_SIZE(jz4770_ssi1_groups), },
- { "mmc0", jz4770_mmc0_groups, ARRAY_SIZE(jz4770_mmc0_groups), },
- { "mmc1", jz4770_mmc1_groups, ARRAY_SIZE(jz4770_mmc1_groups), },
- { "mmc2", jz4770_mmc2_groups, ARRAY_SIZE(jz4770_mmc2_groups), },
- { "nemc", jz4770_nemc_groups, ARRAY_SIZE(jz4770_nemc_groups), },
- { "nemc-cs1", jz4770_cs1_groups, ARRAY_SIZE(jz4770_cs1_groups), },
- { "nemc-cs2", jz4770_cs2_groups, ARRAY_SIZE(jz4770_cs2_groups), },
- { "nemc-cs3", jz4770_cs3_groups, ARRAY_SIZE(jz4770_cs3_groups), },
- { "nemc-cs4", jz4770_cs4_groups, ARRAY_SIZE(jz4770_cs4_groups), },
- { "nemc-cs5", jz4770_cs5_groups, ARRAY_SIZE(jz4770_cs5_groups), },
- { "nemc-cs6", jz4770_cs6_groups, ARRAY_SIZE(jz4770_cs6_groups), },
- { "i2c0", jz4770_i2c0_groups, ARRAY_SIZE(jz4770_i2c0_groups), },
- { "i2c1", jz4770_i2c1_groups, ARRAY_SIZE(jz4770_i2c1_groups), },
- { "i2c2", jz4770_i2c2_groups, ARRAY_SIZE(jz4770_i2c2_groups), },
- { "cim", jz4770_cim_groups, ARRAY_SIZE(jz4770_cim_groups), },
- { "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), },
- { "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), },
- { "pwm1", jz4770_pwm1_groups, ARRAY_SIZE(jz4770_pwm1_groups), },
- { "pwm2", jz4770_pwm2_groups, ARRAY_SIZE(jz4770_pwm2_groups), },
- { "pwm3", jz4770_pwm3_groups, ARRAY_SIZE(jz4770_pwm3_groups), },
- { "pwm4", jz4770_pwm4_groups, ARRAY_SIZE(jz4770_pwm4_groups), },
- { "pwm5", jz4770_pwm5_groups, ARRAY_SIZE(jz4770_pwm5_groups), },
- { "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
- { "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
- { "mac", jz4770_mac_groups, ARRAY_SIZE(jz4770_mac_groups), },
- { "otg", jz4760_otg_groups, ARRAY_SIZE(jz4760_otg_groups), },
+ INGENIC_PIN_FUNCTION("uart0", jz4770_uart0),
+ INGENIC_PIN_FUNCTION("uart1", jz4770_uart1),
+ INGENIC_PIN_FUNCTION("uart2", jz4770_uart2),
+ INGENIC_PIN_FUNCTION("uart3", jz4770_uart3),
+ INGENIC_PIN_FUNCTION("ssi0", jz4770_ssi0),
+ INGENIC_PIN_FUNCTION("ssi1", jz4770_ssi1),
+ INGENIC_PIN_FUNCTION("mmc0", jz4770_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", jz4770_mmc1),
+ INGENIC_PIN_FUNCTION("mmc2", jz4770_mmc2),
+ INGENIC_PIN_FUNCTION("nemc", jz4770_nemc),
+ INGENIC_PIN_FUNCTION("nemc-cs1", jz4770_cs1),
+ INGENIC_PIN_FUNCTION("nemc-cs2", jz4770_cs2),
+ INGENIC_PIN_FUNCTION("nemc-cs3", jz4770_cs3),
+ INGENIC_PIN_FUNCTION("nemc-cs4", jz4770_cs4),
+ INGENIC_PIN_FUNCTION("nemc-cs5", jz4770_cs5),
+ INGENIC_PIN_FUNCTION("nemc-cs6", jz4770_cs6),
+ INGENIC_PIN_FUNCTION("i2c0", jz4770_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", jz4770_i2c1),
+ INGENIC_PIN_FUNCTION("i2c2", jz4770_i2c2),
+ INGENIC_PIN_FUNCTION("cim", jz4770_cim),
+ INGENIC_PIN_FUNCTION("lcd", jz4770_lcd),
+ INGENIC_PIN_FUNCTION("pwm0", jz4770_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", jz4770_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", jz4770_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", jz4770_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", jz4770_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", jz4770_pwm5),
+ INGENIC_PIN_FUNCTION("pwm6", jz4770_pwm6),
+ INGENIC_PIN_FUNCTION("pwm7", jz4770_pwm7),
+ INGENIC_PIN_FUNCTION("mac", jz4770_mac),
+ INGENIC_PIN_FUNCTION("otg", jz4760_otg),
};
static const struct ingenic_chip_info jz4770_chip_info = {
@@ -1696,31 +1702,31 @@ static const char *jz4775_mac_groups[] = {
static const char *jz4775_otg_groups[] = { "otg-vbus", };
static const struct function_desc jz4775_functions[] = {
- { "uart0", jz4775_uart0_groups, ARRAY_SIZE(jz4775_uart0_groups), },
- { "uart1", jz4775_uart1_groups, ARRAY_SIZE(jz4775_uart1_groups), },
- { "uart2", jz4775_uart2_groups, ARRAY_SIZE(jz4775_uart2_groups), },
- { "uart3", jz4775_uart3_groups, ARRAY_SIZE(jz4775_uart3_groups), },
- { "ssi", jz4775_ssi_groups, ARRAY_SIZE(jz4775_ssi_groups), },
- { "mmc0", jz4775_mmc0_groups, ARRAY_SIZE(jz4775_mmc0_groups), },
- { "mmc1", jz4775_mmc1_groups, ARRAY_SIZE(jz4775_mmc1_groups), },
- { "mmc2", jz4775_mmc2_groups, ARRAY_SIZE(jz4775_mmc2_groups), },
- { "nemc", jz4775_nemc_groups, ARRAY_SIZE(jz4775_nemc_groups), },
- { "nemc-cs1", jz4775_cs1_groups, ARRAY_SIZE(jz4775_cs1_groups), },
- { "nemc-cs2", jz4775_cs2_groups, ARRAY_SIZE(jz4775_cs2_groups), },
- { "nemc-cs3", jz4775_cs3_groups, ARRAY_SIZE(jz4775_cs3_groups), },
- { "i2c0", jz4775_i2c0_groups, ARRAY_SIZE(jz4775_i2c0_groups), },
- { "i2c1", jz4775_i2c1_groups, ARRAY_SIZE(jz4775_i2c1_groups), },
- { "i2c2", jz4775_i2c2_groups, ARRAY_SIZE(jz4775_i2c2_groups), },
- { "i2s", jz4775_i2s_groups, ARRAY_SIZE(jz4775_i2s_groups), },
- { "dmic", jz4775_dmic_groups, ARRAY_SIZE(jz4775_dmic_groups), },
- { "cim", jz4775_cim_groups, ARRAY_SIZE(jz4775_cim_groups), },
- { "lcd", jz4775_lcd_groups, ARRAY_SIZE(jz4775_lcd_groups), },
- { "pwm0", jz4775_pwm0_groups, ARRAY_SIZE(jz4775_pwm0_groups), },
- { "pwm1", jz4775_pwm1_groups, ARRAY_SIZE(jz4775_pwm1_groups), },
- { "pwm2", jz4775_pwm2_groups, ARRAY_SIZE(jz4775_pwm2_groups), },
- { "pwm3", jz4775_pwm3_groups, ARRAY_SIZE(jz4775_pwm3_groups), },
- { "mac", jz4775_mac_groups, ARRAY_SIZE(jz4775_mac_groups), },
- { "otg", jz4775_otg_groups, ARRAY_SIZE(jz4775_otg_groups), },
+ INGENIC_PIN_FUNCTION("uart0", jz4775_uart0),
+ INGENIC_PIN_FUNCTION("uart1", jz4775_uart1),
+ INGENIC_PIN_FUNCTION("uart2", jz4775_uart2),
+ INGENIC_PIN_FUNCTION("uart3", jz4775_uart3),
+ INGENIC_PIN_FUNCTION("ssi", jz4775_ssi),
+ INGENIC_PIN_FUNCTION("mmc0", jz4775_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", jz4775_mmc1),
+ INGENIC_PIN_FUNCTION("mmc2", jz4775_mmc2),
+ INGENIC_PIN_FUNCTION("nemc", jz4775_nemc),
+ INGENIC_PIN_FUNCTION("nemc-cs1", jz4775_cs1),
+ INGENIC_PIN_FUNCTION("nemc-cs2", jz4775_cs2),
+ INGENIC_PIN_FUNCTION("nemc-cs3", jz4775_cs3),
+ INGENIC_PIN_FUNCTION("i2c0", jz4775_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", jz4775_i2c1),
+ INGENIC_PIN_FUNCTION("i2c2", jz4775_i2c2),
+ INGENIC_PIN_FUNCTION("i2s", jz4775_i2s),
+ INGENIC_PIN_FUNCTION("dmic", jz4775_dmic),
+ INGENIC_PIN_FUNCTION("cim", jz4775_cim),
+ INGENIC_PIN_FUNCTION("lcd", jz4775_lcd),
+ INGENIC_PIN_FUNCTION("pwm0", jz4775_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", jz4775_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", jz4775_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", jz4775_pwm3),
+ INGENIC_PIN_FUNCTION("mac", jz4775_mac),
+ INGENIC_PIN_FUNCTION("otg", jz4775_otg),
};
static const struct ingenic_chip_info jz4775_chip_info = {
@@ -1949,42 +1955,41 @@ static const char *jz4780_cim_groups[] = { "cim-data", };
static const char *jz4780_hdmi_ddc_groups[] = { "hdmi-ddc", };
static const struct function_desc jz4780_functions[] = {
- { "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
- { "uart1", jz4770_uart1_groups, ARRAY_SIZE(jz4770_uart1_groups), },
- { "uart2", jz4780_uart2_groups, ARRAY_SIZE(jz4780_uart2_groups), },
- { "uart3", jz4770_uart3_groups, ARRAY_SIZE(jz4770_uart3_groups), },
- { "uart4", jz4780_uart4_groups, ARRAY_SIZE(jz4780_uart4_groups), },
- { "ssi0", jz4780_ssi0_groups, ARRAY_SIZE(jz4780_ssi0_groups), },
- { "ssi1", jz4780_ssi1_groups, ARRAY_SIZE(jz4780_ssi1_groups), },
- { "mmc0", jz4780_mmc0_groups, ARRAY_SIZE(jz4780_mmc0_groups), },
- { "mmc1", jz4780_mmc1_groups, ARRAY_SIZE(jz4780_mmc1_groups), },
- { "mmc2", jz4780_mmc2_groups, ARRAY_SIZE(jz4780_mmc2_groups), },
- { "nemc", jz4780_nemc_groups, ARRAY_SIZE(jz4780_nemc_groups), },
- { "nemc-cs1", jz4770_cs1_groups, ARRAY_SIZE(jz4770_cs1_groups), },
- { "nemc-cs2", jz4770_cs2_groups, ARRAY_SIZE(jz4770_cs2_groups), },
- { "nemc-cs3", jz4770_cs3_groups, ARRAY_SIZE(jz4770_cs3_groups), },
- { "nemc-cs4", jz4770_cs4_groups, ARRAY_SIZE(jz4770_cs4_groups), },
- { "nemc-cs5", jz4770_cs5_groups, ARRAY_SIZE(jz4770_cs5_groups), },
- { "nemc-cs6", jz4770_cs6_groups, ARRAY_SIZE(jz4770_cs6_groups), },
- { "i2c0", jz4770_i2c0_groups, ARRAY_SIZE(jz4770_i2c0_groups), },
- { "i2c1", jz4770_i2c1_groups, ARRAY_SIZE(jz4770_i2c1_groups), },
- { "i2c2", jz4770_i2c2_groups, ARRAY_SIZE(jz4770_i2c2_groups), },
- { "i2c3", jz4780_i2c3_groups, ARRAY_SIZE(jz4780_i2c3_groups), },
- { "i2c4", jz4780_i2c4_groups, ARRAY_SIZE(jz4780_i2c4_groups), },
- { "i2s", jz4780_i2s_groups, ARRAY_SIZE(jz4780_i2s_groups), },
- { "dmic", jz4780_dmic_groups, ARRAY_SIZE(jz4780_dmic_groups), },
- { "cim", jz4780_cim_groups, ARRAY_SIZE(jz4780_cim_groups), },
- { "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), },
- { "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), },
- { "pwm1", jz4770_pwm1_groups, ARRAY_SIZE(jz4770_pwm1_groups), },
- { "pwm2", jz4770_pwm2_groups, ARRAY_SIZE(jz4770_pwm2_groups), },
- { "pwm3", jz4770_pwm3_groups, ARRAY_SIZE(jz4770_pwm3_groups), },
- { "pwm4", jz4770_pwm4_groups, ARRAY_SIZE(jz4770_pwm4_groups), },
- { "pwm5", jz4770_pwm5_groups, ARRAY_SIZE(jz4770_pwm5_groups), },
- { "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
- { "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
- { "hdmi-ddc", jz4780_hdmi_ddc_groups,
- ARRAY_SIZE(jz4780_hdmi_ddc_groups), },
+ INGENIC_PIN_FUNCTION("uart0", jz4770_uart0),
+ INGENIC_PIN_FUNCTION("uart1", jz4770_uart1),
+ INGENIC_PIN_FUNCTION("uart2", jz4780_uart2),
+ INGENIC_PIN_FUNCTION("uart3", jz4770_uart3),
+ INGENIC_PIN_FUNCTION("uart4", jz4780_uart4),
+ INGENIC_PIN_FUNCTION("ssi0", jz4780_ssi0),
+ INGENIC_PIN_FUNCTION("ssi1", jz4780_ssi1),
+ INGENIC_PIN_FUNCTION("mmc0", jz4780_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", jz4780_mmc1),
+ INGENIC_PIN_FUNCTION("mmc2", jz4780_mmc2),
+ INGENIC_PIN_FUNCTION("nemc", jz4780_nemc),
+ INGENIC_PIN_FUNCTION("nemc-cs1", jz4770_cs1),
+ INGENIC_PIN_FUNCTION("nemc-cs2", jz4770_cs2),
+ INGENIC_PIN_FUNCTION("nemc-cs3", jz4770_cs3),
+ INGENIC_PIN_FUNCTION("nemc-cs4", jz4770_cs4),
+ INGENIC_PIN_FUNCTION("nemc-cs5", jz4770_cs5),
+ INGENIC_PIN_FUNCTION("nemc-cs6", jz4770_cs6),
+ INGENIC_PIN_FUNCTION("i2c0", jz4770_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", jz4770_i2c1),
+ INGENIC_PIN_FUNCTION("i2c2", jz4770_i2c2),
+ INGENIC_PIN_FUNCTION("i2c3", jz4780_i2c3),
+ INGENIC_PIN_FUNCTION("i2c4", jz4780_i2c4),
+ INGENIC_PIN_FUNCTION("i2s", jz4780_i2s),
+ INGENIC_PIN_FUNCTION("dmic", jz4780_dmic),
+ INGENIC_PIN_FUNCTION("cim", jz4780_cim),
+ INGENIC_PIN_FUNCTION("lcd", jz4770_lcd),
+ INGENIC_PIN_FUNCTION("pwm0", jz4770_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", jz4770_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", jz4770_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", jz4770_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", jz4770_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", jz4770_pwm5),
+ INGENIC_PIN_FUNCTION("pwm6", jz4770_pwm6),
+ INGENIC_PIN_FUNCTION("pwm7", jz4770_pwm7),
+ INGENIC_PIN_FUNCTION("hdmi-ddc", jz4780_hdmi_ddc),
};
static const struct ingenic_chip_info jz4780_chip_info = {
@@ -2185,29 +2190,29 @@ static const char *x1000_pwm4_groups[] = { "pwm4", };
static const char *x1000_mac_groups[] = { "mac", };
static const struct function_desc x1000_functions[] = {
- { "uart0", x1000_uart0_groups, ARRAY_SIZE(x1000_uart0_groups), },
- { "uart1", x1000_uart1_groups, ARRAY_SIZE(x1000_uart1_groups), },
- { "uart2", x1000_uart2_groups, ARRAY_SIZE(x1000_uart2_groups), },
- { "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
- { "ssi", x1000_ssi_groups, ARRAY_SIZE(x1000_ssi_groups), },
- { "mmc0", x1000_mmc0_groups, ARRAY_SIZE(x1000_mmc0_groups), },
- { "mmc1", x1000_mmc1_groups, ARRAY_SIZE(x1000_mmc1_groups), },
- { "emc", x1000_emc_groups, ARRAY_SIZE(x1000_emc_groups), },
- { "emc-cs1", x1000_cs1_groups, ARRAY_SIZE(x1000_cs1_groups), },
- { "emc-cs2", x1000_cs2_groups, ARRAY_SIZE(x1000_cs2_groups), },
- { "i2c0", x1000_i2c0_groups, ARRAY_SIZE(x1000_i2c0_groups), },
- { "i2c1", x1000_i2c1_groups, ARRAY_SIZE(x1000_i2c1_groups), },
- { "i2c2", x1000_i2c2_groups, ARRAY_SIZE(x1000_i2c2_groups), },
- { "i2s", x1000_i2s_groups, ARRAY_SIZE(x1000_i2s_groups), },
- { "dmic", x1000_dmic_groups, ARRAY_SIZE(x1000_dmic_groups), },
- { "cim", x1000_cim_groups, ARRAY_SIZE(x1000_cim_groups), },
- { "lcd", x1000_lcd_groups, ARRAY_SIZE(x1000_lcd_groups), },
- { "pwm0", x1000_pwm0_groups, ARRAY_SIZE(x1000_pwm0_groups), },
- { "pwm1", x1000_pwm1_groups, ARRAY_SIZE(x1000_pwm1_groups), },
- { "pwm2", x1000_pwm2_groups, ARRAY_SIZE(x1000_pwm2_groups), },
- { "pwm3", x1000_pwm3_groups, ARRAY_SIZE(x1000_pwm3_groups), },
- { "pwm4", x1000_pwm4_groups, ARRAY_SIZE(x1000_pwm4_groups), },
- { "mac", x1000_mac_groups, ARRAY_SIZE(x1000_mac_groups), },
+ INGENIC_PIN_FUNCTION("uart0", x1000_uart0),
+ INGENIC_PIN_FUNCTION("uart1", x1000_uart1),
+ INGENIC_PIN_FUNCTION("uart2", x1000_uart2),
+ INGENIC_PIN_FUNCTION("sfc", x1000_sfc),
+ INGENIC_PIN_FUNCTION("ssi", x1000_ssi),
+ INGENIC_PIN_FUNCTION("mmc0", x1000_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", x1000_mmc1),
+ INGENIC_PIN_FUNCTION("emc", x1000_emc),
+ INGENIC_PIN_FUNCTION("emc-cs1", x1000_cs1),
+ INGENIC_PIN_FUNCTION("emc-cs2", x1000_cs2),
+ INGENIC_PIN_FUNCTION("i2c0", x1000_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", x1000_i2c1),
+ INGENIC_PIN_FUNCTION("i2c2", x1000_i2c2),
+ INGENIC_PIN_FUNCTION("i2s", x1000_i2s),
+ INGENIC_PIN_FUNCTION("dmic", x1000_dmic),
+ INGENIC_PIN_FUNCTION("cim", x1000_cim),
+ INGENIC_PIN_FUNCTION("lcd", x1000_lcd),
+ INGENIC_PIN_FUNCTION("pwm0", x1000_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", x1000_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", x1000_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", x1000_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", x1000_pwm4),
+ INGENIC_PIN_FUNCTION("mac", x1000_mac),
};
static const struct regmap_range x1000_access_ranges[] = {
@@ -2315,22 +2320,22 @@ static const char *x1500_pwm3_groups[] = { "pwm3", };
static const char *x1500_pwm4_groups[] = { "pwm4", };
static const struct function_desc x1500_functions[] = {
- { "uart0", x1500_uart0_groups, ARRAY_SIZE(x1500_uart0_groups), },
- { "uart1", x1500_uart1_groups, ARRAY_SIZE(x1500_uart1_groups), },
- { "uart2", x1500_uart2_groups, ARRAY_SIZE(x1500_uart2_groups), },
- { "sfc", x1000_sfc_groups, ARRAY_SIZE(x1000_sfc_groups), },
- { "mmc", x1500_mmc_groups, ARRAY_SIZE(x1500_mmc_groups), },
- { "i2c0", x1500_i2c0_groups, ARRAY_SIZE(x1500_i2c0_groups), },
- { "i2c1", x1500_i2c1_groups, ARRAY_SIZE(x1500_i2c1_groups), },
- { "i2c2", x1500_i2c2_groups, ARRAY_SIZE(x1500_i2c2_groups), },
- { "i2s", x1500_i2s_groups, ARRAY_SIZE(x1500_i2s_groups), },
- { "dmic", x1500_dmic_groups, ARRAY_SIZE(x1500_dmic_groups), },
- { "cim", x1500_cim_groups, ARRAY_SIZE(x1500_cim_groups), },
- { "pwm0", x1500_pwm0_groups, ARRAY_SIZE(x1500_pwm0_groups), },
- { "pwm1", x1500_pwm1_groups, ARRAY_SIZE(x1500_pwm1_groups), },
- { "pwm2", x1500_pwm2_groups, ARRAY_SIZE(x1500_pwm2_groups), },
- { "pwm3", x1500_pwm3_groups, ARRAY_SIZE(x1500_pwm3_groups), },
- { "pwm4", x1500_pwm4_groups, ARRAY_SIZE(x1500_pwm4_groups), },
+ INGENIC_PIN_FUNCTION("uart0", x1500_uart0),
+ INGENIC_PIN_FUNCTION("uart1", x1500_uart1),
+ INGENIC_PIN_FUNCTION("uart2", x1500_uart2),
+ INGENIC_PIN_FUNCTION("sfc", x1000_sfc),
+ INGENIC_PIN_FUNCTION("mmc", x1500_mmc),
+ INGENIC_PIN_FUNCTION("i2c0", x1500_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", x1500_i2c1),
+ INGENIC_PIN_FUNCTION("i2c2", x1500_i2c2),
+ INGENIC_PIN_FUNCTION("i2s", x1500_i2s),
+ INGENIC_PIN_FUNCTION("dmic", x1500_dmic),
+ INGENIC_PIN_FUNCTION("cim", x1500_cim),
+ INGENIC_PIN_FUNCTION("pwm0", x1500_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", x1500_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", x1500_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", x1500_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", x1500_pwm4),
};
static const struct ingenic_chip_info x1500_chip_info = {
@@ -2526,28 +2531,28 @@ static const char *x1830_pwm7_groups[] = { "pwm7-c-18", "pwm7-c-28", };
static const char *x1830_mac_groups[] = { "mac", };
static const struct function_desc x1830_functions[] = {
- { "uart0", x1830_uart0_groups, ARRAY_SIZE(x1830_uart0_groups), },
- { "uart1", x1830_uart1_groups, ARRAY_SIZE(x1830_uart1_groups), },
- { "sfc", x1830_sfc_groups, ARRAY_SIZE(x1830_sfc_groups), },
- { "ssi0", x1830_ssi0_groups, ARRAY_SIZE(x1830_ssi0_groups), },
- { "ssi1", x1830_ssi1_groups, ARRAY_SIZE(x1830_ssi1_groups), },
- { "mmc0", x1830_mmc0_groups, ARRAY_SIZE(x1830_mmc0_groups), },
- { "mmc1", x1830_mmc1_groups, ARRAY_SIZE(x1830_mmc1_groups), },
- { "i2c0", x1830_i2c0_groups, ARRAY_SIZE(x1830_i2c0_groups), },
- { "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
- { "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
- { "i2s", x1830_i2s_groups, ARRAY_SIZE(x1830_i2s_groups), },
- { "dmic", x1830_dmic_groups, ARRAY_SIZE(x1830_dmic_groups), },
- { "lcd", x1830_lcd_groups, ARRAY_SIZE(x1830_lcd_groups), },
- { "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
- { "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
- { "pwm2", x1830_pwm2_groups, ARRAY_SIZE(x1830_pwm2_groups), },
- { "pwm3", x1830_pwm3_groups, ARRAY_SIZE(x1830_pwm3_groups), },
- { "pwm4", x1830_pwm4_groups, ARRAY_SIZE(x1830_pwm4_groups), },
- { "pwm5", x1830_pwm5_groups, ARRAY_SIZE(x1830_pwm4_groups), },
- { "pwm6", x1830_pwm6_groups, ARRAY_SIZE(x1830_pwm4_groups), },
- { "pwm7", x1830_pwm7_groups, ARRAY_SIZE(x1830_pwm4_groups), },
- { "mac", x1830_mac_groups, ARRAY_SIZE(x1830_mac_groups), },
+ INGENIC_PIN_FUNCTION("uart0", x1830_uart0),
+ INGENIC_PIN_FUNCTION("uart1", x1830_uart1),
+ INGENIC_PIN_FUNCTION("sfc", x1830_sfc),
+ INGENIC_PIN_FUNCTION("ssi0", x1830_ssi0),
+ INGENIC_PIN_FUNCTION("ssi1", x1830_ssi1),
+ INGENIC_PIN_FUNCTION("mmc0", x1830_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", x1830_mmc1),
+ INGENIC_PIN_FUNCTION("i2c0", x1830_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", x1830_i2c1),
+ INGENIC_PIN_FUNCTION("i2c2", x1830_i2c2),
+ INGENIC_PIN_FUNCTION("i2s", x1830_i2s),
+ INGENIC_PIN_FUNCTION("dmic", x1830_dmic),
+ INGENIC_PIN_FUNCTION("lcd", x1830_lcd),
+ INGENIC_PIN_FUNCTION("pwm0", x1830_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", x1830_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", x1830_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", x1830_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", x1830_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", x1830_pwm5),
+ INGENIC_PIN_FUNCTION("pwm6", x1830_pwm6),
+ INGENIC_PIN_FUNCTION("pwm7", x1830_pwm7),
+ INGENIC_PIN_FUNCTION("mac", x1830_mac),
};
static const struct regmap_range x1830_access_ranges[] = {
@@ -2972,56 +2977,56 @@ static const char *x2000_mac1_groups[] = { "mac1-rmii", "mac1-rgmii", };
static const char *x2000_otg_groups[] = { "otg-vbus", };
static const struct function_desc x2000_functions[] = {
- { "uart0", x2000_uart0_groups, ARRAY_SIZE(x2000_uart0_groups), },
- { "uart1", x2000_uart1_groups, ARRAY_SIZE(x2000_uart1_groups), },
- { "uart2", x2000_uart2_groups, ARRAY_SIZE(x2000_uart2_groups), },
- { "uart3", x2000_uart3_groups, ARRAY_SIZE(x2000_uart3_groups), },
- { "uart4", x2000_uart4_groups, ARRAY_SIZE(x2000_uart4_groups), },
- { "uart5", x2000_uart5_groups, ARRAY_SIZE(x2000_uart5_groups), },
- { "uart6", x2000_uart6_groups, ARRAY_SIZE(x2000_uart6_groups), },
- { "uart7", x2000_uart7_groups, ARRAY_SIZE(x2000_uart7_groups), },
- { "uart8", x2000_uart8_groups, ARRAY_SIZE(x2000_uart8_groups), },
- { "uart9", x2000_uart9_groups, ARRAY_SIZE(x2000_uart9_groups), },
- { "sfc", x2000_sfc_groups, ARRAY_SIZE(x2000_sfc_groups), },
- { "ssi0", x2000_ssi0_groups, ARRAY_SIZE(x2000_ssi0_groups), },
- { "ssi1", x2000_ssi1_groups, ARRAY_SIZE(x2000_ssi1_groups), },
- { "mmc0", x2000_mmc0_groups, ARRAY_SIZE(x2000_mmc0_groups), },
- { "mmc1", x2000_mmc1_groups, ARRAY_SIZE(x2000_mmc1_groups), },
- { "mmc2", x2000_mmc2_groups, ARRAY_SIZE(x2000_mmc2_groups), },
- { "emc", x2000_emc_groups, ARRAY_SIZE(x2000_emc_groups), },
- { "emc-cs1", x2000_cs1_groups, ARRAY_SIZE(x2000_cs1_groups), },
- { "emc-cs2", x2000_cs2_groups, ARRAY_SIZE(x2000_cs2_groups), },
- { "i2c0", x2000_i2c0_groups, ARRAY_SIZE(x2000_i2c0_groups), },
- { "i2c1", x2000_i2c1_groups, ARRAY_SIZE(x2000_i2c1_groups), },
- { "i2c2", x2000_i2c2_groups, ARRAY_SIZE(x2000_i2c2_groups), },
- { "i2c3", x2000_i2c3_groups, ARRAY_SIZE(x2000_i2c3_groups), },
- { "i2c4", x2000_i2c4_groups, ARRAY_SIZE(x2000_i2c4_groups), },
- { "i2c5", x2000_i2c5_groups, ARRAY_SIZE(x2000_i2c5_groups), },
- { "i2s1", x2000_i2s1_groups, ARRAY_SIZE(x2000_i2s1_groups), },
- { "i2s2", x2000_i2s2_groups, ARRAY_SIZE(x2000_i2s2_groups), },
- { "i2s3", x2000_i2s3_groups, ARRAY_SIZE(x2000_i2s3_groups), },
- { "dmic", x2000_dmic_groups, ARRAY_SIZE(x2000_dmic_groups), },
- { "cim", x2000_cim_groups, ARRAY_SIZE(x2000_cim_groups), },
- { "lcd", x2000_lcd_groups, ARRAY_SIZE(x2000_lcd_groups), },
- { "pwm0", x2000_pwm0_groups, ARRAY_SIZE(x2000_pwm0_groups), },
- { "pwm1", x2000_pwm1_groups, ARRAY_SIZE(x2000_pwm1_groups), },
- { "pwm2", x2000_pwm2_groups, ARRAY_SIZE(x2000_pwm2_groups), },
- { "pwm3", x2000_pwm3_groups, ARRAY_SIZE(x2000_pwm3_groups), },
- { "pwm4", x2000_pwm4_groups, ARRAY_SIZE(x2000_pwm4_groups), },
- { "pwm5", x2000_pwm5_groups, ARRAY_SIZE(x2000_pwm5_groups), },
- { "pwm6", x2000_pwm6_groups, ARRAY_SIZE(x2000_pwm6_groups), },
- { "pwm7", x2000_pwm7_groups, ARRAY_SIZE(x2000_pwm7_groups), },
- { "pwm8", x2000_pwm8_groups, ARRAY_SIZE(x2000_pwm8_groups), },
- { "pwm9", x2000_pwm9_groups, ARRAY_SIZE(x2000_pwm9_groups), },
- { "pwm10", x2000_pwm10_groups, ARRAY_SIZE(x2000_pwm10_groups), },
- { "pwm11", x2000_pwm11_groups, ARRAY_SIZE(x2000_pwm11_groups), },
- { "pwm12", x2000_pwm12_groups, ARRAY_SIZE(x2000_pwm12_groups), },
- { "pwm13", x2000_pwm13_groups, ARRAY_SIZE(x2000_pwm13_groups), },
- { "pwm14", x2000_pwm14_groups, ARRAY_SIZE(x2000_pwm14_groups), },
- { "pwm15", x2000_pwm15_groups, ARRAY_SIZE(x2000_pwm15_groups), },
- { "mac0", x2000_mac0_groups, ARRAY_SIZE(x2000_mac0_groups), },
- { "mac1", x2000_mac1_groups, ARRAY_SIZE(x2000_mac1_groups), },
- { "otg", x2000_otg_groups, ARRAY_SIZE(x2000_otg_groups), },
+ INGENIC_PIN_FUNCTION("uart0", x2000_uart0),
+ INGENIC_PIN_FUNCTION("uart1", x2000_uart1),
+ INGENIC_PIN_FUNCTION("uart2", x2000_uart2),
+ INGENIC_PIN_FUNCTION("uart3", x2000_uart3),
+ INGENIC_PIN_FUNCTION("uart4", x2000_uart4),
+ INGENIC_PIN_FUNCTION("uart5", x2000_uart5),
+ INGENIC_PIN_FUNCTION("uart6", x2000_uart6),
+ INGENIC_PIN_FUNCTION("uart7", x2000_uart7),
+ INGENIC_PIN_FUNCTION("uart8", x2000_uart8),
+ INGENIC_PIN_FUNCTION("uart9", x2000_uart9),
+ INGENIC_PIN_FUNCTION("sfc", x2000_sfc),
+ INGENIC_PIN_FUNCTION("ssi0", x2000_ssi0),
+ INGENIC_PIN_FUNCTION("ssi1", x2000_ssi1),
+ INGENIC_PIN_FUNCTION("mmc0", x2000_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", x2000_mmc1),
+ INGENIC_PIN_FUNCTION("mmc2", x2000_mmc2),
+ INGENIC_PIN_FUNCTION("emc", x2000_emc),
+ INGENIC_PIN_FUNCTION("emc-cs1", x2000_cs1),
+ INGENIC_PIN_FUNCTION("emc-cs2", x2000_cs2),
+ INGENIC_PIN_FUNCTION("i2c0", x2000_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", x2000_i2c1),
+ INGENIC_PIN_FUNCTION("i2c2", x2000_i2c2),
+ INGENIC_PIN_FUNCTION("i2c3", x2000_i2c3),
+ INGENIC_PIN_FUNCTION("i2c4", x2000_i2c4),
+ INGENIC_PIN_FUNCTION("i2c5", x2000_i2c5),
+ INGENIC_PIN_FUNCTION("i2s1", x2000_i2s1),
+ INGENIC_PIN_FUNCTION("i2s2", x2000_i2s2),
+ INGENIC_PIN_FUNCTION("i2s3", x2000_i2s3),
+ INGENIC_PIN_FUNCTION("dmic", x2000_dmic),
+ INGENIC_PIN_FUNCTION("cim", x2000_cim),
+ INGENIC_PIN_FUNCTION("lcd", x2000_lcd),
+ INGENIC_PIN_FUNCTION("pwm0", x2000_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", x2000_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", x2000_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", x2000_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", x2000_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", x2000_pwm5),
+ INGENIC_PIN_FUNCTION("pwm6", x2000_pwm6),
+ INGENIC_PIN_FUNCTION("pwm7", x2000_pwm7),
+ INGENIC_PIN_FUNCTION("pwm8", x2000_pwm8),
+ INGENIC_PIN_FUNCTION("pwm9", x2000_pwm9),
+ INGENIC_PIN_FUNCTION("pwm10", x2000_pwm10),
+ INGENIC_PIN_FUNCTION("pwm11", x2000_pwm11),
+ INGENIC_PIN_FUNCTION("pwm12", x2000_pwm12),
+ INGENIC_PIN_FUNCTION("pwm13", x2000_pwm13),
+ INGENIC_PIN_FUNCTION("pwm14", x2000_pwm14),
+ INGENIC_PIN_FUNCTION("pwm15", x2000_pwm15),
+ INGENIC_PIN_FUNCTION("mac0", x2000_mac0),
+ INGENIC_PIN_FUNCTION("mac1", x2000_mac1),
+ INGENIC_PIN_FUNCTION("otg", x2000_otg),
};
static const struct regmap_range x2000_access_ranges[] = {
@@ -3196,54 +3201,54 @@ static const struct group_desc x2100_groups[] = {
static const char *x2100_mac_groups[] = { "mac", };
static const struct function_desc x2100_functions[] = {
- { "uart0", x2000_uart0_groups, ARRAY_SIZE(x2000_uart0_groups), },
- { "uart1", x2000_uart1_groups, ARRAY_SIZE(x2000_uart1_groups), },
- { "uart2", x2000_uart2_groups, ARRAY_SIZE(x2000_uart2_groups), },
- { "uart3", x2000_uart3_groups, ARRAY_SIZE(x2000_uart3_groups), },
- { "uart4", x2000_uart4_groups, ARRAY_SIZE(x2000_uart4_groups), },
- { "uart5", x2000_uart5_groups, ARRAY_SIZE(x2000_uart5_groups), },
- { "uart6", x2000_uart6_groups, ARRAY_SIZE(x2000_uart6_groups), },
- { "uart7", x2000_uart7_groups, ARRAY_SIZE(x2000_uart7_groups), },
- { "uart8", x2000_uart8_groups, ARRAY_SIZE(x2000_uart8_groups), },
- { "uart9", x2000_uart9_groups, ARRAY_SIZE(x2000_uart9_groups), },
- { "sfc", x2000_sfc_groups, ARRAY_SIZE(x2000_sfc_groups), },
- { "ssi0", x2000_ssi0_groups, ARRAY_SIZE(x2000_ssi0_groups), },
- { "ssi1", x2000_ssi1_groups, ARRAY_SIZE(x2000_ssi1_groups), },
- { "mmc0", x2000_mmc0_groups, ARRAY_SIZE(x2000_mmc0_groups), },
- { "mmc1", x2000_mmc1_groups, ARRAY_SIZE(x2000_mmc1_groups), },
- { "mmc2", x2000_mmc2_groups, ARRAY_SIZE(x2000_mmc2_groups), },
- { "emc", x2000_emc_groups, ARRAY_SIZE(x2000_emc_groups), },
- { "emc-cs1", x2000_cs1_groups, ARRAY_SIZE(x2000_cs1_groups), },
- { "emc-cs2", x2000_cs2_groups, ARRAY_SIZE(x2000_cs2_groups), },
- { "i2c0", x2000_i2c0_groups, ARRAY_SIZE(x2000_i2c0_groups), },
- { "i2c1", x2000_i2c1_groups, ARRAY_SIZE(x2000_i2c1_groups), },
- { "i2c2", x2000_i2c2_groups, ARRAY_SIZE(x2000_i2c2_groups), },
- { "i2c3", x2000_i2c3_groups, ARRAY_SIZE(x2000_i2c3_groups), },
- { "i2c4", x2000_i2c4_groups, ARRAY_SIZE(x2000_i2c4_groups), },
- { "i2c5", x2000_i2c5_groups, ARRAY_SIZE(x2000_i2c5_groups), },
- { "i2s1", x2000_i2s1_groups, ARRAY_SIZE(x2000_i2s1_groups), },
- { "i2s2", x2000_i2s2_groups, ARRAY_SIZE(x2000_i2s2_groups), },
- { "i2s3", x2000_i2s3_groups, ARRAY_SIZE(x2000_i2s3_groups), },
- { "dmic", x2000_dmic_groups, ARRAY_SIZE(x2000_dmic_groups), },
- { "cim", x2000_cim_groups, ARRAY_SIZE(x2000_cim_groups), },
- { "lcd", x2000_lcd_groups, ARRAY_SIZE(x2000_lcd_groups), },
- { "pwm0", x2000_pwm0_groups, ARRAY_SIZE(x2000_pwm0_groups), },
- { "pwm1", x2000_pwm1_groups, ARRAY_SIZE(x2000_pwm1_groups), },
- { "pwm2", x2000_pwm2_groups, ARRAY_SIZE(x2000_pwm2_groups), },
- { "pwm3", x2000_pwm3_groups, ARRAY_SIZE(x2000_pwm3_groups), },
- { "pwm4", x2000_pwm4_groups, ARRAY_SIZE(x2000_pwm4_groups), },
- { "pwm5", x2000_pwm5_groups, ARRAY_SIZE(x2000_pwm5_groups), },
- { "pwm6", x2000_pwm6_groups, ARRAY_SIZE(x2000_pwm6_groups), },
- { "pwm7", x2000_pwm7_groups, ARRAY_SIZE(x2000_pwm7_groups), },
- { "pwm8", x2000_pwm8_groups, ARRAY_SIZE(x2000_pwm8_groups), },
- { "pwm9", x2000_pwm9_groups, ARRAY_SIZE(x2000_pwm9_groups), },
- { "pwm10", x2000_pwm10_groups, ARRAY_SIZE(x2000_pwm10_groups), },
- { "pwm11", x2000_pwm11_groups, ARRAY_SIZE(x2000_pwm11_groups), },
- { "pwm12", x2000_pwm12_groups, ARRAY_SIZE(x2000_pwm12_groups), },
- { "pwm13", x2000_pwm13_groups, ARRAY_SIZE(x2000_pwm13_groups), },
- { "pwm14", x2000_pwm14_groups, ARRAY_SIZE(x2000_pwm14_groups), },
- { "pwm15", x2000_pwm15_groups, ARRAY_SIZE(x2000_pwm15_groups), },
- { "mac", x2100_mac_groups, ARRAY_SIZE(x2100_mac_groups), },
+ INGENIC_PIN_FUNCTION("uart0", x2000_uart0),
+ INGENIC_PIN_FUNCTION("uart1", x2000_uart1),
+ INGENIC_PIN_FUNCTION("uart2", x2000_uart2),
+ INGENIC_PIN_FUNCTION("uart3", x2000_uart3),
+ INGENIC_PIN_FUNCTION("uart4", x2000_uart4),
+ INGENIC_PIN_FUNCTION("uart5", x2000_uart5),
+ INGENIC_PIN_FUNCTION("uart6", x2000_uart6),
+ INGENIC_PIN_FUNCTION("uart7", x2000_uart7),
+ INGENIC_PIN_FUNCTION("uart8", x2000_uart8),
+ INGENIC_PIN_FUNCTION("uart9", x2000_uart9),
+ INGENIC_PIN_FUNCTION("sfc", x2000_sfc),
+ INGENIC_PIN_FUNCTION("ssi0", x2000_ssi0),
+ INGENIC_PIN_FUNCTION("ssi1", x2000_ssi1),
+ INGENIC_PIN_FUNCTION("mmc0", x2000_mmc0),
+ INGENIC_PIN_FUNCTION("mmc1", x2000_mmc1),
+ INGENIC_PIN_FUNCTION("mmc2", x2000_mmc2),
+ INGENIC_PIN_FUNCTION("emc", x2000_emc),
+ INGENIC_PIN_FUNCTION("emc-cs1", x2000_cs1),
+ INGENIC_PIN_FUNCTION("emc-cs2", x2000_cs2),
+ INGENIC_PIN_FUNCTION("i2c0", x2000_i2c0),
+ INGENIC_PIN_FUNCTION("i2c1", x2000_i2c1),
+ INGENIC_PIN_FUNCTION("i2c2", x2000_i2c2),
+ INGENIC_PIN_FUNCTION("i2c3", x2000_i2c3),
+ INGENIC_PIN_FUNCTION("i2c4", x2000_i2c4),
+ INGENIC_PIN_FUNCTION("i2c5", x2000_i2c5),
+ INGENIC_PIN_FUNCTION("i2s1", x2000_i2s1),
+ INGENIC_PIN_FUNCTION("i2s2", x2000_i2s2),
+ INGENIC_PIN_FUNCTION("i2s3", x2000_i2s3),
+ INGENIC_PIN_FUNCTION("dmic", x2000_dmic),
+ INGENIC_PIN_FUNCTION("cim", x2000_cim),
+ INGENIC_PIN_FUNCTION("lcd", x2000_lcd),
+ INGENIC_PIN_FUNCTION("pwm0", x2000_pwm0),
+ INGENIC_PIN_FUNCTION("pwm1", x2000_pwm1),
+ INGENIC_PIN_FUNCTION("pwm2", x2000_pwm2),
+ INGENIC_PIN_FUNCTION("pwm3", x2000_pwm3),
+ INGENIC_PIN_FUNCTION("pwm4", x2000_pwm4),
+ INGENIC_PIN_FUNCTION("pwm5", x2000_pwm5),
+ INGENIC_PIN_FUNCTION("pwm6", x2000_pwm6),
+ INGENIC_PIN_FUNCTION("pwm7", x2000_pwm7),
+ INGENIC_PIN_FUNCTION("pwm8", x2000_pwm8),
+ INGENIC_PIN_FUNCTION("pwm9", x2000_pwm9),
+ INGENIC_PIN_FUNCTION("pwm10", x2000_pwm10),
+ INGENIC_PIN_FUNCTION("pwm11", x2000_pwm11),
+ INGENIC_PIN_FUNCTION("pwm12", x2000_pwm12),
+ INGENIC_PIN_FUNCTION("pwm13", x2000_pwm13),
+ INGENIC_PIN_FUNCTION("pwm14", x2000_pwm14),
+ INGENIC_PIN_FUNCTION("pwm15", x2000_pwm15),
+ INGENIC_PIN_FUNCTION("mac", x2100_mac),
};
static const struct ingenic_chip_info x2100_chip_info = {
@@ -3762,7 +3767,7 @@ static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
dev_dbg(pctldev->dev, "enable function %s group %s\n",
- func->name, grp->grp.name);
+ func->func.name, grp->grp.name);
mode = (uintptr_t)grp->data;
if (mode <= 3) {
@@ -4310,14 +4315,14 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
}
for (i = 0; i < chip_info->num_functions; i++) {
- const struct function_desc *func = &chip_info->functions[i];
+ const struct function_desc *function = &chip_info->functions[i];
+ const struct pinfunction *func = &function->func;
err = pinmux_generic_add_function(jzpc->pctl, func->name,
- func->group_names, func->num_group_names,
- func->data);
+ func->groups, func->ngroups,
+ function->data);
if (err < 0) {
- dev_err(dev, "Failed to register function %s\n",
- func->name);
+ dev_err(dev, "Failed to register function %s\n", func->name);
return err;
}
}
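After the conversion above, struct function_desc embeds a struct pinfunction, so registration code reads the name, groups and ngroups through the .func member and keeps the driver-private pointer in .data. A hedged sketch of that registration loop; the names are illustrative, not taken from the driver:

static int example_register_functions(struct pinctrl_dev *pctl,
				      const struct function_desc *funcs,
				      unsigned int nfuncs)
{
	unsigned int i;
	int err;

	for (i = 0; i < nfuncs; i++) {
		const struct pinfunction *func = &funcs[i].func;

		err = pinmux_generic_add_function(pctl, func->name,
						  func->groups, func->ngroups,
						  funcs[i].data);
		if (err < 0)
			return err;
	}

	return 0;
}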
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index b6d1ed9ec9a3..2753e14c3e38 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -849,7 +849,6 @@ static int k210_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
unsigned int *num_maps)
{
unsigned int reserved_maps;
- struct device_node *np;
int ret;
reserved_maps = 0;
@@ -861,13 +860,11 @@ static int k210_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
goto err;
- for_each_available_child_of_node(np_config, np) {
+ for_each_available_child_of_node_scoped(np_config, np) {
ret = k210_pinctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
- if (ret < 0) {
- of_node_put(np);
+ if (ret < 0)
goto err;
- }
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c
index b1349ee22799..b693f4787044 100644
--- a/drivers/pinctrl/pinctrl-keembay.c
+++ b/drivers/pinctrl/pinctrl-keembay.c
@@ -1566,7 +1566,7 @@ static int keembay_add_functions(struct keembay_pinctrl *kpc,
unsigned int grp_idx = 0;
int j;
- group_names = devm_kcalloc(kpc->dev, func->num_group_names,
+ group_names = devm_kcalloc(kpc->dev, func->func.ngroups,
sizeof(*group_names), GFP_KERNEL);
if (!group_names)
return -ENOMEM;
@@ -1576,20 +1576,20 @@ static int keembay_add_functions(struct keembay_pinctrl *kpc,
struct keembay_mux_desc *mux;
for (mux = pdesc->drv_data; mux->name; mux++) {
- if (!strcmp(mux->name, func->name))
+ if (!strcmp(mux->name, func->func.name))
group_names[grp_idx++] = pdesc->name;
}
}
- func->group_names = group_names;
+ func->func.groups = group_names;
}
/* Add all functions */
for (i = 0; i < kpc->nfuncs; i++) {
pinmux_generic_add_function(kpc->pctrl,
- functions[i].name,
- functions[i].group_names,
- functions[i].num_group_names,
+ functions[i].func.name,
+ functions[i].func.groups,
+ functions[i].func.ngroups,
functions[i].data);
}
@@ -1619,17 +1619,17 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc)
struct function_desc *fdesc;
/* Check if we already have function for this mux */
- for (fdesc = keembay_funcs; fdesc->name; fdesc++) {
- if (!strcmp(mux->name, fdesc->name)) {
- fdesc->num_group_names++;
+ for (fdesc = keembay_funcs; fdesc->func.name; fdesc++) {
+ if (!strcmp(mux->name, fdesc->func.name)) {
+ fdesc->func.ngroups++;
break;
}
}
/* Setup new function for this mux we didn't see before */
- if (!fdesc->name) {
- fdesc->name = mux->name;
- fdesc->num_group_names = 1;
+ if (!fdesc->func.name) {
+ fdesc->func.name = mux->name;
+ fdesc->func.ngroups = 1;
fdesc->data = &mux->mode;
kpc->nfuncs++;
}
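The keembay hunks above build the function table at probe time, so the embedded struct pinfunction is filled in field by field rather than with PINCTRL_PINFUNCTION(). A hedged sketch of that count-or-add step, with invented names:

static void example_add_or_count(struct function_desc *table, size_t nents,
				 const char *mux_name, void *mux_data)
{
	struct function_desc *fdesc;

	for (fdesc = table; fdesc < table + nents && fdesc->func.name; fdesc++) {
		if (!strcmp(fdesc->func.name, mux_name)) {
			fdesc->func.ngroups++;	/* another group for this function */
			return;
		}
	}

	if (fdesc < table + nents) {		/* first occurrence of this mux name */
		fdesc->func.name = mux_name;
		fdesc->func.ngroups = 1;
		fdesc->data = mux_data;
	}
}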
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 38c3a14c8b58..737d0ae3d0b6 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -696,4 +696,5 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
}
EXPORT_SYMBOL_GPL(mcp23s08_probe_one);
+MODULE_DESCRIPTION("MCP23S08 SPI/I2C GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_i2c.c b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
index 04e8e7d079f0..94e1add6ddd7 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
@@ -111,4 +111,5 @@ static void mcp23s08_i2c_exit(void)
}
module_exit(mcp23s08_i2c_exit);
+MODULE_DESCRIPTION("MCP23S08 I2C GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
index 4a872fff5fe8..54f61c8cb1c0 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
@@ -263,4 +263,5 @@ static void mcp23s08_spi_exit(void)
}
module_exit(mcp23s08_spi_exit);
+MODULE_DESCRIPTION("MCP23S08 SPI GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-mlxbf3.c b/drivers/pinctrl/pinctrl-mlxbf3.c
index 7d1713824a89..ffb5dda364dc 100644
--- a/drivers/pinctrl/pinctrl-mlxbf3.c
+++ b/drivers/pinctrl/pinctrl-mlxbf3.c
@@ -259,16 +259,16 @@ static int mlxbf3_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(priv->fw_ctrl_set0);
priv->fw_ctrl_clr0 = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(priv->fw_ctrl_set0))
- return PTR_ERR(priv->fw_ctrl_set0);
+ if (IS_ERR(priv->fw_ctrl_clr0))
+ return PTR_ERR(priv->fw_ctrl_clr0);
priv->fw_ctrl_set1 = devm_platform_ioremap_resource(pdev, 2);
- if (IS_ERR(priv->fw_ctrl_set0))
- return PTR_ERR(priv->fw_ctrl_set0);
+ if (IS_ERR(priv->fw_ctrl_set1))
+ return PTR_ERR(priv->fw_ctrl_set1);
priv->fw_ctrl_clr1 = devm_platform_ioremap_resource(pdev, 3);
- if (IS_ERR(priv->fw_ctrl_set0))
- return PTR_ERR(priv->fw_ctrl_set0);
+ if (IS_ERR(priv->fw_ctrl_clr1))
+ return PTR_ERR(priv->fw_ctrl_clr1);
ret = devm_pinctrl_register_and_init(dev,
&mlxbf3_pin_desc,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 3f56991f5b89..0eacaf10c640 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -915,9 +915,8 @@ static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
RK_MUXROUTE_SAME(0, RK_PC3, 1, 0x314, BIT(16 + 0) | BIT(0)), /* rtc_clk */
RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x314, BIT(16 + 2) | BIT(16 + 3)), /* uart2_rxm0 */
RK_MUXROUTE_SAME(4, RK_PD2, 2, 0x314, BIT(16 + 2) | BIT(16 + 3) | BIT(2)), /* uart2_rxm1 */
- RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x608, BIT(16 + 8) | BIT(16 + 9)), /* i2c3_sdam0 */
- RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(8)), /* i2c3_sdam1 */
- RK_MUXROUTE_SAME(2, RK_PA0, 3, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(9)), /* i2c3_sdam2 */
+ RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x314, BIT(16 + 4)), /* i2c3_sdam0 */
+ RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x314, BIT(16 + 4) | BIT(4)), /* i2c3_sdam1 */
RK_MUXROUTE_SAME(1, RK_PA3, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclktxm0 */
RK_MUXROUTE_SAME(1, RK_PA4, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclkrxm0 */
RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclktxm1 */
@@ -926,18 +925,6 @@ static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
RK_MUXROUTE_SAME(1, RK_PB6, 4, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* pdm-clkm1 */
RK_MUXROUTE_SAME(2, RK_PA6, 2, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* pdm-clkm2 */
RK_MUXROUTE_SAME(2, RK_PA4, 3, 0x600, BIT(16 + 2) | BIT(2)), /* pdm-clkm-m2 */
- RK_MUXROUTE_SAME(3, RK_PB2, 3, 0x314, BIT(16 + 9)), /* spi1_miso */
- RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x314, BIT(16 + 9) | BIT(9)), /* spi1_miso_m1 */
- RK_MUXROUTE_SAME(0, RK_PB3, 3, 0x314, BIT(16 + 10) | BIT(16 + 11)), /* owire_m0 */
- RK_MUXROUTE_SAME(1, RK_PC6, 7, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* owire_m1 */
- RK_MUXROUTE_SAME(2, RK_PA2, 5, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* owire_m2 */
- RK_MUXROUTE_SAME(0, RK_PB3, 2, 0x314, BIT(16 + 12) | BIT(16 + 13)), /* can_rxd_m0 */
- RK_MUXROUTE_SAME(1, RK_PC6, 5, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* can_rxd_m1 */
- RK_MUXROUTE_SAME(2, RK_PA2, 4, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* can_rxd_m2 */
- RK_MUXROUTE_SAME(1, RK_PC4, 3, 0x314, BIT(16 + 14)), /* mac_rxd0_m0 */
- RK_MUXROUTE_SAME(4, RK_PA2, 2, 0x314, BIT(16 + 14) | BIT(14)), /* mac_rxd0_m1 */
- RK_MUXROUTE_SAME(3, RK_PB4, 4, 0x314, BIT(16 + 15)), /* uart3_rx */
- RK_MUXROUTE_SAME(0, RK_PC1, 3, 0x314, BIT(16 + 15) | BIT(15)), /* uart3_rx_m1 */
};
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
@@ -3107,7 +3094,6 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np,
u32 index)
{
struct device *dev = info->dev;
- struct device_node *child;
struct rockchip_pmx_func *func;
struct rockchip_pin_group *grp;
int ret;
@@ -3128,14 +3114,12 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np,
if (!func->groups)
return -ENOMEM;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
func->groups[i] = child->name;
grp = &info->groups[grp_index++];
ret = rockchip_pinctrl_parse_groups(child, grp, info, i++);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
}
return 0;
@@ -3146,7 +3130,6 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *child;
int ret;
int i;
@@ -3165,14 +3148,13 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
i = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (of_match_node(rockchip_bank_match, child))
continue;
ret = rockchip_pinctrl_parse_functions(child, info, i++);
if (ret) {
dev_err(dev, "failed to parse function\n");
- of_node_put(child);
return ret;
}
}
diff --git a/drivers/pinctrl/pinctrl-scmi.c b/drivers/pinctrl/pinctrl-scmi.c
index 036bc1e3fc6c..df4bbcd7d1d5 100644
--- a/drivers/pinctrl/pinctrl-scmi.c
+++ b/drivers/pinctrl/pinctrl-scmi.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/of.h>
#include <linux/scmi_protocol.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -504,6 +505,11 @@ static int pinctrl_scmi_get_pins(struct scmi_pinctrl *pmx,
return 0;
}
+static const char * const scmi_pinctrl_blocklist[] = {
+ "fsl,imx95",
+ NULL
+};
+
static int scmi_pinctrl_probe(struct scmi_device *sdev)
{
int ret;
@@ -515,6 +521,9 @@ static int scmi_pinctrl_probe(struct scmi_device *sdev)
if (!sdev->handle)
return -EINVAL;
+ if (of_machine_compatible_match(scmi_pinctrl_blocklist))
+ return -ENODEV;
+
handle = sdev->handle;
pinctrl_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PINCTRL, &ph);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index a798f31d6954..4c6bfabb6bd7 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1329,7 +1329,6 @@ static void pcs_irq_free(struct pcs_device *pcs)
static void pcs_free_resources(struct pcs_device *pcs)
{
pcs_irq_free(pcs);
- pinctrl_unregister(pcs->pctl);
#if IS_BUILTIN(CONFIG_PINCTRL_SINGLE)
if (pcs->missing_nr_pinctrl_cells)
@@ -1879,7 +1878,7 @@ static int pcs_probe(struct platform_device *pdev)
if (ret < 0)
goto free;
- ret = pinctrl_register_and_init(&pcs->desc, pcs->dev, pcs, &pcs->pctl);
+ ret = devm_pinctrl_register_and_init(pcs->dev, &pcs->desc, pcs, &pcs->pctl);
if (ret) {
dev_err(pcs->dev, "could not register single pinctrl driver\n");
goto free;
@@ -1912,8 +1911,10 @@ static int pcs_probe(struct platform_device *pdev)
dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size);
- return pinctrl_enable(pcs->pctl);
+ if (pinctrl_enable(pcs->pctl))
+ goto free;
+ return 0;
free:
pcs_free_resources(pcs);
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 5d9abd6547d0..fe2d52e434db 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1195,10 +1195,10 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
struct property *pp;
struct device *dev = info->dev;
struct st_pinconf *conf;
- struct device_node *pins;
+ struct device_node *pins __free(device_node) = NULL;
phandle bank;
unsigned int offset;
- int i = 0, npins = 0, nr_props, ret = 0;
+ int i = 0, npins = 0, nr_props;
pins = of_get_child_by_name(np, "st,pins");
if (!pins)
@@ -1213,8 +1213,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
npins++;
} else {
pr_warn("Invalid st,pins in %pOFn node\n", np);
- ret = -EINVAL;
- goto out_put_node;
+ return -EINVAL;
}
}
@@ -1223,10 +1222,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
grp->pins = devm_kcalloc(dev, npins, sizeof(*grp->pins), GFP_KERNEL);
grp->pin_conf = devm_kcalloc(dev, npins, sizeof(*grp->pin_conf), GFP_KERNEL);
- if (!grp->pins || !grp->pin_conf) {
- ret = -ENOMEM;
- goto out_put_node;
- }
+ if (!grp->pins || !grp->pin_conf)
+ return -ENOMEM;
/* <bank offset mux direction rt_type rt_delay rt_clk> */
for_each_property_of_node(pins, pp) {
@@ -1260,17 +1257,13 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
i++;
}
-out_put_node:
- of_node_put(pins);
-
- return ret;
+ return 0;
}
static int st_pctl_parse_functions(struct device_node *np,
struct st_pinctrl *info, u32 index, int *grp_index)
{
struct device *dev = info->dev;
- struct device_node *child;
struct st_pmx_func *func;
struct st_pctl_group *grp;
int ret, i;
@@ -1285,15 +1278,13 @@ static int st_pctl_parse_functions(struct device_node *np,
return -ENOMEM;
i = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
func->groups[i] = child->name;
grp = &info->groups[*grp_index];
*grp_index += 1;
ret = st_pctl_dt_parse_groups(child, grp, info, i++);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
}
dev_info(dev, "Function[%d\t name:%s,\tgroups:%d]\n", index, func->name, func->ngroups);
@@ -1601,7 +1592,6 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
int i = 0, j = 0, k = 0, bank;
struct pinctrl_pin_desc *pdesc;
struct device_node *np = dev->of_node;
- struct device_node *child;
int grp_index = 0;
int irq = 0;
@@ -1646,25 +1636,21 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
pctl_desc->pins = pdesc;
bank = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (of_property_read_bool(child, "gpio-controller")) {
const char *bank_name = NULL;
char **pin_names;
ret = st_gpiolib_register_bank(info, bank, child);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
k = info->banks[bank].range.pin_base;
bank_name = info->banks[bank].range.name;
pin_names = devm_kasprintf_strarray(dev, bank_name, ST_GPIO_PINS_PER_BANK);
- if (IS_ERR(pin_names)) {
- of_node_put(child);
+ if (IS_ERR(pin_names))
return PTR_ERR(pin_names);
- }
for (j = 0; j < ST_GPIO_PINS_PER_BANK; j++, k++) {
pdesc->number = k;
@@ -1678,7 +1664,6 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
i++, &grp_index);
if (ret) {
dev_err(dev, "No functions found.\n");
- of_node_put(child);
return ret;
}
}
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index c3b76e6511ac..4f98f72565f4 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -830,4 +830,5 @@ static struct platform_driver tb10x_pinctrl_pdrv = {
module_platform_driver(tb10x_pinctrl_pdrv);
MODULE_AUTHOR("Christian Ruppert <christian.ruppert@abilis.com>");
+MODULE_DESCRIPTION("Abilis Systems TB10x pinctrl driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-tps6594.c b/drivers/pinctrl/pinctrl-tps6594.c
index 5e7c7cf93445..54cc810f79d6 100644
--- a/drivers/pinctrl/pinctrl-tps6594.c
+++ b/drivers/pinctrl/pinctrl-tps6594.c
@@ -237,13 +237,13 @@ struct muxval_remap {
u8 remap;
};
-struct muxval_remap tps65224_muxval_remap[] = {
+static struct muxval_remap tps65224_muxval_remap[] = {
{5, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION, TPS65224_PINCTRL_WKUP_FUNCTION_GPIO5},
{5, TPS65224_PINCTRL_SYNCCLKIN_FUNCTION, TPS65224_PINCTRL_SYNCCLKIN_FUNCTION_GPIO5},
{5, TPS65224_PINCTRL_NSLEEP2_FUNCTION, TPS65224_PINCTRL_NSLEEP2_FUNCTION_GPIO5},
};
-struct muxval_remap tps6594_muxval_remap[] = {
+static struct muxval_remap tps6594_muxval_remap[] = {
{8, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION_GPIO8},
{8, TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION, TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION_GPIO8},
{9, TPS6594_PINCTRL_CLK32KOUT_FUNCTION, TPS6594_PINCTRL_CLK32KOUT_FUNCTION_GPIO9},
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index 5c46b7d7ebcb..3c6d56fdb8c9 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -10,6 +10,7 @@
#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -97,7 +98,7 @@ static int zynqmp_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
{
struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->ngroups;
+ return pctrl->ngroups + zynqmp_desc.npins;
}
static const char *zynqmp_pctrl_get_group_name(struct pinctrl_dev *pctldev,
@@ -105,7 +106,10 @@ static const char *zynqmp_pctrl_get_group_name(struct pinctrl_dev *pctldev,
{
struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->groups[selector].name;
+ if (selector < pctrl->ngroups)
+ return pctrl->groups[selector].name;
+
+ return zynqmp_desc.pins[selector - pctrl->ngroups].name;
}
static int zynqmp_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
@@ -115,8 +119,13 @@ static int zynqmp_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
{
struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *pins = pctrl->groups[selector].pins;
- *npins = pctrl->groups[selector].npins;
+ if (selector < pctrl->ngroups) {
+ *pins = pctrl->groups[selector].pins;
+ *npins = pctrl->groups[selector].npins;
+ } else {
+ *pins = &zynqmp_desc.pins[selector - pctrl->ngroups].number;
+ *npins = 1;
+ }
return 0;
}
@@ -197,17 +206,16 @@ static int zynqmp_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int function,
unsigned int group)
{
- struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[group];
+ const unsigned int *pins;
+ unsigned int npins;
int ret, i;
- for (i = 0; i < pgrp->npins; i++) {
- unsigned int pin = pgrp->pins[i];
-
- ret = zynqmp_pm_pinctrl_set_function(pin, function);
+ zynqmp_pctrl_get_group_pins(pctldev, group, &pins, &npins);
+ for (i = 0; i < npins; i++) {
+ ret = zynqmp_pm_pinctrl_set_function(pins[i], function);
if (ret) {
dev_err(pctldev->dev, "set mux failed for pin %u\n",
- pin);
+ pins[i]);
return ret;
}
}
@@ -467,12 +475,13 @@ static int zynqmp_pinconf_group_set(struct pinctrl_dev *pctldev,
unsigned long *configs,
unsigned int num_configs)
{
+ const unsigned int *pins;
+ unsigned int npins;
int i, ret;
- struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[selector];
- for (i = 0; i < pgrp->npins; i++) {
- ret = zynqmp_pinconf_cfg_set(pctldev, pgrp->pins[i], configs,
+ zynqmp_pctrl_get_group_pins(pctldev, selector, &pins, &npins);
+ for (i = 0; i < npins; i++) {
+ ret = zynqmp_pinconf_cfg_set(pctldev, pins[i], configs,
num_configs);
if (ret)
return ret;
@@ -560,10 +569,12 @@ static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
{
u16 resp[NUM_GROUPS_PER_RESP] = {0};
const char **fgroups;
- int ret, index, i;
+ int ret, index, i, pin;
+ unsigned int npins;
+ unsigned long *used_pins __free(bitmap) =
+ bitmap_zalloc(zynqmp_desc.npins, GFP_KERNEL);
- fgroups = devm_kcalloc(dev, func->ngroups, sizeof(*fgroups), GFP_KERNEL);
- if (!fgroups)
+ if (!used_pins)
return -ENOMEM;
for (index = 0; index < func->ngroups; index += NUM_GROUPS_PER_RESP) {
@@ -578,23 +589,37 @@ static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
if (resp[i] == RESERVED_GROUP)
continue;
- fgroups[index + i] = devm_kasprintf(dev, GFP_KERNEL,
- "%s_%d_grp",
- func->name,
- index + i);
- if (!fgroups[index + i])
- return -ENOMEM;
-
groups[resp[i]].name = devm_kasprintf(dev, GFP_KERNEL,
"%s_%d_grp",
func->name,
index + i);
if (!groups[resp[i]].name)
return -ENOMEM;
+
+ for (pin = 0; pin < groups[resp[i]].npins; pin++)
+ __set_bit(groups[resp[i]].pins[pin], used_pins);
}
}
done:
+ npins = bitmap_weight(used_pins, zynqmp_desc.npins);
+ fgroups = devm_kcalloc(dev, size_add(func->ngroups, npins),
+ sizeof(*fgroups), GFP_KERNEL);
+ if (!fgroups)
+ return -ENOMEM;
+
+ for (i = 0; i < func->ngroups; i++) {
+ fgroups[i] = devm_kasprintf(dev, GFP_KERNEL, "%s_%d_grp",
+ func->name, i);
+ if (!fgroups[i])
+ return -ENOMEM;
+ }
+
+ pin = 0;
+ for_each_set_bit(pin, used_pins, zynqmp_desc.npins)
+ fgroups[i++] = zynqmp_desc.pins[pin].name;
+
func->groups = fgroups;
+ func->ngroups += npins;
return 0;
}
@@ -718,7 +743,7 @@ static int zynqmp_pinctrl_prepare_group_pins(struct device *dev,
int ret;
for (pin = 0; pin < zynqmp_desc.npins; pin++) {
- ret = zynqmp_pinctrl_create_pin_groups(dev, groups, pin);
+ ret = zynqmp_pinctrl_create_pin_groups(dev, groups, zynqmp_desc.pins[pin].number);
if (ret)
return ret;
}
@@ -772,6 +797,10 @@ static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
if (!groups)
return -ENOMEM;
+ ret = zynqmp_pinctrl_prepare_group_pins(dev, groups, pctrl->ngroups);
+ if (ret)
+ return ret;
+
for (i = 0; i < pctrl->nfuncs; i++) {
ret = zynqmp_pinctrl_prepare_func_groups(dev, i, &funcs[i],
groups);
@@ -779,10 +808,6 @@ static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
return ret;
}
- ret = zynqmp_pinctrl_prepare_group_pins(dev, groups, pctrl->ngroups);
- if (ret)
- return ret;
-
pctrl->funcs = funcs;
pctrl->groups = groups;
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index addba55334d9..aae71a37219b 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -796,7 +796,7 @@ pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
if (!function)
return NULL;
- return function->name;
+ return function->func.name;
}
EXPORT_SYMBOL_GPL(pinmux_generic_get_function_name);
@@ -805,12 +805,12 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function_name);
* @pctldev: pin controller device
* @selector: function number
* @groups: array of pin groups
- * @num_groups: number of pin groups
+ * @ngroups: number of pin groups
*/
int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
const char * const **groups,
- unsigned int * const num_groups)
+ unsigned int * const ngroups)
{
struct function_desc *function;
@@ -821,8 +821,8 @@ int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
__func__, selector);
return -EINVAL;
}
- *groups = function->group_names;
- *num_groups = function->num_group_names;
+ *groups = function->func.groups;
+ *ngroups = function->func.ngroups;
return 0;
}
@@ -852,13 +852,13 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function);
* @pctldev: pin controller device
* @name: name of the function
* @groups: array of pin groups
- * @num_groups: number of pin groups
+ * @ngroups: number of pin groups
* @data: pin controller driver specific data
*/
int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const char *name,
const char * const *groups,
- const unsigned int num_groups,
+ const unsigned int ngroups,
void *data)
{
struct function_desc *function;
@@ -877,10 +877,7 @@ int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
if (!function)
return -ENOMEM;
- function->name = name;
- function->group_names = groups;
- function->num_group_names = num_groups;
- function->data = data;
+ *function = PINCTRL_FUNCTION_DESC(name, groups, ngroups, data);
error = radix_tree_insert(&pctldev->pin_function_tree, selector, function);
if (error)
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index 7c8aa25ccc80..2965ec20b77f 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -133,18 +133,21 @@ static inline void pinmux_init_device_debugfs(struct dentry *devroot,
/**
* struct function_desc - generic function descriptor
- * @name: name of the function
- * @group_names: array of pin group names
- * @num_group_names: number of pin group names
+ * @func: generic data of the pin function (name and groups of pins)
* @data: pin controller driver specific data
*/
struct function_desc {
- const char *name;
- const char * const *group_names;
- int num_group_names;
+ struct pinfunction func;
void *data;
};
+/* Convenient macro to define a generic pin function descriptor */
+#define PINCTRL_FUNCTION_DESC(_name, _grps, _num_grps, _data) \
+(struct function_desc) { \
+ .func = PINCTRL_PINFUNCTION(_name, _grps, _num_grps), \
+ .data = _data, \
+}
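+/*
+ * Illustrative use only (the names below are hypothetical, not part of
+ * the generic pinmux code): a driver can build a descriptor directly
+ * from its group table, e.g.
+ *
+ *	static const char * const foo_groups[] = { "foo_grp" };
+ *	struct function_desc fdesc =
+ *		PINCTRL_FUNCTION_DESC("foo", foo_groups,
+ *				      ARRAY_SIZE(foo_groups), NULL);
+ */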
+
int pinmux_generic_get_function_count(struct pinctrl_dev *pctldev);
const char *
@@ -154,7 +157,7 @@ pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
const char * const **groups,
- unsigned int * const num_groups);
+ unsigned int * const ngroups);
struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
unsigned int selector);
@@ -162,7 +165,7 @@ struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const char *name,
const char * const *groups,
- unsigned int const num_groups,
+ unsigned int const ngroups,
void *data);
int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 24619e80b2cc..dd9bbe8f3e11 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -68,6 +68,15 @@ config PINCTRL_SC7280_LPASS_LPI
Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
(Low Power Island) found on the Qualcomm Technologies Inc SC7280 platform.
+config PINCTRL_SM4250_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SM4250 LPASS LPI pin controller driver"
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SM4250 platform.
+
config PINCTRL_SM6115_LPASS_LPI
tristate "Qualcomm Technologies Inc SM6115 LPASS LPI pin controller driver"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index e2e76071d268..eb04297b6388 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
obj-$(CONFIG_PINCTRL_SDX75) += pinctrl-sdx75.o
+obj-$(CONFIG_PINCTRL_SM4250_LPASS_LPI) += pinctrl-sm4250-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM4450) += pinctrl-sm4450.o
obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
obj-$(CONFIG_PINCTRL_SM6115_LPASS_LPI) += pinctrl-sm6115-lpass-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 0d98008e33ee..7366aba5a199 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -20,7 +20,7 @@
#include "pinctrl-lpass-lpi.h"
-#define MAX_NR_GPIO 23
+#define MAX_NR_GPIO 32
#define GPIO_FUNC 0
#define MAX_LPI_NUM_CLKS 2
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index 1e694a966953..894c042cb524 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -1290,6 +1290,22 @@ static const int sdm670_reserved_gpios[] = {
58, 59, 60, 61, 62, 63, 64, 69, 70, 71, 72, 73, 74, 104, -1
};
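+/* Wakeup-capable GPIOs paired with their PDC interrupt: { gpio, wakeirq } */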
+static const struct msm_gpio_wakeirq_map sdm670_pdc_map[] = {
+ { 1, 30 }, { 3, 31 }, { 5, 32 }, { 10, 33 }, { 11, 34 },
+ { 20, 35 }, { 22, 36 }, { 24, 37 }, { 26, 38 }, { 30, 39 },
+ { 31, 117 }, { 32, 41 }, { 34, 42 }, { 36, 43 }, { 37, 44 },
+ { 38, 45 }, { 39, 46 }, { 40, 47 }, { 41, 115 }, { 43, 49 },
+ { 44, 50 }, { 46, 51 }, { 48, 52 }, { 49, 118 }, { 52, 54 },
+ { 53, 55 }, { 54, 56 }, { 56, 57 }, { 57, 58 }, { 66, 66 },
+ { 68, 67 }, { 77, 70 }, { 78, 71 }, { 79, 72 }, { 80, 73 },
+ { 84, 74 }, { 85, 75 }, { 86, 76 }, { 88, 77 }, { 89, 116 },
+ { 91, 79 }, { 92, 80 }, { 95, 81 }, { 96, 82 }, { 97, 83 },
+ { 101, 84 }, { 103, 85 }, { 115, 90 }, { 116, 91 }, { 117, 92 },
+ { 118, 93 }, { 119, 94 }, { 120, 95 }, { 121, 96 }, { 122, 97 },
+ { 123, 98 }, { 124, 99 }, { 125, 100 }, { 127, 102 }, { 128, 103 },
+ { 129, 104 }, { 130, 105 }, { 132, 106 }, { 133, 107 }, { 145, 108 },
+};
+
static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
.pins = sdm670_pins,
.npins = ARRAY_SIZE(sdm670_pins),
@@ -1299,6 +1315,9 @@ static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
.ngroups = ARRAY_SIZE(sdm670_groups),
.ngpios = 151,
.reserved_gpios = sdm670_reserved_gpios,
+ .wakeirq_map = sdm670_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sdm670_pdc_map),
+ .wakeirq_dual_edge_errata = true,
};
static int sdm670_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c
new file mode 100644
index 000000000000..2d2c636a3e20
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm4250-lpass-lpi.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, 2023 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic01_clk,
+ LPI_MUX_dmic01_data,
+ LPI_MUX_dmic23_clk,
+ LPI_MUX_dmic23_data,
+ LPI_MUX_dmic4_clk,
+ LPI_MUX_dmic4_data,
+ LPI_MUX_ext_mclk0_a,
+ LPI_MUX_ext_mclk0_b,
+ LPI_MUX_ext_mclk1_a,
+ LPI_MUX_ext_mclk1_b,
+ LPI_MUX_ext_mclk1_c,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_i2s3_clk,
+ LPI_MUX_i2s3_data,
+ LPI_MUX_i2s3_ws,
+ LPI_MUX_qup_io_00,
+ LPI_MUX_qup_io_01,
+ LPI_MUX_qup_io_05,
+ LPI_MUX_qup_io_10,
+ LPI_MUX_qup_io_11,
+ LPI_MUX_qup_io_25,
+ LPI_MUX_qup_io_21,
+ LPI_MUX_qup_io_26,
+ LPI_MUX_qup_io_31,
+ LPI_MUX_qup_io_36,
+ LPI_MUX_qua_mi2s_data,
+ LPI_MUX_qua_mi2s_sclk,
+ LPI_MUX_qua_mi2s_ws,
+ LPI_MUX_slim_clk,
+ LPI_MUX_slim_data,
+ LPI_MUX_sync_out,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_swr_wsa_clk,
+ LPI_MUX_swr_wsa_data,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
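+/*
+ * LPI_MUX_gpio selects plain GPIO operation; LPI_MUX__ is the empty
+ * filler used for unused function slots in the LPI_PINGROUP() table.
+ */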
+
+static const struct pinctrl_pin_desc sm4250_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+};
+
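+/* For each function, the "gpioN" groups on which it can be selected. */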
+static const char * const dmic01_clk_groups[] = { "gpio6" };
+static const char * const dmic01_data_groups[] = { "gpio7" };
+static const char * const dmic23_clk_groups[] = { "gpio8" };
+static const char * const dmic23_data_groups[] = { "gpio9" };
+static const char * const dmic4_clk_groups[] = { "gpio10" };
+static const char * const dmic4_data_groups[] = { "gpio11" };
+static const char * const ext_mclk0_a_groups[] = { "gpio13" };
+static const char * const ext_mclk0_b_groups[] = { "gpio5" };
+static const char * const ext_mclk1_a_groups[] = { "gpio18" };
+static const char * const ext_mclk1_b_groups[] = { "gpio9" };
+static const char * const ext_mclk1_c_groups[] = { "gpio17" };
+static const char * const slim_clk_groups[] = { "gpio14" };
+static const char * const slim_data_groups[] = { "gpio15" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_data_groups[] = { "gpio12", "gpio13" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const i2s3_clk_groups[] = { "gpio14" };
+static const char * const i2s3_data_groups[] = { "gpio16", "gpio17" };
+static const char * const i2s3_ws_groups[] = { "gpio15" };
+static const char * const qup_io_00_groups[] = { "gpio19" };
+static const char * const qup_io_01_groups[] = { "gpio21" };
+static const char * const qup_io_05_groups[] = { "gpio23" };
+static const char * const qup_io_10_groups[] = { "gpio20" };
+static const char * const qup_io_11_groups[] = { "gpio22" };
+static const char * const qup_io_25_groups[] = { "gpio23" };
+static const char * const qup_io_21_groups[] = { "gpio25" };
+static const char * const qup_io_26_groups[] = { "gpio25" };
+static const char * const qup_io_31_groups[] = { "gpio26" };
+static const char * const qup_io_36_groups[] = { "gpio26" };
+static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4", "gpio5" };
+static const char * const qua_mi2s_sclk_groups[] = { "gpio0" };
+static const char * const qua_mi2s_ws_groups[] = { "gpio1" };
+static const char * const sync_out_groups[] = { "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26"};
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2" };
+static const char * const swr_wsa_clk_groups[] = { "gpio10" };
+static const char * const swr_wsa_data_groups[] = { "gpio11" };
+
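+/* LPI_PINGROUP(pin, slew control offset or LPI_NO_SLEW, up to four functions) */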
+static const struct lpi_pingroup sm4250_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _),
+ LPI_PINGROUP(5, 12, swr_rx_data, ext_mclk0_b, qua_mi2s_data, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic01_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic01_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic23_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic23_data, i2s1_data, ext_mclk1_b, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, swr_wsa_clk, dmic4_clk, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, swr_wsa_data, dmic4_data, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic23_clk, i2s2_data, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic23_data, i2s2_data, ext_mclk0_a, _),
+ LPI_PINGROUP(14, LPI_NO_SLEW, i2s3_clk, slim_clk, _, _),
+ LPI_PINGROUP(15, LPI_NO_SLEW, i2s3_ws, slim_data, _, _),
+ LPI_PINGROUP(16, LPI_NO_SLEW, i2s3_data, _, _, _),
+ LPI_PINGROUP(17, LPI_NO_SLEW, i2s3_data, ext_mclk1_c, _, _),
+ LPI_PINGROUP(18, 20, ext_mclk1_a, swr_rx_data, _, _),
+ LPI_PINGROUP(19, LPI_NO_SLEW, qup_io_00, sync_out, _, _),
+ LPI_PINGROUP(20, LPI_NO_SLEW, qup_io_10, sync_out, _, _),
+ LPI_PINGROUP(21, LPI_NO_SLEW, qup_io_01, sync_out, _, _),
+ LPI_PINGROUP(22, LPI_NO_SLEW, qup_io_11, sync_out, _, _),
+ LPI_PINGROUP(23, LPI_NO_SLEW, qup_io_25, qup_io_05, sync_out, _),
+ LPI_PINGROUP(25, LPI_NO_SLEW, qup_io_26, qup_io_21, sync_out, _),
+ LPI_PINGROUP(26, LPI_NO_SLEW, qup_io_36, qup_io_31, sync_out, _),
+};
+
+static const struct lpi_function sm4250_functions[] = {
+ LPI_FUNCTION(dmic01_clk),
+ LPI_FUNCTION(dmic01_data),
+ LPI_FUNCTION(dmic23_clk),
+ LPI_FUNCTION(dmic23_data),
+ LPI_FUNCTION(dmic4_clk),
+ LPI_FUNCTION(dmic4_data),
+ LPI_FUNCTION(ext_mclk0_a),
+ LPI_FUNCTION(ext_mclk0_b),
+ LPI_FUNCTION(ext_mclk1_a),
+ LPI_FUNCTION(ext_mclk1_b),
+ LPI_FUNCTION(ext_mclk1_c),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(i2s3_clk),
+ LPI_FUNCTION(i2s3_data),
+ LPI_FUNCTION(i2s3_ws),
+ LPI_FUNCTION(qup_io_00),
+ LPI_FUNCTION(qup_io_01),
+ LPI_FUNCTION(qup_io_05),
+ LPI_FUNCTION(qup_io_10),
+ LPI_FUNCTION(qup_io_11),
+ LPI_FUNCTION(qup_io_25),
+ LPI_FUNCTION(qup_io_21),
+ LPI_FUNCTION(qup_io_26),
+ LPI_FUNCTION(qup_io_31),
+ LPI_FUNCTION(qup_io_36),
+ LPI_FUNCTION(qua_mi2s_data),
+ LPI_FUNCTION(qua_mi2s_sclk),
+ LPI_FUNCTION(qua_mi2s_ws),
+ LPI_FUNCTION(slim_clk),
+ LPI_FUNCTION(slim_data),
+ LPI_FUNCTION(sync_out),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(swr_wsa_clk),
+ LPI_FUNCTION(swr_wsa_data),
+};
+
+static const struct lpi_pinctrl_variant_data sm4250_lpi_data = {
+ .pins = sm4250_lpi_pins,
+ .npins = ARRAY_SIZE(sm4250_lpi_pins),
+ .groups = sm4250_groups,
+ .ngroups = ARRAY_SIZE(sm4250_groups),
+ .functions = sm4250_functions,
+ .nfunctions = ARRAY_SIZE(sm4250_functions),
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ { .compatible = "qcom,sm4250-lpass-lpi-pinctrl", .data = &sm4250_lpi_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sm4250-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove_new = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("QTI SM4250 LPI GPIO pin control driver");
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 4abd6f18bbef..d2dd66769aa8 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1234,6 +1234,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8994-gpio", .data = (void *) 22 },
{ .compatible = "qcom,pm8998-gpio", .data = (void *) 26 },
{ .compatible = "qcom,pma8084-gpio", .data = (void *) 22 },
+ { .compatible = "qcom,pmc8380-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pmd8028-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmi632-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pmi8950-gpio", .data = (void *) 2 },
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
index d2de526a3b58..cae3e6553499 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -68,20 +68,20 @@
#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
-#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
-#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
-#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
-#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_6 F_(IRQ0_A, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1_A, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2_A, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3_A, IP0SR0_15_12)
#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
/* GPSR1 */
-#define GPSR1_28 F_(HTX3, IP3SR1_19_16)
-#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12)
-#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8)
-#define GPSR1_25 F_(HSCK3, IP3SR1_7_4)
-#define GPSR1_24 F_(HRX3, IP3SR1_3_0)
+#define GPSR1_28 F_(HTX3_A, IP3SR1_19_16)
+#define GPSR1_27 F_(HCTS3_N_A, IP3SR1_15_12)
+#define GPSR1_26 F_(HRTS3_N_A, IP3SR1_11_8)
+#define GPSR1_25 F_(HSCK3_A, IP3SR1_7_4)
+#define GPSR1_24 F_(HRX3_A, IP3SR1_3_0)
#define GPSR1_23 F_(GP1_23, IP2SR1_31_28)
#define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24)
#define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20)
@@ -119,14 +119,14 @@
#define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12)
#define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8)
#define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4)
-#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0)
-#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28)
+#define GPSR2_8 F_(TPU0TO0_A, IP1SR2_3_0)
+#define GPSR2_7 F_(TPU0TO1_A, IP0SR2_31_28)
#define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24)
-#define GPSR2_5 F_(FXR_TXENB_N, IP0SR2_23_20)
+#define GPSR2_5 F_(FXR_TXENB_N_A, IP0SR2_23_20)
#define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16)
#define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12)
#define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8)
-#define GPSR2_1 F_(FXR_TXENA_N, IP0SR2_7_4)
+#define GPSR2_1 F_(FXR_TXENA_N_A, IP0SR2_7_4)
#define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0)
/* GPSR3 */
@@ -275,13 +275,13 @@
/* SR0 */
/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3_A) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2_A) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1_A) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0_A) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
@@ -290,72 +290,72 @@
#define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1_A) FM(IRQ2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1_A) FM(TX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1_A) FM(RX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N_A) FM(CTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N_A) FM(RTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1_A) FM(SCK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* SR1 */
/* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_B) FM(TX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_B) FM(RX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_B) FM(RTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_B) FM(CTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_B) FM(SCK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_B) FM(TX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_B) FM(RX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_B) FM(CTS1_N_B) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_B) FM(RTS1_N_B) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_B) FM(SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
#define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2SR1_31_28 F_(0, 0) FM(TCLK2_A) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_3_0 FM(HRX3_A) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_7_4 FM(HSCK3_A) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_11_8 FM(HRTS3_N_A) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_15_12 FM(HCTS3_N_A) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3SR1_19_16 FM(HTX3_A) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* SR2 */
/* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_7_4 FM(FXR_TXENA_N_A) FM(CANFD1_RX) FM(TPU0TO3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX_A) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX_A) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_23_20 FM(FXR_TXENB_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR2_31_28 FM(TPU0TO1_A) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
-#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_3_0 FM(TPU0TO0_A) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2_A) F_(0, 0) FM(TCLK3_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3_A) FM(PWM1_B) FM(TCLK4_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
@@ -381,8 +381,8 @@
#define IP1SR3_11_8 FM(MMC_SD_CMD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_N_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_N_A) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
@@ -718,22 +718,22 @@ static const u16 pinmux_data[] = {
/* IP0SR0 */
PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_N_B),
- PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_A),
+ PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_B),
PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1),
PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
- PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3_A),
PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
- PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2_A),
PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
- PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1_A),
PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
- PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0_A),
PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
@@ -750,75 +750,75 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD),
PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2),
- PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1),
- PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_A),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1_A),
+ PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_B),
PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1),
- PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1),
- PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1_A),
+ PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1_A),
PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC),
- PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1),
- PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1_A),
+ PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1_A),
/* IP2SR0 */
PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD),
- PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N),
- PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N_A),
+ PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N_A),
PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK),
- PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N),
- PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N_A),
+ PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N_A),
PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD),
- PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1),
- PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1_A),
+ PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1_A),
/* IP0SR1 */
PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2),
- PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_A),
- PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_B),
+ PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3_B),
PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1),
- PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_A),
- PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_B),
+ PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3_B),
PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC),
- PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_A),
- PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_B),
+ PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N_B),
PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK),
- PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_A),
- PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_B),
+ PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N_B),
PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD),
- PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_A),
- PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_B),
+ PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3_B),
PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD),
PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2),
- PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_X),
- PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_B),
+ PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_B),
PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1),
- PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_X),
- PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_X),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_B),
+ PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_B),
/* IP1SR1 */
PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC),
- PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_X),
- PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_B),
+ PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_B),
PINMUX_IPSR_GPSR(IP1SR1_3_0, CANFD5_TX_B),
PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD),
- PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_X),
- PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_X),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_B),
+ PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_B),
PINMUX_IPSR_GPSR(IP1SR1_7_4, CANFD5_RX_B),
PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK),
- PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_X),
- PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_X),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_B),
+ PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_B),
PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD),
@@ -827,15 +827,15 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N),
PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N),
- PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8_A),
+ PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8),
PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N),
PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N),
- PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9_A),
+ PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9),
PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0),
PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0),
- PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A),
+ PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0),
/* IP2SR1 */
PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0),
@@ -845,99 +845,99 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A),
PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK),
- PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3),
+ PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3_B),
PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS),
- PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4),
+ PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4_B),
PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD),
- PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_A),
+ PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_B),
PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT),
- PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_A),
+ PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_B),
PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN),
PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_A),
- PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2),
+ PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2_A),
PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1),
PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B),
/* IP3SR1 */
- PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3),
+ PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3_A),
PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A),
PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2),
- PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3_A),
PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A),
PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK),
- PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_A),
+ PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_B),
- PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N_A),
PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A),
PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD),
- PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_A),
+ PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_B),
- PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N),
+ PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N_A),
PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A),
PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD),
- PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3),
+ PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3_A),
PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A),
PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC),
/* IP0SR2 */
PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA),
PINMUX_IPSR_GPSR(IP0SR2_3_0, CANFD1_TX),
- PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_A),
+ PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_B),
- PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N_A),
PINMUX_IPSR_GPSR(IP0SR2_7_4, CANFD1_RX),
- PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_A),
+ PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_B),
PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR),
- PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX),
+ PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX_A),
PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5),
PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR),
- PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX),
+ PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX_A),
PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B),
PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR),
- PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N),
+ PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N_A),
PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB),
- PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1_A),
PINMUX_IPSR_GPSR(IP0SR2_31_28, CANFD6_TX),
- PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_B),
+ PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_C),
/* IP1SR2 */
- PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0_A),
PINMUX_IPSR_GPSR(IP1SR2_3_0, CANFD6_RX),
- PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_A),
+ PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_B),
PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK),
- PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_X),
+ PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_B),
PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX),
- PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_X),
+ PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_B),
PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX),
PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR),
PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX),
- PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2),
- PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_A),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2_A),
+ PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_C),
PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX),
- PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3_A),
PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B),
- PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_A),
+ PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_C),
PINMUX_IPSR_GPSR(IP1SR2_27_24, CANFD3_TX),
- PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B),
+ PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2),
PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX),
PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B),
@@ -979,12 +979,12 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKIN),
PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKEN_IN),
PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A),
- PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_X),
+ PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_A),
PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKOUT),
PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKEN_OUT),
PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_N_A),
- PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_X),
+ PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_A),
PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL),
@@ -1531,15 +1531,14 @@ static const unsigned int canfd4_data_mux[] = {
};
/* - CANFD5 ----------------------------------------------------------------- */
-static const unsigned int canfd5_data_pins[] = {
- /* CANFD5_TX, CANFD5_RX */
+static const unsigned int canfd5_data_a_pins[] = {
+ /* CANFD5_TX_A, CANFD5_RX_A */
RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
};
-static const unsigned int canfd5_data_mux[] = {
- CANFD5_TX_MARK, CANFD5_RX_MARK,
+static const unsigned int canfd5_data_a_mux[] = {
+ CANFD5_TX_A_MARK, CANFD5_RX_A_MARK,
};
-/* - CANFD5_B ----------------------------------------------------------------- */
static const unsigned int canfd5_data_b_pins[] = {
/* CANFD5_TX_B, CANFD5_RX_B */
RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
@@ -1599,49 +1598,48 @@ static const unsigned int hscif0_ctrl_mux[] = {
};
/* - HSCIF1 ----------------------------------------------------------------- */
-static const unsigned int hscif1_data_pins[] = {
- /* HRX1, HTX1 */
+static const unsigned int hscif1_data_a_pins[] = {
+ /* HRX1_A, HTX1_A */
RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
};
-static const unsigned int hscif1_data_mux[] = {
- HRX1_MARK, HTX1_MARK,
+static const unsigned int hscif1_data_a_mux[] = {
+ HRX1_A_MARK, HTX1_A_MARK,
};
-static const unsigned int hscif1_clk_pins[] = {
- /* HSCK1 */
+static const unsigned int hscif1_clk_a_pins[] = {
+ /* HSCK1_A */
RCAR_GP_PIN(0, 18),
};
-static const unsigned int hscif1_clk_mux[] = {
- HSCK1_MARK,
+static const unsigned int hscif1_clk_a_mux[] = {
+ HSCK1_A_MARK,
};
-static const unsigned int hscif1_ctrl_pins[] = {
- /* HRTS1_N, HCTS1_N */
+static const unsigned int hscif1_ctrl_a_pins[] = {
+ /* HRTS1_N_A, HCTS1_N_A */
RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
};
-static const unsigned int hscif1_ctrl_mux[] = {
- HRTS1_N_MARK, HCTS1_N_MARK,
+static const unsigned int hscif1_ctrl_a_mux[] = {
+ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
};
-/* - HSCIF1_X---------------------------------------------------------------- */
-static const unsigned int hscif1_data_x_pins[] = {
- /* HRX1_X, HTX1_X */
+static const unsigned int hscif1_data_b_pins[] = {
+ /* HRX1_B, HTX1_B */
RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
};
-static const unsigned int hscif1_data_x_mux[] = {
- HRX1_X_MARK, HTX1_X_MARK,
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
};
-static const unsigned int hscif1_clk_x_pins[] = {
- /* HSCK1_X */
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* HSCK1_B */
RCAR_GP_PIN(1, 10),
};
-static const unsigned int hscif1_clk_x_mux[] = {
- HSCK1_X_MARK,
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK,
};
-static const unsigned int hscif1_ctrl_x_pins[] = {
- /* HRTS1_N_X, HCTS1_N_X */
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* HRTS1_N_B, HCTS1_N_B */
RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
};
-static const unsigned int hscif1_ctrl_x_mux[] = {
- HRTS1_N_X_MARK, HCTS1_N_X_MARK,
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
};
/* - HSCIF2 ----------------------------------------------------------------- */
@@ -1668,49 +1666,48 @@ static const unsigned int hscif2_ctrl_mux[] = {
};
/* - HSCIF3 ----------------------------------------------------------------- */
-static const unsigned int hscif3_data_pins[] = {
- /* HRX3, HTX3 */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* HRX3_A, HTX3_A */
RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
};
-static const unsigned int hscif3_data_mux[] = {
- HRX3_MARK, HTX3_MARK,
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
};
-static const unsigned int hscif3_clk_pins[] = {
- /* HSCK3 */
+static const unsigned int hscif3_clk_a_pins[] = {
+ /* HSCK3_A */
RCAR_GP_PIN(1, 25),
};
-static const unsigned int hscif3_clk_mux[] = {
- HSCK3_MARK,
+static const unsigned int hscif3_clk_a_mux[] = {
+ HSCK3_A_MARK,
};
-static const unsigned int hscif3_ctrl_pins[] = {
- /* HRTS3_N, HCTS3_N */
+static const unsigned int hscif3_ctrl_a_pins[] = {
+ /* HRTS3_N_A, HCTS3_N_A */
RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
};
-static const unsigned int hscif3_ctrl_mux[] = {
- HRTS3_N_MARK, HCTS3_N_MARK,
+static const unsigned int hscif3_ctrl_a_mux[] = {
+ HRTS3_N_A_MARK, HCTS3_N_A_MARK,
};
-/* - HSCIF3_A ----------------------------------------------------------------- */
-static const unsigned int hscif3_data_a_pins[] = {
- /* HRX3_A, HTX3_A */
+static const unsigned int hscif3_data_b_pins[] = {
+ /* HRX3_B, HTX3_B */
RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
};
-static const unsigned int hscif3_data_a_mux[] = {
- HRX3_A_MARK, HTX3_A_MARK,
+static const unsigned int hscif3_data_b_mux[] = {
+ HRX3_B_MARK, HTX3_B_MARK,
};
-static const unsigned int hscif3_clk_a_pins[] = {
- /* HSCK3_A */
+static const unsigned int hscif3_clk_b_pins[] = {
+ /* HSCK3_B */
RCAR_GP_PIN(1, 3),
};
-static const unsigned int hscif3_clk_a_mux[] = {
- HSCK3_A_MARK,
+static const unsigned int hscif3_clk_b_mux[] = {
+ HSCK3_B_MARK,
};
-static const unsigned int hscif3_ctrl_a_pins[] = {
- /* HRTS3_N_A, HCTS3_N_A */
+static const unsigned int hscif3_ctrl_b_pins[] = {
+ /* HRTS3_N_B, HCTS3_N_B */
RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
};
-static const unsigned int hscif3_ctrl_a_mux[] = {
- HRTS3_N_A_MARK, HCTS3_N_A_MARK,
+static const unsigned int hscif3_ctrl_b_mux[] = {
+ HRTS3_N_B_MARK, HCTS3_N_B_MARK,
};
/* - I2C0 ------------------------------------------------------------------- */
@@ -1767,6 +1764,90 @@ static const unsigned int i2c5_mux[] = {
SDA5_MARK, SCL5_MARK,
};
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_a_pins[] = {
+ /* IRQ0_A */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int intc_ex_irq0_a_mux[] = {
+ IRQ0_A_MARK,
+};
+static const unsigned int intc_ex_irq0_b_pins[] = {
+ /* IRQ0_B */
+ RCAR_GP_PIN(1, 20),
+};
+static const unsigned int intc_ex_irq0_b_mux[] = {
+ IRQ0_B_MARK,
+};
+
+static const unsigned int intc_ex_irq1_a_pins[] = {
+ /* IRQ1_A */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int intc_ex_irq1_a_mux[] = {
+ IRQ1_A_MARK,
+};
+static const unsigned int intc_ex_irq1_b_pins[] = {
+ /* IRQ1_B */
+ RCAR_GP_PIN(1, 21),
+};
+static const unsigned int intc_ex_irq1_b_mux[] = {
+ IRQ1_B_MARK,
+};
+
+static const unsigned int intc_ex_irq2_a_pins[] = {
+ /* IRQ2_A */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int intc_ex_irq2_a_mux[] = {
+ IRQ2_A_MARK,
+};
+static const unsigned int intc_ex_irq2_b_pins[] = {
+ /* IRQ2_B */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int intc_ex_irq2_b_mux[] = {
+ IRQ2_B_MARK,
+};
+
+static const unsigned int intc_ex_irq3_a_pins[] = {
+ /* IRQ3_A */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int intc_ex_irq3_a_mux[] = {
+ IRQ3_A_MARK,
+};
+static const unsigned int intc_ex_irq3_b_pins[] = {
+ /* IRQ3_B */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int intc_ex_irq3_b_mux[] = {
+ IRQ3_B_MARK,
+};
+
+static const unsigned int intc_ex_irq4_a_pins[] = {
+ /* IRQ4_A */
+ RCAR_GP_PIN(1, 17),
+};
+static const unsigned int intc_ex_irq4_a_mux[] = {
+ IRQ4_A_MARK,
+};
+static const unsigned int intc_ex_irq4_b_pins[] = {
+ /* IRQ4_B */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq4_b_mux[] = {
+ IRQ4_B_MARK,
+};
+
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
/* - MMC -------------------------------------------------------------------- */
static const unsigned int mmc_data_pins[] = {
/* MMC_SD_D[0:3], MMC_D[4:7] */
@@ -2093,16 +2174,16 @@ static const unsigned int pcie1_clkreq_n_mux[] = {
PCIE1_CLKREQ_N_MARK,
};
-/* - PWM0_A ------------------------------------------------------------------- */
-static const unsigned int pwm0_a_pins[] = {
- /* PWM0_A */
+/* - PWM0 ------------------------------------------------------------------- */
+static const unsigned int pwm0_pins[] = {
+ /* PWM0 */
RCAR_GP_PIN(1, 15),
};
-static const unsigned int pwm0_a_mux[] = {
- PWM0_A_MARK,
+static const unsigned int pwm0_mux[] = {
+ PWM0_MARK,
};
-/* - PWM1_A ------------------------------------------------------------------- */
+/* - PWM1 ------------------------------------------------------------------- */
static const unsigned int pwm1_a_pins[] = {
/* PWM1_A */
RCAR_GP_PIN(3, 13),
@@ -2111,7 +2192,6 @@ static const unsigned int pwm1_a_mux[] = {
PWM1_A_MARK,
};
-/* - PWM1_B ------------------------------------------------------------------- */
static const unsigned int pwm1_b_pins[] = {
/* PWM1_B */
RCAR_GP_PIN(2, 13),
@@ -2120,16 +2200,16 @@ static const unsigned int pwm1_b_mux[] = {
PWM1_B_MARK,
};
-/* - PWM2_B ------------------------------------------------------------------- */
-static const unsigned int pwm2_b_pins[] = {
- /* PWM2_B */
+/* - PWM2 ------------------------------------------------------------------- */
+static const unsigned int pwm2_pins[] = {
+ /* PWM2 */
RCAR_GP_PIN(2, 14),
};
-static const unsigned int pwm2_b_mux[] = {
- PWM2_B_MARK,
+static const unsigned int pwm2_mux[] = {
+ PWM2_MARK,
};
-/* - PWM3_A ------------------------------------------------------------------- */
+/* - PWM3 ------------------------------------------------------------------- */
static const unsigned int pwm3_a_pins[] = {
/* PWM3_A */
RCAR_GP_PIN(1, 22),
@@ -2138,7 +2218,6 @@ static const unsigned int pwm3_a_mux[] = {
PWM3_A_MARK,
};
-/* - PWM3_B ------------------------------------------------------------------- */
static const unsigned int pwm3_b_pins[] = {
/* PWM3_B */
RCAR_GP_PIN(2, 15),
@@ -2183,22 +2262,22 @@ static const unsigned int pwm7_mux[] = {
PWM7_MARK,
};
-/* - PWM8_A ------------------------------------------------------------------- */
-static const unsigned int pwm8_a_pins[] = {
- /* PWM8_A */
+/* - PWM8 ------------------------------------------------------------------- */
+static const unsigned int pwm8_pins[] = {
+ /* PWM8 */
RCAR_GP_PIN(1, 13),
};
-static const unsigned int pwm8_a_mux[] = {
- PWM8_A_MARK,
+static const unsigned int pwm8_mux[] = {
+ PWM8_MARK,
};
-/* - PWM9_A ------------------------------------------------------------------- */
-static const unsigned int pwm9_a_pins[] = {
- /* PWM9_A */
+/* - PWM9 ------------------------------------------------------------------- */
+static const unsigned int pwm9_pins[] = {
+ /* PWM9 */
RCAR_GP_PIN(1, 14),
};
-static const unsigned int pwm9_a_mux[] = {
- PWM9_A_MARK,
+static const unsigned int pwm9_mux[] = {
+ PWM9_MARK,
};
/* - QSPI0 ------------------------------------------------------------------ */
@@ -2261,75 +2340,51 @@ static const unsigned int scif0_ctrl_mux[] = {
};
/* - SCIF1 ------------------------------------------------------------------ */
-static const unsigned int scif1_data_pins[] = {
- /* RX1, TX1 */
+static const unsigned int scif1_data_a_pins[] = {
+ /* RX1_A, TX1_A */
RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
};
-static const unsigned int scif1_data_mux[] = {
- RX1_MARK, TX1_MARK,
+static const unsigned int scif1_data_a_mux[] = {
+ RX1_A_MARK, TX1_A_MARK,
};
-static const unsigned int scif1_clk_pins[] = {
- /* SCK1 */
+static const unsigned int scif1_clk_a_pins[] = {
+ /* SCK1_A */
RCAR_GP_PIN(0, 18),
};
-static const unsigned int scif1_clk_mux[] = {
- SCK1_MARK,
+static const unsigned int scif1_clk_a_mux[] = {
+ SCK1_A_MARK,
};
-static const unsigned int scif1_ctrl_pins[] = {
- /* RTS1_N, CTS1_N */
+static const unsigned int scif1_ctrl_a_pins[] = {
+ /* RTS1_N_A, CTS1_N_A */
RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16),
};
-static const unsigned int scif1_ctrl_mux[] = {
- RTS1_N_MARK, CTS1_N_MARK,
+static const unsigned int scif1_ctrl_a_mux[] = {
+ RTS1_N_A_MARK, CTS1_N_A_MARK,
};
-/* - SCIF1_X ------------------------------------------------------------------ */
-static const unsigned int scif1_data_x_pins[] = {
- /* RX1_X, TX1_X */
+static const unsigned int scif1_data_b_pins[] = {
+ /* RX1_B, TX1_B */
RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
};
-static const unsigned int scif1_data_x_mux[] = {
- RX1_X_MARK, TX1_X_MARK,
+static const unsigned int scif1_data_b_mux[] = {
+ RX1_B_MARK, TX1_B_MARK,
};
-static const unsigned int scif1_clk_x_pins[] = {
- /* SCK1_X */
+static const unsigned int scif1_clk_b_pins[] = {
+ /* SCK1_B */
RCAR_GP_PIN(1, 10),
};
-static const unsigned int scif1_clk_x_mux[] = {
- SCK1_X_MARK,
+static const unsigned int scif1_clk_b_mux[] = {
+ SCK1_B_MARK,
};
-static const unsigned int scif1_ctrl_x_pins[] = {
- /* RTS1_N_X, CTS1_N_X */
+static const unsigned int scif1_ctrl_b_pins[] = {
+ /* RTS1_N_B, CTS1_N_B */
RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8),
};
-static const unsigned int scif1_ctrl_x_mux[] = {
- RTS1_N_X_MARK, CTS1_N_X_MARK,
+static const unsigned int scif1_ctrl_b_mux[] = {
+ RTS1_N_B_MARK, CTS1_N_B_MARK,
};
/* - SCIF3 ------------------------------------------------------------------ */
-static const unsigned int scif3_data_pins[] = {
- /* RX3, TX3 */
- RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
-};
-static const unsigned int scif3_data_mux[] = {
- RX3_MARK, TX3_MARK,
-};
-static const unsigned int scif3_clk_pins[] = {
- /* SCK3 */
- RCAR_GP_PIN(1, 4),
-};
-static const unsigned int scif3_clk_mux[] = {
- SCK3_MARK,
-};
-static const unsigned int scif3_ctrl_pins[] = {
- /* RTS3_N, CTS3_N */
- RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
-};
-static const unsigned int scif3_ctrl_mux[] = {
- RTS3_N_MARK, CTS3_N_MARK,
-};
-
-/* - SCIF3_A ------------------------------------------------------------------ */
static const unsigned int scif3_data_a_pins[] = {
/* RX3_A, TX3_A */
RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
@@ -2352,6 +2407,28 @@ static const unsigned int scif3_ctrl_a_mux[] = {
RTS3_N_A_MARK, CTS3_N_A_MARK,
};
+static const unsigned int scif3_data_b_pins[] = {
+ /* RX3_B, TX3_B */
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int scif3_data_b_mux[] = {
+ RX3_B_MARK, TX3_B_MARK,
+};
+static const unsigned int scif3_clk_b_pins[] = {
+ /* SCK3_B */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int scif3_clk_b_mux[] = {
+ SCK3_B_MARK,
+};
+static const unsigned int scif3_ctrl_b_pins[] = {
+ /* RTS3_N_B, CTS3_N_B */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif3_ctrl_b_mux[] = {
+ RTS3_N_B_MARK, CTS3_N_B_MARK,
+};
+
/* - SCIF4 ------------------------------------------------------------------ */
static const unsigned int scif4_data_pins[] = {
/* RX4, TX4 */
@@ -2408,64 +2485,63 @@ static const unsigned int ssi_ctrl_mux[] = {
SSI_SCK_MARK, SSI_WS_MARK,
};
-/* - TPU ------------------------------------------------------------------- */
-static const unsigned int tpu_to0_pins[] = {
- /* TPU0TO0 */
+/* - TPU -------------------------------------------------------------------- */
+static const unsigned int tpu_to0_a_pins[] = {
+ /* TPU0TO0_A */
RCAR_GP_PIN(2, 8),
};
-static const unsigned int tpu_to0_mux[] = {
- TPU0TO0_MARK,
+static const unsigned int tpu_to0_a_mux[] = {
+ TPU0TO0_A_MARK,
};
-static const unsigned int tpu_to1_pins[] = {
- /* TPU0TO1 */
+static const unsigned int tpu_to1_a_pins[] = {
+ /* TPU0TO1_A */
RCAR_GP_PIN(2, 7),
};
-static const unsigned int tpu_to1_mux[] = {
- TPU0TO1_MARK,
+static const unsigned int tpu_to1_a_mux[] = {
+ TPU0TO1_A_MARK,
};
-static const unsigned int tpu_to2_pins[] = {
- /* TPU0TO2 */
+static const unsigned int tpu_to2_a_pins[] = {
+ /* TPU0TO2_A */
RCAR_GP_PIN(2, 12),
};
-static const unsigned int tpu_to2_mux[] = {
- TPU0TO2_MARK,
+static const unsigned int tpu_to2_a_mux[] = {
+ TPU0TO2_A_MARK,
};
-static const unsigned int tpu_to3_pins[] = {
- /* TPU0TO3 */
+static const unsigned int tpu_to3_a_pins[] = {
+ /* TPU0TO3_A */
RCAR_GP_PIN(2, 13),
};
-static const unsigned int tpu_to3_mux[] = {
- TPU0TO3_MARK,
+static const unsigned int tpu_to3_a_mux[] = {
+ TPU0TO3_A_MARK,
};
-/* - TPU_A ------------------------------------------------------------------- */
-static const unsigned int tpu_to0_a_pins[] = {
- /* TPU0TO0_A */
+static const unsigned int tpu_to0_b_pins[] = {
+ /* TPU0TO0_B */
RCAR_GP_PIN(1, 25),
};
-static const unsigned int tpu_to0_a_mux[] = {
- TPU0TO0_A_MARK,
+static const unsigned int tpu_to0_b_mux[] = {
+ TPU0TO0_B_MARK,
};
-static const unsigned int tpu_to1_a_pins[] = {
- /* TPU0TO1_A */
+static const unsigned int tpu_to1_b_pins[] = {
+ /* TPU0TO1_B */
RCAR_GP_PIN(1, 26),
};
-static const unsigned int tpu_to1_a_mux[] = {
- TPU0TO1_A_MARK,
+static const unsigned int tpu_to1_b_mux[] = {
+ TPU0TO1_B_MARK,
};
-static const unsigned int tpu_to2_a_pins[] = {
- /* TPU0TO2_A */
+static const unsigned int tpu_to2_b_pins[] = {
+ /* TPU0TO2_B */
RCAR_GP_PIN(2, 0),
};
-static const unsigned int tpu_to2_a_mux[] = {
- TPU0TO2_A_MARK,
+static const unsigned int tpu_to2_b_mux[] = {
+ TPU0TO2_B_MARK,
};
-static const unsigned int tpu_to3_a_pins[] = {
- /* TPU0TO3_A */
+static const unsigned int tpu_to3_b_pins[] = {
+ /* TPU0TO3_B */
RCAR_GP_PIN(2, 1),
};
-static const unsigned int tpu_to3_a_mux[] = {
- TPU0TO3_A_MARK,
+static const unsigned int tpu_to3_b_mux[] = {
+ TPU0TO3_B_MARK,
};
/* - TSN0 ------------------------------------------------ */
@@ -2578,8 +2654,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(canfd2_data),
SH_PFC_PIN_GROUP(canfd3_data),
SH_PFC_PIN_GROUP(canfd4_data),
- SH_PFC_PIN_GROUP(canfd5_data), /* suffix might be updated */
- SH_PFC_PIN_GROUP(canfd5_data_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(canfd5_data_a),
+ SH_PFC_PIN_GROUP(canfd5_data_b),
SH_PFC_PIN_GROUP(canfd6_data),
SH_PFC_PIN_GROUP(canfd7_data),
SH_PFC_PIN_GROUP(can_clk),
@@ -2587,21 +2663,21 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(hscif0_data),
SH_PFC_PIN_GROUP(hscif0_clk),
SH_PFC_PIN_GROUP(hscif0_ctrl),
- SH_PFC_PIN_GROUP(hscif1_data), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif1_clk), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif1_ctrl), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif1_data_x), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif1_clk_x), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif1_ctrl_x), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
SH_PFC_PIN_GROUP(hscif2_data),
SH_PFC_PIN_GROUP(hscif2_clk),
SH_PFC_PIN_GROUP(hscif2_ctrl),
- SH_PFC_PIN_GROUP(hscif3_data), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif3_clk), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif3_ctrl), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif3_data_a), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif3_clk_a), /* suffix might be updated */
- SH_PFC_PIN_GROUP(hscif3_ctrl_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk_a),
+ SH_PFC_PIN_GROUP(hscif3_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_clk_b),
+ SH_PFC_PIN_GROUP(hscif3_ctrl_b),
SH_PFC_PIN_GROUP(i2c0),
SH_PFC_PIN_GROUP(i2c1),
@@ -2610,6 +2686,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c4),
SH_PFC_PIN_GROUP(i2c5),
+ SH_PFC_PIN_GROUP(intc_ex_irq0_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq0_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq1_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq1_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq2_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq2_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq3_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq3_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq4_a),
+ SH_PFC_PIN_GROUP(intc_ex_irq4_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
+
BUS_DATA_PIN_GROUP(mmc_data, 1),
BUS_DATA_PIN_GROUP(mmc_data, 4),
BUS_DATA_PIN_GROUP(mmc_data, 8),
@@ -2663,18 +2751,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(pcie0_clkreq_n),
SH_PFC_PIN_GROUP(pcie1_clkreq_n),
- SH_PFC_PIN_GROUP(pwm0_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm0),
SH_PFC_PIN_GROUP(pwm1_a),
SH_PFC_PIN_GROUP(pwm1_b),
- SH_PFC_PIN_GROUP(pwm2_b), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm2),
SH_PFC_PIN_GROUP(pwm3_a),
SH_PFC_PIN_GROUP(pwm3_b),
SH_PFC_PIN_GROUP(pwm4),
SH_PFC_PIN_GROUP(pwm5),
SH_PFC_PIN_GROUP(pwm6),
SH_PFC_PIN_GROUP(pwm7),
- SH_PFC_PIN_GROUP(pwm8_a), /* suffix might be updated */
- SH_PFC_PIN_GROUP(pwm9_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(pwm8),
+ SH_PFC_PIN_GROUP(pwm9),
SH_PFC_PIN_GROUP(qspi0_ctrl),
BUS_DATA_PIN_GROUP(qspi0_data, 2),
@@ -2686,18 +2774,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
- SH_PFC_PIN_GROUP(scif1_data), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif1_clk), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif1_ctrl), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif1_data_x), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif1_clk_x), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif1_ctrl_x), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif3_data), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif3_clk), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif3_ctrl), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif3_data_a), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif3_clk_a), /* suffix might be updated */
- SH_PFC_PIN_GROUP(scif3_ctrl_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk_a),
+ SH_PFC_PIN_GROUP(scif1_ctrl_a),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_clk_b),
+ SH_PFC_PIN_GROUP(scif1_ctrl_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk_a),
+ SH_PFC_PIN_GROUP(scif3_ctrl_a),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_clk_b),
+ SH_PFC_PIN_GROUP(scif3_ctrl_b),
SH_PFC_PIN_GROUP(scif4_data),
SH_PFC_PIN_GROUP(scif4_clk),
SH_PFC_PIN_GROUP(scif4_ctrl),
@@ -2707,14 +2795,14 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(ssi_data),
SH_PFC_PIN_GROUP(ssi_ctrl),
- SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */
- SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */
- SH_PFC_PIN_GROUP(tpu_to1), /* suffix might be updated */
- SH_PFC_PIN_GROUP(tpu_to1_a), /* suffix might be updated */
- SH_PFC_PIN_GROUP(tpu_to2), /* suffix might be updated */
- SH_PFC_PIN_GROUP(tpu_to2_a), /* suffix might be updated */
- SH_PFC_PIN_GROUP(tpu_to3), /* suffix might be updated */
- SH_PFC_PIN_GROUP(tpu_to3_a), /* suffix might be updated */
+ SH_PFC_PIN_GROUP(tpu_to0_a),
+ SH_PFC_PIN_GROUP(tpu_to0_b),
+ SH_PFC_PIN_GROUP(tpu_to1_a),
+ SH_PFC_PIN_GROUP(tpu_to1_b),
+ SH_PFC_PIN_GROUP(tpu_to2_a),
+ SH_PFC_PIN_GROUP(tpu_to2_b),
+ SH_PFC_PIN_GROUP(tpu_to3_a),
+ SH_PFC_PIN_GROUP(tpu_to3_b),
SH_PFC_PIN_GROUP(tsn0_link),
SH_PFC_PIN_GROUP(tsn0_phy_int),
@@ -2788,8 +2876,7 @@ static const char * const canfd4_groups[] = {
};
static const char * const canfd5_groups[] = {
- /* suffix might be updated */
- "canfd5_data",
+ "canfd5_data_a",
"canfd5_data_b",
};
@@ -2812,13 +2899,12 @@ static const char * const hscif0_groups[] = {
};
static const char * const hscif1_groups[] = {
- /* suffix might be updated */
- "hscif1_data",
- "hscif1_clk",
- "hscif1_ctrl",
- "hscif1_data_x",
- "hscif1_clk_x",
- "hscif1_ctrl_x",
+ "hscif1_data_a",
+ "hscif1_clk_a",
+ "hscif1_ctrl_a",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
};
static const char * const hscif2_groups[] = {
@@ -2828,13 +2914,12 @@ static const char * const hscif2_groups[] = {
};
static const char * const hscif3_groups[] = {
- /* suffix might be updated */
- "hscif3_data",
- "hscif3_clk",
- "hscif3_ctrl",
"hscif3_data_a",
"hscif3_clk_a",
"hscif3_ctrl_a",
+ "hscif3_data_b",
+ "hscif3_clk_b",
+ "hscif3_ctrl_b",
};
static const char * const i2c0_groups[] = {
@@ -2861,6 +2946,20 @@ static const char * const i2c5_groups[] = {
"i2c5",
};
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0_a",
+ "intc_ex_irq0_b",
+ "intc_ex_irq1_a",
+ "intc_ex_irq1_b",
+ "intc_ex_irq2_a",
+ "intc_ex_irq2_b",
+ "intc_ex_irq3_a",
+ "intc_ex_irq3_b",
+ "intc_ex_irq4_a",
+ "intc_ex_irq4_b",
+ "intc_ex_irq5",
+};
+
static const char * const mmc_groups[] = {
"mmc_data1",
"mmc_data4",
@@ -2931,8 +3030,7 @@ static const char * const pcie_groups[] = {
};
static const char * const pwm0_groups[] = {
- /* suffix might be updated */
- "pwm0_a",
+ "pwm0",
};
static const char * const pwm1_groups[] = {
@@ -2941,8 +3039,7 @@ static const char * const pwm1_groups[] = {
};
static const char * const pwm2_groups[] = {
- /* suffix might be updated */
- "pwm2_b",
+ "pwm2",
};
static const char * const pwm3_groups[] = {
@@ -2967,13 +3064,11 @@ static const char * const pwm7_groups[] = {
};
static const char * const pwm8_groups[] = {
- /* suffix might be updated */
- "pwm8_a",
+ "pwm8",
};
static const char * const pwm9_groups[] = {
- /* suffix might be updated */
- "pwm9_a",
+ "pwm9",
};
static const char * const qspi0_groups[] = {
@@ -2995,23 +3090,21 @@ static const char * const scif0_groups[] = {
};
static const char * const scif1_groups[] = {
- /* suffix might be updated */
- "scif1_data",
- "scif1_clk",
- "scif1_ctrl",
- "scif1_data_x",
- "scif1_clk_x",
- "scif1_ctrl_x",
+ "scif1_data_a",
+ "scif1_clk_a",
+ "scif1_ctrl_a",
+ "scif1_data_b",
+ "scif1_clk_b",
+ "scif1_ctrl_b",
};
static const char * const scif3_groups[] = {
- /* suffix might be updated */
- "scif3_data",
- "scif3_clk",
- "scif3_ctrl",
"scif3_data_a",
"scif3_clk_a",
"scif3_ctrl_a",
+ "scif3_data_b",
+ "scif3_clk_b",
+ "scif3_ctrl_b",
};
static const char * const scif4_groups[] = {
@@ -3034,15 +3127,14 @@ static const char * const ssi_groups[] = {
};
static const char * const tpu_groups[] = {
- /* suffix might be updated */
- "tpu_to0",
"tpu_to0_a",
- "tpu_to1",
+ "tpu_to0_b",
"tpu_to1_a",
- "tpu_to2",
+ "tpu_to1_b",
"tpu_to2_a",
- "tpu_to3",
+ "tpu_to2_b",
"tpu_to3_a",
+ "tpu_to3_b",
};
static const char * const tsn0_groups[] = {
@@ -3085,6 +3177,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c4),
SH_PFC_FUNCTION(i2c5),
+ SH_PFC_FUNCTION(intc_ex),
+
SH_PFC_FUNCTION(mmc),
SH_PFC_FUNCTION(msiof0),
diff --git a/drivers/pinctrl/renesas/pfc-r8a779h0.c b/drivers/pinctrl/renesas/pfc-r8a779h0.c
index 438d1f2739dd..48b1eef250d9 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779h0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779h0.c
@@ -1236,6 +1236,30 @@ static const unsigned int avb0_mdio_pins[] = {
static const unsigned int avb0_mdio_mux[] = {
AVB0_MDC_MARK, AVB0_MDIO_MARK,
};
+static const unsigned int avb0_mii_pins[] = {
+ /*
+ * AVB0_MII_TD0, AVB0_MII_TD1, AVB0_MII_TD2,
+ * AVB0_MII_TD3, AVB0_MII_RD0, AVB0_MII_RD1,
+ * AVB0_MII_RD2, AVB0_MII_RD3, AVB0_MII_TXC,
+ * AVB0_MII_TX_EN, AVB0_MII_TX_ER, AVB0_MII_RXC,
+ * AVB0_MII_RX_DV, AVB0_MII_RX_ER, AVB0_MII_CRS,
+ * AVB0_MII_COL
+ */
+ RCAR_GP_PIN(7, 11), RCAR_GP_PIN(7, 7), RCAR_GP_PIN(7, 6),
+ RCAR_GP_PIN(7, 3), RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17),
+ RCAR_GP_PIN(7, 12), RCAR_GP_PIN(7, 8), RCAR_GP_PIN(7, 15),
+ RCAR_GP_PIN(7, 16), RCAR_GP_PIN(7, 4), RCAR_GP_PIN(7, 19),
+ RCAR_GP_PIN(7, 20), RCAR_GP_PIN(7, 2), RCAR_GP_PIN(7, 1),
+ RCAR_GP_PIN(7, 0),
+};
+static const unsigned int avb0_mii_mux[] = {
+ AVB0_MII_TD0_MARK, AVB0_MII_TD1_MARK, AVB0_MII_TD2_MARK,
+ AVB0_MII_TD3_MARK, AVB0_MII_RD0_MARK, AVB0_MII_RD1_MARK,
+ AVB0_MII_RD2_MARK, AVB0_MII_RD3_MARK, AVB0_MII_TXC_MARK,
+ AVB0_MII_TX_EN_MARK, AVB0_MII_TX_ER_MARK, AVB0_MII_RXC_MARK,
+ AVB0_MII_RX_DV_MARK, AVB0_MII_RX_ER_MARK, AVB0_MII_CRS_MARK,
+ AVB0_MII_COL_MARK,
+};
static const unsigned int avb0_rgmii_pins[] = {
/*
* AVB0_TX_CTL, AVB0_TXC, AVB0_TD0, AVB0_TD1, AVB0_TD2, AVB0_TD3,
@@ -1314,6 +1338,30 @@ static const unsigned int avb1_mdio_pins[] = {
static const unsigned int avb1_mdio_mux[] = {
AVB1_MDC_MARK, AVB1_MDIO_MARK,
};
+static const unsigned int avb1_mii_pins[] = {
+ /*
+ * AVB1_MII_TD0, AVB1_MII_TD1, AVB1_MII_TD2,
+ * AVB1_MII_TD3, AVB1_MII_RD0, AVB1_MII_RD1,
+ * AVB1_MII_RD2, AVB1_MII_RD3, AVB1_MII_TXC,
+ * AVB1_MII_TX_EN, AVB1_MII_TX_ER, AVB1_MII_RXC,
+ * AVB1_MII_RX_DV, AVB1_MII_RX_ER, AVB1_MII_CRS,
+ * AVB1_MII_COL
+ */
+ RCAR_GP_PIN(6, 13), RCAR_GP_PIN(6, 12), RCAR_GP_PIN(6, 16),
+ RCAR_GP_PIN(6, 18), RCAR_GP_PIN(6, 15), RCAR_GP_PIN(6, 14),
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 19), RCAR_GP_PIN(6, 6),
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 8),
+ RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 11),
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int avb1_mii_mux[] = {
+ AVB1_MII_TD0_MARK, AVB1_MII_TD1_MARK, AVB1_MII_TD2_MARK,
+ AVB1_MII_TD3_MARK, AVB1_MII_RD0_MARK, AVB1_MII_RD1_MARK,
+ AVB1_MII_RD2_MARK, AVB1_MII_RD3_MARK, AVB1_MII_TXC_MARK,
+ AVB1_MII_TX_EN_MARK, AVB1_MII_TX_ER_MARK, AVB1_MII_RXC_MARK,
+ AVB1_MII_RX_DV_MARK, AVB1_MII_RX_ER_MARK, AVB1_MII_CRS_MARK,
+ AVB1_MII_COL_MARK,
+};
static const unsigned int avb1_rgmii_pins[] = {
/*
* AVB1_TX_CTL, AVB1_TXC, AVB1_TD0, AVB1_TD1, AVB1_TD2, AVB1_TD3,
@@ -1509,7 +1557,7 @@ static const unsigned int hscif0_ctrl_mux[] = {
HRTS0_N_MARK, HCTS0_N_MARK,
};
-/* - HSCIF1_A ----------------------------------------------------------------- */
+/* - HSCIF1 ------------------------------------------------------------------- */
static const unsigned int hscif1_data_a_pins[] = {
/* HRX1_A, HTX1_A */
RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
@@ -1532,7 +1580,6 @@ static const unsigned int hscif1_ctrl_a_mux[] = {
HRTS1_N_A_MARK, HCTS1_N_A_MARK,
};
-/* - HSCIF1_B ---------------------------------------------------------------- */
static const unsigned int hscif1_data_b_pins[] = {
/* HRX1_B, HTX1_B */
RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
@@ -1578,7 +1625,7 @@ static const unsigned int hscif2_ctrl_mux[] = {
HRTS2_N_MARK, HCTS2_N_MARK,
};
-/* - HSCIF3_A ----------------------------------------------------------------- */
+/* - HSCIF3 ------------------------------------------------------------------- */
static const unsigned int hscif3_data_a_pins[] = {
/* HRX3_A, HTX3_A */
RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28),
@@ -1601,7 +1648,6 @@ static const unsigned int hscif3_ctrl_a_mux[] = {
HRTS3_N_A_MARK, HCTS3_N_A_MARK,
};
-/* - HSCIF3_B ----------------------------------------------------------------- */
static const unsigned int hscif3_data_b_pins[] = {
/* HRX3_B, HTX3_B */
RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0),
@@ -2061,7 +2107,7 @@ static const unsigned int pcie0_clkreq_n_mux[] = {
PCIE0_CLKREQ_N_MARK,
};
-/* - PWM0_A ------------------------------------------------------------------- */
+/* - PWM0 --------------------------------------------------------------------- */
static const unsigned int pwm0_a_pins[] = {
/* PWM0_A */
RCAR_GP_PIN(1, 15),
@@ -2070,7 +2116,6 @@ static const unsigned int pwm0_a_mux[] = {
PWM0_A_MARK,
};
-/* - PWM0_B ------------------------------------------------------------------- */
static const unsigned int pwm0_b_pins[] = {
/* PWM0_B */
RCAR_GP_PIN(1, 14),
@@ -2079,7 +2124,7 @@ static const unsigned int pwm0_b_mux[] = {
PWM0_B_MARK,
};
-/* - PWM1_A ------------------------------------------------------------------- */
+/* - PWM1 --------------------------------------------------------------------- */
static const unsigned int pwm1_a_pins[] = {
/* PWM1_A */
RCAR_GP_PIN(3, 13),
@@ -2088,7 +2133,6 @@ static const unsigned int pwm1_a_mux[] = {
PWM1_A_MARK,
};
-/* - PWM1_B ------------------------------------------------------------------- */
static const unsigned int pwm1_b_pins[] = {
/* PWM1_B */
RCAR_GP_PIN(2, 13),
@@ -2097,7 +2141,6 @@ static const unsigned int pwm1_b_mux[] = {
PWM1_B_MARK,
};
-/* - PWM1_C ------------------------------------------------------------------- */
static const unsigned int pwm1_c_pins[] = {
/* PWM1_C */
RCAR_GP_PIN(2, 17),
@@ -2106,7 +2149,7 @@ static const unsigned int pwm1_c_mux[] = {
PWM1_C_MARK,
};
-/* - PWM2_A ------------------------------------------------------------------- */
+/* - PWM2 --------------------------------------------------------------------- */
static const unsigned int pwm2_a_pins[] = {
/* PWM2_A */
RCAR_GP_PIN(3, 14),
@@ -2115,7 +2158,6 @@ static const unsigned int pwm2_a_mux[] = {
PWM2_A_MARK,
};
-/* - PWM2_B ------------------------------------------------------------------- */
static const unsigned int pwm2_b_pins[] = {
/* PWM2_B */
RCAR_GP_PIN(2, 14),
@@ -2124,7 +2166,6 @@ static const unsigned int pwm2_b_mux[] = {
PWM2_B_MARK,
};
-/* - PWM2_C ------------------------------------------------------------------- */
static const unsigned int pwm2_c_pins[] = {
/* PWM2_C */
RCAR_GP_PIN(2, 19),
@@ -2133,7 +2174,7 @@ static const unsigned int pwm2_c_mux[] = {
PWM2_C_MARK,
};
-/* - PWM3_A ------------------------------------------------------------------- */
+/* - PWM3 --------------------------------------------------------------------- */
static const unsigned int pwm3_a_pins[] = {
/* PWM3_A */
RCAR_GP_PIN(4, 14),
@@ -2142,7 +2183,6 @@ static const unsigned int pwm3_a_mux[] = {
PWM3_A_MARK,
};
-/* - PWM3_B ------------------------------------------------------------------- */
static const unsigned int pwm3_b_pins[] = {
/* PWM3_B */
RCAR_GP_PIN(2, 15),
@@ -2151,7 +2191,6 @@ static const unsigned int pwm3_b_mux[] = {
PWM3_B_MARK,
};
-/* - PWM3_C ------------------------------------------------------------------- */
static const unsigned int pwm3_c_pins[] = {
/* PWM3_C */
RCAR_GP_PIN(1, 22),
@@ -2228,7 +2267,7 @@ static const unsigned int scif0_ctrl_mux[] = {
RTS0_N_MARK, CTS0_N_MARK,
};
-/* - SCIF1_A ------------------------------------------------------------------ */
+/* - SCIF1 -------------------------------------------------------------------- */
static const unsigned int scif1_data_a_pins[] = {
/* RX1_A, TX1_A */
RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
@@ -2251,7 +2290,6 @@ static const unsigned int scif1_ctrl_a_mux[] = {
RTS1_N_A_MARK, CTS1_N_A_MARK,
};
-/* - SCIF1_B ------------------------------------------------------------------ */
static const unsigned int scif1_data_b_pins[] = {
/* RX1_B, TX1_B */
RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
@@ -2274,7 +2312,7 @@ static const unsigned int scif1_ctrl_b_mux[] = {
RTS1_N_B_MARK, CTS1_N_B_MARK,
};
-/* - SCIF3_A ------------------------------------------------------------------ */
+/* - SCIF3 -------------------------------------------------------------------- */
static const unsigned int scif3_data_a_pins[] = {
/* RX3_A, TX3_A */
RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28),
@@ -2297,7 +2335,6 @@ static const unsigned int scif3_ctrl_a_mux[] = {
RTS3_N_A_MARK, CTS3_N_A_MARK,
};
-/* - SCIF3_B ------------------------------------------------------------------ */
static const unsigned int scif3_data_b_pins[] = {
/* RX3_B, TX3_B */
RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
@@ -2376,7 +2413,7 @@ static const unsigned int ssi_ctrl_mux[] = {
SSI_SCK_MARK, SSI_WS_MARK,
};
-/* - TPU_A ------------------------------------------------------------------- */
+/* - TPU --------------------------------------------------------------------- */
static const unsigned int tpu_to0_a_pins[] = {
/* TPU0TO0_A */
RCAR_GP_PIN(2, 8),
@@ -2406,7 +2443,6 @@ static const unsigned int tpu_to3_a_mux[] = {
TPU0TO3_A_MARK,
};
-/* - TPU_B ------------------------------------------------------------------- */
static const unsigned int tpu_to0_b_pins[] = {
/* TPU0TO0_B */
RCAR_GP_PIN(1, 25),
@@ -2444,6 +2480,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb0_magic),
SH_PFC_PIN_GROUP(avb0_phy_int),
SH_PFC_PIN_GROUP(avb0_mdio),
+ SH_PFC_PIN_GROUP(avb0_mii),
SH_PFC_PIN_GROUP(avb0_rgmii),
SH_PFC_PIN_GROUP(avb0_txcrefclk),
SH_PFC_PIN_GROUP(avb0_avtp_pps),
@@ -2454,6 +2491,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb1_magic),
SH_PFC_PIN_GROUP(avb1_phy_int),
SH_PFC_PIN_GROUP(avb1_mdio),
+ SH_PFC_PIN_GROUP(avb1_mii),
SH_PFC_PIN_GROUP(avb1_rgmii),
SH_PFC_PIN_GROUP(avb1_txcrefclk),
SH_PFC_PIN_GROUP(avb1_avtp_pps),
@@ -2628,6 +2666,7 @@ static const char * const avb0_groups[] = {
"avb0_magic",
"avb0_phy_int",
"avb0_mdio",
+ "avb0_mii",
"avb0_rgmii",
"avb0_txcrefclk",
"avb0_avtp_pps",
@@ -2640,6 +2679,7 @@ static const char * const avb1_groups[] = {
"avb1_magic",
"avb1_phy_int",
"avb1_mdio",
+ "avb1_mii",
"avb1_rgmii",
"avb1_txcrefclk",
"avb1_avtp_pps",
diff --git a/drivers/pinctrl/renesas/pfc-sh73a0.c b/drivers/pinctrl/renesas/pfc-sh73a0.c
index ca5adf2095be..41587233aa44 100644
--- a/drivers/pinctrl/renesas/pfc-sh73a0.c
+++ b/drivers/pinctrl/renesas/pfc-sh73a0.c
@@ -4024,7 +4024,7 @@ static const struct pinmux_irq pinmux_irqs[] = {
static void sh73a0_vccq_mc0_endisable(struct regulator_dev *reg, bool enable)
{
- struct sh_pfc *pfc = reg->reg_data;
+ struct sh_pfc *pfc = rdev_get_drvdata(reg);
void __iomem *addr = pfc->windows[1].virt + 4;
unsigned long flags;
u32 value;
@@ -4057,7 +4057,7 @@ static int sh73a0_vccq_mc0_disable(struct regulator_dev *reg)
static int sh73a0_vccq_mc0_is_enabled(struct regulator_dev *reg)
{
- struct sh_pfc *pfc = reg->reg_data;
+ struct sh_pfc *pfc = rdev_get_drvdata(reg);
void __iomem *addr = pfc->windows[1].virt + 4;
unsigned long flags;
u32 value;
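
The sh73a0 hunks above stop dereferencing struct regulator_dev internals (reg->reg_data) and use the rdev_get_drvdata() accessor instead. A minimal sketch of that pattern, assuming the private data is handed to the regulator core via struct regulator_config.driver_data at registration time; every name other than the regulator API itself is hypothetical and only illustrates the accessor:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/driver.h>

/* Hypothetical driver state standing in for struct sh_pfc. */
struct my_state {
	bool vccq_enabled;
};

/* Would be wired up through desc->ops->is_enabled in a real driver. */
static int my_vccq_is_enabled(struct regulator_dev *rdev)
{
	/* Same pattern as the hunk above: fetch the registered driver data. */
	struct my_state *st = rdev_get_drvdata(rdev);

	return st->vccq_enabled;
}

static int my_register_vccq(struct device *dev, struct my_state *st,
			    const struct regulator_desc *desc)
{
	struct regulator_config config = {
		.dev = dev,
		.driver_data = st,	/* returned later by rdev_get_drvdata() */
	};
	struct regulator_dev *rdev;

	rdev = devm_regulator_register(dev, desc, &config);
	return PTR_ERR_OR_ZERO(rdev);
}
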
diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index edcbe7c9ad56..6527872813dc 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -852,7 +852,6 @@ static const struct gpio_chip rza1_gpiochip_template = {
*/
static int rza1_dt_node_pin_count(struct device_node *np)
{
- struct device_node *child;
struct property *of_pins;
unsigned int npins;
@@ -861,12 +860,10 @@ static int rza1_dt_node_pin_count(struct device_node *np)
return of_pins->length / sizeof(u32);
npins = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
of_pins = of_find_property(child, "pinmux", NULL);
- if (!of_pins) {
- of_node_put(child);
+ if (!of_pins)
return -EINVAL;
- }
npins += of_pins->length / sizeof(u32);
}
@@ -986,7 +983,6 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev,
struct rza1_pinctrl *rza1_pctl = pinctrl_dev_get_drvdata(pctldev);
struct rza1_mux_conf *mux_confs, *mux_conf;
unsigned int *grpins, *grpin;
- struct device_node *child;
const char *grpname;
const char **fngrps;
int ret, npins;
@@ -1023,13 +1019,11 @@ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = rza1_parse_pinmux_node(rza1_pctl, np, mux_conf, grpin);
if (ret == -ENOENT)
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = rza1_parse_pinmux_node(rza1_pctl, child, mux_conf,
grpin);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
grpin += ret;
mux_conf += ret;
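
The rza1 hunks above convert the child-node walks to for_each_child_of_node_scoped(), which owns the child reference and drops it on every exit from the loop body, so the manual of_node_put() calls on the error paths can go. A minimal sketch of the pattern, using a hypothetical helper that only counts "pinmux" entries:

#include <linux/of.h>

/*
 * Count "pinmux" cells across the children of @np.  The scoped iterator
 * declares the child pointer itself and puts the reference on every exit
 * path, including the early return below.
 */
static int count_child_pinmux_entries(struct device_node *np)
{
	int npins = 0;

	for_each_child_of_node_scoped(np, child) {
		struct property *prop = of_find_property(child, "pinmux", NULL);

		if (!prop)
			return -EINVAL;	/* no of_node_put() needed here */

		npins += prop->length / sizeof(u32);
	}

	return npins;
}
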
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 60be78da9f52..632180570b70 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -57,8 +57,14 @@
#define PIN_CFG_IOLH_C BIT(13)
#define PIN_CFG_SOFT_PS BIT(14)
#define PIN_CFG_OEN BIT(15)
-#define PIN_CFG_VARIABLE BIT(16)
-#define PIN_CFG_NOGPIO_INT BIT(17)
+#define PIN_CFG_NOGPIO_INT BIT(16)
+#define PIN_CFG_NOD BIT(17) /* N-ch Open Drain */
+#define PIN_CFG_SMT BIT(18) /* Schmitt-trigger input control */
+#define PIN_CFG_ELC BIT(19)
+#define PIN_CFG_IOLH_RZV2H BIT(20)
+
+#define RZG2L_SINGLE_PIN BIT_ULL(63) /* Dedicated pin */
+#define RZG2L_VARIABLE_CFG BIT_ULL(62) /* Variable cfg for port pins */
#define RZG2L_MPXED_COMMON_PIN_FUNCS(group) \
(PIN_CFG_IOLH_##group | \
@@ -73,14 +79,19 @@
#define RZG3S_MPXED_PIN_FUNCS(group) (RZG2L_MPXED_COMMON_PIN_FUNCS(group) | \
PIN_CFG_SOFT_PS)
+#define RZV2H_MPXED_PIN_FUNCS (RZG2L_MPXED_COMMON_PIN_FUNCS(RZV2H) | \
+ PIN_CFG_NOD | \
+ PIN_CFG_SR | \
+ PIN_CFG_SMT)
+
#define RZG2L_MPXED_ETH_PIN_FUNCS(x) ((x) | \
PIN_CFG_FILONOFF | \
PIN_CFG_FILNUM | \
PIN_CFG_FILCLKSEL)
-#define PIN_CFG_PIN_MAP_MASK GENMASK_ULL(35, 28)
-#define PIN_CFG_PIN_REG_MASK GENMASK(27, 20)
-#define PIN_CFG_MASK GENMASK(19, 0)
+#define PIN_CFG_PIN_MAP_MASK GENMASK_ULL(61, 54)
+#define PIN_CFG_PIN_REG_MASK GENMASK_ULL(53, 46)
+#define PIN_CFG_MASK GENMASK_ULL(31, 0)
/*
* m indicates the bitmap of supported pins, a is the register index
@@ -89,22 +100,25 @@
#define RZG2L_GPIO_PORT_SPARSE_PACK(m, a, f) (FIELD_PREP_CONST(PIN_CFG_PIN_MAP_MASK, (m)) | \
FIELD_PREP_CONST(PIN_CFG_PIN_REG_MASK, (a)) | \
FIELD_PREP_CONST(PIN_CFG_MASK, (f)))
+#define RZG2L_GPIO_PORT_SPARSE_PACK_VARIABLE(m, a) \
+ (RZG2L_VARIABLE_CFG | \
+ RZG2L_GPIO_PORT_SPARSE_PACK(m, a, 0))
/*
* n indicates number of pins in the port, a is the register index
* and f is pin configuration capabilities supported.
*/
#define RZG2L_GPIO_PORT_PACK(n, a, f) RZG2L_GPIO_PORT_SPARSE_PACK((1ULL << (n)) - 1, (a), (f))
+#define RZG2L_GPIO_PORT_PACK_VARIABLE(n, a) (RZG2L_VARIABLE_CFG | \
+ RZG2L_GPIO_PORT_PACK(n, a, 0))
+#define RZG2L_SINGLE_PIN_INDEX_MASK GENMASK_ULL(62, 56)
+#define RZG2L_SINGLE_PIN_BITS_MASK GENMASK_ULL(55, 53)
/*
- * BIT(63) indicates dedicated pin, p is the register index while
- * referencing to SR/IEN/IOLH/FILxx registers, b is the register bits
- * (b * 8) and f is the pin configuration capabilities supported.
+ * p is the register index when referencing the SR/IEN/IOLH/FILxx
+ * registers, b is the register bits (b * 8) and f is the pin
+ * configuration capabilities supported.
*/
-#define RZG2L_SINGLE_PIN BIT_ULL(63)
-#define RZG2L_SINGLE_PIN_INDEX_MASK GENMASK(30, 24)
-#define RZG2L_SINGLE_PIN_BITS_MASK GENMASK(22, 20)
-
#define RZG2L_SINGLE_PIN_PACK(p, b, f) (RZG2L_SINGLE_PIN | \
FIELD_PREP_CONST(RZG2L_SINGLE_PIN_INDEX_MASK, (p)) | \
FIELD_PREP_CONST(RZG2L_SINGLE_PIN_BITS_MASK, (b)) | \
@@ -114,18 +128,28 @@
FIELD_GET(RZG2L_SINGLE_PIN_INDEX_MASK, (cfg)) : \
FIELD_GET(PIN_CFG_PIN_REG_MASK, (cfg)))
+#define VARIABLE_PIN_CFG_PIN_MASK GENMASK_ULL(54, 52)
+#define VARIABLE_PIN_CFG_PORT_MASK GENMASK_ULL(51, 47)
+#define RZG2L_VARIABLE_PIN_CFG_PACK(port, pin, cfg) \
+ (FIELD_PREP_CONST(VARIABLE_PIN_CFG_PIN_MASK, (pin)) | \
+ FIELD_PREP_CONST(VARIABLE_PIN_CFG_PORT_MASK, (port)) | \
+ FIELD_PREP_CONST(PIN_CFG_MASK, (cfg)))
+
#define P(off) (0x0000 + (off))
#define PM(off) (0x0100 + (off) * 2)
#define PMC(off) (0x0200 + (off))
#define PFC(off) (0x0400 + (off) * 4)
#define PIN(off) (0x0800 + (off))
#define IOLH(off) (0x1000 + (off) * 8)
+#define SR(off) (0x1400 + (off) * 8)
#define IEN(off) (0x1800 + (off) * 8)
+#define PUPD(off) (0x1C00 + (off) * 8)
#define ISEL(off) (0x2C00 + (off) * 8)
#define SD_CH(off, ch) ((off) + (ch) * 4)
#define ETH_POC(off, ch) ((off) + (ch) * 4)
#define QSPI (0x3008)
#define ETH_MODE (0x3018)
+#define PFC_OEN (0x3C40) /* known on RZ/V2H(P) only */
#define PVDD_2500 2 /* I/O domain voltage 2.5V */
#define PVDD_1800 1 /* I/O domain voltage <= 1.8V */
@@ -133,11 +157,15 @@
#define PWPR_B0WI BIT(7) /* Bit Write Disable */
#define PWPR_PFCWE BIT(6) /* PFC Register Write Enable */
+#define PWPR_REGWE_A BIT(6) /* PFC and PMC Register Write Enable on RZ/V2H(P) */
+#define PWPR_REGWE_B BIT(5) /* OEN Register Write Enable, known only in RZ/V2H(P) */
#define PM_MASK 0x03
#define PFC_MASK 0x07
#define IEN_MASK 0x01
#define IOLH_MASK 0x03
+#define SR_MASK 0x01
+#define PUPD_MASK 0x03
#define PM_INPUT 0x1
#define PM_OUTPUT 0x2
@@ -149,6 +177,19 @@
#define RZG2L_TINT_IRQ_START_INDEX 9
#define RZG2L_PACK_HWIRQ(t, i) (((t) << 16) | (i))
+/* Custom pinconf parameters */
+#define RENESAS_RZV2H_PIN_CONFIG_OUTPUT_IMPEDANCE (PIN_CONFIG_END + 1)
+
+static const struct pinconf_generic_params renesas_rzv2h_custom_bindings[] = {
+ { "renesas,output-impedance", RENESAS_RZV2H_PIN_CONFIG_OUTPUT_IMPEDANCE, 1 },
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item renesas_rzv2h_conf_items[] = {
+ PCONFDUMP(RENESAS_RZV2H_PIN_CONFIG_OUTPUT_IMPEDANCE, "output-impedance", "x", true),
+};
+#endif
+
/* Read/write 8 bits register */
#define RZG2L_PCTRL_REG_ACCESS8(_read, _addr, _val) \
do { \
@@ -234,17 +275,7 @@ struct rzg2l_dedicated_configs {
u64 config;
};
-/**
- * struct rzg2l_variable_pin_cfg - pin data cfg
- * @cfg: port pin configuration
- * @port: port number
- * @pin: port pin
- */
-struct rzg2l_variable_pin_cfg {
- u32 cfg:20;
- u32 port:5;
- u32 pin:3;
-};
+struct rzg2l_pinctrl;
struct rzg2l_pinctrl_data {
const char * const *port_pins;
@@ -254,8 +285,19 @@ struct rzg2l_pinctrl_data {
unsigned int n_port_pins;
unsigned int n_dedicated_pins;
const struct rzg2l_hwcfg *hwcfg;
- const struct rzg2l_variable_pin_cfg *variable_pin_cfg;
+ const u64 *variable_pin_cfg;
unsigned int n_variable_pin_cfg;
+ unsigned int num_custom_params;
+ const struct pinconf_generic_params *custom_params;
+#ifdef CONFIG_DEBUG_FS
+ const struct pin_config_item *custom_conf_items;
+#endif
+ void (*pwpr_pfc_lock_unlock)(struct rzg2l_pinctrl *pctrl, bool lock);
+ void (*pmc_writeb)(struct rzg2l_pinctrl *pctrl, u8 val, u16 offset);
+ u32 (*oen_read)(struct rzg2l_pinctrl *pctrl, unsigned int _pin);
+ int (*oen_write)(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen);
+ int (*hw_to_bias_param)(unsigned int val);
+ int (*bias_param_to_hw)(enum pin_config_param param);
};
/**
@@ -322,7 +364,6 @@ struct rzg2l_pinctrl {
static const u16 available_ps[] = { 1800, 2500, 3300 };
-#ifdef CONFIG_RISCV
static u64 rzg2l_pinctrl_get_variable_pin_cfg(struct rzg2l_pinctrl *pctrl,
u64 pincfg,
unsigned int port,
@@ -331,138 +372,89 @@ static u64 rzg2l_pinctrl_get_variable_pin_cfg(struct rzg2l_pinctrl *pctrl,
unsigned int i;
for (i = 0; i < pctrl->data->n_variable_pin_cfg; i++) {
- if (pctrl->data->variable_pin_cfg[i].port == port &&
- pctrl->data->variable_pin_cfg[i].pin == pin)
- return (pincfg & ~PIN_CFG_VARIABLE) | pctrl->data->variable_pin_cfg[i].cfg;
+ u64 cfg = pctrl->data->variable_pin_cfg[i];
+
+ if (FIELD_GET(VARIABLE_PIN_CFG_PORT_MASK, cfg) == port &&
+ FIELD_GET(VARIABLE_PIN_CFG_PIN_MASK, cfg) == pin)
+ return (pincfg & ~RZG2L_VARIABLE_CFG) | FIELD_GET(PIN_CFG_MASK, cfg);
}
return 0;
}
-static const struct rzg2l_variable_pin_cfg r9a07g043f_variable_pin_cfg[] = {
- {
- .port = 20,
- .pin = 0,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 20,
- .pin = 1,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 20,
- .pin = 2,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 20,
- .pin = 3,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 20,
- .pin = 4,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 20,
- .pin = 5,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 20,
- .pin = 6,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 20,
- .pin = 7,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_IEN | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 23,
- .pin = 1,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_NOGPIO_INT
- },
- {
- .port = 23,
- .pin = 2,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 23,
- .pin = 3,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 23,
- .pin = 4,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 23,
- .pin = 5,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 24,
- .pin = 0,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 24,
- .pin = 1,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 24,
- .pin = 2,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 24,
- .pin = 3,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 24,
- .pin = 4,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_NOGPIO_INT,
- },
- {
- .port = 24,
- .pin = 5,
- .cfg = PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
- PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
- PIN_CFG_NOGPIO_INT,
- },
+static const u64 r9a09g057_variable_pin_cfg[] = {
+ RZG2L_VARIABLE_PIN_CFG_PACK(11, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(11, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(11, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(11, 3, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(11, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(11, 5, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
+};
+
+#ifdef CONFIG_RISCV
+static const u64 r9a07g043f_variable_pin_cfg[] = {
+ RZG2L_VARIABLE_PIN_CFG_PACK(20, 0, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(20, 1, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(20, 2, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(20, 3, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(20, 4, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(20, 5, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(20, 6, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(20, 7, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_IEN | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(23, 1, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(23, 2, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(23, 3, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(23, 4, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(23, 5, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(24, 0, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(24, 1, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(24, 2, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(24, 3, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(24, 4, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_NOGPIO_INT),
+ RZG2L_VARIABLE_PIN_CFG_PACK(24, 5, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
+ PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
+ PIN_CFG_NOGPIO_INT),
};
#endif
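/*
 * Illustrative sketch only: a plausible shape for the packing helper used
 * in the two tables above.  The real RZG2L_VARIABLE_PIN_CFG_PACK() and the
 * VARIABLE_PIN_CFG_*_MASK / PIN_CFG_MASK definitions live earlier in this
 * driver; the EXAMPLE_ prefix marks this macro as an assumption, not the
 * patch's actual definition.
 */
#define EXAMPLE_VARIABLE_PIN_CFG_PACK(port, pin, cfg)			\
	(FIELD_PREP_CONST(VARIABLE_PIN_CFG_PORT_MASK, (port)) |	\
	 FIELD_PREP_CONST(VARIABLE_PIN_CFG_PIN_MASK, (pin)) |		\
	 FIELD_PREP_CONST(PIN_CFG_MASK, (cfg)))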
+static void rzg2l_pmc_writeb(struct rzg2l_pinctrl *pctrl, u8 val, u16 offset)
+{
+ writeb(val, pctrl->base + offset);
+}
+
+static void rzv2h_pmc_writeb(struct rzg2l_pinctrl *pctrl, u8 val, u16 offset)
+{
+ const struct rzg2l_register_offsets *regs = &pctrl->data->hwcfg->regs;
+ u8 pwpr;
+
+ pwpr = readb(pctrl->base + regs->pwpr);
+ writeb(pwpr | PWPR_REGWE_A, pctrl->base + regs->pwpr);
+ writeb(val, pctrl->base + offset);
+ writeb(pwpr & ~PWPR_REGWE_A, pctrl->base + regs->pwpr);
+}
+
static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
u8 pin, u8 off, u8 func)
{
- const struct rzg2l_register_offsets *regs = &pctrl->data->hwcfg->regs;
unsigned long flags;
u32 reg;
@@ -473,27 +465,23 @@ static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
reg &= ~(PM_MASK << (pin * 2));
writew(reg, pctrl->base + PM(off));
+ pctrl->data->pwpr_pfc_lock_unlock(pctrl, false);
+
/* Temporarily switch to GPIO mode with PMC register */
reg = readb(pctrl->base + PMC(off));
writeb(reg & ~BIT(pin), pctrl->base + PMC(off));
- /* Set the PWPR register to allow PFC register to write */
- writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
- writel(PWPR_PFCWE, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=1 */
-
/* Select Pin function mode with PFC register */
reg = readl(pctrl->base + PFC(off));
reg &= ~(PFC_MASK << (pin * 4));
writel(reg | (func << (pin * 4)), pctrl->base + PFC(off));
- /* Set the PWPR register to be write-protected */
- writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
- writel(PWPR_B0WI, pctrl->base + regs->pwpr); /* B0WI=1, PFCWE=0 */
-
/* Switch to Peripheral pin function with PMC register */
reg = readb(pctrl->base + PMC(off));
writeb(reg | BIT(pin), pctrl->base + PMC(off));
+ pctrl->data->pwpr_pfc_lock_unlock(pctrl, true);
+
spin_unlock_irqrestore(&pctrl->lock, flags);
};
@@ -599,7 +587,7 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
return -EINVAL;
}
- ret = pinconf_generic_parse_dt_config(np, NULL, &configs, &num_configs);
+ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs, &num_configs);
if (ret < 0)
return ret;
@@ -745,7 +733,6 @@ static int rzg2l_dt_node_to_map(struct pinctrl_dev *pctldev,
unsigned int *num_maps)
{
struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- struct device_node *child;
unsigned int index;
int ret;
@@ -753,13 +740,11 @@ static int rzg2l_dt_node_to_map(struct pinctrl_dev *pctldev,
*num_maps = 0;
index = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = rzg2l_dt_subnode_to_map(pctldev, child, np, map,
num_maps, &index);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
goto done;
- }
}
if (*num_maps == 0) {
@@ -1014,53 +999,100 @@ static bool rzg2l_ds_is_supported(struct rzg2l_pinctrl *pctrl, u32 caps,
return false;
}
-static bool rzg2l_oen_is_supported(u32 caps, u8 pin, u8 max_pin)
+static int rzg2l_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
{
- if (!(caps & PIN_CFG_OEN))
- return false;
+ u64 *pin_data = pctrl->desc.pins[_pin].drv_data;
+ u64 caps = FIELD_GET(PIN_CFG_MASK, *pin_data);
+ u8 pin = RZG2L_PIN_ID_TO_PIN(_pin);
- if (pin > max_pin)
- return false;
+ if (pin > pctrl->data->hwcfg->oen_max_pin)
+ return -EINVAL;
+
+ /*
+ * We can determine which Ethernet interface we're dealing with from
+ * the caps.
+ */
+ if (caps & PIN_CFG_IO_VMC_ETH0)
+ return 0;
+ if (caps & PIN_CFG_IO_VMC_ETH1)
+ return 1;
- return true;
+ return -EINVAL;
}
-static u8 rzg2l_pin_to_oen_bit(u32 offset, u8 pin, u8 max_port)
+static u32 rzg2l_read_oen(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
{
- if (pin)
- pin *= 2;
+ int bit;
- if (offset / RZG2L_PINS_PER_PORT == max_port)
- pin += 1;
+ bit = rzg2l_pin_to_oen_bit(pctrl, _pin);
+ if (bit < 0)
+ return 0;
- return pin;
+ return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
}
-static u32 rzg2l_read_oen(struct rzg2l_pinctrl *pctrl, u32 caps, u32 offset, u8 pin)
+static int rzg2l_write_oen(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen)
{
- u8 max_port = pctrl->data->hwcfg->oen_max_port;
- u8 max_pin = pctrl->data->hwcfg->oen_max_pin;
- u8 bit;
+ unsigned long flags;
+ int bit;
+ u8 val;
- if (!rzg2l_oen_is_supported(caps, pin, max_pin))
- return 0;
+ bit = rzg2l_pin_to_oen_bit(pctrl, _pin);
+ if (bit < 0)
+ return bit;
- bit = rzg2l_pin_to_oen_bit(offset, pin, max_port);
+ spin_lock_irqsave(&pctrl->lock, flags);
+ val = readb(pctrl->base + ETH_MODE);
+ if (oen)
+ val &= ~BIT(bit);
+ else
+ val |= BIT(bit);
+ writeb(val, pctrl->base + ETH_MODE);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
- return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
+ return 0;
}
-static int rzg2l_write_oen(struct rzg2l_pinctrl *pctrl, u32 caps, u32 offset, u8 pin, u8 oen)
+static int rzg3s_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
{
- u8 max_port = pctrl->data->hwcfg->oen_max_port;
- u8 max_pin = pctrl->data->hwcfg->oen_max_pin;
- unsigned long flags;
- u8 val, bit;
+ u64 *pin_data = pctrl->desc.pins[_pin].drv_data;
+ u8 port, pin, bit;
+
+ if (*pin_data & RZG2L_SINGLE_PIN)
+ return -EINVAL;
- if (!rzg2l_oen_is_supported(caps, pin, max_pin))
+ port = RZG2L_PIN_ID_TO_PORT(_pin);
+ pin = RZG2L_PIN_ID_TO_PIN(_pin);
+ if (pin > pctrl->data->hwcfg->oen_max_pin)
return -EINVAL;
- bit = rzg2l_pin_to_oen_bit(offset, pin, max_port);
+ bit = pin * 2;
+ if (port == pctrl->data->hwcfg->oen_max_port)
+ bit += 1;
+
+ return bit;
+}
+
+static u32 rzg3s_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+{
+ int bit;
+
+ bit = rzg3s_pin_to_oen_bit(pctrl, _pin);
+ if (bit < 0)
+ return bit;
+
+ return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
+}
+
+static int rzg3s_oen_write(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen)
+{
+ unsigned long flags;
+ int bit;
+ u8 val;
+
+ bit = rzg3s_pin_to_oen_bit(pctrl, _pin);
+ if (bit < 0)
+ return bit;
spin_lock_irqsave(&pctrl->lock, flags);
val = readb(pctrl->base + ETH_MODE);
@@ -1074,17 +1106,134 @@ static int rzg2l_write_oen(struct rzg2l_pinctrl *pctrl, u32 caps, u32 offset, u8
return 0;
}
+static int rzg2l_hw_to_bias_param(unsigned int bias)
+{
+ switch (bias) {
+ case 0:
+ return PIN_CONFIG_BIAS_DISABLE;
+ case 1:
+ return PIN_CONFIG_BIAS_PULL_UP;
+ case 2:
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int rzg2l_bias_param_to_hw(enum pin_config_param param)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ return 0;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ return 1;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return 2;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int rzv2h_hw_to_bias_param(unsigned int bias)
+{
+ switch (bias) {
+ case 0:
+ case 1:
+ return PIN_CONFIG_BIAS_DISABLE;
+ case 2:
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+ case 3:
+ return PIN_CONFIG_BIAS_PULL_UP;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int rzv2h_bias_param_to_hw(enum pin_config_param param)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ return 0;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return 2;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ return 3;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
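/*
 * Illustrative sketch only: how the per-SoC bias hooks above are meant to
 * be driven by the generic pinconf paths further below.  The helper name
 * and the 'off'/'bit' parameters standing in for values derived from the
 * pin data are hypothetical; PUPD(), PUPD_MASK and rzg2l_rmw_pin_config()
 * are the driver's existing helpers.
 */
static int example_apply_pull_up(struct rzg2l_pinctrl *pctrl, u32 off, u8 bit)
{
	int hw = pctrl->data->bias_param_to_hw(PIN_CONFIG_BIAS_PULL_UP);

	if (hw < 0)
		return hw;	/* this SoC cannot express the requested bias */

	rzg2l_rmw_pin_config(pctrl, PUPD(off), bit, PUPD_MASK, hw);
	return 0;
}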
+
+static u8 rzv2h_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+{
+ static const char * const pin_names[] = { "ET0_TXC_TXCLK", "ET1_TXC_TXCLK",
+ "XSPI0_RESET0N", "XSPI0_CS0N",
+ "XSPI0_CKN", "XSPI0_CKP" };
+ const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[_pin];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pin_names); i++) {
+ if (!strcmp(pin_desc->name, pin_names[i]))
+ return i;
+ }
+
+ /* Should not happen. */
+ return 0;
+}
+
+static u32 rzv2h_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+{
+ u8 bit;
+
+ bit = rzv2h_pin_to_oen_bit(pctrl, _pin);
+
+ return !(readb(pctrl->base + PFC_OEN) & BIT(bit));
+}
+
+static int rzv2h_oen_write(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen)
+{
+ const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
+ const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+ unsigned long flags;
+ u8 val, bit;
+ u8 pwpr;
+
+ bit = rzv2h_pin_to_oen_bit(pctrl, _pin);
+ spin_lock_irqsave(&pctrl->lock, flags);
+ val = readb(pctrl->base + PFC_OEN);
+ if (oen)
+ val &= ~BIT(bit);
+ else
+ val |= BIT(bit);
+
+ pwpr = readb(pctrl->base + regs->pwpr);
+ writeb(pwpr | PWPR_REGWE_B, pctrl->base + regs->pwpr);
+ writeb(val, pctrl->base + PFC_OEN);
+ writeb(pwpr & ~PWPR_REGWE_B, pctrl->base + regs->pwpr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int _pin,
unsigned long *config)
{
struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- enum pin_config_param param = pinconf_to_config_param(*config);
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
const struct pinctrl_pin_desc *pin = &pctrl->desc.pins[_pin];
+ u32 param = pinconf_to_config_param(*config);
u64 *pin_data = pin->drv_data;
unsigned int arg = 0;
- u32 off, cfg;
+ u32 off;
+ u32 cfg;
int ret;
u8 bit;
@@ -1112,7 +1261,9 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_OUTPUT_ENABLE:
- arg = rzg2l_read_oen(pctrl, cfg, _pin, bit);
+ if (!pctrl->data->oen_read || !(cfg & PIN_CFG_OEN))
+ return -EOPNOTSUPP;
+ arg = pctrl->data->oen_read(pctrl, _pin);
if (!arg)
return -EINVAL;
break;
@@ -1124,6 +1275,30 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
arg = ret;
break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (!(cfg & PIN_CFG_SR))
+ return -EINVAL;
+
+ arg = rzg2l_read_pin_config(pctrl, SR(off), bit, SR_MASK);
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!(cfg & PIN_CFG_PUPD))
+ return -EINVAL;
+
+ arg = rzg2l_read_pin_config(pctrl, PUPD(off), bit, PUPD_MASK);
+ ret = pctrl->data->hw_to_bias_param(arg);
+ if (ret < 0)
+ return ret;
+
+ if (ret != param)
+ return -EINVAL;
+ /* For PIN_CONFIG_BIAS_PULL_UP/DOWN, return 1 when the bias is enabled */
+ arg = 1;
+ break;
+
case PIN_CONFIG_DRIVE_STRENGTH: {
unsigned int index;
@@ -1167,6 +1342,13 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
break;
}
+ case RENESAS_RZV2H_PIN_CONFIG_OUTPUT_IMPEDANCE:
+ if (!(cfg & PIN_CFG_IOLH_RZV2H))
+ return -EINVAL;
+
+ arg = rzg2l_read_pin_config(pctrl, IOLH(off), bit, IOLH_MASK);
+ break;
+
default:
return -ENOTSUPP;
}
@@ -1186,9 +1368,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
struct rzg2l_pinctrl_pin_settings settings = pctrl->settings[_pin];
u64 *pin_data = pin->drv_data;
- enum pin_config_param param;
unsigned int i, arg, index;
- u32 cfg, off;
+ u32 off, param;
+ u32 cfg;
int ret;
u8 bit;
@@ -1220,7 +1402,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT_ENABLE:
arg = pinconf_to_config_argument(_configs[i]);
- ret = rzg2l_write_oen(pctrl, cfg, _pin, bit, !!arg);
+ if (!pctrl->data->oen_write || !(cfg & PIN_CFG_OEN))
+ return -EOPNOTSUPP;
+ ret = pctrl->data->oen_write(pctrl, _pin, !!arg);
if (ret)
return ret;
break;
@@ -1229,6 +1413,28 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
settings.power_source = pinconf_to_config_argument(_configs[i]);
break;
+ case PIN_CONFIG_SLEW_RATE:
+ arg = pinconf_to_config_argument(_configs[i]);
+
+ if (!(cfg & PIN_CFG_SR) || arg > 1)
+ return -EINVAL;
+
+ rzg2l_rmw_pin_config(pctrl, SR(off), bit, SR_MASK, arg);
+ break;
+
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!(cfg & PIN_CFG_PUPD))
+ return -EINVAL;
+
+ ret = pctrl->data->bias_param_to_hw(param);
+ if (ret < 0)
+ return ret;
+
+ rzg2l_rmw_pin_config(pctrl, PUPD(off), bit, PUPD_MASK, ret);
+ break;
+
case PIN_CONFIG_DRIVE_STRENGTH:
arg = pinconf_to_config_argument(_configs[i]);
@@ -1270,6 +1476,16 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, index);
break;
+ case RENESAS_RZV2H_PIN_CONFIG_OUTPUT_IMPEDANCE:
+ if (!(cfg & PIN_CFG_IOLH_RZV2H))
+ return -EINVAL;
+
+ arg = pinconf_to_config_argument(_configs[i]);
+ if (arg > 3)
+ return -EINVAL;
+ rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, arg);
+ break;
+
default:
return -EOPNOTSUPP;
}
@@ -1411,7 +1627,7 @@ static int rzg2l_gpio_request(struct gpio_chip *chip, unsigned int offset)
/* Select GPIO mode in PMC Register */
reg8 = readb(pctrl->base + PMC(off));
reg8 &= ~BIT(bit);
- writeb(reg8, pctrl->base + PMC(off));
+ pctrl->data->pmc_writeb(pctrl, reg8, PMC(off));
spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -1613,7 +1829,7 @@ static const u64 r9a07g044_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(3, 0x21, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x22, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x23, RZG2L_MPXED_PIN_FUNCS),
- RZG2L_GPIO_PORT_PACK(3, 0x24, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(3, 0x24, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0) | PIN_CFG_OEN),
RZG2L_GPIO_PORT_PACK(2, 0x25, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
RZG2L_GPIO_PORT_PACK(2, 0x26, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
RZG2L_GPIO_PORT_PACK(2, 0x27, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
@@ -1622,7 +1838,7 @@ static const u64 r9a07g044_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(2, 0x2a, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
RZG2L_GPIO_PORT_PACK(2, 0x2b, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
RZG2L_GPIO_PORT_PACK(2, 0x2c, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x2d, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2d, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1) | PIN_CFG_OEN),
RZG2L_GPIO_PORT_PACK(2, 0x2e, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
RZG2L_GPIO_PORT_PACK(2, 0x2f, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
RZG2L_GPIO_PORT_PACK(2, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
@@ -1646,13 +1862,13 @@ static const u64 r9a07g044_gpio_configs[] = {
static const u64 r9a07g043_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(4, 0x10, RZG2L_MPXED_PIN_FUNCS),
- RZG2L_GPIO_PORT_PACK(5, 0x11, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(5, 0x11, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0) | PIN_CFG_OEN),
RZG2L_GPIO_PORT_PACK(4, 0x12, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
RZG2L_GPIO_PORT_PACK(4, 0x13, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
RZG2L_GPIO_PORT_PACK(6, 0x14, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
RZG2L_GPIO_PORT_PACK(5, 0x15, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(5, 0x16, RZG2L_MPXED_PIN_FUNCS),
- RZG2L_GPIO_PORT_PACK(5, 0x17, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(5, 0x17, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1) | PIN_CFG_OEN),
RZG2L_GPIO_PORT_PACK(5, 0x18, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
RZG2L_GPIO_PORT_PACK(4, 0x19, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
RZG2L_GPIO_PORT_PACK(5, 0x1a, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
@@ -1669,13 +1885,13 @@ static const u64 r9a07g043_gpio_configs[] = {
RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x06, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P19 */
- RZG2L_GPIO_PORT_PACK(8, 0x07, PIN_CFG_VARIABLE), /* P20 */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x07), /* P20 */
RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x08, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P21 */
RZG2L_GPIO_PORT_PACK(4, 0x09, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_PUPD |
PIN_CFG_IEN | PIN_CFG_NOGPIO_INT), /* P22 */
- RZG2L_GPIO_PORT_SPARSE_PACK(0x3e, 0x0a, PIN_CFG_VARIABLE), /* P23 */
- RZG2L_GPIO_PORT_PACK(6, 0x0b, PIN_CFG_VARIABLE), /* P24 */
+ RZG2L_GPIO_PORT_SPARSE_PACK_VARIABLE(0x3e, 0x0a), /* P23 */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x0b), /* P24 */
RZG2L_GPIO_PORT_SPARSE_PACK(0x2, 0x0c, PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_FILONOFF |
PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL |
PIN_CFG_NOGPIO_INT), /* P25 */
@@ -1717,6 +1933,39 @@ static const u64 r9a08g045_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(6, 0x2a, RZG3S_MPXED_PIN_FUNCS(A)), /* P18 */
};
+static const char * const rzv2h_gpio_names[] = {
+ "P00", "P01", "P02", "P03", "P04", "P05", "P06", "P07",
+ "P10", "P11", "P12", "P13", "P14", "P15", "P16", "P17",
+ "P20", "P21", "P22", "P23", "P24", "P25", "P26", "P27",
+ "P30", "P31", "P32", "P33", "P34", "P35", "P36", "P37",
+ "P40", "P41", "P42", "P43", "P44", "P45", "P46", "P47",
+ "P50", "P51", "P52", "P53", "P54", "P55", "P56", "P57",
+ "P60", "P61", "P62", "P63", "P64", "P65", "P66", "P67",
+ "P70", "P71", "P72", "P73", "P74", "P75", "P76", "P77",
+ "P80", "P81", "P82", "P83", "P84", "P85", "P86", "P87",
+ "P90", "P91", "P92", "P93", "P94", "P95", "P96", "P97",
+ "PA0", "PA1", "PA2", "PA3", "PA4", "PA5", "PA6", "PA7",
+ "PB0", "PB1", "PB2", "PB3", "PB4", "PB5", "PB6", "PB7",
+};
+
+static const u64 r9a09g057_gpio_configs[] = {
+ RZG2L_GPIO_PORT_PACK(8, 0x20, RZV2H_MPXED_PIN_FUNCS), /* P0 */
+ RZG2L_GPIO_PORT_PACK(6, 0x21, RZV2H_MPXED_PIN_FUNCS), /* P1 */
+ RZG2L_GPIO_PORT_PACK(2, 0x22, RZG2L_MPXED_COMMON_PIN_FUNCS(RZV2H) |
+ PIN_CFG_NOD), /* P2 */
+ RZG2L_GPIO_PORT_PACK(8, 0x23, RZV2H_MPXED_PIN_FUNCS), /* P3 */
+ RZG2L_GPIO_PORT_PACK(8, 0x24, RZV2H_MPXED_PIN_FUNCS), /* P4 */
+ RZG2L_GPIO_PORT_PACK(8, 0x25, RZV2H_MPXED_PIN_FUNCS), /* P5 */
+ RZG2L_GPIO_PORT_PACK(8, 0x26, RZV2H_MPXED_PIN_FUNCS |
+ PIN_CFG_ELC), /* P6 */
+ RZG2L_GPIO_PORT_PACK(8, 0x27, RZV2H_MPXED_PIN_FUNCS), /* P7 */
+ RZG2L_GPIO_PORT_PACK(8, 0x28, RZV2H_MPXED_PIN_FUNCS |
+ PIN_CFG_ELC), /* P8 */
+ RZG2L_GPIO_PORT_PACK(8, 0x29, RZV2H_MPXED_PIN_FUNCS), /* P9 */
+ RZG2L_GPIO_PORT_PACK(8, 0x2a, RZV2H_MPXED_PIN_FUNCS), /* PA */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x2b), /* PB */
+};
+
static const struct {
struct rzg2l_dedicated_configs common[35];
struct rzg2l_dedicated_configs rzg2l_pins[7];
@@ -1843,6 +2092,138 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
PIN_CFG_IO_VMC_SD1)) },
};
+static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = {
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, (PIN_CFG_FILONOFF | PIN_CFG_FILNUM |
+ PIN_CFG_FILCLKSEL)) },
+ { "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN)) },
+ { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_CKP", RZG2L_SINGLE_PIN_PACK(0x7, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_CKN", RZG2L_SINGLE_PIN_PACK(0x7, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_CS0N", RZG2L_SINGLE_PIN_PACK(0x7, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_DS", RZG2L_SINGLE_PIN_PACK(0x7, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_RESET0N", RZG2L_SINGLE_PIN_PACK(0x7, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_RSTO0N", RZG2L_SINGLE_PIN_PACK(0x7, 5, (PIN_CFG_PUPD)) },
+ { "XSPI0_INT0N", RZG2L_SINGLE_PIN_PACK(0x7, 6, (PIN_CFG_PUPD)) },
+ { "XSPI0_ECS0N", RZG2L_SINGLE_PIN_PACK(0x7, 7, (PIN_CFG_PUPD)) },
+ { "XSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0x8, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0x8, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0x8, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0x8, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO4", RZG2L_SINGLE_PIN_PACK(0x8, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO5", RZG2L_SINGLE_PIN_PACK(0x8, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO6", RZG2L_SINGLE_PIN_PACK(0x8, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO7", RZG2L_SINGLE_PIN_PACK(0x8, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1CLK", RZG2L_SINGLE_PIN_PACK(0xb, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD1CMD", RZG2L_SINGLE_PIN_PACK(0xb, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT0", RZG2L_SINGLE_PIN_PACK(0xc, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT1", RZG2L_SINGLE_PIN_PACK(0xc, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT2", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT3", RZG2L_SINGLE_PIN_PACK(0xc, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "PCIE0_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "PCIE1_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "ET0_MDIO", RZG2L_SINGLE_PIN_PACK(0xf, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "ET0_MDC", RZG2L_SINGLE_PIN_PACK(0xf, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_PUPD)) },
+ { "ET0_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXER", RZG2L_SINGLE_PIN_PACK(0x10, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_RXER", RZG2L_SINGLE_PIN_PACK(0x10, 3, (PIN_CFG_PUPD)) },
+ { "ET0_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 4, (PIN_CFG_PUPD)) },
+ { "ET0_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "ET0_CRS", RZG2L_SINGLE_PIN_PACK(0x10, 6, (PIN_CFG_PUPD)) },
+ { "ET0_COL", RZG2L_SINGLE_PIN_PACK(0x10, 7, (PIN_CFG_PUPD)) },
+ { "ET0_TXD0", RZG2L_SINGLE_PIN_PACK(0x11, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXD1", RZG2L_SINGLE_PIN_PACK(0x11, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXD2", RZG2L_SINGLE_PIN_PACK(0x11, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXD3", RZG2L_SINGLE_PIN_PACK(0x11, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_RXD0", RZG2L_SINGLE_PIN_PACK(0x11, 4, (PIN_CFG_PUPD)) },
+ { "ET0_RXD1", RZG2L_SINGLE_PIN_PACK(0x11, 5, (PIN_CFG_PUPD)) },
+ { "ET0_RXD2", RZG2L_SINGLE_PIN_PACK(0x11, 6, (PIN_CFG_PUPD)) },
+ { "ET0_RXD3", RZG2L_SINGLE_PIN_PACK(0x11, 7, (PIN_CFG_PUPD)) },
+ { "ET1_MDIO", RZG2L_SINGLE_PIN_PACK(0x12, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "ET1_MDC", RZG2L_SINGLE_PIN_PACK(0x12, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x13, 0, (PIN_CFG_PUPD)) },
+ { "ET1_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x13, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXER", RZG2L_SINGLE_PIN_PACK(0x13, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_RXER", RZG2L_SINGLE_PIN_PACK(0x13, 3, (PIN_CFG_PUPD)) },
+ { "ET1_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 4, (PIN_CFG_PUPD)) },
+ { "ET1_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "ET1_CRS", RZG2L_SINGLE_PIN_PACK(0x13, 6, (PIN_CFG_PUPD)) },
+ { "ET1_COL", RZG2L_SINGLE_PIN_PACK(0x13, 7, (PIN_CFG_PUPD)) },
+ { "ET1_TXD0", RZG2L_SINGLE_PIN_PACK(0x14, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXD1", RZG2L_SINGLE_PIN_PACK(0x14, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXD2", RZG2L_SINGLE_PIN_PACK(0x14, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXD3", RZG2L_SINGLE_PIN_PACK(0x14, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_RXD0", RZG2L_SINGLE_PIN_PACK(0x14, 4, (PIN_CFG_PUPD)) },
+ { "ET1_RXD1", RZG2L_SINGLE_PIN_PACK(0x14, 5, (PIN_CFG_PUPD)) },
+ { "ET1_RXD2", RZG2L_SINGLE_PIN_PACK(0x14, 6, (PIN_CFG_PUPD)) },
+ { "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) },
+};
+
static int rzg2l_gpio_get_gpioint(unsigned int virq, struct rzg2l_pinctrl *pctrl)
{
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[virq];
@@ -2280,6 +2661,13 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
pctrl->desc.pmxops = &rzg2l_pinctrl_pmxops;
pctrl->desc.confops = &rzg2l_pinctrl_confops;
pctrl->desc.owner = THIS_MODULE;
+ if (pctrl->data->num_custom_params) {
+ pctrl->desc.num_custom_params = pctrl->data->num_custom_params;
+ pctrl->desc.custom_params = pctrl->data->custom_params;
+#ifdef CONFIG_DEBUG_FS
+ pctrl->desc.custom_conf_items = pctrl->data->custom_conf_items;
+#endif
+ }
pins = devm_kcalloc(pctrl->dev, pctrl->desc.npins, sizeof(*pins), GFP_KERNEL);
if (!pins)
@@ -2299,13 +2687,11 @@ static int rzg2l_pinctrl_register(struct rzg2l_pinctrl *pctrl)
if (i && !(i % RZG2L_PINS_PER_PORT))
j++;
pin_data[i] = pctrl->data->port_pin_configs[j];
-#ifdef CONFIG_RISCV
- if (pin_data[i] & PIN_CFG_VARIABLE)
+ if (pin_data[i] & RZG2L_VARIABLE_CFG)
pin_data[i] = rzg2l_pinctrl_get_variable_pin_cfg(pctrl,
pin_data[i],
j,
i % RZG2L_PINS_PER_PORT);
-#endif
pins[i].drv_data = &pin_data[i];
}
@@ -2374,6 +2760,9 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
BUILD_BUG_ON(ARRAY_SIZE(r9a08g045_gpio_configs) * RZG2L_PINS_PER_PORT >
ARRAY_SIZE(rzg2l_gpio_names));
+ BUILD_BUG_ON(ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT >
+ ARRAY_SIZE(rzv2h_gpio_names));
+
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
@@ -2462,12 +2851,14 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
static void rzg2l_pinctrl_pm_setup_dedicated_regs(struct rzg2l_pinctrl *pctrl, bool suspend)
{
struct rzg2l_pinctrl_reg_cache *cache = pctrl->dedicated_cache;
+ u32 caps;
+ u32 i;
/*
* Make sure entries in pctrl->data->n_dedicated_pins[] having the same
* port offset are close together.
*/
- for (u32 i = 0, caps = 0; i < pctrl->data->n_dedicated_pins; i++) {
+ for (i = 0, caps = 0; i < pctrl->data->n_dedicated_pins; i++) {
bool has_iolh, has_ien;
u32 off, next_off = 0;
u64 cfg, next_cfg;
@@ -2519,12 +2910,10 @@ static void rzg2l_pinctrl_pm_setup_dedicated_regs(struct rzg2l_pinctrl *pctrl, b
static void rzg2l_pinctrl_pm_setup_pfc(struct rzg2l_pinctrl *pctrl)
{
u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT;
- const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
- const struct rzg2l_register_offsets *regs = &hwcfg->regs;
+ unsigned long flags;
- /* Set the PWPR register to allow PFC register to write. */
- writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
- writel(PWPR_PFCWE, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=1 */
+ spin_lock_irqsave(&pctrl->lock, flags);
+ pctrl->data->pwpr_pfc_lock_unlock(pctrl, false);
/* Restore port registers. */
for (u32 port = 0; port < nports; port++) {
@@ -2567,9 +2956,8 @@ static void rzg2l_pinctrl_pm_setup_pfc(struct rzg2l_pinctrl *pctrl)
}
}
- /* Set the PWPR register to be write-protected. */
- writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
- writel(PWPR_B0WI, pctrl->base + regs->pwpr); /* B0WI=1, PFCWE=0 */
+ pctrl->data->pwpr_pfc_lock_unlock(pctrl, true);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
}
static int rzg2l_pinctrl_suspend_noirq(struct device *dev)
@@ -2583,8 +2971,10 @@ static int rzg2l_pinctrl_suspend_noirq(struct device *dev)
rzg2l_pinctrl_pm_setup_dedicated_regs(pctrl, true);
for (u8 i = 0; i < 2; i++) {
- cache->sd_ch[i] = readb(pctrl->base + SD_CH(regs->sd_ch, i));
- cache->eth_poc[i] = readb(pctrl->base + ETH_POC(regs->eth_poc, i));
+ if (regs->sd_ch)
+ cache->sd_ch[i] = readb(pctrl->base + SD_CH(regs->sd_ch, i));
+ if (regs->eth_poc)
+ cache->eth_poc[i] = readb(pctrl->base + ETH_POC(regs->eth_poc, i));
}
cache->qspi = readb(pctrl->base + QSPI);
@@ -2615,8 +3005,10 @@ static int rzg2l_pinctrl_resume_noirq(struct device *dev)
writeb(cache->qspi, pctrl->base + QSPI);
writeb(cache->eth_mode, pctrl->base + ETH_MODE);
for (u8 i = 0; i < 2; i++) {
- writeb(cache->sd_ch[i], pctrl->base + SD_CH(regs->sd_ch, i));
- writeb(cache->eth_poc[i], pctrl->base + ETH_POC(regs->eth_poc, i));
+ if (regs->sd_ch)
+ writeb(cache->sd_ch[i], pctrl->base + SD_CH(regs->sd_ch, i));
+ if (regs->eth_poc)
+ writeb(cache->eth_poc[i], pctrl->base + ETH_POC(regs->eth_poc, i));
}
rzg2l_pinctrl_pm_setup_pfc(pctrl);
@@ -2627,6 +3019,37 @@ static int rzg2l_pinctrl_resume_noirq(struct device *dev)
return 0;
}
+static void rzg2l_pwpr_pfc_lock_unlock(struct rzg2l_pinctrl *pctrl, bool lock)
+{
+ const struct rzg2l_register_offsets *regs = &pctrl->data->hwcfg->regs;
+
+ if (lock) {
+ /* Set the PWPR register to be write-protected */
+ writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_B0WI, pctrl->base + regs->pwpr); /* B0WI=1, PFCWE=0 */
+ } else {
+ /* Set the PWPR register to allow PFC register to write */
+ writel(0x0, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=0 */
+ writel(PWPR_PFCWE, pctrl->base + regs->pwpr); /* B0WI=0, PFCWE=1 */
+ }
+}
+
+static void rzv2h_pwpr_pfc_lock_unlock(struct rzg2l_pinctrl *pctrl, bool lock)
+{
+ const struct rzg2l_register_offsets *regs = &pctrl->data->hwcfg->regs;
+ u8 pwpr;
+
+ if (lock) {
+ /* Set the PWPR register to be write-protected */
+ pwpr = readb(pctrl->base + regs->pwpr);
+ writeb(pwpr & ~PWPR_REGWE_A, pctrl->base + regs->pwpr);
+ } else {
+ /* Set the PWPR register to allow the PFC and PMC registers to be written */
+ pwpr = readb(pctrl->base + regs->pwpr);
+ writeb(PWPR_REGWE_A | pwpr, pctrl->base + regs->pwpr);
+ }
+}
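/*
 * Illustrative sketch only: the call pattern the two lock/unlock helpers
 * above are designed for.  The SoC-specific details (B0WI/PFCWE on RZ/G2L
 * vs. REGWE_A on RZ/V2H) stay behind the callback, so callers such as
 * rzg2l_pinctrl_set_pfc_mode() simply bracket their updates; the function
 * name here is hypothetical.
 */
static void example_bracketed_pfc_update(struct rzg2l_pinctrl *pctrl)
{
	pctrl->data->pwpr_pfc_lock_unlock(pctrl, false);	/* unlock */
	/* ... PMC/PFC register updates go here ... */
	pctrl->data->pwpr_pfc_lock_unlock(pctrl, true);		/* lock again */
}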
+
static const struct rzg2l_hwcfg rzg2l_hwcfg = {
.regs = {
.pwpr = 0x3014,
@@ -2638,6 +3061,7 @@ static const struct rzg2l_hwcfg rzg2l_hwcfg = {
[RZG2L_IOLH_IDX_3V3] = 2000, 4000, 8000, 12000,
},
.iolh_groupb_oi = { 100, 66, 50, 33, },
+ .oen_max_pin = 0,
};
static const struct rzg2l_hwcfg rzg3s_hwcfg = {
@@ -2672,6 +3096,12 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = {
.oen_max_port = 7, /* P7_1 is the maximum OEN port. */
};
+static const struct rzg2l_hwcfg rzv2h_hwcfg = {
+ .regs = {
+ .pwpr = 0x3c04,
+ },
+};
+
static struct rzg2l_pinctrl_data r9a07g043_data = {
.port_pins = rzg2l_gpio_names,
.port_pin_configs = r9a07g043_gpio_configs,
@@ -2684,6 +3114,12 @@ static struct rzg2l_pinctrl_data r9a07g043_data = {
.variable_pin_cfg = r9a07g043f_variable_pin_cfg,
.n_variable_pin_cfg = ARRAY_SIZE(r9a07g043f_variable_pin_cfg),
#endif
+ .pwpr_pfc_lock_unlock = &rzg2l_pwpr_pfc_lock_unlock,
+ .pmc_writeb = &rzg2l_pmc_writeb,
+ .oen_read = &rzg2l_read_oen,
+ .oen_write = &rzg2l_write_oen,
+ .hw_to_bias_param = &rzg2l_hw_to_bias_param,
+ .bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
static struct rzg2l_pinctrl_data r9a07g044_data = {
@@ -2695,6 +3131,12 @@ static struct rzg2l_pinctrl_data r9a07g044_data = {
.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common) +
ARRAY_SIZE(rzg2l_dedicated_pins.rzg2l_pins),
.hwcfg = &rzg2l_hwcfg,
+ .pwpr_pfc_lock_unlock = &rzg2l_pwpr_pfc_lock_unlock,
+ .pmc_writeb = &rzg2l_pmc_writeb,
+ .oen_read = &rzg2l_read_oen,
+ .oen_write = &rzg2l_write_oen,
+ .hw_to_bias_param = &rzg2l_hw_to_bias_param,
+ .bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
static struct rzg2l_pinctrl_data r9a08g045_data = {
@@ -2705,6 +3147,35 @@ static struct rzg2l_pinctrl_data r9a08g045_data = {
.n_port_pins = ARRAY_SIZE(r9a08g045_gpio_configs) * RZG2L_PINS_PER_PORT,
.n_dedicated_pins = ARRAY_SIZE(rzg3s_dedicated_pins),
.hwcfg = &rzg3s_hwcfg,
+ .pwpr_pfc_lock_unlock = &rzg2l_pwpr_pfc_lock_unlock,
+ .pmc_writeb = &rzg2l_pmc_writeb,
+ .oen_read = &rzg3s_oen_read,
+ .oen_write = &rzg3s_oen_write,
+ .hw_to_bias_param = &rzg2l_hw_to_bias_param,
+ .bias_param_to_hw = &rzg2l_bias_param_to_hw,
+};
+
+static struct rzg2l_pinctrl_data r9a09g057_data = {
+ .port_pins = rzv2h_gpio_names,
+ .port_pin_configs = r9a09g057_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g057_gpio_configs),
+ .dedicated_pins = rzv2h_dedicated_pins,
+ .n_port_pins = ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins),
+ .hwcfg = &rzv2h_hwcfg,
+ .variable_pin_cfg = r9a09g057_variable_pin_cfg,
+ .n_variable_pin_cfg = ARRAY_SIZE(r9a09g057_variable_pin_cfg),
+ .num_custom_params = ARRAY_SIZE(renesas_rzv2h_custom_bindings),
+ .custom_params = renesas_rzv2h_custom_bindings,
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = renesas_rzv2h_conf_items,
+#endif
+ .pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
+ .pmc_writeb = &rzv2h_pmc_writeb,
+ .oen_read = &rzv2h_oen_read,
+ .oen_write = &rzv2h_oen_write,
+ .hw_to_bias_param = &rzv2h_hw_to_bias_param,
+ .bias_param_to_hw = &rzv2h_bias_param_to_hw,
};
static const struct of_device_id rzg2l_pinctrl_of_table[] = {
@@ -2720,6 +3191,10 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = {
.compatible = "renesas,r9a08g045-pinctrl",
.data = &r9a08g045_data,
},
+ {
+ .compatible = "renesas,r9a09g057-pinctrl",
+ .data = &r9a09g057_data,
+ },
{ /* sentinel */ }
};
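/*
 * Illustrative sketch only: the semantics assumed by the
 * for_each_child_of_node_scoped() conversions in this file and the ones
 * that follow.  The macro declares 'child' itself and drops the reference
 * automatically, even on an early return, which is why the explicit
 * of_node_put() error paths disappear.  The helper below is hypothetical.
 */
static int example_count_enabled_children(struct device_node *np)
{
	int n = 0;

	for_each_child_of_node_scoped(np, child) {
		if (!of_device_is_available(child))
			return -ENODEV;	/* 'child' reference dropped automatically */
		n++;
	}

	return n;
}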
diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c
index 4b2f107824fe..39af1fe79c84 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzn1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c
@@ -404,7 +404,6 @@ static int rzn1_dt_node_to_map(struct pinctrl_dev *pctldev,
struct pinctrl_map **map,
unsigned int *num_maps)
{
- struct device_node *child;
int ret;
*map = NULL;
@@ -414,12 +413,10 @@ static int rzn1_dt_node_to_map(struct pinctrl_dev *pctldev,
if (ret < 0)
return ret;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = rzn1_dt_node_to_map_one(pctldev, child, map, num_maps);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
}
return 0;
@@ -740,13 +737,12 @@ static int rzn1_pinctrl_parse_groups(struct device_node *np,
static int rzn1_pinctrl_count_function_groups(struct device_node *np)
{
- struct device_node *child;
int count = 0;
if (of_property_count_u32_elems(np, RZN1_PINS_PROP) > 0)
count++;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (of_property_count_u32_elems(child, RZN1_PINS_PROP) > 0)
count++;
}
@@ -760,7 +756,6 @@ static int rzn1_pinctrl_parse_functions(struct device_node *np,
{
struct rzn1_pmx_func *func;
struct rzn1_pin_group *grp;
- struct device_node *child;
unsigned int i = 0;
int ret;
@@ -793,15 +788,13 @@ static int rzn1_pinctrl_parse_functions(struct device_node *np,
ipctl->ngroups++;
}
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
func->groups[i] = child->name;
grp = &ipctl->groups[ipctl->ngroups];
grp->func = func->name;
ret = rzn1_pinctrl_parse_groups(child, grp, ipctl);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
i++;
ipctl->ngroups++;
}
@@ -816,7 +809,6 @@ static int rzn1_pinctrl_probe_dt(struct platform_device *pdev,
struct rzn1_pinctrl *ipctl)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *child;
unsigned int maxgroups = 0;
unsigned int i = 0;
int nfuncs = 0;
@@ -834,7 +826,7 @@ static int rzn1_pinctrl_probe_dt(struct platform_device *pdev,
return -ENOMEM;
ipctl->ngroups = 0;
- for_each_child_of_node(np, child)
+ for_each_child_of_node_scoped(np, child)
maxgroups += rzn1_pinctrl_count_function_groups(child);
ipctl->groups = devm_kmalloc_array(&pdev->dev,
@@ -844,12 +836,10 @@ static int rzn1_pinctrl_probe_dt(struct platform_device *pdev,
if (!ipctl->groups)
return -ENOMEM;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = rzn1_pinctrl_parse_functions(child, ipctl, i++);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
return ret;
- }
}
return 0;
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index 0767a5ac23e0..0cae5472ac67 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -388,7 +388,6 @@ static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
unsigned int *num_maps)
{
struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- struct device_node *child;
unsigned int index;
int ret;
@@ -396,13 +395,11 @@ static int rzv2m_dt_node_to_map(struct pinctrl_dev *pctldev,
*num_maps = 0;
index = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = rzv2m_dt_subnode_to_map(pctldev, child, np, map,
num_maps, &index);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
goto done;
- }
}
if (*num_maps == 0) {
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index 4d9d58fc1356..03e9bdbc82b9 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -241,7 +241,6 @@ static int sh_pfc_dt_node_to_map(struct pinctrl_dev *pctldev,
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
struct device *dev = pmx->pfc->dev;
- struct device_node *child;
unsigned int index;
int ret;
@@ -249,13 +248,11 @@ static int sh_pfc_dt_node_to_map(struct pinctrl_dev *pctldev,
*num_maps = 0;
index = 0;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = sh_pfc_dt_subnode_to_map(pctldev, child, map, num_maps,
&index);
- if (ret < 0) {
- of_node_put(child);
+ if (ret < 0)
goto done;
- }
}
/* If no mapping has been found in child nodes try the config node. */
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index b8caaa5a2d4e..a8c5fe973cd4 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -151,24 +151,19 @@ static int spear_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
unsigned *num_maps)
{
struct spear_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- struct device_node *np;
struct property *prop;
const char *function, *group;
int ret, index = 0, count = 0;
/* calculate number of maps required */
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
ret = of_property_read_string(np, "st,function", &function);
- if (ret < 0) {
- of_node_put(np);
+ if (ret < 0)
return ret;
- }
ret = of_property_count_strings(np, "st,pins");
- if (ret < 0) {
- of_node_put(np);
+ if (ret < 0)
return ret;
- }
count += ret;
}
@@ -182,7 +177,7 @@ static int spear_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
if (!*map)
return -ENOMEM;
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
of_property_read_string(np, "st,function", &function);
of_property_for_each_string(np, "st,pins", prop, group) {
(*map)[index].type = PIN_MAP_TYPE_MUX_GROUP;
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index d0b6d3e655a2..c4a1d99dfed0 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -934,7 +934,6 @@ static int sprd_pinctrl_parse_dt(struct sprd_pinctrl *sprd_pctl)
{
struct sprd_pinctrl_soc_info *info = sprd_pctl->info;
struct device_node *np = sprd_pctl->dev->of_node;
- struct device_node *child, *sub_child;
struct sprd_pin_group *grp;
const char **temp;
int ret;
@@ -962,25 +961,20 @@ static int sprd_pinctrl_parse_dt(struct sprd_pinctrl *sprd_pctl)
temp = info->grp_names;
grp = info->groups;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
ret = sprd_pinctrl_parse_groups(child, sprd_pctl, grp);
- if (ret) {
- of_node_put(child);
+ if (ret)
return ret;
- }
*temp++ = grp->name;
grp++;
if (of_get_child_count(child) > 0) {
- for_each_child_of_node(child, sub_child) {
+ for_each_child_of_node_scoped(child, sub_child) {
ret = sprd_pinctrl_parse_groups(sub_child,
sprd_pctl, grp);
- if (ret) {
- of_node_put(sub_child);
- of_node_put(child);
+ if (ret)
return ret;
- }
*temp++ = grp->name;
grp++;
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
index 6df7a310c7ed..27f99183d994 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
@@ -480,7 +480,6 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
{
struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
struct device *dev = sfp->gc.parent;
- struct device_node *child;
struct pinctrl_map *map;
const char **pgnames;
const char *grpname;
@@ -492,20 +491,18 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
int npinmux = of_property_count_u32_elems(child, "pinmux");
int npins = of_property_count_u32_elems(child, "pins");
if (npinmux > 0 && npins > 0) {
dev_err(dev, "invalid pinctrl group %pOFn.%pOFn: both pinmux and pins set\n",
np, child);
- of_node_put(child);
return -EINVAL;
}
if (npinmux == 0 && npins == 0) {
dev_err(dev, "invalid pinctrl group %pOFn.%pOFn: neither pinmux nor pins set\n",
np, child);
- of_node_put(child);
return -EINVAL;
}
@@ -527,14 +524,14 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
mutex_lock(&sfp->mutex);
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
int npins;
int i;
grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn", np, child);
if (!grpname) {
ret = -ENOMEM;
- goto put_child;
+ goto free_map;
}
pgnames[ngroups++] = grpname;
@@ -543,18 +540,18 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
if (!pins) {
ret = -ENOMEM;
- goto put_child;
+ goto free_map;
}
pinmux = devm_kcalloc(dev, npins, sizeof(*pinmux), GFP_KERNEL);
if (!pinmux) {
ret = -ENOMEM;
- goto put_child;
+ goto free_map;
}
ret = of_property_read_u32_array(child, "pinmux", pinmux, npins);
if (ret)
- goto put_child;
+ goto free_map;
for (i = 0; i < npins; i++) {
unsigned int gpio = starfive_pinmux_to_gpio(pinmux[i]);
@@ -570,7 +567,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
if (!pins) {
ret = -ENOMEM;
- goto put_child;
+ goto free_map;
}
pinmux = NULL;
@@ -580,18 +577,18 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = of_property_read_u32_index(child, "pins", i, &v);
if (ret)
- goto put_child;
+ goto free_map;
pins[i] = v;
}
} else {
ret = -EINVAL;
- goto put_child;
+ goto free_map;
}
ret = pinctrl_generic_add_group(pctldev, grpname, pins, npins, pinmux);
if (ret < 0) {
dev_err(dev, "error adding group %s: %d\n", grpname, ret);
- goto put_child;
+ goto free_map;
}
ret = pinconf_generic_parse_dt_config(child, pctldev,
@@ -600,7 +597,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
if (ret) {
dev_err(dev, "error parsing pin config of group %s: %d\n",
grpname, ret);
- goto put_child;
+ goto free_map;
}
/* don't create a map if there are no pinconf settings */
@@ -623,8 +620,6 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
mutex_unlock(&sfp->mutex);
return 0;
-put_child:
- of_node_put(child);
free_map:
pinctrl_utils_free_map(pctldev, map, nmaps);
mutex_unlock(&sfp->mutex);
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
index 9609eb1ecc3d..4ce080caa233 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
@@ -150,7 +150,7 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
nmaps = 0;
ngroups = 0;
mutex_lock(&sfp->mutex);
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
int npins = of_property_count_u32_elems(child, "pinmux");
int *pins;
u32 *pinmux;
@@ -161,13 +161,13 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
"invalid pinctrl group %pOFn.%pOFn: pinmux not set\n",
np, child);
ret = -EINVAL;
- goto put_child;
+ goto free_map;
}
grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn", np, child);
if (!grpname) {
ret = -ENOMEM;
- goto put_child;
+ goto free_map;
}
pgnames[ngroups++] = grpname;
@@ -175,18 +175,18 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
if (!pins) {
ret = -ENOMEM;
- goto put_child;
+ goto free_map;
}
pinmux = devm_kcalloc(dev, npins, sizeof(*pinmux), GFP_KERNEL);
if (!pinmux) {
ret = -ENOMEM;
- goto put_child;
+ goto free_map;
}
ret = of_property_read_u32_array(child, "pinmux", pinmux, npins);
if (ret)
- goto put_child;
+ goto free_map;
for (i = 0; i < npins; i++)
pins[i] = jh7110_pinmux_pin(pinmux[i]);
@@ -200,7 +200,7 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
pins, npins, pinmux);
if (ret < 0) {
dev_err(dev, "error adding group %s: %d\n", grpname, ret);
- goto put_child;
+ goto free_map;
}
ret = pinconf_generic_parse_dt_config(child, pctldev,
@@ -209,7 +209,7 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
if (ret) {
dev_err(dev, "error parsing pin config of group %s: %d\n",
grpname, ret);
- goto put_child;
+ goto free_map;
}
/* don't create a map if there are no pinconf settings */
@@ -233,8 +233,6 @@ static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
*num_maps = nmaps;
return 0;
-put_child:
- of_node_put(child);
free_map:
pinctrl_utils_free_map(pctldev, map, nmaps);
mutex_unlock(&sfp->mutex);
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 978ccdbaf3d3..a8673739871d 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -670,7 +670,6 @@ static int stm32_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config,
struct pinctrl_map **map, unsigned *num_maps)
{
- struct device_node *np;
unsigned reserved_maps;
int ret;
@@ -678,12 +677,11 @@ static int stm32_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
*num_maps = 0;
reserved_maps = 0;
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
ret = stm32_pctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
pinctrl_utils_free_map(pctldev, *map, *num_maps);
- of_node_put(np);
return ret;
}
}
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 96ef57a7d385..49c5edeba87f 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -238,20 +238,17 @@ static int tegra_xusb_padctl_dt_node_to_map(struct pinctrl_dev *pinctrl,
{
struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
unsigned int reserved_maps = 0;
- struct device_node *np;
int err;
*num_maps = 0;
*maps = NULL;
- for_each_child_of_node(parent, np) {
+ for_each_child_of_node_scoped(parent, np) {
err = tegra_xusb_padctl_parse_subnode(padctl, np, maps,
&reserved_maps,
num_maps);
- if (err < 0) {
- of_node_put(np);
+ if (err < 0)
return err;
- }
}
return 0;
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index ccfa3870a67d..c83e5a65e680 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -188,20 +188,18 @@ static int tegra_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
unsigned *num_maps)
{
unsigned reserved_maps;
- struct device_node *np;
int ret;
reserved_maps = 0;
*map = NULL;
*num_maps = 0;
- for_each_child_of_node(np_config, np) {
+ for_each_child_of_node_scoped(np_config, np) {
ret = tegra_pinctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
pinctrl_utils_free_map(pctldev, *map,
*num_maps);
- of_node_put(np);
return ret;
}
}
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 040f2c46a868..f5e5a23d2226 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -822,53 +822,48 @@ MODULE_DEVICE_TABLE(of, ti_iodelay_of_match);
static int ti_iodelay_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = of_node_get(dev->of_node);
+ struct device_node *np __free(device_node) = of_node_get(dev->of_node);
struct resource *res;
struct ti_iodelay_device *iod;
- int ret = 0;
+ int ret;
if (!np) {
- ret = -EINVAL;
dev_err(dev, "No OF node\n");
- goto exit_out;
+ return -EINVAL;
}
iod = devm_kzalloc(dev, sizeof(*iod), GFP_KERNEL);
- if (!iod) {
- ret = -ENOMEM;
- goto exit_out;
- }
+ if (!iod)
+ return -ENOMEM;
+
iod->dev = dev;
iod->reg_data = device_get_match_data(dev);
if (!iod->reg_data) {
- ret = -EINVAL;
dev_err(dev, "No DATA match\n");
- goto exit_out;
+ return -EINVAL;
}
/* So far We can assume there is only 1 bank of registers */
iod->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
- if (IS_ERR(iod->reg_base)) {
- ret = PTR_ERR(iod->reg_base);
- goto exit_out;
- }
+ if (IS_ERR(iod->reg_base))
+ return PTR_ERR(iod->reg_base);
+
iod->phys_base = res->start;
iod->regmap = devm_regmap_init_mmio(dev, iod->reg_base,
iod->reg_data->regmap_config);
if (IS_ERR(iod->regmap)) {
dev_err(dev, "Regmap MMIO init failed.\n");
- ret = PTR_ERR(iod->regmap);
- goto exit_out;
+ return PTR_ERR(iod->regmap);
}
ret = ti_iodelay_pinconf_init_dev(iod);
if (ret)
- goto exit_out;
+ return ret;
ret = ti_iodelay_alloc_pins(dev, iod, res->start);
if (ret)
- goto exit_out;
+ return ret;
iod->desc.pctlops = &ti_iodelay_pinctrl_ops;
/* no pinmux ops - we are pinconf */
@@ -876,19 +871,15 @@ static int ti_iodelay_probe(struct platform_device *pdev)
iod->desc.name = dev_name(dev);
iod->desc.owner = THIS_MODULE;
- ret = pinctrl_register_and_init(&iod->desc, dev, iod, &iod->pctl);
+ ret = devm_pinctrl_register_and_init(dev, &iod->desc, iod, &iod->pctl);
if (ret) {
dev_err(dev, "Failed to register pinctrl\n");
- goto exit_out;
+ return ret;
}
platform_set_drvdata(pdev, iod);
return pinctrl_enable(iod->pctl);
-
-exit_out:
- of_node_put(np);
- return ret;
}
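/*
 * Illustrative sketch only: the scope-based cleanup idiom adopted above.
 * With __free(device_node) (linux/cleanup.h, wired up for OF nodes in
 * linux/of.h), of_node_put() runs automatically when 'np' leaves scope,
 * so every early return in the probe path is already covered; the helper
 * name below is hypothetical.
 */
static int example_probe_like(struct device *dev)
{
	struct device_node *np __free(device_node) = of_node_get(dev->of_node);

	if (!np)
		return -EINVAL;		/* nothing to put, np is NULL */

	/* ... use np; it is released automatically on any return ... */
	return 0;
}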
/**
@@ -899,9 +890,6 @@ static void ti_iodelay_remove(struct platform_device *pdev)
{
struct ti_iodelay_device *iod = platform_get_drvdata(pdev);
- if (iod->pctl)
- pinctrl_unregister(iod->pctl);
-
ti_iodelay_pinconf_deinit_dev(iod);
/* Expect other allocations to be freed by devm */
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index d8c5f9195f85..2ac2f31090f9 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -139,6 +139,9 @@ static int __init loongson_hwmon_init(void)
csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
LOONGSON_CSRF_TEMP;
+ if (!csr_temp_enable && !loongson_chiptemp[0])
+ return -ENODEV;
+
nr_packages = loongson_sysconf.nr_cpus /
loongson_sysconf.cores_per_package;
diff --git a/drivers/power/reset/piix4-poweroff.c b/drivers/power/reset/piix4-poweroff.c
index 7f308292d7e3..e6822c021000 100644
--- a/drivers/power/reset/piix4-poweroff.c
+++ b/drivers/power/reset/piix4-poweroff.c
@@ -106,4 +106,5 @@ static struct pci_driver piix4_poweroff_driver = {
module_pci_driver(piix4_poweroff_driver);
MODULE_AUTHOR("Paul Burton <paul.burton@mips.com>");
+MODULE_DESCRIPTION("Intel PIIX4 power-off driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index f6321a42aa53..bcfa63fb9f1e 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -167,6 +167,15 @@ config BATTERY_LEGO_EV3
help
Say Y here to enable support for the LEGO MINDSTORMS EV3 battery.
+config BATTERY_LENOVO_YOGA_C630
+ tristate "Lenovo Yoga C630 battery"
+ depends on EC_LENOVO_YOGA_C630
+ help
+ This driver enables battery support on the Lenovo Yoga C630 laptop.
+
+ To compile the driver as a module, choose M here: the module will be
+ called lenovo_yoga_c630_battery.
+
config BATTERY_PMU
tristate "Apple PMU battery"
depends on PPC32 && ADB_PMU
@@ -402,6 +411,18 @@ config BATTERY_MAX17042
Driver can be build as a module (max17042_battery).
+config BATTERY_MAX1720X
+ tristate "Maxim MAX17201/MAX17205 Fuel Gauge"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ MAX1720x is a family of fuel-gauge systems for lithium-ion (Li+)
+ batteries in handheld and portable equipment. The MAX17201 is
+ configured to operate with a single lithium cell, while the MAX17205
+ can operate with multiple cells.
+
+ Say Y to include support for the MAX17201/MAX17205 Fuel Gauges.
+
config BATTERY_MAX1721X
tristate "MAX17211/MAX17215 standalone gas-gauge"
depends on W1
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 31ca6653a564..8dcb41545317 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
obj-$(CONFIG_BATTERY_GAUGE_LTC2941) += ltc2941-battery-gauge.o
obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
obj-$(CONFIG_BATTERY_LEGO_EV3) += lego_ev3_battery.o
+obj-$(CONFIG_BATTERY_LENOVO_YOGA_C630) += lenovo_yoga_c630_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_QCOM_BATTMGR) += qcom_battmgr.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
@@ -52,6 +53,7 @@ obj-$(CONFIG_CHARGER_DA9150) += da9150-charger.o
obj-$(CONFIG_BATTERY_DA9150) += da9150-fg.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
+obj-$(CONFIG_BATTERY_MAX1720X) += max1720x_battery.o
obj-$(CONFIG_BATTERY_MAX1721X) += max1721x_battery.o
obj-$(CONFIG_BATTERY_RT5033) += rt5033_battery.o
obj-$(CONFIG_CHARGER_RT5033) += rt5033_charger.o
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 55ab7a28056e..854491ad3ecd 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -1225,8 +1225,8 @@ static bool ab8500_chargalg_time_to_restart(struct ab8500_chargalg *di)
*/
static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
{
+ const struct power_supply_maintenance_charge_table *mt;
struct power_supply_battery_info *bi = di->bm->bi;
- struct power_supply_maintenance_charge_table *mt;
int charger_status;
int ret;
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 9b34d1a60f66..93181ebfb324 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -487,14 +487,17 @@ static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
/* Only measure voltage if the charger is connected */
if (di->ac.charger_connected) {
- ret = iio_read_channel_processed(di->adc_main_charger_v, &vch);
- if (ret < 0)
- dev_err(di->dev, "%s ADC conv failed,\n", __func__);
+ /* Convert to microvolt, IIO returns millivolt */
+ ret = iio_read_channel_processed_scale(di->adc_main_charger_v,
+ &vch, 1000);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ADC conv failed\n", __func__);
+ return ret;
+ }
} else {
vch = 0;
}
- /* Convert to microvolt, IIO returns millivolt */
- return vch * 1000;
+ return vch;
}
/**
@@ -539,14 +542,17 @@ static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
/* Only measure voltage if the charger is connected */
if (di->usb.charger_connected) {
- ret = iio_read_channel_processed(di->adc_vbus_v, &vch);
- if (ret < 0)
- dev_err(di->dev, "%s ADC conv failed,\n", __func__);
+ /* Convert to microvolt, IIO returns millivolt */
+ ret = iio_read_channel_processed_scale(di->adc_vbus_v,
+ &vch, 1000);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ADC conv failed\n", __func__);
+ return ret;
+ }
} else {
vch = 0;
}
- /* Convert to microvolt, IIO returns millivolt */
- return vch * 1000;
+ return vch;
}
/**
@@ -562,14 +568,17 @@ static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
/* Only measure current if the charger is online */
if (di->usb.charger_online) {
- ret = iio_read_channel_processed(di->adc_usb_charger_c, &ich);
- if (ret < 0)
- dev_err(di->dev, "%s ADC conv failed,\n", __func__);
+ /* Return microamperes */
+ ret = iio_read_channel_processed_scale(di->adc_usb_charger_c,
+ &ich, 1000);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ADC conv failed\n", __func__);
+ return ret;
+ }
} else {
ich = 0;
}
- /* Return microamperes */
- return ich * 1000;
+ return ich;
}
/**
@@ -585,14 +594,17 @@ static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
/* Only measure current if the charger is online */
if (di->ac.charger_online) {
- ret = iio_read_channel_processed(di->adc_main_charger_c, &ich);
- if (ret < 0)
- dev_err(di->dev, "%s ADC conv failed,\n", __func__);
+ /* Return microamperes */
+ ret = iio_read_channel_processed_scale(di->adc_main_charger_c,
+ &ich, 1000);
+ if (ret < 0) {
+ dev_err(di->dev, "%s ADC conv failed\n", __func__);
+ return ret;
+ }
} else {
ich = 0;
}
- /* Return microamperes */
- return ich * 1000;
+ return ich;
}
/**
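The ab8500 hunks above replace the "read millivolts, then multiply by 1000" pattern with iio_read_channel_processed_scale(), which applies the scale factor inside the IIO core and also lets a failed conversion be returned as an error instead of being scaled into a bogus value. A minimal sketch of the new pattern, assuming "chan" is an IIO consumer channel (illustrative helper, not from the patch):

#include <linux/iio/consumer.h>

static int example_read_voltage_uv(struct iio_channel *chan)
{
	int uv, ret;

	/* Processed IIO reads report millivolts; scale by 1000 to get uV. */
	ret = iio_read_channel_processed_scale(chan, &uv, 1000);
	if (ret < 0)
		return ret;

	return uv;
}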
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 2ccaf6116c09..270874eeb934 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -149,11 +149,6 @@ struct ab8500_fg_flags {
bool batt_id_received;
};
-struct inst_curr_result_list {
- struct list_head list;
- int *result;
-};
-
/**
* struct ab8500_fg - ab8500 FG device information
* @dev: Pointer to the structure device
diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c
index 3e3a0d118ce5..dac9875d993c 100644
--- a/drivers/power/supply/adp5061.c
+++ b/drivers/power/supply/adp5061.c
@@ -727,7 +727,7 @@ static int adp5061_probe(struct i2c_client *client)
}
static const struct i2c_device_id adp5061_id[] = {
- { "adp5061", 0},
+ { "adp5061" },
{ }
};
MODULE_DEVICE_TABLE(i2c, adp5061_id);
diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c
index 1ed1d9f99fb3..54bf88262510 100644
--- a/drivers/power/supply/bd99954-charger.c
+++ b/drivers/power/supply/bd99954-charger.c
@@ -70,13 +70,6 @@
#include "bd99954-charger.h"
-struct battery_data {
- u16 precharge_current; /* Trickle-charge Current */
- u16 fc_reg_voltage; /* Fast Charging Regulation Voltage */
- u16 voltage_min;
- u16 voltage_max;
-};
-
/* Initial field values, converted to initial register values */
struct bd9995x_init_data {
u16 vsysreg_set; /* VSYS Regulation Setting */
diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index 8efceeae864c..73a7fc867b03 100644
--- a/drivers/power/supply/bq24735-charger.c
+++ b/drivers/power/supply/bq24735-charger.c
@@ -489,7 +489,7 @@ static int bq24735_charger_probe(struct i2c_client *client)
}
static const struct i2c_device_id bq24735_charger_id[] = {
- { "bq24735-charger", 0 },
+ { "bq24735-charger" },
{}
};
MODULE_DEVICE_TABLE(i2c, bq24735_charger_id);
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 03fa11a1c9b6..2f5ceaf00b94 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -1617,11 +1617,11 @@ static const struct dev_pm_ops bq25890_pm = {
};
static const struct i2c_device_id bq25890_i2c_ids[] = {
- { "bq25890", 0 },
- { "bq25892", 0 },
- { "bq25895", 0 },
- { "bq25896", 0 },
- {},
+ { "bq25890" },
+ { "bq25892" },
+ { "bq25895" },
+ { "bq25896" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, bq25890_i2c_ids);
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 99f3ccdc30a6..f63c3c410451 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -731,7 +731,7 @@ static int __maybe_unused cw_bat_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cw_bat_pm_ops, cw_bat_suspend, cw_bat_resume);
static const struct i2c_device_id cw_bat_id_table[] = {
- { "cw2015", 0 },
+ { "cw2015" },
{ }
};
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index 2e7fdfde47ec..0a40f425c277 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -31,8 +31,9 @@ static int ingenic_battery_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_HEALTH:
- ret = iio_read_channel_processed(bat->channel, &val->intval);
- val->intval *= 1000;
+ ret = iio_read_channel_processed_scale(bat->channel,
+ &val->intval,
+ 1000);
if (val->intval < info->voltage_min_design_uv)
val->intval = POWER_SUPPLY_HEALTH_DEAD;
else if (val->intval > info->voltage_max_design_uv)
@@ -41,8 +42,9 @@ static int ingenic_battery_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_HEALTH_GOOD;
return ret;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = iio_read_channel_processed(bat->channel, &val->intval);
- val->intval *= 1000;
+ ret = iio_read_channel_processed_scale(bat->channel,
+ &val->intval,
+ 1000);
return ret;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
val->intval = info->voltage_min_design_uv;
diff --git a/drivers/power/supply/lenovo_yoga_c630_battery.c b/drivers/power/supply/lenovo_yoga_c630_battery.c
new file mode 100644
index 000000000000..d4d422cc5353
--- /dev/null
+++ b/drivers/power/supply/lenovo_yoga_c630_battery.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024, Linaro Ltd
+ * Authors:
+ * Bjorn Andersson
+ * Dmitry Baryshkov
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/power_supply.h>
+#include <linux/platform_data/lenovo-yoga-c630.h>
+
+struct yoga_c630_psy {
+ struct yoga_c630_ec *ec;
+ struct device *dev;
+ struct fwnode_handle *fwnode;
+ struct notifier_block nb;
+
+ /* guards all battery properties and registration of power supplies */
+ struct mutex lock;
+
+ struct power_supply *adp_psy;
+ struct power_supply *bat_psy;
+
+ unsigned long last_status_update;
+
+ bool adapter_online;
+
+ bool unit_mA;
+
+ bool bat_present;
+ unsigned int bat_status;
+ unsigned int design_capacity;
+ unsigned int design_voltage;
+ unsigned int full_charge_capacity;
+
+ unsigned int capacity_now;
+ unsigned int voltage_now;
+
+ int current_now;
+ int rate_now;
+};
+
+#define LENOVO_EC_CACHE_TIME (10 * HZ)
+
+#define LENOVO_EC_ADPT_STATUS 0xa3
+#define LENOVO_EC_ADPT_STATUS_PRESENT BIT(7)
+#define LENOVO_EC_BAT_ATTRIBUTES 0xc0
+#define LENOVO_EC_BAT_ATTRIBUTES_UNIT_IS_MA BIT(1)
+#define LENOVO_EC_BAT_STATUS 0xc1
+#define LENOVO_EC_BAT_STATUS_DISCHARGING BIT(0)
+#define LENOVO_EC_BAT_STATUS_CHARGING BIT(1)
+#define LENOVO_EC_BAT_REMAIN_CAPACITY 0xc2
+#define LENOVO_EC_BAT_VOLTAGE 0xc6
+#define LENOVO_EC_BAT_DESIGN_VOLTAGE 0xc8
+#define LENOVO_EC_BAT_DESIGN_CAPACITY 0xca
+#define LENOVO_EC_BAT_FULL_CAPACITY 0xcc
+#define LENOVO_EC_BAT_CURRENT 0xd2
+#define LENOVO_EC_BAT_FULL_FACTORY 0xd6
+#define LENOVO_EC_BAT_PRESENT 0xda
+#define LENOVO_EC_BAT_PRESENT_IS_PRESENT BIT(0)
+#define LENOVO_EC_BAT_FULL_REGISTER 0xdb
+#define LENOVO_EC_BAT_FULL_REGISTER_IS_FACTORY BIT(0)
+
+static int yoga_c630_psy_update_bat_info(struct yoga_c630_psy *ecbat)
+{
+ struct yoga_c630_ec *ec = ecbat->ec;
+ int val;
+
+ lockdep_assert_held(&ecbat->lock);
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_BAT_PRESENT);
+ if (val < 0)
+ return val;
+ ecbat->bat_present = !!(val & LENOVO_EC_BAT_PRESENT_IS_PRESENT);
+ if (!ecbat->bat_present)
+ return val;
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_BAT_ATTRIBUTES);
+ if (val < 0)
+ return val;
+ ecbat->unit_mA = val & LENOVO_EC_BAT_ATTRIBUTES_UNIT_IS_MA;
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_DESIGN_CAPACITY);
+ if (val < 0)
+ return val;
+ ecbat->design_capacity = val * 1000;
+
+ /*
+ * The DSDT inserts delays after most EC reads in these methods.
+ * Having no documentation for the EC, we follow suit and sleep here.
+ */
+ msleep(50);
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_DESIGN_VOLTAGE);
+ if (val < 0)
+ return val;
+ ecbat->design_voltage = val;
+
+ msleep(50);
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_BAT_FULL_REGISTER);
+ if (val < 0)
+ return val;
+ val = yoga_c630_ec_read16(ec,
+ val & LENOVO_EC_BAT_FULL_REGISTER_IS_FACTORY ?
+ LENOVO_EC_BAT_FULL_FACTORY :
+ LENOVO_EC_BAT_FULL_CAPACITY);
+ if (val < 0)
+ return val;
+
+ ecbat->full_charge_capacity = val * 1000;
+
+ if (!ecbat->unit_mA) {
+ ecbat->design_capacity *= 10;
+ ecbat->full_charge_capacity *= 10;
+ }
+
+ return 0;
+}
+
+static int yoga_c630_psy_maybe_update_bat_status(struct yoga_c630_psy *ecbat)
+{
+ struct yoga_c630_ec *ec = ecbat->ec;
+ int current_mA;
+ int val;
+
+ guard(mutex)(&ecbat->lock);
+ if (time_before(jiffies, ecbat->last_status_update + LENOVO_EC_CACHE_TIME))
+ return 0;
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_BAT_STATUS);
+ if (val < 0)
+ return val;
+ ecbat->bat_status = val;
+
+ msleep(50);
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_REMAIN_CAPACITY);
+ if (val < 0)
+ return val;
+ ecbat->capacity_now = val * 1000;
+
+ msleep(50);
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_VOLTAGE);
+ if (val < 0)
+ return val;
+ ecbat->voltage_now = val * 1000;
+
+ msleep(50);
+
+ val = yoga_c630_ec_read16(ec, LENOVO_EC_BAT_CURRENT);
+ if (val < 0)
+ return val;
+ current_mA = sign_extend32(val, 15);
+ ecbat->current_now = current_mA * 1000;
+ ecbat->rate_now = current_mA * (ecbat->voltage_now / 1000);
+
+ msleep(50);
+
+ if (!ecbat->unit_mA)
+ ecbat->capacity_now *= 10;
+
+ ecbat->last_status_update = jiffies;
+
+ return 0;
+}
+
+static int yoga_c630_psy_update_adapter_status(struct yoga_c630_psy *ecbat)
+{
+ struct yoga_c630_ec *ec = ecbat->ec;
+ int val;
+
+ guard(mutex)(&ecbat->lock);
+
+ val = yoga_c630_ec_read8(ec, LENOVO_EC_ADPT_STATUS);
+ if (val < 0)
+ return val;
+
+ ecbat->adapter_online = !!(val & LENOVO_EC_ADPT_STATUS_PRESENT);
+
+ return 0;
+}
+
+static bool yoga_c630_psy_is_charged(struct yoga_c630_psy *ecbat)
+{
+ if (ecbat->bat_status != 0)
+ return false;
+
+ if (ecbat->full_charge_capacity <= ecbat->capacity_now)
+ return true;
+
+ if (ecbat->design_capacity <= ecbat->capacity_now)
+ return true;
+
+ return false;
+}
+
+static int yoga_c630_psy_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct yoga_c630_psy *ecbat = power_supply_get_drvdata(psy);
+ int rc = 0;
+
+ if (!ecbat->bat_present && psp != POWER_SUPPLY_PROP_PRESENT)
+ return -ENODEV;
+
+ rc = yoga_c630_psy_maybe_update_bat_status(ecbat);
+ if (rc)
+ return rc;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (ecbat->bat_status & LENOVO_EC_BAT_STATUS_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (ecbat->bat_status & LENOVO_EC_BAT_STATUS_CHARGING)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (yoga_c630_psy_is_charged(ecbat))
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = ecbat->bat_present;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = ecbat->design_voltage;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = ecbat->design_capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = ecbat->full_charge_capacity;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ val->intval = ecbat->capacity_now;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = ecbat->current_now;
+ break;
+ case POWER_SUPPLY_PROP_POWER_NOW:
+ val->intval = ecbat->rate_now;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = ecbat->voltage_now;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = "PABAS0241231";
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "Compal";
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static enum power_supply_property yoga_c630_psy_bat_mA_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
+static enum power_supply_property yoga_c630_psy_bat_mWh_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
+static const struct power_supply_desc yoga_c630_psy_bat_psy_desc_mA = {
+ .name = "yoga-c630-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = yoga_c630_psy_bat_mA_properties,
+ .num_properties = ARRAY_SIZE(yoga_c630_psy_bat_mA_properties),
+ .get_property = yoga_c630_psy_bat_get_property,
+};
+
+static const struct power_supply_desc yoga_c630_psy_bat_psy_desc_mWh = {
+ .name = "yoga-c630-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = yoga_c630_psy_bat_mWh_properties,
+ .num_properties = ARRAY_SIZE(yoga_c630_psy_bat_mWh_properties),
+ .get_property = yoga_c630_psy_bat_get_property,
+};
+
+static int yoga_c630_psy_adpt_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct yoga_c630_psy *ecbat = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ ret = yoga_c630_psy_update_adapter_status(ecbat);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = ecbat->adapter_online;
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = POWER_SUPPLY_USB_TYPE_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property yoga_c630_psy_adpt_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static const enum power_supply_usb_type yoga_c630_psy_adpt_usb_type[] = {
+ POWER_SUPPLY_USB_TYPE_C,
+};
+
+static const struct power_supply_desc yoga_c630_psy_adpt_psy_desc = {
+ .name = "yoga-c630-adapter",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = yoga_c630_psy_adpt_usb_type,
+ .num_usb_types = ARRAY_SIZE(yoga_c630_psy_adpt_usb_type),
+ .properties = yoga_c630_psy_adpt_properties,
+ .num_properties = ARRAY_SIZE(yoga_c630_psy_adpt_properties),
+ .get_property = yoga_c630_psy_adpt_get_property,
+};
+
+static int yoga_c630_psy_register_bat_psy(struct yoga_c630_psy *ecbat)
+{
+ struct power_supply_config bat_cfg = {};
+
+ bat_cfg.drv_data = ecbat;
+ bat_cfg.fwnode = ecbat->fwnode;
+ ecbat->bat_psy = power_supply_register_no_ws(ecbat->dev,
+ ecbat->unit_mA ?
+ &yoga_c630_psy_bat_psy_desc_mA :
+ &yoga_c630_psy_bat_psy_desc_mWh,
+ &bat_cfg);
+ if (IS_ERR(ecbat->bat_psy)) {
+ dev_err(ecbat->dev, "failed to register battery supply\n");
+ return PTR_ERR(ecbat->bat_psy);
+ }
+
+ return 0;
+}
+
+static void yoga_c630_ec_refresh_bat_info(struct yoga_c630_psy *ecbat)
+{
+ bool current_unit;
+
+ guard(mutex)(&ecbat->lock);
+
+ current_unit = ecbat->unit_mA;
+
+ yoga_c630_psy_update_bat_info(ecbat);
+
+ if (current_unit != ecbat->unit_mA) {
+ power_supply_unregister(ecbat->bat_psy);
+ yoga_c630_psy_register_bat_psy(ecbat);
+ }
+}
+
+static int yoga_c630_psy_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct yoga_c630_psy *ecbat = container_of(nb, struct yoga_c630_psy, nb);
+
+ switch (action) {
+ case LENOVO_EC_EVENT_BAT_INFO:
+ yoga_c630_ec_refresh_bat_info(ecbat);
+ break;
+ case LENOVO_EC_EVENT_BAT_ADPT_STATUS:
+ power_supply_changed(ecbat->adp_psy);
+ fallthrough;
+ case LENOVO_EC_EVENT_BAT_STATUS:
+ power_supply_changed(ecbat->bat_psy);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int yoga_c630_psy_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct yoga_c630_ec *ec = adev->dev.platform_data;
+ struct power_supply_config adp_cfg = {};
+ struct device *dev = &adev->dev;
+ struct yoga_c630_psy *ecbat;
+ int ret;
+
+ ecbat = devm_kzalloc(&adev->dev, sizeof(*ecbat), GFP_KERNEL);
+ if (!ecbat)
+ return -ENOMEM;
+
+ ecbat->ec = ec;
+ ecbat->dev = dev;
+ mutex_init(&ecbat->lock);
+ ecbat->fwnode = adev->dev.parent->fwnode;
+ ecbat->nb.notifier_call = yoga_c630_psy_notify;
+
+ auxiliary_set_drvdata(adev, ecbat);
+
+ adp_cfg.drv_data = ecbat;
+ adp_cfg.fwnode = ecbat->fwnode;
+ adp_cfg.supplied_to = (char **)&yoga_c630_psy_bat_psy_desc_mA.name;
+ adp_cfg.num_supplicants = 1;
+ ecbat->adp_psy = devm_power_supply_register_no_ws(dev, &yoga_c630_psy_adpt_psy_desc, &adp_cfg);
+ if (IS_ERR(ecbat->adp_psy)) {
+ dev_err(dev, "failed to register AC adapter supply\n");
+ return PTR_ERR(ecbat->adp_psy);
+ }
+
+ scoped_guard(mutex, &ecbat->lock) {
+ ret = yoga_c630_psy_update_bat_info(ecbat);
+ if (ret)
+ goto err_unreg_bat;
+
+ ret = yoga_c630_psy_register_bat_psy(ecbat);
+ if (ret)
+ goto err_unreg_bat;
+ }
+
+ ret = yoga_c630_ec_register_notify(ecbat->ec, &ecbat->nb);
+ if (ret)
+ goto err_unreg_bat;
+
+ return 0;
+
+err_unreg_bat:
+ power_supply_unregister(ecbat->bat_psy);
+ return ret;
+}
+
+static void yoga_c630_psy_remove(struct auxiliary_device *adev)
+{
+ struct yoga_c630_psy *ecbat = auxiliary_get_drvdata(adev);
+
+ yoga_c630_ec_unregister_notify(ecbat->ec, &ecbat->nb);
+ power_supply_unregister(ecbat->bat_psy);
+}
+
+static const struct auxiliary_device_id yoga_c630_psy_id_table[] = {
+ { .name = YOGA_C630_MOD_NAME "." YOGA_C630_DEV_PSY, },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, yoga_c630_psy_id_table);
+
+static struct auxiliary_driver yoga_c630_psy_driver = {
+ .name = YOGA_C630_DEV_PSY,
+ .id_table = yoga_c630_psy_id_table,
+ .probe = yoga_c630_psy_probe,
+ .remove = yoga_c630_psy_remove,
+};
+
+module_auxiliary_driver(yoga_c630_psy_driver);
+
+MODULE_DESCRIPTION("Lenovo Yoga C630 psy");
+MODULE_LICENSE("GPL");
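The capacity scaling in yoga_c630_psy_update_bat_info() depends on LENOVO_EC_BAT_ATTRIBUTES_UNIT_IS_MA: readings are multiplied by 1000 and, when the EC does not report in mA units, by a further 10, which suggests a 10 mWh register step (that step size is inferred from the "*= 10" above, not documented). A small sketch that mirrors the scaling, with the assumption spelled out:

#include <linux/types.h>

/* Illustrative helper, not from the patch; mirrors the scaling above. */
static unsigned int example_capacity_to_micro(unsigned int raw, bool unit_mA)
{
	unsigned int val = raw * 1000;

	if (!unit_mA)
		val *= 10;	/* assumed 10 mWh register step -> uWh */

	return val;		/* uAh when unit_mA is set, uWh otherwise */
}

For example, a hypothetical raw reading of 4500 becomes 4500000 uAh in the mA case or 45000000 uWh (45 Wh) otherwise.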
diff --git a/drivers/power/supply/lp8727_charger.c b/drivers/power/supply/lp8727_charger.c
index 34548a4da90b..4186fcd37512 100644
--- a/drivers/power/supply/lp8727_charger.c
+++ b/drivers/power/supply/lp8727_charger.c
@@ -584,7 +584,7 @@ static const struct of_device_id lp8727_dt_ids[] __maybe_unused = {
MODULE_DEVICE_TABLE(of, lp8727_dt_ids);
static const struct i2c_device_id lp8727_ids[] = {
- {"lp8727", 0},
+ { "lp8727" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lp8727_ids);
diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c
index f0eace731480..2e4bc74e1c4a 100644
--- a/drivers/power/supply/ltc4162-l-charger.c
+++ b/drivers/power/supply/ltc4162-l-charger.c
@@ -903,8 +903,8 @@ static void ltc4162l_alert(struct i2c_client *client,
}
static const struct i2c_device_id ltc4162l_i2c_id_table[] = {
- { "ltc4162-l", 0 },
- { },
+ { "ltc4162-l" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, ltc4162l_i2c_id_table);
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
index 89f2af72dfcd..a5b42b42d134 100644
--- a/drivers/power/supply/max14656_charger_detector.c
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -300,7 +300,7 @@ static int max14656_probe(struct i2c_client *client)
}
static const struct i2c_device_id max14656_id[] = {
- { "max14656", 0 },
+ { "max14656" },
{}
};
MODULE_DEVICE_TABLE(i2c, max14656_id);
diff --git a/drivers/power/supply/max1720x_battery.c b/drivers/power/supply/max1720x_battery.c
new file mode 100644
index 000000000000..edc262f0a62f
--- /dev/null
+++ b/drivers/power/supply/max1720x_battery.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Fuel gauge driver for Maxim 17201/17205
+ *
+ * based on max1721x_battery.c
+ *
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+#include <asm/unaligned.h>
+
+/* Nonvolatile registers */
+#define MAX1720X_NRSENSE 0xCF /* RSense in 10^-5 Ohm */
+
+/* ModelGauge m5 */
+#define MAX172XX_STATUS 0x00 /* Status */
+#define MAX172XX_STATUS_BAT_ABSENT BIT(3) /* Battery absent */
+#define MAX172XX_REPCAP 0x05 /* Average capacity */
+#define MAX172XX_REPSOC 0x06 /* Percentage of charge */
+#define MAX172XX_TEMP 0x08 /* Temperature */
+#define MAX172XX_CURRENT 0x0A /* Actual current */
+#define MAX172XX_AVG_CURRENT 0x0B /* Average current */
+#define MAX172XX_TTE 0x11 /* Time to empty */
+#define MAX172XX_AVG_TA 0x16 /* Average temperature */
+#define MAX172XX_CYCLES 0x17
+#define MAX172XX_DESIGN_CAP 0x18 /* Design capacity */
+#define MAX172XX_AVG_VCELL 0x19
+#define MAX172XX_TTF 0x20 /* Time to full */
+#define MAX172XX_DEV_NAME 0x21 /* Device name */
+#define MAX172XX_DEV_NAME_TYPE_MASK GENMASK(3, 0)
+#define MAX172XX_DEV_NAME_TYPE_MAX17201 BIT(0)
+#define MAX172XX_DEV_NAME_TYPE_MAX17205 (BIT(0) | BIT(2))
+#define MAX172XX_QR_TABLE10 0x22
+#define MAX172XX_BATT 0xDA /* Battery voltage */
+#define MAX172XX_ATAVCAP 0xDF
+
+static const char *const max1720x_manufacturer = "Maxim Integrated";
+static const char *const max17201_model = "MAX17201";
+static const char *const max17205_model = "MAX17205";
+
+struct max1720x_device_info {
+ struct regmap *regmap;
+ int rsense;
+};
+
+/*
+ * Model Gauge M5 Algorithm output register
+ * Volatile data (must not be cached)
+ */
+static const struct regmap_range max1720x_volatile_allow[] = {
+ regmap_reg_range(MAX172XX_STATUS, MAX172XX_CYCLES),
+ regmap_reg_range(MAX172XX_AVG_VCELL, MAX172XX_TTF),
+ regmap_reg_range(MAX172XX_QR_TABLE10, MAX172XX_ATAVCAP),
+};
+
+static const struct regmap_range max1720x_readable_allow[] = {
+ regmap_reg_range(MAX172XX_STATUS, MAX172XX_ATAVCAP),
+};
+
+static const struct regmap_range max1720x_readable_deny[] = {
+ /* unused registers */
+ regmap_reg_range(0x24, 0x26),
+ regmap_reg_range(0x30, 0x31),
+ regmap_reg_range(0x33, 0x34),
+ regmap_reg_range(0x37, 0x37),
+ regmap_reg_range(0x3B, 0x3C),
+ regmap_reg_range(0x40, 0x41),
+ regmap_reg_range(0x43, 0x44),
+ regmap_reg_range(0x47, 0x49),
+ regmap_reg_range(0x4B, 0x4C),
+ regmap_reg_range(0x4E, 0xAF),
+ regmap_reg_range(0xB1, 0xB3),
+ regmap_reg_range(0xB5, 0xB7),
+ regmap_reg_range(0xBF, 0xD0),
+ regmap_reg_range(0xDB, 0xDB),
+ regmap_reg_range(0xE0, 0xFF),
+};
+
+static const struct regmap_access_table max1720x_readable_regs = {
+ .yes_ranges = max1720x_readable_allow,
+ .n_yes_ranges = ARRAY_SIZE(max1720x_readable_allow),
+ .no_ranges = max1720x_readable_deny,
+ .n_no_ranges = ARRAY_SIZE(max1720x_readable_deny),
+};
+
+static const struct regmap_access_table max1720x_volatile_regs = {
+ .yes_ranges = max1720x_volatile_allow,
+ .n_yes_ranges = ARRAY_SIZE(max1720x_volatile_allow),
+ .no_ranges = max1720x_readable_deny,
+ .n_no_ranges = ARRAY_SIZE(max1720x_readable_deny),
+};
+
+static const struct regmap_config max1720x_regmap_cfg = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = MAX172XX_ATAVCAP,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .rd_table = &max1720x_readable_regs,
+ .volatile_table = &max1720x_volatile_regs,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const enum power_supply_property max1720x_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+/* Convert register values to power_supply units */
+
+static int max172xx_time_to_ps(unsigned int reg)
+{
+ return reg * 5625 / 1000; /* in sec. */
+}
+
+static int max172xx_percent_to_ps(unsigned int reg)
+{
+ return reg / 256; /* in percent from 0 to 100 */
+}
+
+static int max172xx_voltage_to_ps(unsigned int reg)
+{
+ return reg * 1250; /* in uV */
+}
+
+static int max172xx_capacity_to_ps(unsigned int reg)
+{
+ return reg * 500; /* in uAh */
+}
+
+/*
+ * Current and temperature are signed values, so the unsigned register
+ * value must be converted to a signed type.
+ */
+
+static int max172xx_temperature_to_ps(unsigned int reg)
+{
+ int val = (int16_t)reg;
+
+ return val * 10 / 256; /* in tenths of deg. C */
+}
+
+/*
+ * Calculating the current register's resolution:
+ *
+ * RSense is stored in 10^-5 Ohm, so the measurement voltage must be
+ * in 10^-11 Volts to get the current in uA.
+ * The 16-bit current register's full scale of +/-51.2 mV is 102400 uV.
+ * So: 102400 / 65535 * 10^5 = 156252
+ */
+static int max172xx_current_to_voltage(unsigned int reg)
+{
+ int val = (int16_t)reg;
+
+ return val * 156252;
+}
+
+static int max1720x_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max1720x_device_info *info = power_supply_get_drvdata(psy);
+ unsigned int reg_val;
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ /*
+ * POWER_SUPPLY_PROP_PRESENT is always readable via the
+ * sysfs interface. The value returned is 0 if the battery is
+ * not present or is inaccessible via I2C.
+ */
+ ret = regmap_read(info->regmap, MAX172XX_STATUS, &reg_val);
+ if (ret < 0) {
+ val->intval = 0;
+ return 0;
+ }
+
+ val->intval = !FIELD_GET(MAX172XX_STATUS_BAT_ABSENT, reg_val);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = regmap_read(info->regmap, MAX172XX_REPSOC, &reg_val);
+ val->intval = max172xx_percent_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = regmap_read(info->regmap, MAX172XX_BATT, &reg_val);
+ val->intval = max172xx_voltage_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ ret = regmap_read(info->regmap, MAX172XX_DESIGN_CAP, &reg_val);
+ val->intval = max172xx_capacity_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_AVG:
+ ret = regmap_read(info->regmap, MAX172XX_REPCAP, &reg_val);
+ val->intval = max172xx_capacity_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ ret = regmap_read(info->regmap, MAX172XX_TTE, &reg_val);
+ val->intval = max172xx_time_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ ret = regmap_read(info->regmap, MAX172XX_TTF, &reg_val);
+ val->intval = max172xx_time_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = regmap_read(info->regmap, MAX172XX_TEMP, &reg_val);
+ val->intval = max172xx_temperature_to_ps(reg_val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = regmap_read(info->regmap, MAX172XX_CURRENT, &reg_val);
+ val->intval = max172xx_current_to_voltage(reg_val) / info->rsense;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ ret = regmap_read(info->regmap, MAX172XX_AVG_CURRENT, &reg_val);
+ val->intval = max172xx_current_to_voltage(reg_val) / info->rsense;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ ret = regmap_read(info->regmap, MAX172XX_DEV_NAME, &reg_val);
+ reg_val = FIELD_GET(MAX172XX_DEV_NAME_TYPE_MASK, reg_val);
+ if (reg_val == MAX172XX_DEV_NAME_TYPE_MAX17201)
+ val->strval = max17201_model;
+ else if (reg_val == MAX172XX_DEV_NAME_TYPE_MAX17205)
+ val->strval = max17205_model;
+ else
+ return -ENODEV;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = max1720x_manufacturer;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int max1720x_probe_sense_resistor(struct i2c_client *client,
+ struct max1720x_device_info *info)
+{
+ struct device *dev = &client->dev;
+ struct i2c_client *ancillary;
+ int ret;
+
+ ancillary = i2c_new_ancillary_device(client, "nvmem", 0xb);
+ if (IS_ERR(ancillary)) {
+ dev_err(dev, "Failed to initialize ancillary i2c device\n");
+ return PTR_ERR(ancillary);
+ }
+
+ ret = i2c_smbus_read_word_data(ancillary, MAX1720X_NRSENSE);
+ i2c_unregister_device(ancillary);
+ if (ret < 0)
+ return ret;
+
+ info->rsense = ret;
+ if (!info->rsense) {
+ dev_warn(dev, "RSense not calibrated, set 10 mOhms!\n");
+ info->rsense = 1000; /* default 10 mOhm, in register units of 10^-5 Ohm */
+ }
+
+ return 0;
+}
+
+static const struct power_supply_desc max1720x_bat_desc = {
+ .name = "max1720x",
+ .no_thermal = true,
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = max1720x_battery_props,
+ .num_properties = ARRAY_SIZE(max1720x_battery_props),
+ .get_property = max1720x_battery_get_property,
+};
+
+static int max1720x_probe(struct i2c_client *client)
+{
+ struct power_supply_config psy_cfg = {};
+ struct device *dev = &client->dev;
+ struct max1720x_device_info *info;
+ struct power_supply *bat;
+ int ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ psy_cfg.drv_data = info;
+ psy_cfg.fwnode = dev_fwnode(dev);
+ info->regmap = devm_regmap_init_i2c(client, &max1720x_regmap_cfg);
+ if (IS_ERR(info->regmap))
+ return dev_err_probe(dev, PTR_ERR(info->regmap),
+ "regmap initialization failed\n");
+
+ ret = max1720x_probe_sense_resistor(client, info);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to read sense resistor value\n");
+
+ bat = devm_power_supply_register(dev, &max1720x_bat_desc, &psy_cfg);
+ if (IS_ERR(bat))
+ return dev_err_probe(dev, PTR_ERR(bat),
+ "Failed to register power supply\n");
+
+ return 0;
+}
+
+static const struct of_device_id max1720x_of_match[] = {
+ { .compatible = "maxim,max17201" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max1720x_of_match);
+
+static struct i2c_driver max1720x_i2c_driver = {
+ .driver = {
+ .name = "max1720x",
+ .of_match_table = max1720x_of_match,
+ },
+ .probe = max1720x_probe,
+};
+module_i2c_driver(max1720x_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dimitri Fedrau <dima.fedrau@gmail.com>");
+MODULE_DESCRIPTION("Maxim MAX17201/MAX17205 Fuel Gauge IC driver");
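A quick sanity check of the current scaling above, using illustrative numbers that are not from the patch: with the 10 mOhm fallback sense resistor, rsense is 1000 (in 10^-5 Ohm units), so a raw register value of 1024 corresponds to 1024 * 156252 / 1000 = 160002 uA, roughly 160 mA:

/* Illustrative sketch, not from the patch. */
static int example_reg_to_current_ua(void)
{
	int raw = 1024;		/* hypothetical signed current register value */
	int rsense = 1000;	/* the 10 mOhm fallback, in 10^-5 Ohm units */

	return raw * 156252 / rsense;	/* = 160002 uA */
}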
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
index 99659dc8f5a6..d7e520da7688 100644
--- a/drivers/power/supply/max77976_charger.c
+++ b/drivers/power/supply/max77976_charger.c
@@ -483,8 +483,8 @@ static int max77976_probe(struct i2c_client *client)
}
static const struct i2c_device_id max77976_i2c_id[] = {
- { MAX77976_DRIVER_NAME, 0 },
- { },
+ { MAX77976_DRIVER_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, max77976_i2c_id);
diff --git a/drivers/power/supply/mm8013.c b/drivers/power/supply/mm8013.c
index 20c1651ca38e..5bcfaeeda3db 100644
--- a/drivers/power/supply/mm8013.c
+++ b/drivers/power/supply/mm8013.c
@@ -284,7 +284,7 @@ static int mm8013_probe(struct i2c_client *client)
}
static const struct i2c_device_id mm8013_id_table[] = {
- { "mm8013", 0 },
+ { "mm8013" },
{}
};
MODULE_DEVICE_TABLE(i2c, mm8013_id_table);
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index fefe938c9342..8f6025acd10a 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1024,7 +1024,7 @@ EXPORT_SYMBOL_GPL(power_supply_temp2resist_simple);
int power_supply_vbat2ri(struct power_supply_battery_info *info,
int vbat_uv, bool charging)
{
- struct power_supply_vbat_ri_table *vbat2ri;
+ const struct power_supply_vbat_ri_table *vbat2ri;
int table_len;
int i, high, low;
@@ -1072,7 +1072,7 @@ int power_supply_vbat2ri(struct power_supply_battery_info *info,
}
EXPORT_SYMBOL_GPL(power_supply_vbat2ri);
-struct power_supply_maintenance_charge_table *
+const struct power_supply_maintenance_charge_table *
power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info,
int index)
{
diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
index c97893d4c25e..baacefbdf768 100644
--- a/drivers/power/supply/power_supply_hwmon.c
+++ b/drivers/power/supply/power_supply_hwmon.c
@@ -48,6 +48,18 @@ static int power_supply_hwmon_curr_to_property(u32 attr)
}
}
+static int power_supply_hwmon_power_to_property(u32 attr)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ return POWER_SUPPLY_PROP_POWER_NOW;
+ case hwmon_power_average:
+ return POWER_SUPPLY_PROP_POWER_AVG;
+ default:
+ return -EINVAL;
+ }
+}
+
static int power_supply_hwmon_temp_to_property(u32 attr, int channel)
{
if (channel) {
@@ -90,6 +102,8 @@ power_supply_hwmon_to_property(enum hwmon_sensor_types type,
return power_supply_hwmon_in_to_property(attr);
case hwmon_curr:
return power_supply_hwmon_curr_to_property(attr);
+ case hwmon_power:
+ return power_supply_hwmon_power_to_property(attr);
case hwmon_temp:
return power_supply_hwmon_temp_to_property(attr, channel);
default:
@@ -229,6 +243,11 @@ power_supply_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_in:
pspval.intval = DIV_ROUND_CLOSEST(pspval.intval, 1000);
break;
+ case hwmon_power:
+ /*
+ * Power properties are already in microwatts.
+ */
+ break;
/*
* Temp needs to be converted from 1/10 C to milli-C
*/
@@ -311,6 +330,10 @@ static const struct hwmon_channel_info * const power_supply_hwmon_info[] = {
HWMON_C_MAX |
HWMON_C_INPUT),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT |
+ HWMON_P_AVERAGE),
+
HWMON_CHANNEL_INFO(in,
HWMON_I_AVERAGE |
HWMON_I_MIN |
@@ -359,6 +382,8 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy)
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_MAX:
case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_POWER_AVG:
+ case POWER_SUPPLY_PROP_POWER_NOW:
case POWER_SUPPLY_PROP_TEMP:
case POWER_SUPPLY_PROP_TEMP_MAX:
case POWER_SUPPLY_PROP_TEMP_MIN:
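For reference, the unit handling in power_supply_hwmon_read() after this change; the first two rules are visible in the hunks above and the temperature rule comes from the existing comment:

/* hwmon_in:    power-supply uV    -> hwmon mV (DIV_ROUND_CLOSEST by 1000) */
/* hwmon_power: power-supply uW    -> hwmon uW (no conversion needed)      */
/* hwmon_temp:  power-supply 0.1 C -> hwmon milli-C                        */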
diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
index 73935de844d9..f4a7e566bea1 100644
--- a/drivers/power/supply/power_supply_leds.c
+++ b/drivers/power/supply/power_supply_leds.c
@@ -19,6 +19,76 @@
/* Battery specific LEDs triggers. */
+struct power_supply_led_trigger {
+ struct led_trigger trig;
+ struct power_supply *psy;
+};
+
+#define trigger_to_psy_trigger(trigger) \
+ container_of(trigger, struct power_supply_led_trigger, trig)
+
+static int power_supply_led_trigger_activate(struct led_classdev *led_cdev)
+{
+ struct power_supply_led_trigger *psy_trig =
+ trigger_to_psy_trigger(led_cdev->trigger);
+
+ /* Sync current power-supply state to LED being activated */
+ power_supply_update_leds(psy_trig->psy);
+ return 0;
+}
+
+static int power_supply_register_led_trigger(struct power_supply *psy,
+ const char *name_template,
+ struct led_trigger **tp, int *err)
+{
+ struct power_supply_led_trigger *psy_trig;
+ int ret = -ENOMEM;
+
+ /* Bail on previous errors */
+ if (err && *err)
+ return *err;
+
+ psy_trig = kzalloc(sizeof(*psy_trig), GFP_KERNEL);
+ if (!psy_trig)
+ goto err_free_trigger;
+
+ psy_trig->trig.name = kasprintf(GFP_KERNEL, name_template, psy->desc->name);
+ if (!psy_trig->trig.name)
+ goto err_free_trigger;
+
+ psy_trig->trig.activate = power_supply_led_trigger_activate;
+ psy_trig->psy = psy;
+
+ ret = led_trigger_register(&psy_trig->trig);
+ if (ret)
+ goto err_free_name;
+
+ *tp = &psy_trig->trig;
+ return 0;
+
+err_free_name:
+ kfree(psy_trig->trig.name);
+err_free_trigger:
+ kfree(psy_trig);
+ if (err)
+ *err = ret;
+
+ return ret;
+}
+
+static void power_supply_unregister_led_trigger(struct led_trigger *trig)
+{
+ struct power_supply_led_trigger *psy_trig;
+
+ if (!trig)
+ return;
+
+ psy_trig = trigger_to_psy_trigger(trig);
+ led_trigger_unregister(&psy_trig->trig);
+ kfree(psy_trig->trig.name);
+ kfree(psy_trig);
+}
+
static void power_supply_update_bat_leds(struct power_supply *psy)
{
union power_supply_propval status;
@@ -32,7 +102,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
switch (status.intval) {
case POWER_SUPPLY_STATUS_FULL:
- led_trigger_event(psy->charging_full_trig, LED_FULL);
+ led_trigger_event(psy->trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_FULL);
/* Going from blink to LED on requires a LED_OFF event to stop blink */
@@ -44,7 +114,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
LED_FULL);
break;
case POWER_SUPPLY_STATUS_CHARGING:
- led_trigger_event(psy->charging_full_trig, LED_FULL);
+ led_trigger_event(psy->trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_FULL);
led_trigger_event(psy->full_trig, LED_OFF);
led_trigger_blink(psy->charging_blink_full_solid_trig, 0, 0);
@@ -54,7 +124,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
LED_FULL);
break;
default:
- led_trigger_event(psy->charging_full_trig, LED_OFF);
+ led_trigger_event(psy->trig, LED_OFF);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_OFF);
led_trigger_event(psy->charging_blink_full_solid_trig,
@@ -65,69 +135,33 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
}
}
-static int power_supply_create_bat_triggers(struct power_supply *psy)
+static void power_supply_remove_bat_triggers(struct power_supply *psy)
{
- psy->charging_full_trig_name = kasprintf(GFP_KERNEL,
- "%s-charging-or-full", psy->desc->name);
- if (!psy->charging_full_trig_name)
- goto charging_full_failed;
-
- psy->charging_trig_name = kasprintf(GFP_KERNEL,
- "%s-charging", psy->desc->name);
- if (!psy->charging_trig_name)
- goto charging_failed;
-
- psy->full_trig_name = kasprintf(GFP_KERNEL, "%s-full", psy->desc->name);
- if (!psy->full_trig_name)
- goto full_failed;
-
- psy->charging_blink_full_solid_trig_name = kasprintf(GFP_KERNEL,
- "%s-charging-blink-full-solid", psy->desc->name);
- if (!psy->charging_blink_full_solid_trig_name)
- goto charging_blink_full_solid_failed;
-
- psy->charging_orange_full_green_trig_name = kasprintf(GFP_KERNEL,
- "%s-charging-orange-full-green", psy->desc->name);
- if (!psy->charging_orange_full_green_trig_name)
- goto charging_red_full_green_failed;
-
- led_trigger_register_simple(psy->charging_full_trig_name,
- &psy->charging_full_trig);
- led_trigger_register_simple(psy->charging_trig_name,
- &psy->charging_trig);
- led_trigger_register_simple(psy->full_trig_name,
- &psy->full_trig);
- led_trigger_register_simple(psy->charging_blink_full_solid_trig_name,
- &psy->charging_blink_full_solid_trig);
- led_trigger_register_simple(psy->charging_orange_full_green_trig_name,
- &psy->charging_orange_full_green_trig);
-
- return 0;
-
-charging_red_full_green_failed:
- kfree(psy->charging_blink_full_solid_trig_name);
-charging_blink_full_solid_failed:
- kfree(psy->full_trig_name);
-full_failed:
- kfree(psy->charging_trig_name);
-charging_failed:
- kfree(psy->charging_full_trig_name);
-charging_full_failed:
- return -ENOMEM;
+ power_supply_unregister_led_trigger(psy->trig);
+ power_supply_unregister_led_trigger(psy->charging_trig);
+ power_supply_unregister_led_trigger(psy->full_trig);
+ power_supply_unregister_led_trigger(psy->charging_blink_full_solid_trig);
+ power_supply_unregister_led_trigger(psy->charging_orange_full_green_trig);
}
-static void power_supply_remove_bat_triggers(struct power_supply *psy)
+static int power_supply_create_bat_triggers(struct power_supply *psy)
{
- led_trigger_unregister_simple(psy->charging_full_trig);
- led_trigger_unregister_simple(psy->charging_trig);
- led_trigger_unregister_simple(psy->full_trig);
- led_trigger_unregister_simple(psy->charging_blink_full_solid_trig);
- led_trigger_unregister_simple(psy->charging_orange_full_green_trig);
- kfree(psy->charging_blink_full_solid_trig_name);
- kfree(psy->full_trig_name);
- kfree(psy->charging_trig_name);
- kfree(psy->charging_full_trig_name);
- kfree(psy->charging_orange_full_green_trig_name);
+ int err = 0;
+
+ power_supply_register_led_trigger(psy, "%s-charging-or-full",
+ &psy->trig, &err);
+ power_supply_register_led_trigger(psy, "%s-charging",
+ &psy->charging_trig, &err);
+ power_supply_register_led_trigger(psy, "%s-full",
+ &psy->full_trig, &err);
+ power_supply_register_led_trigger(psy, "%s-charging-blink-full-solid",
+ &psy->charging_blink_full_solid_trig, &err);
+ power_supply_register_led_trigger(psy, "%s-charging-orange-full-green",
+ &psy->charging_orange_full_green_trig, &err);
+ if (err)
+ power_supply_remove_bat_triggers(psy);
+
+ return err;
}
/* Generated power specific LEDs triggers. */
@@ -142,27 +176,19 @@ static void power_supply_update_gen_leds(struct power_supply *psy)
dev_dbg(&psy->dev, "%s %d\n", __func__, online.intval);
if (online.intval)
- led_trigger_event(psy->online_trig, LED_FULL);
+ led_trigger_event(psy->trig, LED_FULL);
else
- led_trigger_event(psy->online_trig, LED_OFF);
+ led_trigger_event(psy->trig, LED_OFF);
}
static int power_supply_create_gen_triggers(struct power_supply *psy)
{
- psy->online_trig_name = kasprintf(GFP_KERNEL, "%s-online",
- psy->desc->name);
- if (!psy->online_trig_name)
- return -ENOMEM;
-
- led_trigger_register_simple(psy->online_trig_name, &psy->online_trig);
-
- return 0;
+ return power_supply_register_led_trigger(psy, "%s-online", &psy->trig, NULL);
}
static void power_supply_remove_gen_triggers(struct power_supply *psy)
{
- led_trigger_unregister_simple(psy->online_trig);
- kfree(psy->online_trig_name);
+ power_supply_unregister_led_trigger(psy->trig);
}
/* Choice what triggers to create&update. */
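The LED rework above folds five kasprintf()/led_trigger_register_simple() pairs into one helper that also carries an error accumulator: power_supply_register_led_trigger() becomes a no-op once *err is set and records the first failure, so the caller chains the registrations and checks the error once at the end. A minimal usage sketch of the idiom with assumed names, not the real API:

	int err = 0;

	register_one_trigger(psy, "%s-charging", &psy->charging_trig, &err);
	register_one_trigger(psy, "%s-full", &psy->full_trig, &err);	/* no-op if err != 0 */
	if (err)
		remove_all_triggers(psy);	/* the unregister helper tolerates NULL triggers */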
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index b86e11bdc07e..3e63d165b2f7 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -379,8 +379,7 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj,
int property = psy->desc->properties[i];
if (property == attrno) {
- if (psy->desc->property_is_writeable &&
- psy->desc->property_is_writeable(psy, property) > 0)
+ if (power_supply_property_is_writeable(psy, property) > 0)
mode |= S_IWUSR;
return mode;
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index ec163d1bcd18..46f36dcb185c 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -1308,6 +1308,7 @@ static void qcom_battmgr_pdr_notify(void *priv, int state)
static const struct of_device_id qcom_battmgr_of_variants[] = {
{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
+ { .compatible = "qcom,x1e80100-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
/* Unmatched devices falls back to QCOM_BATTMGR_SM8350 */
{}
};
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index e4dbacd50a43..64a23e3d7bb0 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -1718,8 +1718,8 @@ static void rt9455_remove(struct i2c_client *client)
}
static const struct i2c_device_id rt9455_i2c_id_table[] = {
- { RT9455_DRIVER_NAME, 0 },
- { },
+ { RT9455_DRIVER_NAME },
+ { }
};
MODULE_DEVICE_TABLE(i2c, rt9455_i2c_id_table);
diff --git a/drivers/power/supply/samsung-sdi-battery.c b/drivers/power/supply/samsung-sdi-battery.c
index b33daab798b9..b63fd2758c2f 100644
--- a/drivers/power/supply/samsung-sdi-battery.c
+++ b/drivers/power/supply/samsung-sdi-battery.c
@@ -25,7 +25,7 @@ struct samsung_sdi_battery {
* tables apply depending on whether we are charging or not.
*/
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb_l1m7flu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb_l1m7flu[] = {
{ .vbat_uv = 4240000, .ri_uohm = 160000 },
{ .vbat_uv = 4210000, .ri_uohm = 179000 },
{ .vbat_uv = 4180000, .ri_uohm = 183000 },
@@ -53,7 +53,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb_l1m7flu
{ .vbat_uv = 3300000, .ri_uohm = 339000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb_l1m7flu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb_l1m7flu[] = {
{ .vbat_uv = 4302000, .ri_uohm = 230000 },
{ .vbat_uv = 4276000, .ri_uohm = 345000 },
{ .vbat_uv = 4227000, .ri_uohm = 345000 },
@@ -73,7 +73,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb_l1m7flu[]
{ .vbat_uv = 3590000, .ri_uohm = 164000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161la[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161la[] = {
{ .vbat_uv = 4240000, .ri_uohm = 160000 },
{ .vbat_uv = 4210000, .ri_uohm = 179000 },
{ .vbat_uv = 4180000, .ri_uohm = 183000 },
@@ -105,7 +105,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161la
{ .vbat_uv = 3300000, .ri_uohm = 339000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161la[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161la[] = {
{ .vbat_uv = 4345000, .ri_uohm = 230000 },
{ .vbat_uv = 4329000, .ri_uohm = 238000 },
{ .vbat_uv = 4314000, .ri_uohm = 225000 },
@@ -182,7 +182,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161la[]
{ .vbat_uv = 3590000, .ri_uohm = 164000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161lu[] = {
{ .vbat_uv = 4240000, .ri_uohm = 160000 },
{ .vbat_uv = 4210000, .ri_uohm = 179000 },
{ .vbat_uv = 4180000, .ri_uohm = 183000 },
@@ -214,7 +214,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb425161lu
{ .vbat_uv = 3300000, .ri_uohm = 339000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161lu[] = {
{ .vbat_uv = 4346000, .ri_uohm = 293000 },
{ .vbat_uv = 4336000, .ri_uohm = 290000 },
{ .vbat_uv = 4315000, .ri_uohm = 274000 },
@@ -244,7 +244,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb425161lu[]
{ .vbat_uv = 3590000, .ri_uohm = 164000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb485159lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb485159lu[] = {
{ .vbat_uv = 4240000, .ri_uohm = 160000 },
{ .vbat_uv = 4210000, .ri_uohm = 179000 },
{ .vbat_uv = 4180000, .ri_uohm = 183000 },
@@ -271,7 +271,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb485159lu
{ .vbat_uv = 3300000, .ri_uohm = 339000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb485159lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb485159lu[] = {
{ .vbat_uv = 4302000, .ri_uohm = 200000 },
{ .vbat_uv = 4258000, .ri_uohm = 206000 },
{ .vbat_uv = 4200000, .ri_uohm = 231000 },
@@ -291,7 +291,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb485159lu[]
{ .vbat_uv = 3590000, .ri_uohm = 164000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb535151vu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb535151vu[] = {
{ .vbat_uv = 4071000, .ri_uohm = 158000 },
{ .vbat_uv = 4019000, .ri_uohm = 187000 },
{ .vbat_uv = 3951000, .ri_uohm = 191000 },
@@ -311,7 +311,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb535151vu
{ .vbat_uv = 3280000, .ri_uohm = 250000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb535151vu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb535151vu[] = {
{ .vbat_uv = 4190000, .ri_uohm = 214000 },
{ .vbat_uv = 4159000, .ri_uohm = 252000 },
{ .vbat_uv = 4121000, .ri_uohm = 245000 },
@@ -331,7 +331,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb535151vu[]
{ .vbat_uv = 3510000, .ri_uohm = 228000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb585157lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb585157lu[] = {
{ .vbat_uv = 4194000, .ri_uohm = 121000 },
{ .vbat_uv = 4169000, .ri_uohm = 188000 },
{ .vbat_uv = 4136000, .ri_uohm = 173000 },
@@ -401,7 +401,7 @@ static struct power_supply_vbat_ri_table samsung_vbat2res_discharging_eb585157lu
{ .vbat_uv = 3161000, .ri_uohm = 452000 },
};
-static struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb585157lu[] = {
+static const struct power_supply_vbat_ri_table samsung_vbat2res_charging_eb585157lu[] = {
{ .vbat_uv = 4360000, .ri_uohm = 128000 },
{ .vbat_uv = 4325000, .ri_uohm = 130000 },
{ .vbat_uv = 4316000, .ri_uohm = 148000 },
@@ -613,7 +613,7 @@ static struct power_supply_battery_ocv_table samsung_ocv_cap_eb585157lu[] = {
{ .ocv = 3300000, .capacity = 0},
};
-static struct power_supply_maintenance_charge_table samsung_maint_charge_table[] = {
+static const struct power_supply_maintenance_charge_table samsung_maint_charge_table[] = {
{
/* Maintenance charging phase A, 60 hours */
.charge_current_max_ua = 600000,
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
index f4adde449270..ab3f095d90ea 100644
--- a/drivers/power/supply/sbs-charger.c
+++ b/drivers/power/supply/sbs-charger.c
@@ -234,7 +234,7 @@ MODULE_DEVICE_TABLE(of, sbs_dt_ids);
#endif
static const struct i2c_device_id sbs_id[] = {
- { "sbs-charger", 0 },
+ { "sbs-charger" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sbs_id);
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index 933b04806d10..7d2f39f19acb 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -389,8 +389,8 @@ static int sbsm_probe(struct i2c_client *client)
}
static const struct i2c_device_id sbsm_ids[] = {
- { "sbs-manager", 0 },
- { "ltc1760", 0 },
+ { "sbs-manager" },
+ { "ltc1760" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sbsm_ids);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 5faafb4aa55c..cca650b2e0b9 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -274,10 +274,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
return err;
/* full-function RTCs won't have such missing fields */
- if (rtc_valid_tm(&alarm->time) == 0) {
- rtc_add_offset(rtc, &alarm->time);
- return 0;
- }
+ err = rtc_valid_tm(&alarm->time);
+ if (!err)
+ goto done;
/* get the "after" timestamp, to detect wrapped fields */
err = rtc_read_time(rtc, &now);
@@ -379,6 +378,8 @@ done:
if (err && alarm->enabled)
dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
&alarm->time);
+ else
+ rtc_add_offset(rtc, &alarm->time);
return err;
}
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
index 3893a202e9ea..c30c759662e3 100644
--- a/drivers/rtc/lib_test.c
+++ b/drivers/rtc/lib_test.c
@@ -97,4 +97,5 @@ static struct kunit_suite rtc_lib_test_suite = {
kunit_test_suite(rtc_lib_test_suite);
+MODULE_DESCRIPTION("KUnit test for RTC lib functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 100062001831..684f9898d768 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -933,7 +933,7 @@ MODULE_DEVICE_TABLE(of, abb5zes3_dt_match);
#endif
static const struct i2c_device_id abb5zes3_id[] = {
- { "abb5zes3", 0 },
+ { "abb5zes3" },
{ }
};
MODULE_DEVICE_TABLE(i2c, abb5zes3_id);
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index 04e1b8e93bc1..02f7d0711287 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -575,7 +575,7 @@ MODULE_DEVICE_TABLE(of, abeoz9_dt_match);
#endif
static const struct i2c_device_id abeoz9_id[] = {
- { "abeoz9", 0 },
+ { "abeoz9" },
{ }
};
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index fde2b8054c2e..1298962402ff 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -705,14 +705,18 @@ static int abx80x_nvmem_xfer(struct abx80x_priv *priv, unsigned int offset,
if (ret)
return ret;
- if (write)
+ if (write) {
ret = i2c_smbus_write_i2c_block_data(priv->client, reg,
len, val);
- else
+ if (ret)
+ return ret;
+ } else {
ret = i2c_smbus_read_i2c_block_data(priv->client, reg,
len, val);
- if (ret)
- return ret;
+ if (ret <= 0)
+ return ret ? ret : -EIO;
+ len = ret;
+ }
offset += len;
val += len;
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 591e42391747..7ad34539be4d 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -304,7 +304,7 @@ static void bq32k_remove(struct i2c_client *client)
}
static const struct i2c_device_id bq32k_id[] = {
- { "bq32000", 0 },
+ { "bq32000" },
{ }
};
MODULE_DEVICE_TABLE(i2c, bq32k_id);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7d99cd2c37a0..35dca2accbb8 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -643,11 +643,10 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val,
size_t count)
{
unsigned char *buf = val;
- int retval;
off += NVRAM_OFFSET;
spin_lock_irq(&rtc_lock);
- for (retval = 0; count; count--, off++, retval++) {
+ for (; count; count--, off++) {
if (off < 128)
*buf++ = CMOS_READ(off);
else if (can_bank2)
@@ -657,7 +656,7 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val,
}
spin_unlock_irq(&rtc_lock);
- return retval;
+ return count ? -EIO : 0;
}
static int cmos_nvram_write(void *priv, unsigned int off, void *val,
@@ -665,7 +664,6 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
{
struct cmos_rtc *cmos = priv;
unsigned char *buf = val;
- int retval;
/* NOTE: on at least PCs and Ataris, the boot firmware uses a
* checksum on part of the NVRAM data. That's currently ignored
@@ -674,7 +672,7 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
*/
off += NVRAM_OFFSET;
spin_lock_irq(&rtc_lock);
- for (retval = 0; count; count--, off++, retval++) {
+ for (; count; count--, off++) {
/* don't trash RTC registers */
if (off == cmos->day_alrm
|| off == cmos->mon_alrm
@@ -689,7 +687,7 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val,
}
spin_unlock_irq(&rtc_lock);
- return retval;
+ return count ? -EIO : 0;
}
/*----------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 506b7d1c2397..872e0b679be4 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -65,6 +65,7 @@ enum ds_type {
# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
#define DS1307_REG_WDAY 0x03 /* 01-07 */
+# define MCP794XX_BIT_OSCRUN BIT(5)
# define MCP794XX_BIT_VBATEN 0x08
#define DS1307_REG_MDAY 0x04 /* 01-31 */
#define DS1307_REG_MONTH 0x05 /* 01-12 */
@@ -242,6 +243,10 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
regs[DS1307_REG_MIN] & M41T0_BIT_OF) {
dev_warn_once(dev, "oscillator failed, set time!\n");
return -EINVAL;
+ } else if (ds1307->type == mcp794xx &&
+ !(regs[DS1307_REG_WDAY] & MCP794XX_BIT_OSCRUN)) {
+ dev_warn_once(dev, "oscillator failed, set time!\n");
+ return -EINVAL;
}
tmp = regs[DS1307_REG_SECS];
@@ -354,7 +359,7 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
/* assume 20YY not 19YY */
- tmp = t->tm_year - 100;
+ tmp = t->tm_year % 100;
regs[DS1307_REG_YEAR] = bin2bcd(tmp);
if (chip->century_enable_bit)
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 4a5005cb23f5..c2359eb86bc9 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -52,7 +52,7 @@
#define DS1374_REG_TCR 0x09 /* Trickle Charge */
static const struct i2c_device_id ds1374_id[] = {
- { "ds1374", 0 },
+ { "ds1374" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1374_id);
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 641799f30baa..6e5314215d00 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -133,7 +133,7 @@ static int ds1672_probe(struct i2c_client *client)
}
static const struct i2c_device_id ds1672_id[] = {
- { "ds1672", 0 },
+ { "ds1672" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1672_id);
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 1485a6ae51e6..dd37b055693c 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -586,7 +586,7 @@ static int ds3232_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id ds3232_id[] = {
- { "ds3232", 0 },
+ { "ds3232" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds3232_id);
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index fc772eae5da5..dc1ccbc65dcb 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -129,7 +129,7 @@ static int em3027_probe(struct i2c_client *client)
}
static const struct i2c_device_id em3027_id[] = {
- { "em3027", 0 },
+ { "em3027" },
{ }
};
MODULE_DEVICE_TABLE(i2c, em3027_id);
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 400ce4ad0c49..f82728ebac0c 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -53,7 +53,7 @@ struct fm3130 {
int data_valid;
};
static const struct i2c_device_id fm3130_id[] = {
- { "fm3130", 0 },
+ { "fm3130" },
{ }
};
MODULE_DEVICE_TABLE(i2c, fm3130_id);
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index 59c0f38cc08d..53ec7173c28e 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -203,4 +203,5 @@ static struct platform_driver goldfish_rtc = {
module_platform_driver(goldfish_rtc);
+MODULE_DESCRIPTION("Android Goldfish Real Time Clock driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index b018535c842b..63f11ea3589d 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -559,8 +559,8 @@ static int hym8563_probe(struct i2c_client *client)
}
static const struct i2c_device_id hym8563_id[] = {
- { "hym8563", 0 },
- {},
+ { "hym8563" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, hym8563_id);
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 4eef7afcc8bc..6fa9a68af9d9 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -366,7 +366,7 @@ static const struct of_device_id isl12022_dt_match[] = {
MODULE_DEVICE_TABLE(of, isl12022_dt_match);
static const struct i2c_device_id isl12022_id[] = {
- { "isl12022", 0 },
+ { "isl12022" },
{ }
};
MODULE_DEVICE_TABLE(i2c, isl12022_id);
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index e50c23ee1646..7b82e4a14b7a 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -7,6 +7,7 @@
#include <linux/bcd.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -628,6 +629,18 @@ isl1208_rtc_interrupt(int irq, void *data)
struct isl1208_state *isl1208 = i2c_get_clientdata(client);
int handled = 0, sr, err;
+ if (!isl1208->config->has_tamper) {
+ /*
+ * The INT# output is pulled low 250ms after the alarm is
+ * triggered. After the INT# output is pulled low, it is low for
+ * at least 250ms, even if the correct action is taken to clear
+ * it. It is impossible to clear ALM if it is still active. The
+ * host must wait for the RTC to progress past the alarm time
+ * plus the 250ms delay before clearing ALM.
+ */
+ msleep(250);
+ }
+
/*
* I2C reads get NAK'ed if we read straight away after an interrupt?
* Using a mdelay/msleep didn't seem to help either, so we work around
@@ -650,6 +663,13 @@ isl1208_rtc_interrupt(int irq, void *data)
rtc_update_irq(isl1208->rtc, 1, RTC_IRQF | RTC_AF);
+ /* Disable the alarm */
+ err = isl1208_rtc_toggle_alarm(client, 0);
+ if (err)
+ return err;
+
+ fsleep(275);
+
/* Clear the alarm */
sr &= ~ISL1208_REG_SR_ALM;
sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
@@ -658,11 +678,6 @@ isl1208_rtc_interrupt(int irq, void *data)
__func__);
else
handled = 1;
-
- /* Disable the alarm */
- err = isl1208_rtc_toggle_alarm(client, 0);
- if (err)
- return err;
}
if (isl1208->config->has_tamper && (sr & ISL1208_REG_SR_EVT)) {
@@ -775,14 +790,13 @@ static int isl1208_nvmem_read(void *priv, unsigned int off, void *buf,
{
struct isl1208_state *isl1208 = priv;
struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent);
- int ret;
/* nvmem sanitizes offset/count for us, but count==0 is possible */
if (!count)
return count;
- ret = isl1208_i2c_read_regs(client, ISL1208_REG_USR1 + off, buf,
+
+ return isl1208_i2c_read_regs(client, ISL1208_REG_USR1 + off, buf,
count);
- return ret == 0 ? count : ret;
}
static int isl1208_nvmem_write(void *priv, unsigned int off, void *buf,
@@ -790,15 +804,13 @@ static int isl1208_nvmem_write(void *priv, unsigned int off, void *buf,
{
struct isl1208_state *isl1208 = priv;
struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent);
- int ret;
/* nvmem sanitizes off/count for us, but count==0 is possible */
if (!count)
return count;
- ret = isl1208_i2c_set_regs(client, ISL1208_REG_USR1 + off, buf,
- count);
- return ret == 0 ? count : ret;
+ return isl1208_i2c_set_regs(client, ISL1208_REG_USR1 + off, buf,
+ count);
}
static const struct nvmem_config isl1208_nvmem_config = {
diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c
index a2441e5c2c74..9a456f537d3b 100644
--- a/drivers/rtc/rtc-max31335.c
+++ b/drivers/rtc/rtc-max31335.c
@@ -669,7 +669,7 @@ static int max31335_probe(struct i2c_client *client)
}
static const struct i2c_device_id max31335_id[] = {
- { "max31335", 0 },
+ { "max31335" },
{ }
};
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 31b910e4d91a..7be31fce5bc7 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -215,7 +215,7 @@ static int max6900_probe(struct i2c_client *client)
}
static const struct i2c_device_id max6900_id[] = {
- { "max6900", 0 },
+ { "max6900" },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6900_id);
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 28858fcaea8f..71eafe4fbc72 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -403,5 +403,6 @@ static struct platform_driver mpc5121_rtc_driver = {
module_platform_driver(mpc5121_rtc_driver);
+MODULE_DESCRIPTION("Freescale MPC5121 built-in RTC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Rigby <jcrigby@gmail.com>");
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
index 7a8b4de893b8..76c5f464b2da 100644
--- a/drivers/rtc/rtc-nct3018y.c
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -567,7 +567,7 @@ static int nct3018y_probe(struct i2c_client *client)
}
static const struct i2c_device_id nct3018y_id[] = {
- { "nct3018y", 0 },
+ { "nct3018y" },
{ }
};
MODULE_DEVICE_TABLE(i2c, nct3018y_id);
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c6155c48a4ac..e6b2a9c15b54 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -1027,4 +1027,5 @@ static struct platform_driver omap_rtc_driver = {
module_platform_driver(omap_rtc_driver);
MODULE_AUTHOR("George G. Davis (and others)");
+MODULE_DESCRIPTION("TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx RTC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 98b77f790b0c..2c63c0ffd05a 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -495,7 +495,7 @@ static int pcf8523_probe(struct i2c_client *client)
}
static const struct i2c_device_id pcf8523_id[] = {
- { "pcf8523", 0 },
+ { "pcf8523" },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf8523_id);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 1949d7473310..647d52f1f5c5 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -594,9 +594,9 @@ static int pcf8563_probe(struct i2c_client *client)
}
static const struct i2c_device_id pcf8563_id[] = {
- { "pcf8563", 0 },
- { "rtc8564", 0 },
- { "pca8565", 0 },
+ { "pcf8563" },
+ { "rtc8564" },
+ { "pca8565" },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf8563_id);
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index a7e0fc360b6a..652b9dfa7566 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -297,7 +297,7 @@ static int pcf8583_probe(struct i2c_client *client)
}
static const struct i2c_device_id pcf8583_id[] = {
- { "pcf8583", 0 },
+ { "pcf8583" },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf8583_id);
diff --git a/drivers/rtc/rtc-rc5t583.c b/drivers/rtc/rtc-rc5t583.c
index 6f4bf919827a..115c46f862f9 100644
--- a/drivers/rtc/rtc-rc5t583.c
+++ b/drivers/rtc/rtc-rc5t583.c
@@ -308,4 +308,5 @@ static struct platform_driver rc5t583_rtc_driver = {
module_platform_driver(rc5t583_rtc_driver);
MODULE_ALIAS("platform:rtc-rc5t583");
MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
+MODULE_DESCRIPTION("RICOH 5T583 RTC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 4a81feeb00ff..83331d1fcab0 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -807,8 +807,8 @@ static int rv3029_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id rv3029_id[] = {
- { "rv3029", 0 },
- { "rv3029c2", 0 },
+ { "rv3029" },
+ { "rv3029c2" },
{ }
};
MODULE_DEVICE_TABLE(i2c, rv3029_id);
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index af6dd6ccbe3b..7c423d672adb 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -451,7 +451,7 @@ static const struct acpi_device_id rx6110_i2c_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, rx6110_i2c_acpi_match);
static const struct i2c_device_id rx6110_i2c_id[] = {
- { "rx6110", 0 },
+ { "rx6110" },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx6110_i2c_id);
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index f44e212c07de..2b6198d1cf81 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -50,7 +50,7 @@
#define RX8010_ALARM_AE BIT(7)
static const struct i2c_device_id rx8010_id[] = {
- { "rx8010", 0 },
+ { "rx8010" },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8010_id);
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 48efd61a114d..b18c12887bdc 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -307,7 +307,7 @@ static int rx8581_probe(struct i2c_client *client)
}
static const struct i2c_device_id rx8581_id[] = {
- { "rx8581", 0 },
+ { "rx8581" },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8581_id);
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 90a3028ac574..2d6b655a4b25 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -50,7 +50,7 @@
#define S35390A_INT2_MODE_PMIN (BIT(3) | BIT(2)) /* INT2FE | INT2ME */
static const struct i2c_device_id s35390a_id[] = {
- { "s35390a", 0 },
+ { "s35390a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, s35390a_id);
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index 7760394ccd2d..fe27b54beaad 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -201,7 +201,7 @@ static int sd3078_probe(struct i2c_client *client)
}
static const struct i2c_device_id sd3078_id[] = {
- {"sd3078", 0},
+ { "sd3078" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sd3078_id);
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 76753c71d92e..98b07969609d 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -5,6 +5,7 @@
*/
#include <linux/bcd.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
@@ -83,6 +84,18 @@
#define STM32_RTC_VERR_MAJREV_SHIFT 4
#define STM32_RTC_VERR_MAJREV GENMASK(7, 4)
+/* STM32_RTC_SECCFGR bit fields */
+#define STM32_RTC_SECCFGR 0x20
+#define STM32_RTC_SECCFGR_ALRA_SEC BIT(0)
+#define STM32_RTC_SECCFGR_INIT_SEC BIT(14)
+#define STM32_RTC_SECCFGR_SEC BIT(15)
+
+/* STM32_RTC_RXCIDCFGR bit fields */
+#define STM32_RTC_RXCIDCFGR(x) (0x80 + 0x4 * (x))
+#define STM32_RTC_RXCIDCFGR_CFEN BIT(0)
+#define STM32_RTC_RXCIDCFGR_CID GENMASK(6, 4)
+#define STM32_RTC_RXCIDCFGR_CID1 1
+
/* STM32_RTC_WPR key constants */
#define RTC_WPR_1ST_KEY 0xCA
#define RTC_WPR_2ND_KEY 0x53
@@ -120,6 +133,7 @@ struct stm32_rtc_data {
bool has_pclk;
bool need_dbp;
bool need_accuracy;
+ bool rif_protected;
};
struct stm32_rtc {
@@ -134,6 +148,14 @@ struct stm32_rtc {
int irq_alarm;
};
+struct stm32_rtc_rif_resource {
+ unsigned int num;
+ u32 bit;
+};
+
+static const struct stm32_rtc_rif_resource STM32_RTC_RES_ALRA = {0, STM32_RTC_SECCFGR_ALRA_SEC};
+static const struct stm32_rtc_rif_resource STM32_RTC_RES_INIT = {5, STM32_RTC_SECCFGR_INIT_SEC};
+
static void stm32_rtc_wpr_unlock(struct stm32_rtc *rtc)
{
const struct stm32_rtc_registers *regs = &rtc->data->regs;
@@ -553,6 +575,7 @@ static const struct stm32_rtc_data stm32_rtc_data = {
.has_pclk = false,
.need_dbp = true,
.need_accuracy = false,
+ .rif_protected = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -575,6 +598,7 @@ static const struct stm32_rtc_data stm32h7_rtc_data = {
.has_pclk = true,
.need_dbp = true,
.need_accuracy = false,
+ .rif_protected = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -606,6 +630,7 @@ static const struct stm32_rtc_data stm32mp1_data = {
.has_pclk = true,
.need_dbp = false,
.need_accuracy = true,
+ .rif_protected = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -624,14 +649,57 @@ static const struct stm32_rtc_data stm32mp1_data = {
.clear_events = stm32mp1_rtc_clear_events,
};
+static const struct stm32_rtc_data stm32mp25_data = {
+ .has_pclk = true,
+ .need_dbp = false,
+ .need_accuracy = true,
+ .rif_protected = true,
+ .regs = {
+ .tr = 0x00,
+ .dr = 0x04,
+ .cr = 0x18,
+ .isr = 0x0C, /* named RTC_ICSR on stm32mp25 */
+ .prer = 0x10,
+ .alrmar = 0x40,
+ .wpr = 0x24,
+ .sr = 0x50,
+ .scr = 0x5C,
+ .verr = 0x3F4,
+ },
+ .events = {
+ .alra = STM32_RTC_SR_ALRA,
+ },
+ .clear_events = stm32mp1_rtc_clear_events,
+};
+
static const struct of_device_id stm32_rtc_of_match[] = {
{ .compatible = "st,stm32-rtc", .data = &stm32_rtc_data },
{ .compatible = "st,stm32h7-rtc", .data = &stm32h7_rtc_data },
{ .compatible = "st,stm32mp1-rtc", .data = &stm32mp1_data },
+ { .compatible = "st,stm32mp25-rtc", .data = &stm32mp25_data },
{}
};
MODULE_DEVICE_TABLE(of, stm32_rtc_of_match);
+static int stm32_rtc_check_rif(struct stm32_rtc *stm32_rtc,
+ struct stm32_rtc_rif_resource res)
+{
+ u32 rxcidcfgr = readl_relaxed(stm32_rtc->base + STM32_RTC_RXCIDCFGR(res.num));
+ u32 seccfgr;
+
+ /* Check if RTC available for our CID */
+ if ((rxcidcfgr & STM32_RTC_RXCIDCFGR_CFEN) &&
+ (FIELD_GET(STM32_RTC_RXCIDCFGR_CID, rxcidcfgr) != STM32_RTC_RXCIDCFGR_CID1))
+ return -EACCES;
+
+ /* Check if RTC available for non secure world */
+ seccfgr = readl_relaxed(stm32_rtc->base + STM32_RTC_SECCFGR);
+ if ((seccfgr & STM32_RTC_SECCFGR_SEC) | (seccfgr & res.bit))
+ return -EACCES;
+
+ return 0;
+}
+
static int stm32_rtc_init(struct platform_device *pdev,
struct stm32_rtc *rtc)
{
@@ -787,6 +855,16 @@ static int stm32_rtc_probe(struct platform_device *pdev)
regmap_update_bits(rtc->dbp, rtc->dbp_reg,
rtc->dbp_mask, rtc->dbp_mask);
+ if (rtc->data->rif_protected) {
+ ret = stm32_rtc_check_rif(rtc, STM32_RTC_RES_INIT);
+ if (!ret)
+ ret = stm32_rtc_check_rif(rtc, STM32_RTC_RES_ALRA);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to probe RTC due to RIF configuration\n");
+ goto err;
+ }
+ }
+
/*
* After a system reset, RTC_ISR.INITS flag can be read to check if
* the calendar has been initialized or not. INITS flag is reset by a
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 411ff66c0468..2ea1bbfbbc2a 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -466,4 +466,5 @@ static struct platform_driver tps65910_rtc_driver = {
module_platform_driver(tps65910_rtc_driver);
MODULE_ALIAS("platform:tps65910-rtc");
MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
+MODULE_DESCRIPTION("TI TPS65910 RTC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-tps6594.c b/drivers/rtc/rtc-tps6594.c
index 838ae8562a35..e69667634137 100644
--- a/drivers/rtc/rtc-tps6594.c
+++ b/drivers/rtc/rtc-tps6594.c
@@ -42,6 +42,11 @@
// Multiplier for ppb conversions
#define PPB_MULT NANO
+struct tps6594_rtc {
+ struct rtc_device *rtc_dev;
+ int irq;
+};
+
static int tps6594_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
@@ -325,11 +330,11 @@ static int tps6594_rtc_set_offset(struct device *dev, long offset)
return tps6594_rtc_set_calibration(dev, calibration);
}
-static irqreturn_t tps6594_rtc_interrupt(int irq, void *rtc)
+static irqreturn_t tps6594_rtc_interrupt(int irq, void *data)
{
- struct device *dev = rtc;
+ struct device *dev = data;
struct tps6594 *tps = dev_get_drvdata(dev->parent);
- struct rtc_device *rtc_dev = dev_get_drvdata(dev);
+ struct tps6594_rtc *rtc = dev_get_drvdata(dev);
int ret;
u32 rtc_reg;
@@ -337,7 +342,7 @@ static irqreturn_t tps6594_rtc_interrupt(int irq, void *rtc)
if (ret)
return IRQ_NONE;
- rtc_update_irq(rtc_dev, 1, RTC_IRQF | RTC_AF);
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
return IRQ_HANDLED;
}
@@ -356,7 +361,7 @@ static int tps6594_rtc_probe(struct platform_device *pdev)
{
struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
- struct rtc_device *rtc;
+ struct tps6594_rtc *rtc;
int irq;
int ret;
@@ -364,9 +369,9 @@ static int tps6594_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- rtc = devm_rtc_allocate_device(dev);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
+ rtc->rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
// Enable crystal oscillator.
ret = regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_2,
@@ -415,6 +420,8 @@ static int tps6594_rtc_probe(struct platform_device *pdev)
if (irq < 0)
return dev_err_probe(dev, irq, "Failed to get irq\n");
+ rtc->irq = irq;
+
ret = devm_request_threaded_irq(dev, irq, NULL, tps6594_rtc_interrupt,
IRQF_ONESHOT, TPS6594_IRQ_NAME_ALARM,
dev);
@@ -427,13 +434,56 @@ static int tps6594_rtc_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"Failed to init rtc as wakeup source\n");
- rtc->ops = &tps6594_rtc_ops;
- rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->rtc_dev->ops = &tps6594_rtc_ops;
+ rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->rtc_dev->range_max = RTC_TIMESTAMP_END_2099;
+
+ return devm_rtc_register_device(rtc->rtc_dev);
+}
+
+static int tps6594_rtc_resume(struct device *dev)
+{
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ struct tps6594_rtc *rtc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_test_bits(tps->regmap, TPS6594_REG_INT_STARTUP,
+ TPS6594_BIT_RTC_INT);
+ if (ret < 0) {
+ dev_err(dev, "failed to read REG_INT_STARTUP: %d\n", ret);
+ goto out;
+ }
+
+ if (ret > 0) {
+ /*
+ * If the alarm bit is set, it means that the IRQ has been
+ * fired. But the kernel may not have woken up yet when it
+ * happened, so we have to clear it.
+ */
+ ret = regmap_write(tps->regmap, TPS6594_REG_RTC_STATUS,
+ TPS6594_BIT_ALARM);
+ if (ret < 0)
+ dev_err(dev, "error clearing alarm bit: %d", ret);
- return devm_rtc_register_device(rtc);
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
+ }
+out:
+ disable_irq_wake(rtc->irq);
+
+ return 0;
}
+static int tps6594_rtc_suspend(struct device *dev)
+{
+ struct tps6594_rtc *rtc = dev_get_drvdata(dev);
+
+ enable_irq_wake(rtc->irq);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(tps6594_rtc_pm_ops, tps6594_rtc_suspend, tps6594_rtc_resume);
+
static const struct platform_device_id tps6594_rtc_id_table[] = {
{ "tps6594-rtc", },
{}
@@ -444,6 +494,7 @@ static struct platform_driver tps6594_rtc_driver = {
.probe = tps6594_rtc_probe,
.driver = {
.name = "tps6594-rtc",
+ .pm = pm_sleep_ptr(&tps6594_rtc_pm_ops),
},
.id_table = tps6594_rtc_id_table,
};
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 13f8ce08243c..2cfacdd37e09 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -685,4 +685,5 @@ static struct platform_driver twl4030rtc_driver = {
module_platform_driver(twl4030rtc_driver);
MODULE_AUTHOR("Texas Instruments, MontaVista Software");
+MODULE_DESCRIPTION("TI TWL4030/TWL5030/TWL6030/TPS659x0 RTC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 807f953ae0ae..4bcd7ca32f27 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -663,7 +663,7 @@ static void x1205_remove(struct i2c_client *client)
}
static const struct i2c_device_id x1205_id[] = {
- { "x1205", 0 },
+ { "x1205" },
{ }
};
MODULE_DEVICE_TABLE(i2c, x1205_id);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 0316c20823ee..6adaeb985dde 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -2248,13 +2248,19 @@ static ssize_t dasd_copy_pair_store(struct device *dev,
/* allocate primary devmap if needed */
prim_devmap = dasd_find_busid(prim_busid);
- if (IS_ERR(prim_devmap))
+ if (IS_ERR(prim_devmap)) {
prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT);
+ if (IS_ERR(prim_devmap))
+ return PTR_ERR(prim_devmap);
+ }
/* allocate secondary devmap if needed */
sec_devmap = dasd_find_busid(sec_busid);
- if (IS_ERR(sec_devmap))
+ if (IS_ERR(sec_devmap)) {
sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT);
+ if (IS_ERR(sec_devmap))
+ return PTR_ERR(sec_devmap);
+ }
/* setting copy relation is only allowed for offline secondary */
if (sec_devmap->device)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index ea4b1d01bb76..8245b742e4a2 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -29,6 +29,7 @@
#include "dasd_int.h"
#include "dasd_diag.h"
+MODULE_DESCRIPTION("S/390 Support for DIAG access to DASD Disks");
MODULE_LICENSE("GPL");
/* The maximum number of blocks per request (max_blocks) is dependent on the
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a76c6af9ea63..9388b5c383ca 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -44,6 +44,7 @@
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128
+MODULE_DESCRIPTION("S/390 DASD ECKD Disks device driver");
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_eckd_discipline;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 9f2023a077c2..a2216795591d 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -32,6 +32,7 @@
#define DASD_FBA_CCW_LOCATE 0x43
#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
+MODULE_DESCRIPTION("S/390 DASD FBA Disks device driver");
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_fba_discipline;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index fbe29cabcbb8..f3621adbd5de 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -736,7 +736,7 @@ sclp_sync_wait(void)
cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
cr0_sync.val |= 1UL << (63 - 54);
local_ctl_load(0, &cr0_sync);
- __arch_local_irq_stosm(0x01);
+ arch_local_irq_enable_external();
/* Loop until driver state indicates finished request */
while (sclp_running_state != sclp_running_state_idle) {
/* Check for expired request timer */
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 49adddf978cc..4813087e58a1 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2286,7 +2286,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
* on.
*/
if (!io_req)
- /* If there is not io_req assocated with this CQE
+ /* If there is not io_req associated with this CQE
* just queue it on CPU 0
*/
cpu = 0;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2e933fd1de70..adeaa8ab9951 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -33,11 +33,12 @@
* than the level indicated above to trigger output.
*/
+#include <linux/bio-integrity.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bio.h>
+#include <linux/bio-integrity.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 985683767a40..9ebf25a0ef9b 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -979,7 +979,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
left = pkt_len - (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
pos = pframe + (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
- /* check if this stat has been successfully authenticated/assocated */
+ /* check if this stat has been successfully authenticated/associated */
if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index a392d5b4caf2..e9763eab16f6 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -452,7 +452,7 @@ void LPS_Enter(struct adapter *padapter, const char *msg)
if (hal_btcoex_IsBtControlLps(padapter))
return;
- /* Skip lps enter request if number of assocated adapters is not 1 */
+ /* Skip lps enter request if number of associated adapters is not 1 */
if (check_fwstate(&(dvobj->padapters->mlmepriv), WIFI_ASOC_STATE))
n_assoc_iface++;
if (n_assoc_iface != 1)
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 8795187fbc52..f6e700e48aad 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -300,8 +300,6 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies);
else if (tz->polling_delay_jiffies)
thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies);
- else if (tz->temperature == THERMAL_TEMP_INVALID)
- thermal_zone_device_set_polling(tz, msecs_to_jiffies(THERMAL_RECHECK_DELAY_MS));
}
static struct thermal_governor *thermal_get_tz_governor(struct thermal_zone_device *tz)
@@ -382,7 +380,7 @@ static void handle_thermal_trip(struct thermal_zone_device *tz,
td->threshold = trip->temperature;
if (tz->last_temperature >= old_threshold &&
- tz->last_temperature != THERMAL_TEMP_INVALID) {
+ tz->last_temperature != THERMAL_TEMP_INIT) {
/*
* Mitigation is under way, so it needs to stop if the zone
* temperature falls below the low temperature of the trip.
@@ -417,27 +415,6 @@ static void handle_thermal_trip(struct thermal_zone_device *tz,
}
}
-static void update_temperature(struct thermal_zone_device *tz)
-{
- int temp, ret;
-
- ret = __thermal_zone_get_temp(tz, &temp);
- if (ret) {
- if (ret != -EAGAIN)
- dev_warn(&tz->device,
- "failed to read out thermal zone (%d)\n",
- ret);
- return;
- }
-
- tz->last_temperature = tz->temperature;
- tz->temperature = temp;
-
- trace_thermal_temperature(tz);
-
- thermal_genl_sampling_temp(tz->id, temp);
-}
-
static void thermal_zone_device_check(struct work_struct *work)
{
struct thermal_zone_device *tz = container_of(work, struct
@@ -452,7 +429,7 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
- tz->temperature = THERMAL_TEMP_INVALID;
+ tz->temperature = THERMAL_TEMP_INIT;
tz->passive = 0;
tz->prev_low_trip = -INT_MAX;
tz->prev_high_trip = INT_MAX;
@@ -504,6 +481,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
struct thermal_trip_desc *td;
LIST_HEAD(way_down_list);
LIST_HEAD(way_up_list);
+ int temp, ret;
if (tz->suspended)
return;
@@ -511,10 +489,29 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
if (!thermal_zone_device_is_enabled(tz))
return;
- update_temperature(tz);
+ ret = __thermal_zone_get_temp(tz, &temp);
+ if (ret) {
+ if (ret != -EAGAIN)
+ dev_info(&tz->device, "Temperature check failed (%d)\n", ret);
- if (tz->temperature == THERMAL_TEMP_INVALID)
+ thermal_zone_device_set_polling(tz, msecs_to_jiffies(THERMAL_RECHECK_DELAY_MS));
+ return;
+ } else if (temp <= THERMAL_TEMP_INVALID) {
+ /*
+ * Special case: No valid temperature value is available, but
+ * the zone owner does not want the core to do anything about
+ * it. Continue regular zone polling if needed, so that this
+ * function can be called again, but skip everything else.
+ */
goto monitor;
+ }
+
+ tz->last_temperature = tz->temperature;
+ tz->temperature = temp;
+
+ trace_thermal_temperature(tz);
+
+ thermal_genl_sampling_temp(tz->id, temp);
tz->notify_event = event;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 30c0e78859a7..ba8e6fc807ca 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -133,6 +133,9 @@ struct thermal_zone_device {
struct thermal_trip_desc trips[] __counted_by(num_trips);
};
+/* Initial thermal zone temperature. */
+#define THERMAL_TEMP_INIT INT_MIN
+
/*
* Default delay after a failing thermal zone temperature check before
* attempting to check it again.
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 81e019493557..aedb8369e2aa 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -163,6 +163,8 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
}
ret = __thermal_zone_get_temp(tz, temp);
+ if (!ret && *temp <= THERMAL_TEMP_INVALID)
+ ret = -ENODATA;
unlock:
mutex_unlock(&tz->lock);
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
index 0725668ffea4..225540fc81ba 100644
--- a/drivers/usb/cdns3/cdnsp-pci.c
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -231,7 +231,7 @@ static const struct pci_device_id cdnsp_pci_ids[] = {
static struct pci_driver cdnsp_pci_driver = {
.name = "cdnsp-pci",
- .id_table = &cdnsp_pci_ids[0],
+ .id_table = cdnsp_pci_ids,
.probe = cdnsp_pci_probe,
.remove = cdnsp_pci_remove,
.driver = {
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-pci.c b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
index 1691541c9413..50c3d0974d9b 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-pci.c
@@ -121,7 +121,7 @@ static const struct pci_device_id cdns2_pci_ids[] = {
static struct pci_driver cdns2_pci_driver = {
.name = "cdns2-pci",
- .id_table = &cdns2_pci_ids[0],
+ .id_table = cdns2_pci_ids,
.probe = cdns2_pci_probe,
.remove = cdns2_pci_remove,
.driver = {
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index a94ec6225d31..5f9e7e477078 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -364,7 +364,6 @@ static void tce_iommu_release(void *iommu_data)
if (!tbl)
continue;
- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
tce_iommu_free_table(container, tbl);
}
@@ -720,6 +719,8 @@ static long tce_iommu_remove_window(struct tce_container *container,
BUG_ON(!tbl->it_size);
+ tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
+
/* Detach groups from IOMMUs */
list_for_each_entry(tcegrp, &container->group_list, next) {
table_group = iommu_group_get_iommudata(tcegrp->grp);
@@ -738,7 +739,6 @@ static long tce_iommu_remove_window(struct tce_container *container,
}
/* Free table */
- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
tce_iommu_free_table(container, tbl);
container->tables[num] = NULL;
@@ -1197,9 +1197,14 @@ static void tce_iommu_release_ownership(struct tce_container *container,
return;
}
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
- if (container->tables[i])
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ if (container->tables[i]) {
+ tce_iommu_clear(container, container->tables[i],
+ container->tables[i]->it_offset,
+ container->tables[i]->it_size);
table_group->ops->unset_window(table_group, i);
+ }
+ }
}
static long tce_iommu_take_ownership(struct tce_container *container,
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 5ee7e78c2cea..65363df8e81b 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -146,7 +146,7 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
printk(KERN_ERR "no mapping available\n");
BUG_ON(!page->mapping);
- page->index = vmf->pgoff; /* for page_mkclean() */
+ page->index = vmf->pgoff; /* for folio_mkclean() */
vmf->page = page;
return 0;
@@ -194,7 +194,7 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
/*
* We want the page to remain locked from ->page_mkwrite until
- * the PTE is marked dirty to avoid page_mkclean() being called
+ * the PTE is marked dirty to avoid folio_mkclean() being called
* before the PTE is updated, which would leave the page ignored
* by defio.
* Do this by locking the page here and informing the caller
@@ -277,10 +277,11 @@ static void fb_deferred_io_work(struct work_struct *work)
/* here we mkclean the pages, then do all deferred IO */
mutex_lock(&fbdefio->lock);
list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
- struct page *cur = pageref->page;
- lock_page(cur);
- page_mkclean(cur);
- unlock_page(cur);
+ struct folio *folio = page_folio(pageref->page);
+
+ folio_lock(folio);
+ folio_mkclean(folio);
+ folio_unlock(folio);
}
/* driver's callback with pagereflist */
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index f714009b9ff7..6fc7884ea0a1 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -30,8 +30,6 @@
#include <asm/svm.h>
#include <asm/sev.h>
-#include "sev-guest.h"
-
#define DEVICE_NAME "sev-guest"
#define AAD_LEN 48
#define MSG_HDR_VER 1
diff --git a/drivers/virt/coco/sev-guest/sev-guest.h b/drivers/virt/coco/sev-guest/sev-guest.h
deleted file mode 100644
index 21bda26fdb95..000000000000
--- a/drivers/virt/coco/sev-guest/sev-guest.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2021 Advanced Micro Devices, Inc.
- *
- * Author: Brijesh Singh <brijesh.singh@amd.com>
- *
- * SEV-SNP API spec is available at https://developer.amd.com/sev
- */
-
-#ifndef __VIRT_SEVGUEST_H__
-#define __VIRT_SEVGUEST_H__
-
-#include <linux/types.h>
-
-#define MAX_AUTHTAG_LEN 32
-
-/* See SNP spec SNP_GUEST_REQUEST section for the structure */
-enum msg_type {
- SNP_MSG_TYPE_INVALID = 0,
- SNP_MSG_CPUID_REQ,
- SNP_MSG_CPUID_RSP,
- SNP_MSG_KEY_REQ,
- SNP_MSG_KEY_RSP,
- SNP_MSG_REPORT_REQ,
- SNP_MSG_REPORT_RSP,
- SNP_MSG_EXPORT_REQ,
- SNP_MSG_EXPORT_RSP,
- SNP_MSG_IMPORT_REQ,
- SNP_MSG_IMPORT_RSP,
- SNP_MSG_ABSORB_REQ,
- SNP_MSG_ABSORB_RSP,
- SNP_MSG_VMRK_REQ,
- SNP_MSG_VMRK_RSP,
-
- SNP_MSG_TYPE_MAX
-};
-
-enum aead_algo {
- SNP_AEAD_INVALID,
- SNP_AEAD_AES_256_GCM,
-};
-
-struct snp_guest_msg_hdr {
- u8 authtag[MAX_AUTHTAG_LEN];
- u64 msg_seqno;
- u8 rsvd1[8];
- u8 algo;
- u8 hdr_version;
- u16 hdr_sz;
- u8 msg_type;
- u8 msg_version;
- u16 msg_sz;
- u32 rsvd2;
- u8 msg_vmpck;
- u8 rsvd3[35];
-} __packed;
-
-struct snp_guest_msg {
- struct snp_guest_msg_hdr hdr;
- u8 payload[4000];
-} __packed;
-
-#endif /* __VIRT_SEVGUEST_H__ */
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 6284538a8184..42a48ac763ee 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -122,7 +122,7 @@ config VIRTIO_BALLOON
config VIRTIO_MEM
tristate "Virtio mem driver"
- depends on X86_64 || ARM64
+ depends on X86_64 || ARM64 || RISCV
depends on VIRTIO
depends on MEMORY_HOTPLUG
depends on MEMORY_HOTREMOVE
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index a3857bacc844..b0b871441578 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -1146,12 +1146,16 @@ static void virtio_mem_set_fake_offline(unsigned long pfn,
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
- __SetPageOffline(page);
- if (!onlined) {
+ if (!onlined)
+ /*
+ * Pages that have not been onlined yet were initialized
+ * to PageOffline(). Remember that we have to route them
+ * through generic_online_page().
+ */
SetPageDirty(page);
- /* FIXME: remove after cleanups */
- ClearPageReserved(page);
- }
+ else
+ __SetPageOffline(page);
+ VM_WARN_ON_ONCE(!PageOffline(page));
}
page_offline_end();
}
@@ -1166,9 +1170,11 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
- __ClearPageOffline(page);
if (!onlined)
+ /* generic_online_page() will clear PageOffline(). */
ClearPageDirty(page);
+ else
+ __ClearPageOffline(page);
}
}
@@ -1263,12 +1269,6 @@ static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
struct page *page;
unsigned long i;
- /*
- * Drop our reference to the pages so the memory can get offlined
- * and add the unplugged pages to the managed page counters (so
- * offlining code can correctly subtract them again).
- */
- adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
/* Drop our reference to the pages so the memory can get offlined. */
for (i = 0; i < nr_pages; i++) {
page = pfn_to_page(pfn + i);
@@ -1287,10 +1287,9 @@ static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
unsigned long i;
/*
- * Get the reference we dropped when going offline and subtract the
- * unplugged pages from the managed page counters.
+ * Get the reference again that we dropped via page_ref_dec_and_test()
+ * when going offline.
*/
- adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
for (i = 0; i < nr_pages; i++)
page_ref_inc(pfn_to_page(pfn + i));
}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index aaf2514fcfa4..528395133b4f 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -146,7 +146,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
- __SetPageOffline(page);
+ if (!PageOffline(page))
+ __SetPageOffline(page);
/* Lowmem is re-populated first, so highmem pages go at list tail. */
if (PageHighMem(page)) {
@@ -412,7 +413,11 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
- /* Relinquish the page back to the allocator. */
+ /*
+ * Relinquish the page back to the allocator. Note that
+ * some pages, including ones added via xen_online_page(), might
+ * not be marked reserved; free_reserved_page() will handle that.
+ */
free_reserved_page(page);
}