-rw-r--r--Documentation/ABI/testing/evm13
-rw-r--r--Documentation/ABI/testing/ima_policy2
-rw-r--r--Documentation/PCI/pci-error-recovery.txt35
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt2
-rw-r--r--Documentation/auxdisplay/lcd-panel-cgram.txt (renamed from Documentation/misc-devices/lcd-panel-cgram.txt)0
-rw-r--r--Documentation/devicetree/bindings/arm/ux500/boards.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/k3dma.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/ti-edma.txt1
-rw-r--r--Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt (renamed from Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt)0
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt2
-rw-r--r--Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt47
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt (renamed from Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt)0
-rw-r--r--Documentation/devicetree/bindings/mips/lantiq/rcu.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/ibm,ndfc.txt (renamed from Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt)0
-rw-r--r--Documentation/devicetree/bindings/mtd/mtk-nand.txt4
-rw-r--r--Documentation/devicetree/bindings/net/ibm,emac.txt (renamed from Documentation/devicetree/bindings/powerpc/4xx/emac.txt)0
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ravb.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt24
-rw-r--r--Documentation/devicetree/bindings/pci/mobiveil-pcie.txt73
-rw-r--r--Documentation/devicetree/bindings/pci/pci-armada8k.txt5
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci.txt6
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt62
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip-pcie-host.txt (renamed from Documentation/devicetree/bindings/pci/rockchip-pcie.txt)0
-rw-r--r--Documentation/devicetree/bindings/pci/xgene-pci.txt7
-rw-r--r--Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt16
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt18
-rw-r--r--Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt10
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/pps/pps-gpio.txt1
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt2
-rw-r--r--Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt (renamed from Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt)0
-rw-r--r--Documentation/devicetree/bindings/rng/sparc_sun_oracle_rng.txt (renamed from Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt)0
-rw-r--r--Documentation/devicetree/bindings/submitting-patches.txt9
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt6
-rw-r--r--Documentation/devicetree/bindings/timer/altr,timer-1.0.txt (renamed from Documentation/devicetree/bindings/nios2/timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/arm,arch_timer.txt (renamed from Documentation/devicetree/bindings/arm/arch_timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/arm,armv7m-systick.txt (renamed from Documentation/devicetree/bindings/arm/armv7m_systick.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/arm,global_timer.txt (renamed from Documentation/devicetree/bindings/arm/global_timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/arm,twd.txt (renamed from Documentation/devicetree/bindings/arm/twd.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/fsl,gtm.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/gtm.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/mrvl,mmp-timer.txt (renamed from Documentation/devicetree/bindings/arm/mrvl/timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/qcom,msm-timer.txt (renamed from Documentation/devicetree/bindings/arm/msm/timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/st,spear-timer.txt (renamed from Documentation/devicetree/bindings/arm/spear-timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt (renamed from Documentation/devicetree/bindings/c6x/timer64.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/ti,timer.txt (renamed from Documentation/devicetree/bindings/arm/omap/timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/timer/via,vt8500-timer.txt (renamed from Documentation/devicetree/bindings/arm/vt8500/via,vt8500-timer.txt)0
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt7
-rw-r--r--MAINTAINERS30
-rw-r--r--arch/arm64/configs/defconfig3
-rw-r--r--arch/microblaze/include/asm/pci.h4
-rw-r--r--arch/microblaze/pci/pci-common.c61
-rw-r--r--arch/mips/pci/pci-legacy.c8
-rw-r--r--arch/sparc/kernel/leon_pci.c62
-rw-r--r--arch/sparc/kernel/pci.c136
-rw-r--r--arch/sparc/kernel/pci_common.c31
-rw-r--r--arch/sparc/kernel/pci_msi.c10
-rw-r--r--arch/sparc/kernel/pcic.c94
-rw-r--r--arch/x86/pci/early.c19
-rw-r--r--arch/x86/pci/fixup.c4
-rw-r--r--arch/xtensa/include/asm/pci.h2
-rw-r--r--arch/xtensa/kernel/pci.c69
-rw-r--r--drivers/acpi/pci_root.c17
-rw-r--r--drivers/auxdisplay/Kconfig16
-rw-r--r--drivers/auxdisplay/arm-charlcd.c4
-rw-r--r--drivers/auxdisplay/cfag12864b.c16
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c20
-rw-r--r--drivers/auxdisplay/charlcd.c119
-rw-r--r--drivers/auxdisplay/hd44780.c6
-rw-r--r--drivers/auxdisplay/ht16k33.c10
-rw-r--r--drivers/auxdisplay/ks0108.c16
-rw-r--r--drivers/auxdisplay/panel.c6
-rw-r--r--drivers/char/tpm/Makefile10
-rw-r--r--drivers/char/tpm/eventlog/acpi.c (renamed from drivers/char/tpm/tpm_eventlog_acpi.c)3
-rw-r--r--drivers/char/tpm/eventlog/common.c195
-rw-r--r--drivers/char/tpm/eventlog/common.h35
-rw-r--r--drivers/char/tpm/eventlog/efi.c (renamed from drivers/char/tpm/tpm_eventlog_efi.c)6
-rw-r--r--drivers/char/tpm/eventlog/of.c (renamed from drivers/char/tpm/tpm_eventlog_of.c)11
-rw-r--r--drivers/char/tpm/eventlog/tpm1.c (renamed from drivers/char/tpm/tpm1_eventlog.c)200
-rw-r--r--drivers/char/tpm/eventlog/tpm2.c (renamed from drivers/char/tpm/tpm2_eventlog.c)3
-rw-r--r--drivers/char/tpm/st33zp24/spi.c4
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c2
-rw-r--r--drivers/char/tpm/tpm-dev-common.c40
-rw-r--r--drivers/char/tpm/tpm-dev.h2
-rw-r--r--drivers/char/tpm/tpm-interface.c5
-rw-r--r--drivers/char/tpm/tpm.h32
-rw-r--r--drivers/char/tpm/tpm2-space.c3
-rw-r--r--drivers/char/tpm/tpm_crb.c10
-rw-r--r--drivers/char/tpm/tpm_tis_core.c58
-rw-r--r--drivers/gpio/gpiolib.c10
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile7
-rw-r--r--drivers/infiniband/Kconfig11
-rw-r--r--drivers/infiniband/core/Makefile9
-rw-r--r--drivers/infiniband/core/addr.c172
-rw-r--r--drivers/infiniband/core/cache.c112
-rw-r--r--drivers/infiniband/core/cm.c62
-rw-r--r--drivers/infiniband/core/cma.c36
-rw-r--r--drivers/infiniband/core/core_priv.h3
-rw-r--r--drivers/infiniband/core/device.c4
-rw-r--r--drivers/infiniband/core/mad.c12
-rw-r--r--drivers/infiniband/core/nldev.c122
-rw-r--r--drivers/infiniband/core/restrack.c9
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c2
-rw-r--r--drivers/infiniband/core/security.c4
-rw-r--r--drivers/infiniband/core/ucma.c6
-rw-r--r--drivers/infiniband/core/umem.c13
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c111
-rw-r--r--drivers/infiniband/core/uverbs_main.c42
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c3
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c157
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c23
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c4
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig1
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h5
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c501
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile10
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c497
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h10
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c82
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h15
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h13
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c292
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h102
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c53
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c39
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.h24
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c375
-rw-r--r--drivers/infiniband/hw/hfi1/fault.h109
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c8
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h61
-rw-r--r--drivers/infiniband/hw/hfi1/init.c47
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c37
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c32
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c44
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c10
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c154
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c12
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c43
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h3
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h160
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c61
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c11
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c45
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c188
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c76
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c67
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c34
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c60
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c50
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c57
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c511
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h36
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c43
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib.h4
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c13
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c154
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c20
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig2
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c74
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.h6
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c151
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_cq.h35
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c35
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c20
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c93
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c12
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h6
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c21
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c37
-rw-r--r--drivers/iommu/amd_iommu.c11
-rw-r--r--drivers/iommu/intel-iommu.c3
-rw-r--r--drivers/mailbox/Kconfig22
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c2
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c2
-rw-r--r--drivers/mailbox/stm32-ipcc.c402
-rw-r--r--drivers/misc/pci_endpoint_test.c29
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c75
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c7
-rw-r--r--drivers/nvme/host/pci.c20
-rw-r--r--drivers/of/of_numa.c1
-rw-r--r--drivers/of/platform.c16
-rw-r--r--drivers/of/resolver.c5
-rw-r--r--drivers/of/unittest.c8
-rw-r--r--drivers/pci/Kconfig12
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/ats.c3
-rw-r--r--drivers/pci/dwc/Kconfig88
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c19
-rw-r--r--drivers/pci/dwc/pci-imx6.c2
-rw-r--r--drivers/pci/dwc/pci-keystone.c2
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c21
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c6
-rw-r--r--drivers/pci/dwc/pcie-designware-ep.c19
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c80
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c155
-rw-r--r--drivers/pci/dwc/pcie-designware.c22
-rw-r--r--drivers/pci/dwc/pcie-designware.h1
-rw-r--r--drivers/pci/dwc/pcie-qcom.c13
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c35
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c23
-rw-r--r--drivers/pci/host/Kconfig55
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-aardvark.c7
-rw-r--r--drivers/pci/host/pci-ftpci100.c6
-rw-r--r--drivers/pci/host/pci-host-common.c13
-rw-r--r--drivers/pci/host/pci-host-generic.c1
-rw-r--r--drivers/pci/host/pci-hyperv.c162
-rw-r--r--drivers/pci/host/pci-mvebu.c2
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c2
-rw-r--r--drivers/pci/host/pci-tegra.c2
-rw-r--r--drivers/pci/host/pci-v3-semi.c5
-rw-r--r--drivers/pci/host/pci-versatile.c5
-rw-r--r--drivers/pci/host/pci-xgene.c5
-rw-r--r--drivers/pci/host/pcie-altera.c7
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c5
-rw-r--r--drivers/pci/host/pcie-mediatek.c236
-rw-r--r--drivers/pci/host/pcie-mobiveil.c866
-rw-r--r--drivers/pci/host/pcie-rcar.c284
-rw-r--r--drivers/pci/host/pcie-rockchip-ep.c642
-rw-r--r--drivers/pci/host/pcie-rockchip-host.c1142
-rw-r--r--drivers/pci/host/pcie-rockchip.c1580
-rw-r--r--drivers/pci/host/pcie-rockchip.h338
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c6
-rw-r--r--drivers/pci/host/pcie-xilinx.c6
-rw-r--r--drivers/pci/host/vmd.c91
-rw-r--r--drivers/pci/hotplug/Kconfig5
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c45
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c84
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c84
-rw-r--r--drivers/pci/hotplug/pnv_php.c8
-rw-r--r--drivers/pci/hotplug/shpchp.h12
-rw-r--r--drivers/pci/hotplug/shpchp_core.c14
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c8
-rw-r--r--drivers/pci/iov.c42
-rw-r--r--drivers/pci/of.c63
-rw-r--r--drivers/pci/pci-acpi.c55
-rw-r--r--drivers/pci/pci-driver.c2
-rw-r--r--drivers/pci/pci-pf-stub.c54
-rw-r--r--drivers/pci/pci-sysfs.c15
-rw-r--r--drivers/pci/pci.c89
-rw-r--r--drivers/pci/pci.h45
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c11
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h32
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c397
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c38
-rw-r--r--drivers/pci/pcie/aspm.c9
-rw-r--r--drivers/pci/pcie/dpc.c74
-rw-r--r--drivers/pci/pcie/err.c388
-rw-r--r--drivers/pci/pcie/portdrv.h5
-rw-r--r--drivers/pci/pcie/portdrv_acpi.c57
-rw-r--r--drivers/pci/pcie/portdrv_core.c71
-rw-r--r--drivers/pci/probe.c96
-rw-r--r--drivers/pci/quirks.c1002
-rw-r--r--drivers/pci/setup-bus.c82
-rw-r--r--drivers/pinctrl/Kconfig1
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/Kconfig15
-rw-r--r--drivers/pinctrl/actions/Makefile2
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c785
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.h162
-rw-r--r--drivers/pinctrl/actions/pinctrl-s900.c1888
-rw-r--r--drivers/pinctrl/bcm/Kconfig1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c100
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2.c5
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2cd.c5
-rw-r--r--drivers/pinctrl/berlin/berlin-bg2q.c5
-rw-r--r--drivers/pinctrl/berlin/berlin-bg4ct.c13
-rw-r--r--drivers/pinctrl/berlin/berlin.c5
-rw-r--r--drivers/pinctrl/berlin/berlin.h5
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c42
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h6
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c27
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.c15
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.h6
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx21.c15
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx23.c19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx25.c28
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx27.c19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx28.c19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx35.c24
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx50.c19
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx51.c21
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx53.c21
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6dl.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6q.c21
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sl.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sll.c8
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sx.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6ul.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7d.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7ulp.c17
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c13
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.h8
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c15
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c4
-rw-r--r--drivers/pinctrl/mediatek/Kconfig6
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c492
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h106
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt2701.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt2712.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c143
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8127.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8135.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8173.c25
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c608
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.h13
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c107
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c23
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c32
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c22
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c246
-rw-r--r--drivers/pinctrl/pinctrl-single.c72
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c92
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c114
-rw-r--r--drivers/pinctrl/samsung/Kconfig10
-rw-r--r--drivers/pinctrl/samsung/Makefile1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c30
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c20
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos5440.c1005
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c29
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig10
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile2
-rw-r--r--drivers/pinctrl/sh-pfc/core.c12
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77470.c2343
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c8
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c8
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c1592
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77970.c32
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77980.c52
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c2695
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h10
-rw-r--r--drivers/pinctrl/sunxi/Kconfig4
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c128
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c11
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h11
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c30
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c49
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c54
-rw-r--r--include/dt-bindings/pinctrl/mt7623-pinfunc.h90
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/aer.h1
-rw-r--r--include/linux/cfag12864b.h16
-rw-r--r--include/linux/ks0108.h16
-rw-r--r--include/linux/mlx5/device.h12
-rw-r--r--include/linux/mlx5/fs.h4
-rw-r--r--include/linux/mlx5/mlx5_ifc.h52
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/of_pci.h34
-rw-r--r--include/linux/pci-ecam.h1
-rw-r--r--include/linux/pci-epc.h8
-rw-r--r--include/linux/pci-epf.h4
-rw-r--r--include/linux/pci.h21
-rw-r--r--include/linux/pci_hotplug.h18
-rw-r--r--include/linux/pci_ids.h9
-rw-r--r--include/ras/ras_event.h22
-rw-r--r--include/rdma/ib_addr.h20
-rw-r--r--include/rdma/ib_cache.h1
-rw-r--r--include/rdma/ib_verbs.h102
-rw-r--r--include/rdma/rdma_cm.h3
-rw-r--r--include/rdma/rdma_vt.h7
-rw-r--r--include/rdma/rdmavt_cq.h5
-rw-r--r--include/rdma/rdmavt_qp.h1
-rw-r--r--include/rdma/restrack.h22
-rw-r--r--include/rdma/uverbs_ioctl.h11
-rw-r--r--include/uapi/linux/audit.h1
-rw-r--r--include/uapi/linux/pci_regs.h6
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h21
-rw-r--r--include/uapi/rdma/ib_user_verbs.h63
-rw-r--r--include/uapi/rdma/mlx5-abi.h30
-rw-r--r--include/uapi/rdma/rdma_netlink.h30
-rw-r--r--mm/memory.c7
-rw-r--r--samples/auxdisplay/cfag12864b-example.c16
-rw-r--r--scripts/Makefile.lib3
-rw-r--r--scripts/dtc/checks.c199
-rw-r--r--scripts/dtc/dtc-lexer.l7
-rw-r--r--scripts/dtc/dtc-parser.y39
-rw-r--r--scripts/dtc/dtc.h4
-rw-r--r--scripts/dtc/livetree.c26
-rw-r--r--scripts/dtc/version_gen.h2
-rw-r--r--security/integrity/evm/Kconfig11
-rw-r--r--security/integrity/evm/evm.h7
-rw-r--r--security/integrity/evm/evm_crypto.c10
-rw-r--r--security/integrity/evm/evm_main.c79
-rw-r--r--security/integrity/evm/evm_secfs.c206
-rw-r--r--security/integrity/iint.c18
-rw-r--r--security/integrity/ima/ima.h1
-rw-r--r--security/integrity/ima/ima_fs.c18
-rw-r--r--security/integrity/ima/ima_kexec.c2
-rw-r--r--security/integrity/ima/ima_main.c19
-rw-r--r--security/integrity/ima/ima_policy.c70
-rw-r--r--security/integrity/ima/ima_template_lib.c2
-rw-r--r--security/integrity/integrity.h2
-rw-r--r--security/smack/smack_lsm.c12
461 files changed, 24110 insertions, 8337 deletions
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
index d12cb2eae9ee6..201d10319fa18 100644
--- a/Documentation/ABI/testing/evm
+++ b/Documentation/ABI/testing/evm
@@ -57,3 +57,16 @@ Description:
dracut (via 97masterkey and 98integrity) and systemd (via
core/ima-setup) have support for loading keys at boot
time.
+
+What: security/integrity/evm/evm_xattrs
+Date: April 2018
+Contact: Matthew Garrett <mjg59@google.com>
+Description:
+ Shows the set of extended attributes used to calculate or
+ validate the EVM signature, and allows additional attributes
+ to be added at runtime. Any signatures generated after
+ additional attributes are added (and on files possessing those
+ additional attributes) will only be valid if the same
+ additional attributes are configured on system boot. Writing
+ a single period (.) will lock the xattr list from any further
+ modification.
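
As an illustration of the interface described above, a minimal userspace
sketch is shown below. It assumes securityfs is mounted at the usual
/sys/kernel/security location; the xattr name "security.example" is purely
hypothetical.

    /* Minimal sketch: add one xattr to the EVM list, then lock the list.
     * Assumes securityfs is mounted at /sys/kernel/security and that the
     * caller has the privileges this interface requires.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int evm_xattrs_write(const char *s)
    {
        int fd = open("/sys/kernel/security/integrity/evm/evm_xattrs",
                      O_WRONLY);
        ssize_t n;

        if (fd < 0) {
            perror("open evm_xattrs");
            return -1;
        }
        n = write(fd, s, strlen(s));
        if (n < 0)
            perror("write evm_xattrs");
        close(fd);
        return n < 0 ? -1 : 0;
    }

    int main(void)
    {
        /* "security.example" is a made-up xattr name used for illustration. */
        if (evm_xattrs_write("security.example"))
            return 1;
        /* Writing a single period locks the list against further changes. */
        return evm_xattrs_write(".") ? 1 : 0;
    }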
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index b8465e00ba5fb..74c6702de74e2 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -21,7 +21,7 @@ Description:
audit | hash | dont_hash
condition:= base | lsm [option]
base: [[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
- [euid=] [fowner=]]
+ [euid=] [fowner=] [fsname=]]
lsm: [[subj_user=] [subj_role=] [subj_type=]
[obj_user=] [obj_role=] [obj_type=]]
option: [[appraise_type=]] [permit_directio]
diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt
index 0b6bb3ef449ee..688b69121e829 100644
--- a/Documentation/PCI/pci-error-recovery.txt
+++ b/Documentation/PCI/pci-error-recovery.txt
@@ -110,7 +110,7 @@ The actual steps taken by a platform to recover from a PCI error
event will be platform-dependent, but will follow the general
sequence described below.
-STEP 0: Error Event
+STEP 0: Error Event: ERR_NONFATAL
-------------------
A PCI bus error is detected by the PCI hardware. On powerpc, the slot
is isolated, in that all I/O is blocked: all reads return 0xffffffff,
@@ -228,13 +228,7 @@ proceeds to either STEP3 (Link Reset) or to STEP 5 (Resume Operations).
If any driver returned PCI_ERS_RESULT_NEED_RESET, then the platform
proceeds to STEP 4 (Slot Reset)
-STEP 3: Link Reset
-------------------
-The platform resets the link. This is a PCI-Express specific step
-and is done whenever a fatal error has been detected that can be
-"solved" by resetting the link.
-
-STEP 4: Slot Reset
+STEP 3: Slot Reset
------------------
In response to a return value of PCI_ERS_RESULT_NEED_RESET, the
@@ -320,7 +314,7 @@ Failure).
>>> However, it probably should.
-STEP 5: Resume Operations
+STEP 4: Resume Operations
-------------------------
The platform will call the resume() callback on all affected device
drivers if all drivers on the segment have returned
@@ -332,7 +326,7 @@ a result code.
At this point, if a new error happens, the platform will restart
a new error recovery sequence.
-STEP 6: Permanent Failure
+STEP 5: Permanent Failure
-------------------------
A "permanent failure" has occurred, and the platform cannot recover
the device. The platform will call error_detected() with a
@@ -355,6 +349,27 @@ errors. See the discussion in powerpc/eeh-pci-error-recovery.txt
for additional detail on real-life experience of the causes of
software errors.
+STEP 0: Error Event: ERR_FATAL
+-------------------
+A PCI bus error is detected by the PCI hardware. On powerpc, the slot is
+isolated, in that all I/O is blocked: all reads return 0xffffffff, all
+writes are ignored.
+
+STEP 1: Remove devices
+--------------------
+The platform removes the devices. Depending on the error agent, this could
+be every device below this port, or the upstream component (most likely a
+downstream port).
+
+STEP 2: Reset link
+--------------------
+The platform resets the link. This is a PCI-Express specific step and is
+done whenever a fatal error has been detected that can be "solved" by
+resetting the link.
+
+STEP 3: Re-enumerate the devices
+--------------------
+The platform initiates re-enumeration of the devices.
Conclusion; General Remarks
---------------------------
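
For reference, the recovery sequence described above maps onto the
error-handling callbacks a PCI driver can export through
struct pci_error_handlers. The sketch below shows how a hypothetical driver
might wire them up; the "mydrv" names and the callback bodies are
illustrative only, not taken from any in-tree driver.

    /* Illustrative sketch of a driver's error-recovery callbacks. */
    static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
                                                 enum pci_channel_state state)
    {
        if (state == pci_channel_io_perm_failure)
            return PCI_ERS_RESULT_DISCONNECT;   /* STEP 5: permanent failure */
        /* Stop I/O on the device and ask the platform for a reset. */
        return PCI_ERS_RESULT_NEED_RESET;       /* go to STEP 3: slot reset */
    }

    static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
    {
        /* Re-initialise the device after the slot/link reset. */
        return PCI_ERS_RESULT_RECOVERED;
    }

    static void mydrv_resume(struct pci_dev *pdev)
    {
        /* STEP 4: restart normal I/O. */
    }

    static const struct pci_error_handlers mydrv_err_handlers = {
        .error_detected = mydrv_error_detected,
        .slot_reset     = mydrv_slot_reset,
        .resume         = mydrv_resume,
    };

The structure is then referenced from the err_handler field of the driver's
struct pci_driver.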
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1beb30d8d7fc1..20cc45602f456 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3162,6 +3162,8 @@
on: Turn realloc on
realloc same as realloc=on
noari do not use PCIe ARI.
+ noats [PCIE, Intel-IOMMU, AMD-IOMMU]
+ do not use PCIe ATS (and IOMMU device IOTLB).
pcie_scan_all Scan all possible PCIe devices. Otherwise we
only look for one device below a PCIe downstream
port.
diff --git a/Documentation/misc-devices/lcd-panel-cgram.txt b/Documentation/auxdisplay/lcd-panel-cgram.txt
index 7f82c905763d2..7f82c905763d2 100644
--- a/Documentation/misc-devices/lcd-panel-cgram.txt
+++ b/Documentation/auxdisplay/lcd-panel-cgram.txt
diff --git a/Documentation/devicetree/bindings/arm/ux500/boards.txt b/Documentation/devicetree/bindings/arm/ux500/boards.txt
index 7334c24625fcc..0fa429534f491 100644
--- a/Documentation/devicetree/bindings/arm/ux500/boards.txt
+++ b/Documentation/devicetree/bindings/arm/ux500/boards.txt
@@ -26,7 +26,7 @@ interrupt-controller:
see binding for interrupt-controller/arm,gic.txt
timer:
- see binding for arm/twd.txt
+ see binding for timer/arm,twd.txt
clocks:
see binding for clocks/ux500.txt
diff --git a/Documentation/devicetree/bindings/dma/k3dma.txt b/Documentation/devicetree/bindings/dma/k3dma.txt
index 23f8d712c3ce5..4945aeac4dc4f 100644
--- a/Documentation/devicetree/bindings/dma/k3dma.txt
+++ b/Documentation/devicetree/bindings/dma/k3dma.txt
@@ -23,7 +23,6 @@ Controller:
dma-requests = <27>;
interrupts = <0 12 4>;
clocks = <&pclk>;
- status = "disable";
};
Client:
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 66026dcf53e12..3f15f66445273 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -190,7 +190,6 @@ mmc0: mmc@23000000 {
power-domains = <&k2g_pds 0xb>;
clocks = <&k2g_clks 0xb 1>, <&k2g_clks 0xb 2>;
clock-names = "fck", "mmchsdb_fck";
- status = "disabled";
};
------------------------------------------------------------------------------
diff --git a/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt b/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
index a767259dedadc..1e442450747f9 100644
--- a/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
+++ b/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
@@ -11,6 +11,10 @@ Optional properties:
- trans-gpios = <gpio-descriptor>; : GPIO for voltage translator enable
- mux-gpios = <gpio-descriptor>; : GPIO for pin multiplexing with other
functions (eg, external FSI masters)
+ - no-gpio-delays; : Don't add extra delays between GPIO
+ accesses. This is useful when the HW
+ GPIO block is running at a low enough
+ frequency.
Examples:
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
index 6c49db7f8ad25..6c49db7f8ad25 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
index 16964f0c17733..6e8a9ab0fdaeb 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
@@ -10,6 +10,8 @@ platforms.
Definition: must be one of:
"qcom,msm8916-apcs-kpss-global",
"qcom,msm8996-apcs-hmss-global"
+ "qcom,msm8998-apcs-hmss-global"
+ "qcom,sdm845-apss-shared"
- reg:
Usage: required
diff --git a/Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt b/Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt
new file mode 100644
index 0000000000000..1d2b7fee7b853
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt
@@ -0,0 +1,47 @@
+* STMicroelectronics STM32 IPCC (Inter-Processor Communication Controller)
+
+The IPCC block provides a non-blocking signaling mechanism to post and
+retrieve messages in an atomic way between two processors.
+It provides the signaling for N bidirectional channels. The number of channels
+(N) can be read from a dedicated register.
+
+Required properties:
+- compatible: Must be "st,stm32mp1-ipcc"
+- reg: Register address range (base address and length)
+- st,proc-id: Processor id using the mailbox (0 or 1)
+- clocks: Input clock
+- interrupt-names: List of names for the interrupts described by the interrupt
+ property. Must contain the following entries:
+ - "rx"
+ - "tx"
+ - "wakeup"
+- interrupts: Interrupt specifiers for "rx channel occupied", "tx channel
+ free" and "system wakeup".
+- #mbox-cells: Number of cells required for the mailbox specifier. Must be 1.
+ The data contained in the mbox specifier of the "mboxes"
+ property in the client node is the mailbox channel index.
+
+Optional properties:
+- wakeup-source: Flag to indicate whether this device can wake up the system
+
+
+
+Example:
+ ipcc: mailbox@4c001000 {
+ compatible = "st,stm32mp1-ipcc";
+ #mbox-cells = <1>;
+ reg = <0x4c001000 0x400>;
+ st,proc-id = <0>;
+ interrupts-extended = <&intc GIC_SPI 100 IRQ_TYPE_NONE>,
+ <&intc GIC_SPI 101 IRQ_TYPE_NONE>,
+ <&aiec 62 1>;
+ interrupt-names = "rx", "tx", "wakeup";
+ clocks = <&rcc_clk IPCC>;
+ wakeup-source;
+ }
+
+Client:
+ mbox_test {
+ ...
+ mboxes = <&ipcc 0>, <&ipcc 1>;
+ };
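
On the kernel side, a client such as the mbox_test node above would normally
go through the generic mailbox client API. The probe sketch below is a rough
illustration only (driver and symbol names are hypothetical), not the actual
stm32-ipcc client code.

    /* Rough sketch of a mailbox client probe for the binding above. */
    #include <linux/err.h>
    #include <linux/mailbox_client.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct demo_priv {
        struct mbox_client cl;
        struct mbox_chan *chan;
    };

    static void demo_rx_callback(struct mbox_client *cl, void *msg)
    {
        dev_info(cl->dev, "rx channel signalled\n");
    }

    static int demo_probe(struct platform_device *pdev)
    {
        struct demo_priv *priv;
        int ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
            return -ENOMEM;

        priv->cl.dev = &pdev->dev;
        priv->cl.rx_callback = demo_rx_callback;
        priv->cl.tx_block = true;
        priv->cl.tx_tout = 1000;        /* ms */

        /* Index 0 of the "mboxes" property, i.e. <&ipcc 0> above. */
        priv->chan = mbox_request_channel(&priv->cl, 0);
        if (IS_ERR(priv->chan))
            return PTR_ERR(priv->chan);

        /* IPCC only carries a signal, so a NULL payload is sufficient. */
        ret = mbox_send_message(priv->chan, NULL);
        return ret < 0 ? ret : 0;
    }

The containing struct would be registered as an ordinary platform driver
bound to the client node.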
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt
index f9632bacbd04a..f9632bacbd04a 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt
diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
index a086f1e1cdd78..7f0822b4beae4 100644
--- a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
+++ b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt
@@ -61,7 +61,6 @@ Example of the RCU bindings on a xRX200 SoC:
usb_phy0: usb2-phy@18 {
compatible = "lantiq,xrx200-usb2-phy";
reg = <0x18 4>, <0x38 4>;
- status = "disabled";
resets = <&reset1 4 4>, <&reset0 4 4>;
reset-names = "phy", "ctrl";
@@ -71,7 +70,6 @@ Example of the RCU bindings on a xRX200 SoC:
usb_phy1: usb2-phy@34 {
compatible = "lantiq,xrx200-usb2-phy";
reg = <0x34 4>, <0x3C 4>;
- status = "disabled";
resets = <&reset1 5 4>, <&reset0 4 4>;
reset-names = "phy", "ctrl";
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index ee978c95189d7..839f469f4525b 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -69,7 +69,6 @@ Example: R8A7790 (R-Car H2) SDHI controller nodes
max-frequency = <195000000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
resets = <&cpg 314>;
- status = "disabled";
};
sdhi1: sd@ee120000 {
@@ -83,7 +82,6 @@ Example: R8A7790 (R-Car H2) SDHI controller nodes
max-frequency = <195000000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
resets = <&cpg 313>;
- status = "disabled";
};
sdhi2: sd@ee140000 {
@@ -97,7 +95,6 @@ Example: R8A7790 (R-Car H2) SDHI controller nodes
max-frequency = <97500000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
resets = <&cpg 312>;
- status = "disabled";
};
sdhi3: sd@ee160000 {
@@ -111,5 +108,4 @@ Example: R8A7790 (R-Car H2) SDHI controller nodes
max-frequency = <97500000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
resets = <&cpg 311>;
- status = "disabled";
};
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt b/Documentation/devicetree/bindings/mtd/ibm,ndfc.txt
index 869f0b5f16e88..869f0b5f16e88 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt
+++ b/Documentation/devicetree/bindings/mtd/ibm,ndfc.txt
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
index 1c88526dedfc2..c5ba6a4ba1f2f 100644
--- a/Documentation/devicetree/bindings/mtd/mtk-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -20,7 +20,6 @@ Required NFI properties:
- interrupts: Interrupts of NFI.
- clocks: NFI required clocks.
- clock-names: NFI clocks internal name.
-- status: Disabled default. Then set "okay" by platform.
- ecc-engine: Required ECC Engine node.
- #address-cells: NAND chip index, should be 1.
- #size-cells: Should be 0.
@@ -34,7 +33,6 @@ Example:
clocks = <&pericfg CLK_PERI_NFI>,
<&pericfg CLK_PERI_NFI_PAD>;
clock-names = "nfi_clk", "pad_clk";
- status = "disabled";
ecc-engine = <&bch>;
#address-cells = <1>;
#size-cells = <0>;
@@ -152,7 +150,6 @@ Required BCH properties:
- interrupts: Interrupts of ECC.
- clocks: ECC required clocks.
- clock-names: ECC clocks internal name.
-- status: Disabled default. Then set "okay" by platform.
Example:
@@ -162,5 +159,4 @@ Example:
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_NFI_ECC>;
clock-names = "nfiecc_clk";
- status = "disabled";
};
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt b/Documentation/devicetree/bindings/net/ibm,emac.txt
index 44b842b6ca154..44b842b6ca154 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
+++ b/Documentation/devicetree/bindings/net/ibm,emac.txt
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 890526dbfc264..fac897d54423c 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -21,6 +21,7 @@ Required properties:
- "renesas,etheravb-r8a77965" for the R8A77965 SoC.
- "renesas,etheravb-r8a77970" for the R8A77970 SoC.
- "renesas,etheravb-r8a77980" for the R8A77980 SoC.
+ - "renesas,etheravb-r8a77990" for the R8A77990 SoC.
- "renesas,etheravb-r8a77995" for the R8A77995 SoC.
- "renesas,etheravb-rcar-gen3" as a fallback for the above
R-Car Gen3 devices.
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index 1da7ade3183c8..c124f9bc11f3a 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -1,7 +1,9 @@
* Synopsys DesignWare PCIe interface
Required properties:
-- compatible: should contain "snps,dw-pcie" to identify the core.
+- compatible:
+ "snps,dw-pcie" for RC mode;
+ "snps,dw-pcie-ep" for EP mode;
- reg: Should contain the configuration address space.
- reg-names: Must be "config" for the PCIe configuration space.
(The old way of getting the configuration address space from "ranges"
@@ -41,11 +43,11 @@ EP mode:
Example configuration:
- pcie: pcie@dffff000 {
+ pcie: pcie@dfc00000 {
compatible = "snps,dw-pcie";
- reg = <0xdffff000 0x1000>, /* Controller registers */
- <0xd0000000 0x2000>; /* PCI config space */
- reg-names = "ctrlreg", "config";
+ reg = <0xdfc00000 0x0001000>, /* IP registers */
+ <0xd0000000 0x0002000>; /* Configuration space */
+ reg-names = "dbi", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
@@ -54,5 +56,15 @@ Example configuration:
interrupts = <25>, <24>;
#interrupt-cells = <1>;
num-lanes = <1>;
- num-viewport = <3>;
+ };
+or
+ pcie: pcie@dfc00000 {
+ compatible = "snps,dw-pcie-ep";
+ reg = <0xdfc00000 0x0001000>, /* IP registers 1 */
+ <0xdfc01000 0x0001000>, /* IP registers 2 */
+ <0xd0000000 0x2000000>; /* Configuration space */
+ reg-names = "dbi", "dbi2", "addr_space";
+ num-ib-windows = <6>;
+ num-ob-windows = <2>;
+ num-lanes = <1>;
};
diff --git a/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
new file mode 100644
index 0000000000000..65038aa642e5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
@@ -0,0 +1,73 @@
+* Mobiveil AXI PCIe Root Port Bridge DT description
+
+Mobiveil's GPEX 4.0 is a PCIe Gen4 root port bridge IP. This configurable IP
+has up to 8 outbound and inbound windows for the address translation.
+
+Required properties:
+- #address-cells: Address representation for root ports, set to <3>
+- #size-cells: Size representation for root ports, set to <2>
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- compatible: Should contain "mbvl,gpex40-pcie"
+- reg: Should contain PCIe registers location and length
+ "config_axi_slave": PCIe controller registers
+ "csr_axi_slave" : Bridge config registers
+ "gpio_slave" : GPIO registers to control slot power
+ "apb_csr" : MSI registers
+
+- device_type: must be "pci"
+- apio-wins : number of requested apio outbound windows
+ default 2 outbound windows are configured -
+ 1. Config window
+ 2. Memory window
+- ppio-wins : number of requested ppio inbound windows
+ default 1 inbound memory window is configured.
+- bus-range: PCI bus numbers covered
+- interrupt-controller: identifies the node as an interrupt controller
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- interrupt-parent : phandle to the interrupt controller the device is
+ attached to; it should be set to the GIC node (ARM's Generic Interrupt
+ Controller) in the system DT.
+- interrupts: The interrupt line of the PCIe controller. The last cell of
+ this field is set to 4 to denote it as an IRQ_TYPE_LEVEL_HIGH type
+ interrupt.
+- interrupt-map-mask,
+ interrupt-map: standard PCI properties to define the mapping of the
+ PCI interface to interrupt numbers.
+- ranges: ranges for the PCI memory regions (I/O space region is not
+ supported by hardware)
+ Please refer to the standard PCI bus binding document for a more
+ detailed explanation
+
+
+Example:
+++++++++
+ pcie0: pcie@a0000000 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ compatible = "mbvl,gpex40-pcie";
+ reg = <0xa0000000 0x00001000>,
+ <0xb0000000 0x00010000>,
+ <0xff000000 0x00200000>,
+ <0xb0010000 0x00001000>;
+ reg-names = "config_axi_slave",
+ "csr_axi_slave",
+ "gpio_slave",
+ "apb_csr";
+ device_type = "pci";
+ apio-wins = <2>;
+ ppio-wins = <1>;
+ bus-range = <0x00000000 0x000000ff>;
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <1>;
+ interrupts = < 0 89 4 >;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 0 &pci_express 0>,
+ <0 0 0 1 &pci_express 1>,
+ <0 0 0 2 &pci_express 2>,
+ <0 0 0 3 &pci_express 3>;
+ ranges = < 0x83000000 0 0x00000000 0xa8000000 0 0x8000000>;
+
+ };
diff --git a/Documentation/devicetree/bindings/pci/pci-armada8k.txt b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
index c1e4c3d10a747..9e3fc15e1af88 100644
--- a/Documentation/devicetree/bindings/pci/pci-armada8k.txt
+++ b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
@@ -12,7 +12,10 @@ Required properties:
- "ctrl" for the control register region
- "config" for the config space region
- interrupts: Interrupt specifier for the PCIe controler
-- clocks: reference to the PCIe controller clock
+- clocks: reference to the PCIe controller clocks
+- clock-names: mandatory if there is a second clock, in this case the
+ name must be "core" for the first clock and "reg" for the second
+ one
Example:
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 1fb614e615da3..a5f7fc62d10e9 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -8,6 +8,7 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
"renesas,pcie-r8a7793" for the R8A7793 SoC;
"renesas,pcie-r8a7795" for the R8A7795 SoC;
"renesas,pcie-r8a7796" for the R8A7796 SoC;
+ "renesas,pcie-r8a77980" for the R8A77980 SoC;
"renesas,pcie-rcar-gen2" for a generic R-Car Gen2 or
RZ/G1 compatible device.
"renesas,pcie-rcar-gen3" for a generic R-Car Gen3 compatible device.
@@ -32,6 +33,11 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
and PCIe bus clocks.
- clock-names: from common clock binding: should be "pcie" and "pcie_bus".
+Optional properties:
+- phys: from common PHY binding: PHY phandle and specifier (only make sense
+ for R-Car gen3 SoCs where the PCIe PHYs have their own register blocks).
+- phy-names: from common PHY binding: should be "pcie".
+
Example:
SoC-specific DT Entry:
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt
new file mode 100644
index 0000000000000..778467307a932
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie-ep.txt
@@ -0,0 +1,62 @@
+* Rockchip AXI PCIe Endpoint Controller DT description
+
+Required properties:
+- compatible: Should contain "rockchip,rk3399-pcie-ep"
+- reg: Two register ranges as listed in the reg-names property
+- reg-names: Must include the following names
+ - "apb-base"
+ - "mem-base"
+- clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - "aclk"
+ - "aclk-perf"
+ - "hclk"
+ - "pm"
+- resets: Must contain seven entries for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following names
+ - "core"
+ - "mgmt"
+ - "mgmt-sticky"
+ - "pipe"
+ - "pm"
+ - "aclk"
+ - "pclk"
+- pinctrl-names : The pin control state names
+- pinctrl-0: The "default" pinctrl state
+- phys: Must contain a phandle to a PHY for each entry in phy-names.
+- phy-names: Must include 4 entries for all 4 lanes even if some of
+ them are not used in a given design. Entries are of the form "pcie-phy-N",
+ where N ranges from 0 to 3.
+ (see example below and you MUST also refer to ../phy/rockchip-pcie-phy.txt
+ for changing the #phy-cells of phy node to support it)
+- rockchip,max-outbound-regions: Maximum number of outbound regions
+
+Optional Property:
+- num-lanes: number of lanes to use
+- max-functions: Maximum number of functions that can be configured (default 1).
+
+pcie0-ep: pcie@f8000000 {
+ compatible = "rockchip,rk3399-pcie-ep";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ rockchip,max-outbound-regions = <16>;
+ clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
+ <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
+ clock-names = "aclk", "aclk-perf",
+ "hclk", "pm";
+ max-functions = /bits/ 8 <8>;
+ num-lanes = <4>;
+ reg = <0x0 0xfd000000 0x0 0x1000000>, <0x0 0x80000000 0x0 0x20000>;
+ reg-names = "apb-base", "mem-base";
+ resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE> ,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>, <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
+ phys = <&pcie_phy 0>, <&pcie_phy 1>, <&pcie_phy 2>, <&pcie_phy 3>;
+ phy-names = "pcie-phy-0", "pcie-phy-1", "pcie-phy-2", "pcie-phy-3";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreq>;
+};
diff --git a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt b/Documentation/devicetree/bindings/pci/rockchip-pcie-host.txt
index af34c65773fd7..af34c65773fd7 100644
--- a/Documentation/devicetree/bindings/pci/rockchip-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/rockchip-pcie-host.txt
diff --git a/Documentation/devicetree/bindings/pci/xgene-pci.txt b/Documentation/devicetree/bindings/pci/xgene-pci.txt
index 6fd2decfa66c4..92490330dc1c7 100644
--- a/Documentation/devicetree/bindings/pci/xgene-pci.txt
+++ b/Documentation/devicetree/bindings/pci/xgene-pci.txt
@@ -25,8 +25,6 @@ Optional properties:
Example:
-SoC-specific DT Entry:
-
pcie0: pcie@1f2b0000 {
status = "disabled";
device_type = "pci";
@@ -50,8 +48,3 @@ SoC-specific DT Entry:
clocks = <&pcie0clk 0>;
};
-
-Board-specific DT Entry:
- &pcie0 {
- status = "ok";
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
index fb87c7d74f2e5..8fb5a53775e88 100644
--- a/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
@@ -8,6 +8,17 @@ Required Properties:
- reg: Should contain the register base address and size of
the pin controller.
- clocks: phandle of the clock feeding the pin controller
+- gpio-controller: Marks the device node as a GPIO controller.
+- gpio-ranges: Specifies the mapping between gpio controller and
+ pin-controller pins.
+- #gpio-cells: Should be two. The first cell is the gpio pin number
+ and the second cell is used for optional parameters.
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt. Shall be set to 2. The first cell
+ defines the interrupt number, the second encodes
+ the trigger flags described in
+ bindings/interrupt-controller/interrupts.txt
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices, including the meaning of the
@@ -164,6 +175,11 @@ Example:
compatible = "actions,s900-pinctrl";
reg = <0x0 0xe01b0000 0x0 0x1000>;
clocks = <&cmu CLK_GPIO>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 146>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
uart2-default: uart2-default {
pinmux {
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 64bc5c2a76da6..258a4648ab813 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -28,6 +28,7 @@ Required properties:
"allwinner,sun50i-a64-r-pinctrl"
"allwinner,sun50i-h5-pinctrl"
"allwinner,sun50i-h6-pinctrl"
+ "allwinner,sun50i-h6-r-pinctrl"
"nextthing,gr8-pinctrl"
- reg: Should contain the register physical address and length for the
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
index 2569866c692f4..3fac0a061bcc7 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
@@ -36,6 +36,24 @@ listed. In other words, a subnode that lists only a mux function implies no
information about any pull configuration. Similarly, a subnode that lists only
a pul parameter implies no information about the mux function.
+The BCM2835 pin configuration and multiplexing supports the generic bindings.
+For details on each property, refer to ./pinctrl-bindings.txt.
+
+Required sub-node properties:
+ - pins
+ - function
+
+Optional sub-node properties:
+ - bias-disable
+ - bias-pull-up
+ - bias-pull-down
+ - output-high
+ - output-low
+
+Legacy pin configuration and multiplexing binding:
+*** (Its use is deprecated, use generic multiplexing and configuration
+bindings instead)
+
Required subnode-properties:
- brcm,pins: An array of cells. Each cell contains the ID of a pin. Valid IDs
are the integer GPIO IDs; 0==GPIO0, 1==GPIO1, ... 53==GPIO53.
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 2c12f9789116c..54ecb8ab77883 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -3,8 +3,10 @@
Required properties for the root node:
- compatible: one of "amlogic,meson8-cbus-pinctrl"
"amlogic,meson8b-cbus-pinctrl"
+ "amlogic,meson8m2-cbus-pinctrl"
"amlogic,meson8-aobus-pinctrl"
"amlogic,meson8b-aobus-pinctrl"
+ "amlogic,meson8m2-aobus-pinctrl"
"amlogic,meson-gxbb-periphs-pinctrl"
"amlogic,meson-gxbb-aobus-pinctrl"
"amlogic,meson-gxl-periphs-pinctrl"
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
index a5a8322a31bda..a677145ae6d1f 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
@@ -18,7 +18,9 @@ Required properties:
removed.
- #gpio-cells : Should be two.
- first cell is the pin number
- - second cell is used to specify flags. Flags are currently unused.
+ - second cell is used to specify flags as described in
+ 'Documentation/devicetree/bindings/gpio/gpio.txt'. Allowed values defined by
+ 'include/dt-bindings/gpio/gpio.h' (e.g. GPIO_ACTIVE_LOW).
- gpio-controller : Marks the device node as a GPIO controller.
- reg : For an address on its bus. I2C uses this a the I2C address of the chip.
SPI uses this to specify the chipselect line which the chip is
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
index f18ed99f6e145..def8fcad89414 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
@@ -9,6 +9,16 @@ Required properties for the root node:
- #gpio-cells: Should be two. The first cell is the pin number and the
second is the GPIO flags.
+Optional properties:
+- interrupt-controller : Marks the device node as an interrupt controller
+
+If the property interrupt-controller is defined, following property is required
+- reg-names: A string describing the "reg" entries. Must contain "eint".
+- interrupts : The interrupt output from the controller.
+- #interrupt-cells: Should be two.
+- interrupt-parent: Phandle of the interrupt parent to which the external
+ GPIO interrupts are forwarded to.
+
Please refer to pinctrl-bindings.txt in this directory for details of the
common pinctrl bindings used by client devices, including the meaning of the
phrase "pin configuration node".
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 892d8fd7b7003..abd8fbcf1e62d 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -15,6 +15,7 @@ Required Properties:
- "renesas,pfc-r8a7740": for R8A7740 (R-Mobile A1) compatible pin-controller.
- "renesas,pfc-r8a7743": for R8A7743 (RZ/G1M) compatible pin-controller.
- "renesas,pfc-r8a7745": for R8A7745 (RZ/G1E) compatible pin-controller.
+ - "renesas,pfc-r8a77470": for R8A77470 (RZ/G1C) compatible pin-controller.
- "renesas,pfc-r8a7778": for R8A7778 (R-Car M1) compatible pin-controller.
- "renesas,pfc-r8a7779": for R8A7779 (R-Car H1) compatible pin-controller.
- "renesas,pfc-r8a7790": for R8A7790 (R-Car H2) compatible pin-controller.
@@ -27,6 +28,7 @@ Required Properties:
- "renesas,pfc-r8a77965": for R8A77965 (R-Car M3-N) compatible pin-controller.
- "renesas,pfc-r8a77970": for R8A77970 (R-Car V3M) compatible pin-controller.
- "renesas,pfc-r8a77980": for R8A77980 (R-Car V3H) compatible pin-controller.
+ - "renesas,pfc-r8a77990": for R8A77990 (R-Car E3) compatible pin-controller.
- "renesas,pfc-r8a77995": for R8A77995 (R-Car D3) compatible pin-controller.
- "renesas,pfc-sh73a0": for SH73A0 (SH-Mobile AG5) compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
index a01a3b8a23632..0919db294c174 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
@@ -20,6 +20,7 @@ defined as gpio sub-nodes of the pinmux controller.
Required properties for iomux controller:
- compatible: should be
+ "rockchip,px30-pinctrl": for Rockchip PX30
"rockchip,rv1108-pinctrl": for Rockchip RV1108
"rockchip,rk2928-pinctrl": for Rockchip RK2928
"rockchip,rk3066a-pinctrl": for Rockchip RK3066a
diff --git a/Documentation/devicetree/bindings/pps/pps-gpio.txt b/Documentation/devicetree/bindings/pps/pps-gpio.txt
index 0de23b7936572..3683874832ae1 100644
--- a/Documentation/devicetree/bindings/pps/pps-gpio.txt
+++ b/Documentation/devicetree/bindings/pps/pps-gpio.txt
@@ -20,5 +20,4 @@ Example:
assert-falling-edge;
compatible = "pps-gpio";
- status = "okay";
};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt b/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
index 2e53324fb720d..5ccfcc82da083 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
@@ -2,7 +2,7 @@
Required properties:
- compatible: Shall contain "ti,omap-dmtimer-pwm".
-- ti,timers: phandle to PWM capable OMAP timer. See arm/omap/timer.txt for info
+- ti,timers: phandle to PWM capable OMAP timer. See timer/ti,timer.txt for info
about these timers.
- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt b/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt
index a13fbdb4bd88d..a13fbdb4bd88d 100644
--- a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+++ b/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt
diff --git a/Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt b/Documentation/devicetree/bindings/rng/sparc_sun_oracle_rng.txt
index b0b211194c714..b0b211194c714 100644
--- a/Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt
+++ b/Documentation/devicetree/bindings/rng/sparc_sun_oracle_rng.txt
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index 274058c583dde..de0d6090c0fd2 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -6,7 +6,14 @@ I. For patch submitters
0) Normal patch submission rules from Documentation/process/submitting-patches.rst
   apply.
- 1) The Documentation/ portion of the patch should be a separate patch.
+ 1) The Documentation/ and include/dt-bindings/ portion of the patch should
+ be a separate patch. The preferred subject prefix for binding patches is:
+
+ "dt-bindings: <binding dir>: ..."
+
+ The 80 characters of the subject are precious. It is recommended not to
+ use "Documentation" or "doc" because that is implied. All bindings are
+ docs. Repeating "binding" should also be avoided.
2) Submit the entire series to the devicetree mailinglist at
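
As an illustration of the subject convention above, a hypothetical patch
documenting a new MMC controller could use the subject
"dt-bindings: mmc: Add the xyz-sdhci controller"; the controller name is
invented for this example and does not refer to anything in the patch.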
diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
index fdf5caa6229b4..39e7d4e61a63c 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
@@ -27,9 +27,9 @@ Example:
tsc: thermal@e6198000 {
compatible = "renesas,r8a7795-thermal";
- reg = <0 0xe6198000 0 0x68>,
- <0 0xe61a0000 0 0x5c>,
- <0 0xe61a8000 0 0x5c>;
+ reg = <0 0xe6198000 0 0x100>,
+ <0 0xe61a0000 0 0x100>,
+ <0 0xe61a8000 0 0x100>;
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/nios2/timer.txt b/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
index 904a5846d7acd..904a5846d7acd 100644
--- a/Documentation/devicetree/bindings/nios2/timer.txt
+++ b/Documentation/devicetree/bindings/timer/altr,timer-1.0.txt
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/timer/arm,arch_timer.txt
index 68301b77e854c..68301b77e854c 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer.txt
diff --git a/Documentation/devicetree/bindings/arm/armv7m_systick.txt b/Documentation/devicetree/bindings/timer/arm,armv7m-systick.txt
index 7cf4a24601eb2..7cf4a24601eb2 100644
--- a/Documentation/devicetree/bindings/arm/armv7m_systick.txt
+++ b/Documentation/devicetree/bindings/timer/arm,armv7m-systick.txt
diff --git a/Documentation/devicetree/bindings/arm/global_timer.txt b/Documentation/devicetree/bindings/timer/arm,global_timer.txt
index bdae3a818793c..bdae3a818793c 100644
--- a/Documentation/devicetree/bindings/arm/global_timer.txt
+++ b/Documentation/devicetree/bindings/timer/arm,global_timer.txt
diff --git a/Documentation/devicetree/bindings/arm/twd.txt b/Documentation/devicetree/bindings/timer/arm,twd.txt
index 383ea19c2bf00..383ea19c2bf00 100644
--- a/Documentation/devicetree/bindings/arm/twd.txt
+++ b/Documentation/devicetree/bindings/timer/arm,twd.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt b/Documentation/devicetree/bindings/timer/fsl,gtm.txt
index 9a33efded4bc9..9a33efded4bc9 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt
+++ b/Documentation/devicetree/bindings/timer/fsl,gtm.txt
diff --git a/Documentation/devicetree/bindings/arm/mrvl/timer.txt b/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.txt
index 9a6e251462e72..9a6e251462e72 100644
--- a/Documentation/devicetree/bindings/arm/mrvl/timer.txt
+++ b/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.txt
diff --git a/Documentation/devicetree/bindings/arm/msm/timer.txt b/Documentation/devicetree/bindings/timer/qcom,msm-timer.txt
index 5e10c345548f5..5e10c345548f5 100644
--- a/Documentation/devicetree/bindings/arm/msm/timer.txt
+++ b/Documentation/devicetree/bindings/timer/qcom,msm-timer.txt
diff --git a/Documentation/devicetree/bindings/arm/spear-timer.txt b/Documentation/devicetree/bindings/timer/st,spear-timer.txt
index c0017221cf556..c0017221cf556 100644
--- a/Documentation/devicetree/bindings/arm/spear-timer.txt
+++ b/Documentation/devicetree/bindings/timer/st,spear-timer.txt
diff --git a/Documentation/devicetree/bindings/c6x/timer64.txt b/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
index 95911fe70224d..95911fe70224d 100644
--- a/Documentation/devicetree/bindings/c6x/timer64.txt
+++ b/Documentation/devicetree/bindings/timer/ti,c64x+timer64.txt
diff --git a/Documentation/devicetree/bindings/arm/omap/timer.txt b/Documentation/devicetree/bindings/timer/ti,timer.txt
index d02e27c764ecc..d02e27c764ecc 100644
--- a/Documentation/devicetree/bindings/arm/omap/timer.txt
+++ b/Documentation/devicetree/bindings/timer/ti,timer.txt
diff --git a/Documentation/devicetree/bindings/arm/vt8500/via,vt8500-timer.txt b/Documentation/devicetree/bindings/timer/via,vt8500-timer.txt
index 901c73f0d8ef0..901c73f0d8ef0 100644
--- a/Documentation/devicetree/bindings/arm/vt8500/via,vt8500-timer.txt
+++ b/Documentation/devicetree/bindings/timer/via,vt8500-timer.txt
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 36003832c2a83..4b38f3373f432 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -32,6 +32,7 @@ andestech Andes Technology Corporation
apm Applied Micro Circuits Corporation (APM)
aptina Aptina Imaging
arasan Arasan Chip Systems
+archermind ArcherMind Technology (Nanjing) Co., Ltd.
arctic Arctic Sand
aries Aries Embedded GmbH
arm ARM Ltd.
@@ -47,6 +48,7 @@ auvidea Auvidea GmbH
avago Avago Technologies
avia avia semiconductor
avic Shanghai AVIC Optoelectronics Co., Ltd.
+avnet Avnet, Inc.
axentia Axentia Technologies AB
axis Axis Communications AB
bananapi BIPAI KEJI LIMITED
@@ -186,6 +188,7 @@ khadas Khadas
kiebackpeter Kieback & Peter GmbH
kinetic Kinetic Technologies
kingnovel Kingnovel Technology Co., Ltd.
+koe Kaohsiung Opto-Electronics Inc.
kosagi Sutajio Ko-Usagi PTE Ltd.
kyo Kyocera Corporation
lacie LaCie
@@ -200,11 +203,13 @@ linaro Linaro Limited
linksys Belkin International, Inc. (Linksys)
linux Linux-specific binding
lltc Linear Technology Corporation
+logicpd Logic PD, Inc.
lsi LSI Corp. (LSI Logic)
lwn Liebherr-Werk Nenzing GmbH
macnica Macnica Americas
marvell Marvell Technology Group Ltd.
maxim Maxim Integrated Products
+mbvl Mobiveil Inc.
mcube mCube
meas Measurement Specialties
mediatek MediaTek Inc.
@@ -319,6 +324,7 @@ sgx SGX Sensortech
sharp Sharp Corporation
shimafuji Shimafuji Electric, Inc.
si-en Si-En Technology Ltd.
+sifive SiFive, Inc.
sigma Sigma Designs, Inc.
sii Seiko Instruments, Inc.
sil Silicon Image
@@ -394,6 +400,7 @@ vot Vision Optical Technology Co., Ltd.
wd Western Digital Corp.
wetek WeTek Electronics, limited.
wexler Wexler
+wi2wi Wi2Wi, Inc.
winbond Winbond Electronics corp.
winstar Winstar Display Corp.
wlf Wolfson Microelectronics
diff --git a/MAINTAINERS b/MAINTAINERS
index fff6439819c89..c9ac159fb0235 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1135,10 +1135,12 @@ F: arch/arm/mach-actions/
F: arch/arm/boot/dts/owl-*
F: arch/arm64/boot/dts/actions/
F: drivers/clocksource/owl-*
+F: drivers/pinctrl/actions/*
F: drivers/soc/actions/
F: include/dt-bindings/power/owl-*
F: include/linux/soc/actions/
F: Documentation/devicetree/bindings/arm/actions.txt
+F: Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
F: Documentation/devicetree/bindings/power/actions,owl-sps.txt
F: Documentation/devicetree/bindings/timer/actions,owl-timer.txt
@@ -2531,8 +2533,6 @@ F: kernel/audit*
AUXILIARY DISPLAY DRIVERS
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: drivers/auxdisplay/
F: include/linux/cfag12864b.h
@@ -3382,16 +3382,12 @@ F: include/linux/usb/wusb*
CFAG12864B LCD DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: drivers/auxdisplay/cfag12864b.c
F: include/linux/cfag12864b.h
CFAG12864BFB LCD FRAMEBUFFER DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: drivers/auxdisplay/cfag12864bfb.c
F: include/linux/cfag12864b.h
@@ -7975,8 +7971,6 @@ F: kernel/kprobes.c
KS0108 LCD CONTROLLER DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: Documentation/auxdisplay/ks0108
F: drivers/auxdisplay/ks0108.c
@@ -9484,6 +9478,13 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/dvb-frontends/mn88473*
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F: drivers/pci/host/pcie-mobiveil.c
+
MODULE SUPPORT
M: Jessica Yu <jeyu@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10509,12 +10510,14 @@ F: drivers/infiniband/ulp/opa_vnic
OPEN FIRMWARE AND DEVICE TREE OVERLAYS
M: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
+M: Frank Rowand <frowand.list@gmail.com>
L: devicetree@vger.kernel.org
S: Maintained
F: Documentation/devicetree/dynamic-resolution-notes.txt
F: Documentation/devicetree/overlay-notes.txt
F: drivers/of/overlay.c
F: drivers/of/resolver.c
+K: of_overlay_notifier_
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh+dt@kernel.org>
@@ -10826,9 +10829,9 @@ F: Documentation/devicetree/bindings/pci/cdns,*.txt
F: drivers/pci/cadence/pcie-cadence*
PCI DRIVER FOR FREESCALE LAYERSCAPE
-M: Minghuan Lian <minghuan.Lian@freescale.com>
-M: Mingkai Hu <mingkai.hu@freescale.com>
-M: Roy Zang <tie-fei.zang@freescale.com>
+M: Minghuan Lian <minghuan.Lian@nxp.com>
+M: Mingkai Hu <mingkai.hu@nxp.com>
+M: Roy Zang <roy.zang@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org
@@ -11054,8 +11057,8 @@ M: Shawn Lin <shawn.lin@rock-chips.com>
L: linux-pci@vger.kernel.org
L: linux-rockchip@lists.infradead.org
S: Maintained
-F: Documentation/devicetree/bindings/pci/rockchip-pcie.txt
-F: drivers/pci/host/pcie-rockchip.c
+F: Documentation/devicetree/bindings/pci/rockchip-pcie*
+F: drivers/pci/host/pcie-rockchip*
PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC
M: Linus Walleij <linus.walleij@linaro.org>
@@ -11218,6 +11221,7 @@ L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
+F: drivers/pinctrl/mediatek/mtk-eint.*
F: drivers/pinctrl/mediatek/pinctrl-mtk-common.*
F: drivers/pinctrl/mediatek/pinctrl-mt2701.c
F: drivers/pinctrl/mediatek/pinctrl-mt7622.c
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index ecf613761e78e..17ea72b1b3898 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -78,7 +78,8 @@ CONFIG_PCIE_ARMADA_8K=y
CONFIG_PCI_AARDVARK=y
CONFIG_PCI_TEGRA=y
CONFIG_PCIE_RCAR=y
-CONFIG_PCIE_ROCKCHIP=m
+CONFIG_PCIE_ROCKCHIP=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCI_XGENE=y
CONFIG_PCI_HOST_THUNDER_PEM=y
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 66cf3a5a2f831..859c19828dd44 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -61,10 +61,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
#define HAVE_PCI_LEGACY 1
-extern void pcibios_claim_one_bus(struct pci_bus *b);
-
-extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
-
extern void pcibios_resource_survey(void);
struct file;
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 161f9758c631b..f34346d560954 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -915,67 +915,6 @@ void __init pcibios_resource_survey(void)
pci_assign_unassigned_resources();
}
-/* This is used by the PCI hotplug driver to allocate resource
- * of newly plugged busses. We can try to consolidate with the
- * rest of the code later, for now, keep it as-is as our main
- * resource allocation function doesn't deal with sub-trees yet.
- */
-void pcibios_claim_one_bus(struct pci_bus *bus)
-{
- struct pci_dev *dev;
- struct pci_bus *child_bus;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- int i;
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *r = &dev->resource[i];
-
- if (r->parent || !r->start || !r->flags)
- continue;
-
- pr_debug("PCI: Claiming %s: ", pci_name(dev));
- pr_debug("Resource %d: %016llx..%016llx [%x]\n",
- i, (unsigned long long)r->start,
- (unsigned long long)r->end,
- (unsigned int)r->flags);
-
- if (pci_claim_resource(dev, i) == 0)
- continue;
-
- pci_claim_bridge_resource(dev, i);
- }
- }
-
- list_for_each_entry(child_bus, &bus->children, node)
- pcibios_claim_one_bus(child_bus);
-}
-EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
-
-
-/* pcibios_finish_adding_to_bus
- *
- * This is to be called by the hotplug code after devices have been
- * added to a bus, this include calling it for a PHB that is just
- * being added
- */
-void pcibios_finish_adding_to_bus(struct pci_bus *bus)
-{
- pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
- pci_domain_nr(bus), bus->number);
-
- /* Allocate bus and devices resources */
- pcibios_allocate_bus_resources(bus);
- pcibios_claim_one_bus(bus);
-
- /* Add new devices to global lists. Register in proc, sysfs. */
- pci_bus_add_devices(bus);
-
- /* Fixup EEH */
- /* eeh_add_device_tree_late(bus); */
-}
-EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
-
static void pcibios_setup_phb_resources(struct pci_controller *hose,
struct list_head *resources)
{
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 0c65c38e05d6c..f1e92bf743c27 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -263,9 +263,8 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
if (!r->start && r->end) {
- printk(KERN_ERR "PCI: Device %s not available "
- "because of resource collisions\n",
- pci_name(dev));
+ pci_err(dev,
+ "can't enable device: resource collisions\n");
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
@@ -274,8 +273,7 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
+ pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index 15b59169c535a..e5e5ff6b9a5c5 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -60,50 +60,30 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
pci_bus_add_devices(root_bus);
}
-void pcibios_fixup_bus(struct pci_bus *pbus)
+int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- struct pci_dev *dev;
- int i, has_io, has_mem;
- u16 cmd;
+ u16 cmd, oldcmd;
+ int i;
- list_for_each_entry(dev, &pbus->devices, bus_list) {
- /*
- * We can not rely on that the bootloader has enabled I/O
- * or memory access to PCI devices. Instead we enable it here
- * if the device has BARs of respective type.
- */
- has_io = has_mem = 0;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- unsigned long f = dev->resource[i].flags;
- if (f & IORESOURCE_IO)
- has_io = 1;
- else if (f & IORESOURCE_MEM)
- has_mem = 1;
- }
- /* ROM BARs are mapped into 32-bit memory space */
- if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
- dev->resource[PCI_ROM_RESOURCE].flags |=
- IORESOURCE_ROM_ENABLE;
- has_mem = 1;
- }
- pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
- if (has_io && !(cmd & PCI_COMMAND_IO)) {
-#ifdef CONFIG_PCI_DEBUG
- printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
- pci_name(dev));
-#endif
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = &dev->resource[i];
+
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<i)))
+ continue;
+
+ if (res->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
- pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
- cmd);
- }
- if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
-#ifdef CONFIG_PCI_DEBUG
- printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev"
- "%s\n", pci_name(dev));
-#endif
+ if (res->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
- pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
- cmd);
- }
}
+
+ if (cmd != oldcmd) {
+ pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
}
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 41b20edb427d7..17ea16a1337ce 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -214,8 +214,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
if (!addrs)
return;
if (ofpci_verbose)
- printk(" parse addresses (%d bytes) @ %p\n",
- proplen, addrs);
+ pci_info(dev, " parse addresses (%d bytes) @ %p\n",
+ proplen, addrs);
op_res = &op->resource[0];
for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
struct resource *res;
@@ -227,8 +227,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
continue;
i = addrs[0] & 0xff;
if (ofpci_verbose)
- printk(" start: %llx, end: %llx, i: %x\n",
- op_res->start, op_res->end, i);
+ pci_info(dev, " start: %llx, end: %llx, i: %x\n",
+ op_res->start, op_res->end, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -236,13 +236,15 @@ static void pci_parse_of_addrs(struct platform_device *op,
res = &dev->resource[PCI_ROM_RESOURCE];
flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
} else {
- printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
+ pci_err(dev, "bad cfg reg num 0x%x\n", i);
continue;
}
res->start = op_res->start;
res->end = op_res->end;
res->flags = flags;
res->name = pci_name(dev);
+
+ pci_info(dev, "reg 0x%x: %pR\n", i, res);
}
}
@@ -289,8 +291,8 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
type = "";
if (ofpci_verbose)
- printk(" create device, devfn: %x, type: %s\n",
- devfn, type);
+ pci_info(bus," create device, devfn: %x, type: %s\n",
+ devfn, type);
dev->sysdata = node;
dev->dev.parent = bus->bridge;
@@ -323,10 +325,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
- if (ofpci_verbose)
- printk(" class: 0x%x device name: %s\n",
- dev->class, pci_name(dev));
-
/* I have seen IDE devices which will not respond to
* the bmdma simplex check reads if bus mastering is
* disabled.
@@ -353,10 +351,13 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->irq = PCI_IRQ_NONE;
}
+ pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
+ dev->vendor, dev->device, dev->hdr_type, dev->class);
+
pci_parse_of_addrs(sd->op, node, dev);
if (ofpci_verbose)
- printk(" adding to system ...\n");
+ pci_info(dev, " adding to system ...\n");
pci_device_add(dev, bus);
@@ -430,19 +431,19 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
u64 size;
if (ofpci_verbose)
- printk("of_scan_pci_bridge(%s)\n", node->full_name);
+ pci_info(dev, "of_scan_pci_bridge(%s)\n", node->full_name);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
- printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
+ pci_info(dev, "Can't get bus-range for PCI-PCI bridge %s\n",
node->full_name);
return;
}
if (ofpci_verbose)
- printk(" Bridge bus range [%u --> %u]\n",
- busrange[0], busrange[1]);
+ pci_info(dev, " Bridge bus range [%u --> %u]\n",
+ busrange[0], busrange[1]);
ranges = of_get_property(node, "ranges", &len);
simba = 0;
@@ -454,8 +455,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
if (!bus) {
- printk(KERN_ERR "Failed to create pci bus for %s\n",
- node->full_name);
+ pci_err(dev, "Failed to create pci bus for %s\n",
+ node->full_name);
return;
}
@@ -464,8 +465,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
bus->bridge_ctl = 0;
if (ofpci_verbose)
- printk(" Bridge ranges[%p] simba[%d]\n",
- ranges, simba);
+ pci_info(dev, " Bridge ranges[%p] simba[%d]\n",
+ ranges, simba);
/* parse ranges property, or cook one up by hand for Simba */
/* PCI #address-cells == 3 and #size-cells == 2 always */
@@ -487,10 +488,10 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
u64 start;
if (ofpci_verbose)
- printk(" RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
- "%08x:%08x]\n",
- ranges[0], ranges[1], ranges[2], ranges[3],
- ranges[4], ranges[5], ranges[6], ranges[7]);
+ pci_info(dev, " RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
+ "%08x:%08x]\n",
+ ranges[0], ranges[1], ranges[2], ranges[3],
+ ranges[4], ranges[5], ranges[6], ranges[7]);
flags = pci_parse_of_flags(ranges[0]);
size = GET_64BIT(ranges, 6);
@@ -510,14 +511,14 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
if (flags & IORESOURCE_IO) {
res = bus->resource[0];
if (res->flags) {
- printk(KERN_ERR "PCI: ignoring extra I/O range"
- " for bridge %s\n", node->full_name);
+ pci_err(dev, "ignoring extra I/O range"
+ " for bridge %s\n", node->full_name);
continue;
}
} else {
if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
- printk(KERN_ERR "PCI: too many memory ranges"
- " for bridge %s\n", node->full_name);
+ pci_err(dev, "too many memory ranges"
+ " for bridge %s\n", node->full_name);
continue;
}
res = bus->resource[i];
@@ -529,8 +530,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
region.end = region.start + size - 1;
if (ofpci_verbose)
- printk(" Using flags[%08x] start[%016llx] size[%016llx]\n",
- flags, start, size);
+ pci_info(dev, " Using flags[%08x] start[%016llx] size[%016llx]\n",
+ flags, start, size);
pcibios_bus_to_resource(dev->bus, res, &region);
}
@@ -538,7 +539,7 @@ after_ranges:
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
if (ofpci_verbose)
- printk(" bus name: %s\n", bus->name);
+ pci_info(dev, " bus name: %s\n", bus->name);
pci_of_scan_bus(pbm, node, bus);
}
@@ -553,14 +554,14 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
struct pci_dev *dev;
if (ofpci_verbose)
- printk("PCI: scan_bus[%s] bus no %d\n",
- node->full_name, bus->number);
+ pci_info(bus, "scan_bus[%s] bus no %d\n",
+ node->full_name, bus->number);
child = NULL;
prev_devfn = -1;
while ((child = of_get_next_child(node, child)) != NULL) {
if (ofpci_verbose)
- printk(" * %s\n", child->full_name);
+ pci_info(bus, " * %s\n", child->full_name);
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
@@ -581,8 +582,7 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
if (!dev)
continue;
if (ofpci_verbose)
- printk("PCI: dev header type: %x\n",
- dev->hdr_type);
+ pci_info(dev, "dev header type: %x\n", dev->hdr_type);
if (pci_is_bridge(dev))
of_scan_pci_bridge(pbm, child, dev);
@@ -624,6 +624,45 @@ static void pci_bus_register_of_sysfs(struct pci_bus *bus)
pci_bus_register_of_sysfs(child_bus);
}
+static void pci_claim_legacy_resources(struct pci_dev *dev)
+{
+ struct pci_bus_region region;
+ struct resource *p, *root, *conflict;
+
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ return;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return;
+
+ p->name = "Video RAM area";
+ p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ region.start = 0xa0000UL;
+ region.end = region.start + 0x1ffffUL;
+ pcibios_bus_to_resource(dev->bus, p, &region);
+
+ root = pci_find_parent_resource(dev, p);
+ if (!root) {
+ pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
+ goto err;
+ }
+
+ conflict = request_resource_conflict(root, p);
+ if (conflict) {
+ pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n",
+ p, conflict->name, conflict);
+ goto err;
+ }
+
+ pci_info(dev, "VGA legacy framebuffer %pR\n", p);
+ return;
+
+err:
+ kfree(p);
+}
+
static void pci_claim_bus_resources(struct pci_bus *bus)
{
struct pci_bus *child_bus;
@@ -639,15 +678,13 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
continue;
if (ofpci_verbose)
- printk("PCI: Claiming %s: "
- "Resource %d: %016llx..%016llx [%x]\n",
- pci_name(dev), i,
- (unsigned long long)r->start,
- (unsigned long long)r->end,
- (unsigned int)r->flags);
+ pci_info(dev, "Claiming Resource %d: %pR\n",
+ i, r);
pci_claim_resource(dev, i);
}
+
+ pci_claim_legacy_resources(dev);
}
list_for_each_entry(child_bus, &bus->children, node)
@@ -687,6 +724,7 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
pci_bus_register_of_sysfs(bus);
pci_claim_bus_resources(bus);
+
pci_bus_add_devices(bus);
return bus;
}
@@ -713,9 +751,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
}
if (cmd != oldcmd) {
- printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
- pci_name(dev), cmd);
- /* Enable the appropriate bits in the PCI command register. */
+ pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
@@ -1075,8 +1111,8 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
sp = prop->names;
if (ofpci_verbose)
- printk("PCI: Making slots for [%s] mask[0x%02x]\n",
- node->full_name, mask);
+ pci_info(bus, "Making slots for [%s] mask[0x%02x]\n",
+ node->full_name, mask);
i = 0;
while (mask) {
@@ -1089,12 +1125,12 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
}
if (ofpci_verbose)
- printk("PCI: Making slot [%s]\n", sp);
+ pci_info(bus, "Making slot [%s]\n", sp);
pci_slot = pci_create_slot(bus, i, sp, NULL);
if (IS_ERR(pci_slot))
- printk(KERN_ERR "PCI: pci_create_slot returned %ld\n",
- PTR_ERR(pci_slot));
+ pci_err(bus, "pci_create_slot returned %ld\n",
+ PTR_ERR(pci_slot));
sp += strlen(sp) + 1;
mask &= ~this_bit;
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 38d46bcc8634e..4759ccd542fe6 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -329,23 +329,6 @@ void pci_get_pbm_props(struct pci_pbm_info *pbm)
}
}
-static void pci_register_legacy_regions(struct resource *io_res,
- struct resource *mem_res)
-{
- struct resource *p;
-
- /* VGA Video RAM. */
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return;
-
- p->name = "Video RAM area";
- p->start = mem_res->start + 0xa0000UL;
- p->end = p->start + 0x1ffffUL;
- p->flags = IORESOURCE_BUSY;
- request_resource(mem_res, p);
-}
-
static void pci_register_iommu_region(struct pci_pbm_info *pbm)
{
const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma",
@@ -487,8 +470,6 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
if (pbm->mem64_space.flags)
request_resource(&iomem_resource, &pbm->mem64_space);
- pci_register_legacy_regions(&pbm->io_space,
- &pbm->mem_space);
pci_register_iommu_region(pbm);
}
@@ -508,8 +489,8 @@ void pci_scan_for_target_abort(struct pci_pbm_info *pbm,
PCI_STATUS_REC_TARGET_ABORT));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
- printk("%s: Device %s saw Target Abort [%016x]\n",
- pbm->name, pci_name(pdev), status);
+ pci_info(pdev, "%s: Device saw Target Abort [%016x]\n",
+ pbm->name, status);
}
}
@@ -531,8 +512,8 @@ void pci_scan_for_master_abort(struct pci_pbm_info *pbm,
(status & (PCI_STATUS_REC_MASTER_ABORT));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
- printk("%s: Device %s received Master Abort [%016x]\n",
- pbm->name, pci_name(pdev), status);
+ pci_info(pdev, "%s: Device received Master Abort "
+ "[%016x]\n", pbm->name, status);
}
}
@@ -555,8 +536,8 @@ void pci_scan_for_parity_error(struct pci_pbm_info *pbm,
PCI_STATUS_DETECTED_PARITY));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
- printk("%s: Device %s saw Parity Error [%016x]\n",
- pbm->name, pci_name(pdev), status);
+ pci_info(pdev, "%s: Device saw Parity Error [%016x]\n",
+ pbm->name, status);
}
}
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 1994d7638406e..fb5899cbfa515 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -191,8 +191,8 @@ static void sparc64_teardown_msi_irq(unsigned int irq,
break;
}
if (i >= pbm->msi_num) {
- printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
- pbm->name, irq);
+ pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name,
+ irq);
return;
}
@@ -201,9 +201,9 @@ static void sparc64_teardown_msi_irq(unsigned int irq,
err = ops->msi_teardown(pbm, msi_num);
if (err) {
- printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
- "irq %u, gives error %d\n",
- pbm->name, msi_num, irq, err);
+ pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, "
+ "irq %u, gives error %d\n", pbm->name, msi_num, irq,
+ err);
return;
}
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 22f8774977d54..ee4c9a9a171cc 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -518,10 +518,10 @@ static void pcic_map_pci_device(struct linux_pcic *pcic,
* board in a PCI slot. We must remap it
* under 64K but it is not done yet. XXX
*/
- printk("PCIC: Skipping I/O space at 0x%lx, "
- "this will Oops if a driver attaches "
- "device '%s' at %02x:%02x)\n", address,
- namebuf, dev->bus->number, dev->devfn);
+ pci_info(dev, "PCIC: Skipping I/O space at "
+ "0x%lx, this will Oops if a driver "
+ "attaches device '%s'\n", address,
+ namebuf);
}
}
}
@@ -551,8 +551,8 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
p++;
}
if (i >= pcic->pcic_imdim) {
- printk("PCIC: device %s devfn %02x:%02x not found in %d\n",
- namebuf, dev->bus->number, dev->devfn, pcic->pcic_imdim);
+ pci_info(dev, "PCIC: device %s not found in %d\n", namebuf,
+ pcic->pcic_imdim);
dev->irq = 0;
return;
}
@@ -565,7 +565,7 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
real_irq = ivec >> ((i-4) << 2) & 0xF;
} else { /* Corrupted map */
- printk("PCIC: BAD PIN %d\n", i); for (;;) {}
+ pci_info(dev, "PCIC: BAD PIN %d\n", i); for (;;) {}
}
/* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
@@ -574,10 +574,10 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
*/
if (real_irq == 0 || p->force) {
if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */
- printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
+ pci_info(dev, "PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
}
- printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n",
- p->irq, p->pin, dev->bus->number, dev->devfn);
+ pci_info(dev, "PCIC: setting irq %d at pin %d\n", p->irq,
+ p->pin);
real_irq = p->irq;
i = p->pin;
@@ -602,15 +602,13 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
void pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
- int i, has_io, has_mem;
- unsigned int cmd = 0;
struct linux_pcic *pcic;
/* struct linux_pbm_info* pbm = &pcic->pbm; */
int node;
struct pcidev_cookie *pcp;
if (!pcic0_up) {
- printk("pcibios_fixup_bus: no PCIC\n");
+ pci_info(bus, "pcibios_fixup_bus: no PCIC\n");
return;
}
pcic = &pcic0;
@@ -619,44 +617,12 @@ void pcibios_fixup_bus(struct pci_bus *bus)
* Next crud is an equivalent of pbm = pcic_bus_to_pbm(bus);
*/
if (bus->number != 0) {
- printk("pcibios_fixup_bus: nonzero bus 0x%x\n", bus->number);
+ pci_info(bus, "pcibios_fixup_bus: nonzero bus 0x%x\n",
+ bus->number);
return;
}
list_for_each_entry(dev, &bus->devices, bus_list) {
-
- /*
- * Comment from i386 branch:
- * There are buggy BIOSes that forget to enable I/O and memory
- * access to PCI devices. We try to fix this, but we need to
- * be sure that the BIOS didn't forget to assign an address
- * to the device. [mj]
- * OBP is a case of such BIOS :-)
- */
- has_io = has_mem = 0;
- for(i=0; i<6; i++) {
- unsigned long f = dev->resource[i].flags;
- if (f & IORESOURCE_IO) {
- has_io = 1;
- } else if (f & IORESOURCE_MEM)
- has_mem = 1;
- }
- pcic_read_config(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
- if (has_io && !(cmd & PCI_COMMAND_IO)) {
- printk("PCIC: Enabling I/O for device %02x:%02x\n",
- dev->bus->number, dev->devfn);
- cmd |= PCI_COMMAND_IO;
- pcic_write_config(dev->bus, dev->devfn,
- PCI_COMMAND, 2, cmd);
- }
- if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
- printk("PCIC: Enabling memory for device %02x:%02x\n",
- dev->bus->number, dev->devfn);
- cmd |= PCI_COMMAND_MEMORY;
- pcic_write_config(dev->bus, dev->devfn,
- PCI_COMMAND, 2, cmd);
- }
-
node = pdev_to_pnode(&pcic->pbm, dev);
if(node == 0)
node = -1;
@@ -675,6 +641,34 @@ void pcibios_fixup_bus(struct pci_bus *bus)
}
}
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, oldcmd;
+ int i;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = &dev->resource[i];
+
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<i)))
+ continue;
+
+ if (res->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (res->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ if (cmd != oldcmd) {
+ pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
/* Makes compiler happy */
static volatile int pcic_timer_dummy;
@@ -747,17 +741,11 @@ static void watchdog_reset() {
}
#endif
-int pcibios_enable_device(struct pci_dev *pdev, int mask)
-{
- return 0;
-}
-
/*
* NMI
*/
void pcic_nmi(unsigned int pend, struct pt_regs *regs)
{
-
pend = swab32(pend);
if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index f0114007e9159..e5f753cbb1c3d 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -59,24 +59,15 @@ int early_pci_allowed(void)
void early_dump_pci_device(u8 bus, u8 slot, u8 func)
{
+ u32 value[256 / 4];
int i;
- int j;
- u32 val;
- printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:",
- bus, slot, func);
+ pr_info("pci 0000:%02x:%02x.%d config space:\n", bus, slot, func);
- for (i = 0; i < 256; i += 4) {
- if (!(i & 0x0f))
- printk("\n %02x:",i);
+ for (i = 0; i < 256; i += 4)
+ value[i / 4] = read_pci_config(bus, slot, func, i);
- val = read_pci_config(bus, slot, func, i);
- for (j = 0; j < 4; j++) {
- printk(" %02x", val & 0xff);
- val >>= 8;
- }
- }
- printk("\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, value, 256, false);
}
void early_dump_pci_devices(void)
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 54ef19e90705a..13f4485ca388c 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -636,6 +636,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 6ddf0a30c60d1..883024054b056 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -20,8 +20,6 @@
#define pcibios_assign_all_busses() 0
-extern struct pci_controller* pcibios_alloc_controller(void);
-
/* Assume some values. (We should revise them, if necessary) */
#define PCIBIOS_MIN_IO 0x2000
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index b7c7a60c70009..21f13e9aabe1b 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -41,8 +41,8 @@
* pci_bus_add_device
*/
-struct pci_controller* pci_ctrl_head;
-struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;
+static struct pci_controller *pci_ctrl_head;
+static struct pci_controller **pci_ctrl_tail = &pci_ctrl_head;
static int pci_bus_count;
@@ -80,50 +80,6 @@ pcibios_align_resource(void *data, const struct resource *res,
return start;
}
-int
-pcibios_enable_resources(struct pci_dev *dev, int mask)
-{
- u16 cmd, old_cmd;
- int idx;
- struct resource *r;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- old_cmd = cmd;
- for(idx=0; idx<6; idx++) {
- r = &dev->resource[idx];
- if (!r->start && r->end) {
- pr_err("PCI: Device %s not available because "
- "of resource collisions\n", pci_name(dev));
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- if (dev->resource[PCI_ROM_RESOURCE].start)
- cmd |= PCI_COMMAND_MEMORY;
- if (cmd != old_cmd) {
- pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
-}
-
-struct pci_controller * __init pcibios_alloc_controller(void)
-{
- struct pci_controller *pci_ctrl;
-
- pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
- memset(pci_ctrl, 0, sizeof(struct pci_controller));
-
- *pci_ctrl_tail = pci_ctrl;
- pci_ctrl_tail = &pci_ctrl->next;
-
- return pci_ctrl;
-}
-
static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
struct list_head *resources)
{
@@ -223,8 +179,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
for (idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
- pr_err("PCI: Device %s not available because "
- "of resource collisions\n", pci_name(dev));
+ pci_err(dev, "can't enable device: resource collisions\n");
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
@@ -233,29 +188,13 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
- pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
+ pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
-#ifdef CONFIG_PROC_FS
-
-/*
- * Return the index of the PCI controller for device pdev.
- */
-
-int
-pci_controller_num(struct pci_dev *dev)
-{
- struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
- return pci_ctrl->index;
-}
-
-#endif /* CONFIG_PROC_FS */
-
/*
* Platform support for /proc/bus/pci/X/Y mmap()s.
* -- paulus.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 0da18bde6a165..7433035ded955 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -153,6 +153,7 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = {
{ OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
{ OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
{ OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
+ { OSC_PCI_EXPRESS_LTR_CONTROL, "LTR" },
};
static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
@@ -472,9 +473,17 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
}
control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
- | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
| OSC_PCI_EXPRESS_PME_CONTROL;
+ if (IS_ENABLED(CONFIG_PCIEASPM))
+ control |= OSC_PCI_EXPRESS_LTR_CONTROL;
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
+ control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
+
if (pci_aer_available()) {
if (aer_acpi_firmware_first())
dev_info(&device->dev,
@@ -900,11 +909,15 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
host_bridge = to_pci_host_bridge(bus->bridge);
if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
- host_bridge->native_hotplug = 0;
+ host_bridge->native_pcie_hotplug = 0;
+ if (!(root->osc_control_set & OSC_PCI_SHPC_NATIVE_HP_CONTROL))
+ host_bridge->native_shpc_hotplug = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_AER_CONTROL))
host_bridge->native_aer = 0;
if (!(root->osc_control_set & OSC_PCI_EXPRESS_PME_CONTROL))
host_bridge->native_pme = 0;
+ if (!(root->osc_control_set & OSC_PCI_EXPRESS_LTR_CONTROL))
+ host_bridge->native_ltr = 0;
pci_scan_child_bus(bus);
pci_set_host_bridge_release(host_bridge, acpi_pci_root_release_info,
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 2c2ed9cf87962..57410f9c5d44c 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -14,9 +14,6 @@ menuconfig AUXDISPLAY
If you say N, all options in this submenu will be skipped and disabled.
-config CHARLCD
- tristate "Character LCD core support" if COMPILE_TEST
-
if AUXDISPLAY
config HD44780
@@ -137,8 +134,8 @@ config CFAG12864B_RATE
config IMG_ASCII_LCD
tristate "Imagination Technologies ASCII LCD Display"
depends on HAS_IOMEM
- default y if MIPS_MALTA || MIPS_SEAD3
- select SYSCON
+ default y if MIPS_MALTA
+ select MFD_SYSCON
help
Enable this to support the simple ASCII LCD displays found on
development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
@@ -157,8 +154,6 @@ config HT16K33
Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
LED controller driver with keyscan.
-endif # AUXDISPLAY
-
config ARM_CHARLCD
bool "ARM Ltd. Character LCD Driver"
depends on PLAT_VERSATILE
@@ -169,7 +164,9 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
-config PANEL
+endif # AUXDISPLAY
+
+menuconfig PANEL
tristate "Parallel port LCD/Keypad Panel support"
depends on PARPORT
select CHARLCD
@@ -448,3 +445,6 @@ config PANEL_BOOT_MESSAGE
printf()-formatted message is valid with newline and escape codes.
endif # PANEL
+
+config CHARLCD
+ tristate "Character LCD core support" if COMPILE_TEST
diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index b3176ee92b90d..296fb30dfa006 100644
--- a/drivers/auxdisplay/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the on-board character LCD found on some ARM reference boards
* This is basically an Hitachi HD44780 LCD with a custom IP block to drive it
* http://en.wikipedia.org/wiki/HD44780_Character_LCD
* Currently it will just display the text "ARM Linux" and the linux version
*
- * License terms: GNU General Public License (GPL) version 2
* Author: Linus Walleij <triad@df.lth.se>
*/
#include <linux/init.h>
@@ -54,12 +54,14 @@
#define HD_BUSY_FLAG 0x80U
/**
+ * struct charlcd - Private data structure
* @dev: a pointer back to containing device
* @phybase: the offset to the controller in physical memory
* @physize: the size of the physical page
* @virtbase: the offset to the controller in virtual memory
* @irq: reserved interrupt number
* @complete: completion structure for the last LCD command
+ * @init_work: delayed work structure to initialize the display on boot
*/
struct charlcd {
struct device *dev;
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index 41ce4bd96813e..6bd2f65e116a0 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -1,26 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Filename: cfag12864b.c
* Version: 0.1.0
* Description: cfag12864b LCD driver
- * License: GPLv2
* Depends: ks0108
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/init.h>
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index a3874034e2ce1..40c8a552a478e 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -1,26 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Filename: cfag12864bfb.c
* Version: 0.1.0
* Description: cfag12864b LCD framebuffer driver
- * License: GPLv2
* Depends: cfag12864b
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/init.h>
@@ -37,7 +23,7 @@
#define CFAG12864BFB_NAME "cfag12864bfb"
-static struct fb_fix_screeninfo cfag12864bfb_fix = {
+static const struct fb_fix_screeninfo cfag12864bfb_fix = {
.id = "cfag12864b",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
@@ -48,7 +34,7 @@ static struct fb_fix_screeninfo cfag12864bfb_fix = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo cfag12864bfb_var = {
+static const struct fb_var_screeninfo cfag12864bfb_var = {
.xres = CFAG12864B_WIDTH,
.yres = CFAG12864B_HEIGHT,
.xres_virtual = CFAG12864B_WIDTH,
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 642afd88870ba..8673fc2b9eb87 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -1,16 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Character LCD driver for Linux
*
* Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
* Copyright (C) 2016-2017 Glider bvba
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/atomic.h>
+#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
@@ -192,10 +189,11 @@ static void charlcd_print(struct charlcd *lcd, char c)
c = lcd->char_conv[(unsigned char)c];
lcd->ops->write_data(lcd, c);
priv->addr.x++;
+
+ /* prevents the cursor from wrapping onto the next line */
+ if (priv->addr.x == lcd->bwidth)
+ charlcd_gotoxy(lcd);
}
- /* prevents the cursor from wrapping onto the next line */
- if (priv->addr.x == lcd->bwidth)
- charlcd_gotoxy(lcd);
}
static void charlcd_clear_fast(struct charlcd *lcd)
@@ -293,6 +291,79 @@ static int charlcd_init_display(struct charlcd *lcd)
}
/*
+ * Parses an unsigned integer from a string, until a non-digit character
+ * is found. The empty string is not accepted. No overflow checks are done.
+ *
+ * Returns whether the parsing was successful. The output parameters are
+ * written only on success.
+ *
+ * TODO: If the kernel adds an in-place version of kstrtoul(), this function
+ * could easily be replaced by that.
+ */
+static bool parse_n(const char *s, unsigned long *res, const char **next_s)
+{
+ if (!isdigit(*s))
+ return false;
+
+ *res = 0;
+ while (isdigit(*s)) {
+ *res = *res * 10 + (*s - '0');
+ ++s;
+ }
+
+ *next_s = s;
+ return true;
+}
+
+/*
+ * Parses a movement command of the form "(.*);", where the group can be
+ * any number of subcommands of the form "(x|y)[0-9]+".
+ *
+ * Returns whether the command is valid. The position arguments are
+ * only written if the parsing was successful.
+ *
+ * For instance:
+ * - ";" returns (<original x>, <original y>).
+ * - "x1;" returns (1, <original y>).
+ * - "y2x1;" returns (1, 2).
+ * - "x12y34x56;" returns (56, 34).
+ * - "" fails.
+ * - "x" fails.
+ * - "x;" fails.
+ * - "x1" fails.
+ * - "xy12;" fails.
+ * - "x12yy12;" fails.
+ * - "xx" fails.
+ */
+static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
+{
+ unsigned long new_x = *x;
+ unsigned long new_y = *y;
+
+ for (;;) {
+ if (!*s)
+ return false;
+
+ if (*s == ';')
+ break;
+
+ if (*s == 'x') {
+ if (!parse_n(s + 1, &new_x, &s))
+ return false;
+ } else if (*s == 'y') {
+ if (!parse_n(s + 1, &new_y, &s))
+ return false;
+ } else {
+ return false;
+ }
+ }
+
+ *x = new_x;
+ *y = new_y;
+ return true;
+}
+
+/*
* These are the file operation function for user access to /dev/lcd
* This function can also be called from inside the kernel, by
* setting file and ppos to NULL.
@@ -362,6 +433,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
break;
case 'N': /* Two Lines */
priv->flags |= LCD_FLAG_N;
+ processed = 1;
break;
case 'l': /* Shift Cursor Left */
if (priv->addr.x > 0) {
@@ -441,9 +513,9 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
shift ^= 4;
if (*esc >= '0' && *esc <= '9') {
value |= (*esc - '0') << shift;
- } else if (*esc >= 'A' && *esc <= 'Z') {
+ } else if (*esc >= 'A' && *esc <= 'F') {
value |= (*esc - 'A' + 10) << shift;
- } else if (*esc >= 'a' && *esc <= 'z') {
+ } else if (*esc >= 'a' && *esc <= 'f') {
value |= (*esc - 'a' + 10) << shift;
} else {
esc++;
@@ -469,24 +541,11 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
}
case 'x': /* gotoxy : LxXXX[yYYY]; */
case 'y': /* gotoxy : LyYYY[xXXX]; */
- if (!strchr(esc, ';'))
- break;
-
- while (*esc) {
- if (*esc == 'x') {
- esc++;
- if (kstrtoul(esc, 10, &priv->addr.x) < 0)
- break;
- } else if (*esc == 'y') {
- esc++;
- if (kstrtoul(esc, 10, &priv->addr.y) < 0)
- break;
- } else {
- break;
- }
- }
+ /* If the command is valid, move to the new address */
+ if (parse_xy(esc, &priv->addr.x, &priv->addr.y))
+ charlcd_gotoxy(lcd);
- charlcd_gotoxy(lcd);
+ /* Regardless of its validity, mark as processed */
processed = 1;
break;
}
@@ -527,7 +586,7 @@ static void charlcd_write_char(struct charlcd *lcd, char c)
if ((c != '\n') && priv->esc_seq.len >= 0) {
/* yes, let's add this char to the buffer */
priv->esc_seq.buf[priv->esc_seq.len++] = c;
- priv->esc_seq.buf[priv->esc_seq.len] = 0;
+ priv->esc_seq.buf[priv->esc_seq.len] = '\0';
} else {
/* aborts any previous escape sequence */
priv->esc_seq.len = -1;
@@ -536,7 +595,7 @@ static void charlcd_write_char(struct charlcd *lcd, char c)
case LCD_ESCAPE_CHAR:
/* start of an escape sequence */
priv->esc_seq.len = 0;
- priv->esc_seq.buf[priv->esc_seq.len] = 0;
+ priv->esc_seq.buf[priv->esc_seq.len] = '\0';
break;
case '\b':
/* go back one char and clear it */
@@ -555,7 +614,7 @@ static void charlcd_write_char(struct charlcd *lcd, char c)
/* back one char again */
lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT);
break;
- case '\014':
+ case '\f':
/* quickly clear the display */
charlcd_clear_fast(lcd);
break;
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 036eec4042894..78d8f1986fec0 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* HD44780 Character LCD driver for Linux
*
* Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
* Copyright (C) 2016-2017 Glider bvba
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/delay.h>
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index fbfa5b4cc567e..a43276c76fc68 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* HT16K33 driver
*
* Author: Robin van der Gracht <robin@protonic.nl>
*
* Copyright: (C) 2016 Protonic Holland.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 816de9eaac264..abfe3fa9e6f45 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -1,26 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Filename: ks0108.c
* Version: 0.1.0
* Description: ks0108 LCD Controller driver
- * License: GPLv2
* Depends: parport
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index ec5e8800f8adf..3b25a643058c9 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Front panel driver for Linux
* Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
* Copyright (C) 2016-2017 Glider bvba
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This code drives an LCD module (/dev/lcd), and a keypad (/dev/keypad)
* connected to a parallel printer port.
*
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index acd758381c58b..4e9c33ca1f8fd 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -4,11 +4,11 @@
#
obj-$(CONFIG_TCG_TPM) += tpm.o
tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
- tpm-dev-common.o tpmrm-dev.o tpm1_eventlog.o tpm2_eventlog.o \
- tpm2-space.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_eventlog_acpi.o
-tpm-$(CONFIG_EFI) += tpm_eventlog_efi.o
-tpm-$(CONFIG_OF) += tpm_eventlog_of.o
+ tpm-dev-common.o tpmrm-dev.o eventlog/common.o eventlog/tpm1.o \
+ eventlog/tpm2.o tpm2-space.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
+tpm-$(CONFIG_EFI) += eventlog/efi.o
+tpm-$(CONFIG_OF) += eventlog/of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm_eventlog_acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 66f19e93c216c..7c53b1973b62c 100644
--- a/drivers/char/tpm/tpm_eventlog_acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -27,7 +27,8 @@
#include <linux/acpi.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
struct acpi_tcpa {
struct acpi_table_header hdr;
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
new file mode 100644
index 0000000000000..5a8720df2b513
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2005, 2012 IBM Corporation
+ *
+ * Authors:
+ * Kent Yoder <key@linux.vnet.ibm.com>
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Nayna Jain <nayna@linux.vnet.ibm.com>
+ *
+ * Access to the event log created by a system's firmware / BIOS
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/tpm_eventlog.h>
+
+#include "../tpm.h"
+#include "common.h"
+
+static int tpm_bios_measurements_open(struct inode *inode,
+ struct file *file)
+{
+ int err;
+ struct seq_file *seq;
+ struct tpm_chip_seqops *chip_seqops;
+ const struct seq_operations *seqops;
+ struct tpm_chip *chip;
+
+ inode_lock(inode);
+ if (!inode->i_private) {
+ inode_unlock(inode);
+ return -ENODEV;
+ }
+ chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
+ seqops = chip_seqops->seqops;
+ chip = chip_seqops->chip;
+ get_device(&chip->dev);
+ inode_unlock(inode);
+
+ /* now register seq file */
+ err = seq_open(file, seqops);
+ if (!err) {
+ seq = file->private_data;
+ seq->private = chip;
+ }
+
+ return err;
+}
+
+static int tpm_bios_measurements_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct tpm_chip *chip = (struct tpm_chip *)seq->private;
+
+ put_device(&chip->dev);
+
+ return seq_release(inode, file);
+}
+
+static const struct file_operations tpm_bios_measurements_ops = {
+ .owner = THIS_MODULE,
+ .open = tpm_bios_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tpm_bios_measurements_release,
+};
+
+static int tpm_read_log(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip->log.bios_event_log != NULL) {
+ dev_dbg(&chip->dev,
+ "%s: ERROR - event log already initialized\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ rc = tpm_read_log_acpi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
+ rc = tpm_read_log_efi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
+ return tpm_read_log_of(chip);
+}
+
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are set up to
+ * export it to userspace; otherwise nothing is done.
+ *
+ * Returns -ENODEV if the firmware has no event log or securityfs is not
+ * supported.
+ */
+int tpm_bios_log_setup(struct tpm_chip *chip)
+{
+ const char *name = dev_name(&chip->dev);
+ unsigned int cnt;
+ int log_version;
+ int rc = 0;
+
+ rc = tpm_read_log(chip);
+ if (rc < 0)
+ return rc;
+ log_version = rc;
+
+ cnt = 0;
+ chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+ * compiled out. The caller should ignore the ENODEV return code.
+ */
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ chip->bin_log_seqops.chip = chip;
+ if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ chip->bin_log_seqops.seqops =
+ &tpm2_binary_b_measurements_seqops;
+ else
+ chip->bin_log_seqops.seqops =
+ &tpm1_binary_b_measurements_seqops;
+
+
+ chip->bios_dir[cnt] =
+ securityfs_create_file("binary_bios_measurements",
+ 0440, chip->bios_dir[0],
+ (void *)&chip->bin_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+
+ chip->ascii_log_seqops.chip = chip;
+ chip->ascii_log_seqops.seqops =
+ &tpm1_ascii_b_measurements_seqops;
+
+ chip->bios_dir[cnt] =
+ securityfs_create_file("ascii_bios_measurements",
+ 0440, chip->bios_dir[0],
+ (void *)&chip->ascii_log_seqops,
+ &tpm_bios_measurements_ops);
+ if (IS_ERR(chip->bios_dir[cnt]))
+ goto err;
+ cnt++;
+ }
+
+ return 0;
+
+err:
+ rc = PTR_ERR(chip->bios_dir[cnt]);
+ chip->bios_dir[cnt] = NULL;
+ tpm_bios_log_teardown(chip);
+ return rc;
+}
+
+void tpm_bios_log_teardown(struct tpm_chip *chip)
+{
+ int i;
+ struct inode *inode;
+
+ /* securityfs_remove currently doesn't take care of handling sync
+ * between removal and opening of pseudo files. To handle this, a
+ * workaround is added: i_private is set to NULL here during removal
+ * and checked during open(), both within inode_lock()/unlock().
+ * This design ensures that open() either safely gets a kref or fails.
+ */
+ for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+ if (chip->bios_dir[i]) {
+ inode = d_inode(chip->bios_dir[i]);
+ inode_lock(inode);
+ inode->i_private = NULL;
+ inode_unlock(inode);
+ securityfs_remove(chip->bios_dir[i]);
+ }
+ }
+}
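
The teardown comment above describes a small handshake between open() and removal: i_private is the only thing open() trusts, and both sides only touch it under inode_lock(). A stripped-down sketch of that handshake, using hypothetical example_* names rather than the file's real entry points:

    static int example_open(struct inode *inode, struct file *file)
    {
        struct tpm_chip_seqops *priv;

        inode_lock(inode);
        priv = inode->i_private;
        if (!priv) {                    /* teardown already ran */
            inode_unlock(inode);
            return -ENODEV;
        }
        get_device(&priv->chip->dev);   /* pin the chip before dropping the lock */
        inode_unlock(inode);
        file->private_data = priv;
        return 0;
    }

    static void example_teardown(struct dentry *dentry)
    {
        struct inode *inode = d_inode(dentry);

        inode_lock(inode);
        inode->i_private = NULL;        /* future open() calls fail with -ENODEV */
        inode_unlock(inode);
        securityfs_remove(dentry);
    }
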
diff --git a/drivers/char/tpm/eventlog/common.h b/drivers/char/tpm/eventlog/common.h
new file mode 100644
index 0000000000000..47ff8136ceb53
--- /dev/null
+++ b/drivers/char/tpm/eventlog/common.h
@@ -0,0 +1,35 @@
+#ifndef __TPM_EVENTLOG_COMMON_H__
+#define __TPM_EVENTLOG_COMMON_H__
+
+#include "../tpm.h"
+
+extern const struct seq_operations tpm1_ascii_b_measurements_seqops;
+extern const struct seq_operations tpm1_binary_b_measurements_seqops;
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_EFI)
+int tpm_read_log_efi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_efi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
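
The header keeps a stub for each backend that is compiled out, so tpm_read_log() in common.c can simply chain the calls and treat -ENODEV as "try the next source". A short sketch of the same idiom for a hypothetical CONFIG_XYZ backend (not a real option):

    #if defined(CONFIG_XYZ)
    int tpm_read_log_xyz(struct tpm_chip *chip);
    #else
    static inline int tpm_read_log_xyz(struct tpm_chip *chip)
    {
        return -ENODEV;   /* caller falls through to the next log source */
    }
    #endif
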
diff --git a/drivers/char/tpm/tpm_eventlog_efi.c b/drivers/char/tpm/eventlog/efi.c
index e3f9ffd341d23..3e673ab22cb45 100644
--- a/drivers/char/tpm/tpm_eventlog_efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -14,7 +14,8 @@
#include <linux/efi.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
/* read binary bios log from EFI configuration table */
int tpm_read_log_efi(struct tpm_chip *chip)
@@ -50,10 +51,9 @@ int tpm_read_log_efi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = kmalloc(log_size, GFP_KERNEL);
+ log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
if (!log->bios_event_log)
goto err_memunmap;
- memcpy(log->bios_event_log, log_tbl->log, log_size);
log->bios_event_log_end = log->bios_event_log + log_size;
tpm_log_version = log_tbl->version;
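
The kmalloc()+memcpy() pair is folded into kmemdup(), which allocates and copies in one step and reports failure the same way (a NULL return). Roughly, the before/after shape with generic dst/src/len names:

    /* Before: allocate, then copy once the allocation has succeeded. */
    dst = kmalloc(len, GFP_KERNEL);
    if (!dst)
        return -ENOMEM;
    memcpy(dst, src, len);

    /* After: one call does both; NULL still means allocation failure. */
    dst = kmemdup(src, len, GFP_KERNEL);
    if (!dst)
        return -ENOMEM;
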
diff --git a/drivers/char/tpm/tpm_eventlog_of.c b/drivers/char/tpm/eventlog/of.c
index 96fd5646f866d..bba5fba6cb3b2 100644
--- a/drivers/char/tpm/tpm_eventlog_of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -19,7 +19,8 @@
#include <linux/of.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
int tpm_read_log_of(struct tpm_chip *chip)
{
@@ -56,8 +57,8 @@ int tpm_read_log_of(struct tpm_chip *chip)
* but physical tpm needs the conversion.
*/
if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0) {
- size = be32_to_cpup(sizep);
- base = be64_to_cpup(basep);
+ size = be32_to_cpup((__force __be32 *)sizep);
+ base = be64_to_cpup((__force __be64 *)basep);
} else {
size = *sizep;
base = *basep;
@@ -68,14 +69,12 @@ int tpm_read_log_of(struct tpm_chip *chip)
return -EIO;
}
- log->bios_event_log = kmalloc(size, GFP_KERNEL);
+ log->bios_event_log = kmemdup(__va(base), size, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
log->bios_event_log_end = log->bios_event_log + size;
- memcpy(log->bios_event_log, __va(base), size);
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
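
The new __force casts only exist to satisfy sparse: the device-tree property is raw big-endian data behind a plain u32/u64 pointer, so it is reinterpreted as __be32/__be64 before the byte swap. A hedged sketch of that conversion on a property pointer (property name and error handling here are illustrative, not lifted from this hunk):

    const u32 *sizep;
    u64 size;

    sizep = of_get_property(np, "linux,sml-size", NULL);
    if (!sizep)
        return -EIO;

    /* The cast tells sparse this really is big-endian data; be32_to_cpup()
     * performs the actual conversion to CPU byte order.
     */
    size = be32_to_cpup((__force __be32 *)sizep);
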
diff --git a/drivers/char/tpm/tpm1_eventlog.c b/drivers/char/tpm/eventlog/tpm1.c
index add798bd69d0f..58c84784ba25c 100644
--- a/drivers/char/tpm/tpm1_eventlog.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -28,7 +28,8 @@
#include <linux/slab.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
static const char* tcpa_event_type_strings[] = {
@@ -71,7 +72,7 @@ static const char* tcpa_pc_event_id_strings[] = {
};
/* returns pointer to start of pos. entry of tcg log */
-static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
+static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos)
{
loff_t i;
struct tpm_chip *chip = m->private;
@@ -118,7 +119,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
return addr;
}
-static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
+static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
loff_t *pos)
{
struct tcpa_event *event = v;
@@ -149,7 +150,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
return v;
}
-static void tpm_bios_measurements_stop(struct seq_file *m, void *v)
+static void tpm1_bios_measurements_stop(struct seq_file *m, void *v)
{
}
@@ -232,7 +233,7 @@ static int get_event_name(char *dest, struct tcpa_event *event,
}
-static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+static int tpm1_binary_bios_measurements_show(struct seq_file *m, void *v)
{
struct tcpa_event *event = v;
struct tcpa_event temp_event;
@@ -261,18 +262,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
}
-static int tpm_bios_measurements_release(struct inode *inode,
- struct file *file)
-{
- struct seq_file *seq = (struct seq_file *)file->private_data;
- struct tpm_chip *chip = (struct tpm_chip *)seq->private;
-
- put_device(&chip->dev);
-
- return seq_release(inode, file);
-}
-
-static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
+static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
{
int len = 0;
char *eventname;
@@ -305,172 +295,16 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static const struct seq_operations tpm_ascii_b_measurements_seqops = {
- .start = tpm_bios_measurements_start,
- .next = tpm_bios_measurements_next,
- .stop = tpm_bios_measurements_stop,
- .show = tpm_ascii_bios_measurements_show,
+const struct seq_operations tpm1_ascii_b_measurements_seqops = {
+ .start = tpm1_bios_measurements_start,
+ .next = tpm1_bios_measurements_next,
+ .stop = tpm1_bios_measurements_stop,
+ .show = tpm1_ascii_bios_measurements_show,
};
-static const struct seq_operations tpm_binary_b_measurements_seqops = {
- .start = tpm_bios_measurements_start,
- .next = tpm_bios_measurements_next,
- .stop = tpm_bios_measurements_stop,
- .show = tpm_binary_bios_measurements_show,
-};
-
-static int tpm_bios_measurements_open(struct inode *inode,
- struct file *file)
-{
- int err;
- struct seq_file *seq;
- struct tpm_chip_seqops *chip_seqops;
- const struct seq_operations *seqops;
- struct tpm_chip *chip;
-
- inode_lock(inode);
- if (!inode->i_private) {
- inode_unlock(inode);
- return -ENODEV;
- }
- chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
- seqops = chip_seqops->seqops;
- chip = chip_seqops->chip;
- get_device(&chip->dev);
- inode_unlock(inode);
-
- /* now register seq file */
- err = seq_open(file, seqops);
- if (!err) {
- seq = file->private_data;
- seq->private = chip;
- }
-
- return err;
-}
-
-static const struct file_operations tpm_bios_measurements_ops = {
- .owner = THIS_MODULE,
- .open = tpm_bios_measurements_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tpm_bios_measurements_release,
+const struct seq_operations tpm1_binary_b_measurements_seqops = {
+ .start = tpm1_bios_measurements_start,
+ .next = tpm1_bios_measurements_next,
+ .stop = tpm1_bios_measurements_stop,
+ .show = tpm1_binary_bios_measurements_show,
};
-
-static int tpm_read_log(struct tpm_chip *chip)
-{
- int rc;
-
- if (chip->log.bios_event_log != NULL) {
- dev_dbg(&chip->dev,
- "%s: ERROR - event log already initialized\n",
- __func__);
- return -EFAULT;
- }
-
- rc = tpm_read_log_acpi(chip);
- if (rc != -ENODEV)
- return rc;
-
- rc = tpm_read_log_efi(chip);
- if (rc != -ENODEV)
- return rc;
-
- return tpm_read_log_of(chip);
-}
-
-/*
- * tpm_bios_log_setup() - Read the event log from the firmware
- * @chip: TPM chip to use.
- *
- * If an event log is found then the securityfs files are setup to
- * export it to userspace, otherwise nothing is done.
- *
- * Returns -ENODEV if the firmware has no event log or securityfs is not
- * supported.
- */
-int tpm_bios_log_setup(struct tpm_chip *chip)
-{
- const char *name = dev_name(&chip->dev);
- unsigned int cnt;
- int log_version;
- int rc = 0;
-
- rc = tpm_read_log(chip);
- if (rc < 0)
- return rc;
- log_version = rc;
-
- cnt = 0;
- chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
- /* NOTE: securityfs_create_dir can return ENODEV if securityfs is
- * compiled out. The caller should ignore the ENODEV return code.
- */
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
-
- chip->bin_log_seqops.chip = chip;
- if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
- chip->bin_log_seqops.seqops =
- &tpm2_binary_b_measurements_seqops;
- else
- chip->bin_log_seqops.seqops =
- &tpm_binary_b_measurements_seqops;
-
-
- chip->bios_dir[cnt] =
- securityfs_create_file("binary_bios_measurements",
- 0440, chip->bios_dir[0],
- (void *)&chip->bin_log_seqops,
- &tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
-
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
-
- chip->ascii_log_seqops.chip = chip;
- chip->ascii_log_seqops.seqops =
- &tpm_ascii_b_measurements_seqops;
-
- chip->bios_dir[cnt] =
- securityfs_create_file("ascii_bios_measurements",
- 0440, chip->bios_dir[0],
- (void *)&chip->ascii_log_seqops,
- &tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
- }
-
- return 0;
-
-err:
- rc = PTR_ERR(chip->bios_dir[cnt]);
- chip->bios_dir[cnt] = NULL;
- tpm_bios_log_teardown(chip);
- return rc;
-}
-
-void tpm_bios_log_teardown(struct tpm_chip *chip)
-{
- int i;
- struct inode *inode;
-
- /* securityfs_remove currently doesn't take care of handling sync
- * between removal and opening of pseudo files. To handle this, a
- * workaround is added by making i_private = NULL here during removal
- * and to check it during open(), both within inode_lock()/unlock().
- * This design ensures that open() either safely gets kref or fails.
- */
- for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
- if (chip->bios_dir[i]) {
- inode = d_inode(chip->bios_dir[i]);
- inode_lock(inode);
- inode->i_private = NULL;
- inode_unlock(inode);
- securityfs_remove(chip->bios_dir[i]);
- }
- }
-}
diff --git a/drivers/char/tpm/tpm2_eventlog.c b/drivers/char/tpm/eventlog/tpm2.c
index 1ce4411292ba9..1b8fa9de2cace 100644
--- a/drivers/char/tpm/tpm2_eventlog.c
+++ b/drivers/char/tpm/eventlog/tpm2.c
@@ -23,7 +23,8 @@
#include <linux/slab.h>
#include <linux/tpm_eventlog.h>
-#include "tpm.h"
+#include "../tpm.h"
+#include "common.h"
/*
* calc_tpm2_event_size() - calculate the event size, where event
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index 0fc4f20b5f831..d7909ab287a85 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -40,7 +40,7 @@
#define ST33ZP24_OK 0x5A
#define ST33ZP24_UNDEFINED_ERR 0x80
#define ST33ZP24_BADLOCALITY 0x81
-#define ST33ZP24_TISREGISTER_UKNOWN 0x82
+#define ST33ZP24_TISREGISTER_UNKNOWN 0x82
#define ST33ZP24_LOCALITY_NOT_ACTIVATED 0x83
#define ST33ZP24_HASH_END_BEFORE_HASH_START 0x84
#define ST33ZP24_BAD_COMMAND_ORDER 0x85
@@ -84,7 +84,7 @@ static int st33zp24_status_to_errno(u8 code)
return 0;
case ST33ZP24_UNDEFINED_ERR:
case ST33ZP24_BADLOCALITY:
- case ST33ZP24_TISREGISTER_UKNOWN:
+ case ST33ZP24_TISREGISTER_UNKNOWN:
case ST33ZP24_LOCALITY_NOT_ACTIVATED:
case ST33ZP24_HASH_END_BEFORE_HASH_START:
case ST33ZP24_BAD_COMMAND_ORDER:
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index f95b9c75175bc..abd675bec88c8 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -373,8 +373,6 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
int ret;
u8 data;
- if (!chip)
- return -EBUSY;
if (len < TPM_HEADER_SIZE)
return -EBUSY;
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 230b992880249..e4a04b2d3c32d 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -37,7 +37,7 @@ static void timeout_work(struct work_struct *work)
struct file_priv *priv = container_of(work, struct file_priv, work);
mutex_lock(&priv->buffer_mutex);
- atomic_set(&priv->data_pending, 0);
+ priv->data_pending = 0;
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
mutex_unlock(&priv->buffer_mutex);
}
@@ -46,7 +46,6 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
struct file_priv *priv)
{
priv->chip = chip;
- atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
INIT_WORK(&priv->work, timeout_work);
@@ -58,29 +57,24 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct file_priv *priv = file->private_data;
- ssize_t ret_size;
- ssize_t orig_ret_size;
+ ssize_t ret_size = 0;
int rc;
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
- ret_size = atomic_read(&priv->data_pending);
- if (ret_size > 0) { /* relay data */
- orig_ret_size = ret_size;
- if (size < ret_size)
- ret_size = size;
+ mutex_lock(&priv->buffer_mutex);
- mutex_lock(&priv->buffer_mutex);
+ if (priv->data_pending) {
+ ret_size = min_t(ssize_t, size, priv->data_pending);
rc = copy_to_user(buf, priv->data_buffer, ret_size);
- memset(priv->data_buffer, 0, orig_ret_size);
+ memset(priv->data_buffer, 0, priv->data_pending);
if (rc)
ret_size = -EFAULT;
- mutex_unlock(&priv->buffer_mutex);
+ priv->data_pending = 0;
}
- atomic_set(&priv->data_pending, 0);
-
+ mutex_unlock(&priv->buffer_mutex);
return ret_size;
}
@@ -91,17 +85,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
size_t in_size = size;
ssize_t out_size;
+ if (in_size > TPM_BUFSIZE)
+ return -E2BIG;
+
+ mutex_lock(&priv->buffer_mutex);
+
/* Cannot perform a write until the read has cleared either via
* tpm_read or a user_read_timer timeout. This also prevents split
* buffered writes from blocking here.
*/
- if (atomic_read(&priv->data_pending) != 0)
+ if (priv->data_pending != 0) {
+ mutex_unlock(&priv->buffer_mutex);
return -EBUSY;
-
- if (in_size > TPM_BUFSIZE)
- return -E2BIG;
-
- mutex_lock(&priv->buffer_mutex);
+ }
if (copy_from_user
(priv->data_buffer, (void __user *) buf, in_size)) {
@@ -132,7 +128,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
return out_size;
}
- atomic_set(&priv->data_pending, out_size);
+ priv->data_pending = out_size;
mutex_unlock(&priv->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
@@ -149,5 +145,5 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
file->private_data = NULL;
- atomic_set(&priv->data_pending, 0);
+ priv->data_pending = 0;
}
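
With this change data_pending is no longer an atomic_t read outside the lock; it becomes a plain size_t that is only touched with buffer_mutex held, so the emptiness check and the buffer access can no longer race with the timeout worker. A reduced sketch of the resulting locking discipline (example_* names and the buffer size are hypothetical):

    struct example_priv {
        struct mutex buffer_mutex;
        size_t data_pending;            /* protected by buffer_mutex */
        u8 data_buffer[4096];
    };

    static ssize_t example_write(struct example_priv *priv,
                                 const void *buf, size_t len)
    {
        if (len > sizeof(priv->data_buffer))
            return -E2BIG;

        mutex_lock(&priv->buffer_mutex);
        if (priv->data_pending) {       /* previous reply not read yet */
            mutex_unlock(&priv->buffer_mutex);
            return -EBUSY;
        }
        memcpy(priv->data_buffer, buf, len);
        priv->data_pending = len;       /* written only under the mutex */
        mutex_unlock(&priv->buffer_mutex);
        return len;
    }
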
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
index ba3b6f9dacf7c..b24cfb4d3ee1e 100644
--- a/drivers/char/tpm/tpm-dev.h
+++ b/drivers/char/tpm/tpm-dev.h
@@ -8,7 +8,7 @@ struct file_priv {
struct tpm_chip *chip;
/* Data passed to and from the tpm via the read/write calls */
- atomic_t data_pending;
+ size_t data_pending;
struct mutex buffer_mutex;
struct timer_list user_read_timer; /* user needs to claim result */
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index c43a9e28995eb..e32f6e85dc6d0 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -489,7 +489,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
goto out;
}
- tpm_msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT_POLL);
rmb();
} while (time_before(jiffies, stop));
@@ -587,7 +587,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
*/
if (rc == TPM2_RC_TESTING && cc == TPM2_CC_SELF_TEST)
break;
- delay_msec *= 2;
+
if (delay_msec > TPM2_DURATION_LONG) {
if (rc == TPM2_RC_RETRY)
dev_err(&chip->dev, "in retry loop\n");
@@ -597,6 +597,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
break;
}
tpm_msleep(delay_msec);
+ delay_msec *= 2;
memcpy(buf, save, save_size);
}
return ret;
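
Two related tweaks here: the busy-wait in tpm_try_transmit() now sleeps for the shorter TPM_TIMEOUT_POLL, and the self-test retry loop doubles delay_msec after sleeping rather than before, giving a proper exponential backoff that starts from the initial delay. Roughly, the retry loop now has this shape (send_command() is a stand-in for the real transmit call):

    for (;;) {
        rc = send_command(chip, buf);           /* hypothetical helper */
        if (rc != TPM2_RC_RETRY)
            break;
        if (delay_msec > TPM2_DURATION_LONG) {
            dev_err(&chip->dev, "in retry loop\n");
            break;
        }
        tpm_msleep(delay_msec);                 /* sleep first ...          */
        delay_msec *= 2;                        /* ... then double the wait */
    }
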
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 7f2d0f489e9c0..4426649e431c7 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -53,7 +53,10 @@ enum tpm_const {
enum tpm_timeout {
TPM_TIMEOUT = 5, /* msecs */
TPM_TIMEOUT_RETRY = 100, /* msecs */
- TPM_TIMEOUT_RANGE_US = 300 /* usecs */
+ TPM_TIMEOUT_RANGE_US = 300, /* usecs */
+ TPM_TIMEOUT_POLL = 1, /* msecs */
+ TPM_TIMEOUT_USECS_MIN = 100, /* usecs */
+ TPM_TIMEOUT_USECS_MAX = 500 /* usecs */
};
/* TPM addresses */
@@ -590,33 +593,6 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
u32 cc, u8 *buf, size_t *bufsiz);
-extern const struct seq_operations tpm2_binary_b_measurements_seqops;
-
-#if defined(CONFIG_ACPI)
-int tpm_read_log_acpi(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_acpi(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-#if defined(CONFIG_OF)
-int tpm_read_log_of(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_of(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-#if defined(CONFIG_EFI)
-int tpm_read_log_efi(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_efi(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-
int tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
#endif
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 4e4014eabdb9c..6122d3276f722 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -102,8 +102,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
* TPM_RC_REFERENCE_H0 means the session has been
* flushed outside the space
*/
- rc = -ENOENT;
+ *handle = 0;
tpm_buf_destroy(&tbuf);
+ return -ENOENT;
} else if (rc > 0) {
dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
__func__, rc);
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 7f78482cd157b..34fbc6cb097bd 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -511,8 +511,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
sizeof(struct crb_regs_tail));
- if (IS_ERR(priv->regs_t))
- return PTR_ERR(priv->regs_t);
+ if (IS_ERR(priv->regs_t)) {
+ ret = PTR_ERR(priv->regs_t);
+ goto out_relinquish_locality;
+ }
/*
* PTT HW bug w/a: wake up the device to access
@@ -520,7 +522,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
*/
ret = crb_cmd_ready(dev, priv);
if (ret)
- return ret;
+ goto out_relinquish_locality;
pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
@@ -565,6 +567,8 @@ out:
crb_go_idle(dev, priv);
+out_relinquish_locality:
+
__crb_relinquish_locality(dev, priv, 0);
return ret;
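
The fix makes the new early-error paths in crb_map_io() jump to out_relinquish_locality instead of returning directly, so a locality requested earlier in the function is always dropped. This is the usual goto-unwind idiom; a generic sketch with hypothetical acquire/map/ready helpers:

    static int example_setup(struct device *dev)
    {
        int ret;

        ret = acquire_locality(dev);        /* step 1: must be undone on error */
        if (ret)
            return ret;

        ret = map_registers(dev);           /* step 2 */
        if (ret)
            goto out_relinquish_locality;

        ret = make_device_ready(dev);       /* step 3 */
        if (ret)
            goto out_relinquish_locality;

        return 0;

    out_relinquish_locality:
        relinquish_locality(dev);           /* undo step 1 on any failure */
        return ret;
    }
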
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 5a1f47b43947c..8b46aaa9e0492 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -31,12 +31,6 @@
#include "tpm.h"
#include "tpm_tis_core.h"
-/* This is a polling delay to check for status and burstcount.
- * As per ddwg input, expectation is that status check and burstcount
- * check should return within few usecs.
- */
-#define TPM_POLL_SLEEP 1 /* msec */
-
static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
@@ -90,7 +84,8 @@ again:
}
} else {
do {
- tpm_msleep(TPM_POLL_SLEEP);
+ usleep_range(TPM_TIMEOUT_USECS_MIN,
+ TPM_TIMEOUT_USECS_MAX);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
@@ -143,13 +138,58 @@ static bool check_locality(struct tpm_chip *chip, int l)
return false;
}
+static bool locality_inactive(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u8 access;
+
+ rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+ if (rc < 0)
+ return false;
+
+ if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
+ == TPM_ACCESS_VALID)
+ return true;
+
+ return false;
+}
+
static int release_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop, timeout;
+ long rc;
tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
- return 0;
+ stop = jiffies + chip->timeout_a;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -1;
+
+ rc = wait_event_interruptible_timeout(priv->int_queue,
+ (locality_inactive(chip, l)),
+ timeout);
+
+ if (rc > 0)
+ return 0;
+
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ if (locality_inactive(chip, l))
+ return 0;
+ tpm_msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ }
+ return -1;
}
static int request_locality(struct tpm_chip *chip, int l)
@@ -234,7 +274,7 @@ static int get_burstcount(struct tpm_chip *chip)
burstcnt = (value >> 8) & 0xFFFF;
if (burstcnt)
return burstcnt;
- tpm_msleep(TPM_POLL_SLEEP);
+ usleep_range(TPM_TIMEOUT_USECS_MIN, TPM_TIMEOUT_USECS_MAX);
} while (time_before(jiffies, stop));
return -EBUSY;
}
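
The driver-private TPM_POLL_SLEEP (a 1 ms tpm_msleep()) goes away in favour of usleep_range() between the new TPM_TIMEOUT_USECS_MIN/MAX bounds from tpm.h, so status and burstcount polling wakes up after a few hundred microseconds instead of at least a millisecond. The polling loops end up with roughly this shape (simplified; timeout bookkeeping follows the patch):

    unsigned long stop = jiffies + chip->timeout_a;

    do {
        status = chip->ops->status(chip);
        if ((status & mask) == mask)
            return 0;
        /* Poll every 100-500 us rather than sleeping for >= 1 ms. */
        usleep_range(TPM_TIMEOUT_USECS_MIN, TPM_TIMEOUT_USECS_MAX);
    } while (time_before(jiffies, stop));

    return -ETIME;
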
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 55d596f3035e5..5dfd3c17fffc9 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2078,6 +2078,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config);
* @pctldev: the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_group: name of the pin group inside the pin controller
+ *
+ * Calling this function directly from a DeviceTree-supported
+ * pinctrl driver is DEPRECATED. Please see Section 2.1 of
+ * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+ * bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
int gpiochip_add_pingroup_range(struct gpio_chip *chip,
struct pinctrl_dev *pctldev,
@@ -2131,6 +2136,11 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
*
* Returns:
* 0 on success, or a negative error-code on failure.
+ *
+ * Calling this function directly from a DeviceTree-supported
+ * pinctrl driver is DEPRECATED. Please see Section 2.1 of
+ * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+ * bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 3e58ed93d5b1d..2a3b8d7972b54 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -17,3 +17,10 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
+
+# 'remote-endpoint' is fixed up at run-time
+DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7791 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7793 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7795 += -Wno-graph_endpoint
+DTC_FLAGS_rcar_du_of_lvds_r8a7796 += -Wno-graph_endpoint
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 2a972ed6851b2..b03af54367c09 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -35,6 +35,17 @@ config INFINIBAND_USER_ACCESS
libibverbs, libibcm and a hardware driver library from
rdma-core <https://github.com/linux-rdma/rdma-core>.
+config INFINIBAND_USER_ACCESS_UCM
+ bool "Userspace CM (UCM, DEPRECATED)"
+ depends on BROKEN
+ depends on INFINIBAND_USER_ACCESS
+ help
+ The UCM module has known security flaws, which no one is
+	  interested in fixing. The user-space part of this code was
+	  dropped from upstream a long time ago.
+
+ This option is DEPRECATED and planned to be removed.
+
config INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI
bool "Allow experimental legacy verbs in new ioctl uAPI (EXPERIMENTAL)"
depends on INFINIBAND_USER_ACCESS
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index dda9e856e3fa3..61667705d7467 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -5,15 +5,16 @@ user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \
$(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
- $(user_access-y)
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
+obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- security.o nldev.o restrack.o
+ nldev.o restrack.o
+ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
@@ -36,4 +37,4 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
rdma_core.o uverbs_std_types.o uverbs_ioctl.o \
uverbs_ioctl_merge.o uverbs_std_types_cq.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
- uverbs_std_types_mr.o
+ uverbs_std_types_mr.o uverbs_std_types_counters.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 88a7542d8c7be..4f32c4062fb68 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -56,7 +56,6 @@ struct addr_req {
struct sockaddr_storage src_addr;
struct sockaddr_storage dst_addr;
struct rdma_dev_addr *addr;
- struct rdma_addr_client *client;
void *context;
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context);
@@ -68,11 +67,8 @@ struct addr_req {
static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
-static void process_req(struct work_struct *work);
-
-static DEFINE_MUTEX(lock);
+static DEFINE_SPINLOCK(lock);
static LIST_HEAD(req_list);
-static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;
static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
@@ -112,7 +108,7 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
memcpy(&gid, nla_data(curr), nla_len(curr));
}
- mutex_lock(&lock);
+ spin_lock_bh(&lock);
list_for_each_entry(req, &req_list, list) {
if (nlh->nlmsg_seq != req->seq)
continue;
@@ -122,7 +118,7 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
found = 1;
break;
}
- mutex_unlock(&lock);
+ spin_unlock_bh(&lock);
if (!found)
pr_info("Couldn't find request waiting for DGID: %pI6\n",
@@ -223,28 +219,6 @@ int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
}
EXPORT_SYMBOL(rdma_addr_size_kss);
-static struct rdma_addr_client self;
-
-void rdma_addr_register_client(struct rdma_addr_client *client)
-{
- atomic_set(&client->refcount, 1);
- init_completion(&client->comp);
-}
-EXPORT_SYMBOL(rdma_addr_register_client);
-
-static inline void put_client(struct rdma_addr_client *client)
-{
- if (atomic_dec_and_test(&client->refcount))
- complete(&client->comp);
-}
-
-void rdma_addr_unregister_client(struct rdma_addr_client *client)
-{
- put_client(client);
- wait_for_completion(&client->comp);
-}
-EXPORT_SYMBOL(rdma_addr_unregister_client);
-
void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
const struct net_device *dev,
const unsigned char *dst_dev_addr)
@@ -302,7 +276,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
}
EXPORT_SYMBOL(rdma_translate_ip);
-static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
+static void set_timeout(struct addr_req *req, unsigned long time)
{
unsigned long delay;
@@ -310,23 +284,15 @@ static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
if ((long)delay < 0)
delay = 0;
- mod_delayed_work(addr_wq, delayed_work, delay);
+ mod_delayed_work(addr_wq, &req->work, delay);
}
static void queue_req(struct addr_req *req)
{
- struct addr_req *temp_req;
-
- mutex_lock(&lock);
- list_for_each_entry_reverse(temp_req, &req_list, list) {
- if (time_after_eq(req->timeout, temp_req->timeout))
- break;
- }
-
- list_add(&req->list, &temp_req->list);
-
- set_timeout(&req->work, req->timeout);
- mutex_unlock(&lock);
+ spin_lock_bh(&lock);
+ list_add_tail(&req->list, &req_list);
+ set_timeout(req, req->timeout);
+ spin_unlock_bh(&lock);
}
static int ib_nl_fetch_ha(const struct dst_entry *dst,
@@ -584,7 +550,6 @@ static void process_one_req(struct work_struct *_work)
struct addr_req *req;
struct sockaddr *src_in, *dst_in;
- mutex_lock(&lock);
req = container_of(_work, struct addr_req, work.work);
if (req->status == -ENODATA) {
@@ -596,72 +561,33 @@ static void process_one_req(struct work_struct *_work)
req->status = -ETIMEDOUT;
} else if (req->status == -ENODATA) {
/* requeue the work for retrying again */
- set_timeout(&req->work, req->timeout);
- mutex_unlock(&lock);
+ spin_lock_bh(&lock);
+ if (!list_empty(&req->list))
+ set_timeout(req, req->timeout);
+ spin_unlock_bh(&lock);
return;
}
}
- list_del(&req->list);
- mutex_unlock(&lock);
-
- /*
- * Although the work will normally have been canceled by the
- * workqueue, it can still be requeued as long as it is on the
- * req_list, so it could have been requeued before we grabbed &lock.
- * We need to cancel it after it is removed from req_list to really be
- * sure it is safe to free.
- */
- cancel_delayed_work(&req->work);
req->callback(req->status, (struct sockaddr *)&req->src_addr,
req->addr, req->context);
- put_client(req->client);
- kfree(req);
-}
-
-static void process_req(struct work_struct *work)
-{
- struct addr_req *req, *temp_req;
- struct sockaddr *src_in, *dst_in;
- struct list_head done_list;
-
- INIT_LIST_HEAD(&done_list);
-
- mutex_lock(&lock);
- list_for_each_entry_safe(req, temp_req, &req_list, list) {
- if (req->status == -ENODATA) {
- src_in = (struct sockaddr *) &req->src_addr;
- dst_in = (struct sockaddr *) &req->dst_addr;
- req->status = addr_resolve(src_in, dst_in, req->addr,
- true, req->seq);
- if (req->status && time_after_eq(jiffies, req->timeout))
- req->status = -ETIMEDOUT;
- else if (req->status == -ENODATA) {
- set_timeout(&req->work, req->timeout);
- continue;
- }
- }
- list_move_tail(&req->list, &done_list);
- }
-
- mutex_unlock(&lock);
-
- list_for_each_entry_safe(req, temp_req, &done_list, list) {
- list_del(&req->list);
- /* It is safe to cancel other work items from this work item
- * because at a time there can be only one work item running
- * with this single threaded work queue.
+ req->callback = NULL;
+
+ spin_lock_bh(&lock);
+ if (!list_empty(&req->list)) {
+ /*
+ * Although the work will normally have been canceled by the
+ * workqueue, it can still be requeued as long as it is on the
+ * req_list.
*/
cancel_delayed_work(&req->work);
- req->callback(req->status, (struct sockaddr *) &req->src_addr,
- req->addr, req->context);
- put_client(req->client);
+ list_del_init(&req->list);
kfree(req);
}
+ spin_unlock_bh(&lock);
}
-int rdma_resolve_ip(struct rdma_addr_client *client,
- struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
@@ -693,8 +619,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
req->addr = addr;
req->callback = callback;
req->context = context;
- req->client = client;
- atomic_inc(&client->refcount);
INIT_DELAYED_WORK(&req->work, process_one_req);
req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
@@ -710,7 +634,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
break;
default:
ret = req->status;
- atomic_dec(&client->refcount);
goto err;
}
return ret;
@@ -742,18 +665,36 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
struct addr_req *req, *temp_req;
+ struct addr_req *found = NULL;
- mutex_lock(&lock);
+ spin_lock_bh(&lock);
list_for_each_entry_safe(req, temp_req, &req_list, list) {
if (req->addr == addr) {
- req->status = -ECANCELED;
- req->timeout = jiffies;
- list_move(&req->list, &req_list);
- set_timeout(&req->work, req->timeout);
+ /*
+ * Removing from the list means we take ownership of
+ * the req
+ */
+ list_del_init(&req->list);
+ found = req;
break;
}
}
- mutex_unlock(&lock);
+ spin_unlock_bh(&lock);
+
+ if (!found)
+ return;
+
+ /*
+ * sync canceling the work after removing it from the req_list
+ * guarantees no work is running and none will be started.
+ */
+ cancel_delayed_work_sync(&found->work);
+
+ if (found->callback)
+ found->callback(-ECANCELED, (struct sockaddr *)&found->src_addr,
+ found->addr, found->context);
+
+ kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);
@@ -791,8 +732,8 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
dev_addr.net = &init_net;
init_completion(&ctx.comp);
- ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
- &dev_addr, 1000, resolve_cb, &ctx);
+ ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr,
+ &dev_addr, 1000, resolve_cb, &ctx);
if (ret)
return ret;
@@ -810,11 +751,17 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
{
+ struct addr_req *req;
+
if (event == NETEVENT_NEIGH_UPDATE) {
struct neighbour *neigh = ctx;
- if (neigh->nud_state & NUD_VALID)
- set_timeout(&work, jiffies);
+ if (neigh->nud_state & NUD_VALID) {
+ spin_lock_bh(&lock);
+ list_for_each_entry(req, &req_list, list)
+ set_timeout(req, jiffies);
+ spin_unlock_bh(&lock);
+ }
}
return 0;
}
@@ -830,14 +777,13 @@ int addr_init(void)
return -ENOMEM;
register_netevent_notifier(&nb);
- rdma_addr_register_client(&self);
return 0;
}
void addr_cleanup(void)
{
- rdma_addr_unregister_client(&self);
unregister_netevent_notifier(&nb);
destroy_workqueue(addr_wq);
+ WARN_ON(!list_empty(&req_list));
}
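
The addr.c rework drops the single global delayed work and the rdma_addr_client refcounting: every addr_req now owns its own delayed_work, the request list is protected by a BH-safe spinlock, and cancellation claims a request by unlinking it before doing a synchronous cancel. A reduced sketch of that ownership hand-off (example_* names are illustrative):

    struct example_req {
        struct list_head list;          /* on req_list while pending */
        struct delayed_work work;
    };

    static DEFINE_SPINLOCK(example_lock);
    static LIST_HEAD(example_req_list);

    static void example_cancel(struct example_req *target)
    {
        struct example_req *req, *found = NULL;

        spin_lock_bh(&example_lock);
        list_for_each_entry(req, &example_req_list, list) {
            if (req == target) {
                list_del_init(&req->list);  /* unlinking == taking ownership */
                found = req;
                break;
            }
        }
        spin_unlock_bh(&example_lock);

        if (!found)
            return;

        /* Nothing can requeue an unlinked request, so a sync cancel
         * guarantees no handler is running before the free.
         */
        cancel_delayed_work_sync(&found->work);
        kfree(found);
    }
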
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3330d97faa1e5..71a34bee453d8 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -125,6 +125,16 @@ const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
}
EXPORT_SYMBOL(ib_cache_gid_type_str);
+/** rdma_is_zero_gid - Check if given GID is zero or not.
+ * @gid: GID to check
+ * Returns true if given GID is zero, returns false otherwise.
+ */
+bool rdma_is_zero_gid(const union ib_gid *gid)
+{
+ return !memcmp(gid, &zgid, sizeof(*gid));
+}
+EXPORT_SYMBOL(rdma_is_zero_gid);
+
int ib_cache_gid_parse_type_str(const char *buf)
{
unsigned int i;
@@ -149,6 +159,11 @@ int ib_cache_gid_parse_type_str(const char *buf)
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
+static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
+{
+ return device->cache.ports[port - rdma_start_port(device)].gid;
+}
+
static void del_roce_gid(struct ib_device *device, u8 port_num,
struct ib_gid_table *table, int ix)
{
@@ -231,7 +246,7 @@ static int add_modify_gid(struct ib_gid_table *table,
* So ignore such behavior for IB link layer and don't
* fail the call, but don't add such entry to GID cache.
*/
- if (!memcmp(gid, &zgid, sizeof(*gid)))
+ if (rdma_is_zero_gid(gid))
return 0;
}
@@ -264,7 +279,7 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
if (rdma_protocol_roce(ib_dev, port))
del_roce_gid(ib_dev, port, table, ix);
- memcpy(&table->data_vec[ix].gid, &zgid, sizeof(zgid));
+ memset(&table->data_vec[ix].gid, 0, sizeof(table->data_vec[ix].gid));
memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr));
table->data_vec[ix].context = NULL;
}
@@ -363,10 +378,10 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
* IB spec version 1.3 section 4.1.1 point (6) and
* section 12.7.10 and section 12.7.20
*/
- if (!memcmp(gid, &zgid, sizeof(*gid)))
+ if (rdma_is_zero_gid(gid))
return -EINVAL;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
mutex_lock(&table->lock);
@@ -433,7 +448,7 @@ _ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
int ret = 0;
int ix;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
mutex_lock(&table->lock);
@@ -472,7 +487,7 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
int ix;
bool deleted = false;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
mutex_lock(&table->lock);
@@ -496,7 +511,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
{
struct ib_gid_table *table;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
if (index < 0 || index >= table->sz)
return -EINVAL;
@@ -589,7 +604,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
if (!rdma_is_port_valid(ib_dev, port))
return -ENOENT;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
if (ndev)
mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -647,7 +662,7 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
!rdma_protocol_roce(ib_dev, port))
return -EPROTONOSUPPORT;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
read_lock_irqsave(&table->rwlock, flags);
for (i = 0; i < table->sz; i++) {
@@ -724,8 +739,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
mutex_lock(&table->lock);
for (i = 0; i < table->sz; ++i) {
- if (memcmp(&table->data_vec[i].gid, &zgid,
- sizeof(table->data_vec[i].gid))) {
+ if (!rdma_is_zero_gid(&table->data_vec[i].gid)) {
del_gid(ib_dev, port, table, i);
deleted = true;
}
@@ -747,7 +761,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
unsigned int gid_type;
unsigned long mask;
- table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+ table = rdma_gid_table(ib_dev, port);
mask = GID_ATTR_FIND_MASK_GID_TYPE |
GID_ATTR_FIND_MASK_DEFAULT |
@@ -772,8 +786,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
}
}
-static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
- struct ib_gid_table *table)
+static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table)
{
unsigned int i;
unsigned long roce_gid_type_mask;
@@ -783,8 +797,7 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
num_default_gids = hweight_long(roce_gid_type_mask);
for (i = 0; i < num_default_gids && i < table->sz; i++) {
- struct ib_gid_table_entry *entry =
- &table->data_vec[i];
+ struct ib_gid_table_entry *entry = &table->data_vec[i];
entry->props |= GID_TABLE_ENTRY_DEFAULT;
current_gid = find_next_bit(&roce_gid_type_mask,
@@ -792,59 +805,42 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
current_gid);
entry->attr.gid_type = current_gid++;
}
+}
- return 0;
+
+static void gid_table_release_one(struct ib_device *ib_dev)
+{
+ struct ib_gid_table *table;
+ u8 port;
+
+ for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+ table = ib_dev->cache.ports[port].gid;
+ release_gid_table(table);
+ ib_dev->cache.ports[port].gid = NULL;
+ }
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
u8 port;
struct ib_gid_table *table;
- int err = 0;
for (port = 0; port < ib_dev->phys_port_cnt; port++) {
u8 rdma_port = port + rdma_start_port(ib_dev);
- table =
- alloc_gid_table(
+ table = alloc_gid_table(
ib_dev->port_immutable[rdma_port].gid_tbl_len);
- if (!table) {
- err = -ENOMEM;
+ if (!table)
goto rollback_table_setup;
- }
- err = gid_table_reserve_default(ib_dev,
- port + rdma_start_port(ib_dev),
- table);
- if (err)
- goto rollback_table_setup;
+ gid_table_reserve_default(ib_dev, rdma_port, table);
ib_dev->cache.ports[port].gid = table;
}
-
return 0;
rollback_table_setup:
- for (port = 0; port < ib_dev->phys_port_cnt; port++) {
- table = ib_dev->cache.ports[port].gid;
-
- cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
- table);
- release_gid_table(table);
- }
-
- return err;
-}
-
-static void gid_table_release_one(struct ib_device *ib_dev)
-{
- struct ib_gid_table *table;
- u8 port;
-
- for (port = 0; port < ib_dev->phys_port_cnt; port++) {
- table = ib_dev->cache.ports[port].gid;
- release_gid_table(table);
- ib_dev->cache.ports[port].gid = NULL;
- }
+ gid_table_release_one(ib_dev);
+ return -ENOMEM;
}
static void gid_table_cleanup_one(struct ib_device *ib_dev)
@@ -886,7 +882,7 @@ int ib_get_cached_gid(struct ib_device *device,
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- table = device->cache.ports[port_num - rdma_start_port(device)].gid;
+ table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
read_unlock_irqrestore(&table->rwlock, flags);
@@ -1104,7 +1100,7 @@ static int config_non_roce_gid_cache(struct ib_device *device,
gid_attr.device = device;
gid_attr.port_num = port;
- table = device->cache.ports[port - rdma_start_port(device)].gid;
+ table = rdma_gid_table(device, port);
mutex_lock(&table->lock);
for (i = 0; i < gid_tbl_len; ++i) {
@@ -1137,7 +1133,7 @@ static void ib_cache_update(struct ib_device *device,
if (!rdma_is_port_valid(device, port))
return;
- table = device->cache.ports[port - rdma_start_port(device)].gid;
+ table = rdma_gid_table(device, port);
tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
if (!tprops)
@@ -1300,13 +1296,3 @@ void ib_cache_cleanup_one(struct ib_device *device)
flush_workqueue(ib_wq);
gid_table_cleanup_one(device);
}
-
-void __init ib_cache_setup(void)
-{
- roce_gid_mgmt_init();
-}
-
-void __exit ib_cache_cleanup(void)
-{
- roce_gid_mgmt_cleanup();
-}
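
rdma_is_zero_gid() and the local rdma_gid_table() helper replace the repeated memcmp(..., &zgid, ...) checks and the long cache.ports[port - rdma_start_port(dev)].gid expressions scattered through this file. A small usage sketch (example_lookup() is illustrative, not a function added by the patch):

    static int example_lookup(struct ib_device *dev, u8 port,
                              const union ib_gid *gid)
    {
        struct ib_gid_table *table;

        if (rdma_is_zero_gid(gid))          /* reject the all-zero GID up front */
            return -EINVAL;

        table = rdma_gid_table(dev, port);  /* shorthand for the cache lookup */
        /* ... search "table" under its lock ... */
        return 0;
    }
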
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 36a4d90a7b47b..27a7b0a2e27a7 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -452,6 +452,32 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
cm_id_priv->private_data_len = private_data_len;
}
+static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
+ struct ib_grh *grh, struct cm_av *av)
+{
+ struct rdma_ah_attr new_ah_attr;
+ int ret;
+
+ av->port = port;
+ av->pkey_index = wc->pkey_index;
+
+ /*
+ * av->ah_attr might be initialized based on past wc during incoming
+ * connect request or while sending out connect request. So initialize
+ * a new ah_attr on stack. If initialization fails, old ah_attr is
+ * used for sending any responses. If initialization is successful,
+ * then the new ah_attr is used by overwriting the old one.
+ */
+ ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
+ port->port_num, wc,
+ grh, &new_ah_attr);
+ if (ret)
+ return ret;
+
+ memcpy(&av->ah_attr, &new_ah_attr, sizeof(new_ah_attr));
+ return 0;
+}
+
static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
struct ib_grh *grh, struct cm_av *av)
{
@@ -509,6 +535,7 @@ static struct cm_port *get_cm_port_from_path(struct sa_path_rec *path)
static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
struct cm_id_private *cm_id_priv)
{
+ struct rdma_ah_attr new_ah_attr;
struct cm_device *cm_dev;
struct cm_port *port;
int ret;
@@ -524,15 +551,26 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
return ret;
av->port = port;
+
+ /*
+ * av->ah_attr might be initialized based on wc or during
+ * request processing time. So initialize a new ah_attr on stack.
+ * If initialization fails, old ah_attr is used for sending any
+ * responses. If initialization is successful, then the new ah_attr
+ * is used by overwriting the old one.
+ */
ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
- &av->ah_attr);
+ &new_ah_attr);
if (ret)
return ret;
av->timeout = path->packet_life_time + 1;
ret = add_cm_id_to_port_list(cm_id_priv, av, port);
- return ret;
+ if (ret)
+ return ret;
+ memcpy(&av->ah_attr, &new_ah_attr, sizeof(new_ah_attr));
+ return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -1669,7 +1707,9 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
spin_lock_irq(&cm_id_priv->lock);
work = cm_dequeue_work(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
- BUG_ON(!work);
+ if (!work)
+ return;
+
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
&work->cm_event);
cm_free_work(work);
@@ -3189,12 +3229,6 @@ static int cm_lap_handler(struct cm_work *work)
if (!cm_id_priv)
return -EINVAL;
- ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
- if (ret)
- goto deref;
-
param = &work->cm_event.param.lap_rcvd;
memset(&work->path[0], 0, sizeof(work->path[1]));
cm_path_set_rec_type(work->port->cm_dev->ib_device,
@@ -3239,10 +3273,16 @@ static int cm_lap_handler(struct cm_work *work)
goto unlock;
}
- cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
- cm_id_priv->tid = lap_msg->hdr.tid;
+ ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto unlock;
+
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
cm_id_priv);
+ cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
+ cm_id_priv->tid = lap_msg->hdr.tid;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
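
Both cm_init_av_for_lap() and the reworked cm_init_av_by_path() build the address-handle attributes into a stack-local rdma_ah_attr and only copy it into the cm_av once every fallible step has succeeded, so a failed re-initialization leaves the previous, working attributes untouched. In sketch form (build_new_ah_attr() is a stand-in for the real init calls):

    static int example_reinit_av(struct cm_av *av)
    {
        struct rdma_ah_attr new_ah_attr;
        int ret;

        ret = build_new_ah_attr(&new_ah_attr);  /* may fail */
        if (ret)
            return ret;     /* av->ah_attr keeps its last-known-good value */

        /* Commit only after everything that can fail has succeeded. */
        memcpy(&av->ah_attr, &new_ah_attr, sizeof(new_ah_attr));
        return 0;
    }
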
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a693fcd4c513a..6813ee717a38e 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -146,6 +146,34 @@ const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
}
EXPORT_SYMBOL(rdma_consumer_reject_data);
+/**
+ * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
+ * @id: Communication Identifier
+ */
+struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
+{
+ struct rdma_id_private *id_priv;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (id->device->node_type == RDMA_NODE_RNIC)
+ return id_priv->cm_id.iw;
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_iw_cm_id);
+
+/**
+ * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
+ * @res: rdma resource tracking entry pointer
+ */
+struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
+{
+ struct rdma_id_private *id_priv =
+ container_of(res, struct rdma_id_private, res);
+
+ return &id_priv->id;
+}
+EXPORT_SYMBOL(rdma_res_to_id);
+
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
@@ -156,7 +184,6 @@ static struct ib_client cma_client = {
};
static struct ib_sa_client sa_client;
-static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
@@ -2103,7 +2130,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
event.param.conn.responder_resources = iw_event->ord;
break;
default:
- BUG_ON(1);
+ goto out;
}
event.status = iw_event->status;
@@ -2936,7 +2963,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
- ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
+ ret = rdma_resolve_ip(cma_src_addr(id_priv),
dst_addr, &id->route.addr.dev_addr,
timeout_ms, addr_handler, id_priv);
}
@@ -4573,7 +4600,6 @@ static int __init cma_init(void)
goto err_wq;
ib_sa_register_client(&sa_client);
- rdma_addr_register_client(&addr_client);
register_netdevice_notifier(&cma_nb);
ret = ib_register_client(&cma_client);
@@ -4587,7 +4613,6 @@ static int __init cma_init(void)
err:
unregister_netdevice_notifier(&cma_nb);
- rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
err_wq:
destroy_workqueue(cma_wq);
@@ -4600,7 +4625,6 @@ static void __exit cma_cleanup(void)
rdma_nl_unregister(RDMA_NL_RDMA_CM);
ib_unregister_client(&cma_client);
unregister_netdevice_notifier(&cma_nb);
- rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
unregister_pernet_subsys(&cma_pernet_operations);
destroy_workqueue(cma_wq);
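
rdma_iw_cm_id() and rdma_res_to_id() give consumers a supported way to reach the iWARP cm_id and the rdma_cm_id behind a restrack entry without reaching into rdma_id_private themselves. A usage sketch (the restrack entry is assumed to come from an existing iteration; example_dump() is illustrative):

    static void example_dump(struct rdma_restrack_entry *res)
    {
        struct rdma_cm_id *id = rdma_res_to_id(res);
        struct iw_cm_id *iw_id = rdma_iw_cm_id(id);   /* NULL unless iWARP */

        if (iw_id)
            pr_info("iWARP cm_id %p for rdma_cm_id %p\n", iw_id, id);
    }
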
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 54163a6e4067f..fae417a391fb2 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -88,9 +88,6 @@ int ib_device_register_sysfs(struct ib_device *device,
u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
-void ib_cache_setup(void);
-void ib_cache_cleanup(void);
-
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index ea9fbcfb21bd4..84f51386e1e30 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1225,7 +1225,7 @@ static int __init ib_core_init(void)
nldev_init();
rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
- ib_cache_setup();
+ roce_gid_mgmt_init();
return 0;
@@ -1248,7 +1248,7 @@ err:
static void __exit ib_core_cleanup(void)
{
- ib_cache_cleanup();
+ roce_gid_mgmt_cleanup();
nldev_exit();
rdma_nl_unregister(RDMA_NL_LS);
unregister_lsm_notifier(&ibdev_lsm_nb);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b28452a55a08f..f742ae7a768b2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -651,7 +651,6 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
struct ib_mad_queue *mad_queue;
unsigned long flags;
- BUG_ON(!mad_list->mad_queue);
mad_queue = mad_list->mad_queue;
spin_lock_irqsave(&mad_queue->lock, flags);
list_del(&mad_list->list);
@@ -1557,7 +1556,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
mad_reg_req->oui, 3)) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i];
- BUG_ON(!*method);
+ if (!*method)
+ goto error3;
goto check_in_use;
}
}
@@ -1567,10 +1567,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
vclass]->oui[i])) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i];
- BUG_ON(*method);
/* Allocate method table for this OUI */
- if ((ret = allocate_method_table(method)))
- goto error3;
+ if (!*method) {
+ ret = allocate_method_table(method);
+ if (ret)
+ goto error3;
+ }
memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
mad_reg_req->oui, 3);
goto check_in_use;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index eb567765f45c7..340c7bea45ab2 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -98,8 +98,83 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING,
.len = IFNAMSIZ },
+ [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING,
+ .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
+ [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 },
+ [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
+ [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
};
+static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
+ enum rdma_nldev_print_type print_type)
+{
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
+ return -EMSGSIZE;
+ if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
+ nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
+ enum rdma_nldev_print_type print_type,
+ u32 value)
+{
+ if (put_driver_name_print_type(msg, name, print_type))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
+ enum rdma_nldev_print_type print_type,
+ u64 value)
+{
+ if (put_driver_name_print_type(msg, name, print_type))
+ return -EMSGSIZE;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
+ RDMA_NLDEV_ATTR_PAD))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
+{
+ return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u32);
+
+int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
+ u32 value)
+{
+ return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
+
+int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
+{
+ return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u64);
+
+int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
+{
+ return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
+ value);
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
+
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
@@ -122,7 +197,8 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
- device->attrs.device_cap_flags, 0))
+ device->attrs.device_cap_flags,
+ RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
ib_get_device_fw_str(device, fw);
@@ -131,10 +207,12 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
return -EMSGSIZE;
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
- be64_to_cpu(device->node_guid), 0))
+ be64_to_cpu(device->node_guid),
+ RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
- be64_to_cpu(device->attrs.sys_image_guid), 0))
+ be64_to_cpu(device->attrs.sys_image_guid),
+ RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
return -EMSGSIZE;
@@ -161,11 +239,11 @@ static int fill_port_info(struct sk_buff *msg,
BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
- (u64)attr.port_cap_flags, 0))
+ (u64)attr.port_cap_flags, RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
if (rdma_protocol_ib(device, port) &&
nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
- attr.subnet_prefix, 0))
+ attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
return -EMSGSIZE;
if (rdma_protocol_ib(device, port)) {
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
@@ -209,8 +287,8 @@ static int fill_res_info_entry(struct sk_buff *msg,
if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
goto err;
- if (nla_put_u64_64bit(msg,
- RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 0))
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
+ RDMA_NLDEV_ATTR_PAD))
goto err;
nla_nest_end(msg, entry_attr);
@@ -282,6 +360,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_qp *qp = container_of(res, struct ib_qp, res);
+ struct rdma_restrack_root *resroot = &qp->device->res;
struct ib_qp_init_attr qp_init_attr;
struct nlattr *entry_attr;
struct ib_qp_attr qp_attr;
@@ -331,6 +410,9 @@ static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
@@ -346,6 +428,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg,
{
struct rdma_id_private *id_priv =
container_of(res, struct rdma_id_private, res);
+ struct rdma_restrack_root *resroot = &id_priv->id.device->res;
struct rdma_cm_id *cm_id = &id_priv->id;
struct nlattr *entry_attr;
@@ -387,6 +470,9 @@ static int fill_res_cm_id_entry(struct sk_buff *msg,
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
@@ -400,6 +486,7 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_cq *cq = container_of(res, struct ib_cq, res);
+ struct rdma_restrack_root *resroot = &cq->device->res;
struct nlattr *entry_attr;
entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
@@ -409,7 +496,7 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
goto err;
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
- atomic_read(&cq->usecnt), 0))
+ atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
goto err;
/* Poll context is only valid for kernel CQs */
@@ -420,6 +507,9 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
@@ -433,6 +523,7 @@ static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_mr *mr = container_of(res, struct ib_mr, res);
+ struct rdma_restrack_root *resroot = &mr->pd->device->res;
struct nlattr *entry_attr;
entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
@@ -444,17 +535,18 @@ static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
goto err;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
goto err;
- if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_IOVA,
- mr->iova, 0))
- goto err;
}
- if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, 0))
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
+ RDMA_NLDEV_ATTR_PAD))
goto err;
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
@@ -468,6 +560,7 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_pd *pd = container_of(res, struct ib_pd, res);
+ struct rdma_restrack_root *resroot = &pd->device->res;
struct nlattr *entry_attr;
entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
@@ -484,7 +577,7 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
goto err;
}
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
- atomic_read(&pd->usecnt), 0))
+ atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
goto err;
if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
@@ -494,6 +587,9 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
if (fill_res_name_pid(msg, res))
goto err;
+ if (resroot->fill_res_entry(msg, res))
+ goto err;
+
nla_nest_end(msg, entry_attr);
return 0;
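
The nldev changes above export rdma_nl_put_driver_u32/_u64 (and their _hex variants) and invoke the per-device fill_res_entry callback from each resource dumper. As a rough, non-authoritative sketch of how a provider could use these helpers to attach vendor key/value pairs under RDMA_NLDEV_ATTR_DRIVER (the "mydrv" names and fields are hypothetical; the nesting mirrors the cxgb4 code added later in this patch):

static int mydrv_fill_res_entry(struct sk_buff *msg,
				struct rdma_restrack_entry *res)
{
	struct ib_qp *ibqp = container_of(res, struct ib_qp, res);
	struct mydrv_qp *qp = to_mydrv_qp(ibqp);	/* hypothetical */
	struct nlattr *table_attr;

	if (res->type != RDMA_RESTRACK_QP)
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "sq_depth", qp->sq_depth) ||
	    rdma_nl_put_driver_u32_hex(msg, "flags", qp->flags)) {
		nla_nest_cancel(msg, table_attr);
		return -EMSGSIZE;
	}

	nla_nest_end(msg, table_attr);
	return 0;
}
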
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index efddd13e3edbe..3b7fa0ccaa08a 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
*/
@@ -12,9 +12,16 @@
#include "cma_priv.h"
+static int fill_res_noop(struct sk_buff *msg,
+ struct rdma_restrack_entry *entry)
+{
+ return 0;
+}
+
void rdma_restrack_init(struct rdma_restrack_root *res)
{
init_rwsem(&res->rwsem);
+ res->fill_res_entry = fill_res_noop;
}
static const char *type2str(enum rdma_restrack_type type)
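
rdma_restrack_init() now installs a no-op fill_res_entry by default, so only providers that opt in dump driver data. A minimal sketch of how a provider overrides the pointer at registration time, mirroring the cxgb4 hunk further down ("mydrv" names are illustrative):

static int mydrv_fill_res_entry(struct sk_buff *msg,
				struct rdma_restrack_entry *res);

void mydrv_register_device(struct mydrv_dev *dev)
{
	/* ... usual ib_device setup ... */
	dev->ibdev.res.fill_res_entry = mydrv_fill_res_entry;
	/* ib_register_device(&dev->ibdev, NULL); */
}
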
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index c0e4fd55e2ccb..a4fbdc5d28fa0 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -44,8 +44,6 @@
static struct workqueue_struct *gid_cache_wq;
-static struct workqueue_struct *gid_cache_wq;
-
enum gid_op_type {
GID_DEL = 0,
GID_ADD
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index b61dda6b04fc9..9b0bea8303e07 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -30,8 +30,6 @@
* SOFTWARE.
*/
-#ifdef CONFIG_SECURITY_INFINIBAND
-
#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>
@@ -751,5 +749,3 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
pkey_index,
map->agent.security);
}
-
-#endif /* CONFIG_SECURITY_INFINIBAND */
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index eab43b17e9cf2..ec8fb289621fb 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -235,7 +235,7 @@ static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
return NULL;
mutex_lock(&mut);
- mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+ mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
mutex_unlock(&mut);
if (mc->id < 0)
goto error;
@@ -1421,6 +1421,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
goto err3;
}
+ mutex_lock(&mut);
+ idr_replace(&multicast_idr, mc, mc->id);
+ mutex_unlock(&mut);
+
mutex_unlock(&file->mut);
ucma_put_ctx(ctx);
return 0;
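
The ucma change above switches to a reserve-then-publish IDR pattern: the id is allocated with a NULL pointer and the fully initialized multicast object is only made visible with idr_replace() once the join has succeeded, so a concurrent lookup cannot observe a half-constructed entry. A generic sketch of the same pattern ("my_obj", "my_idr" and "my_lock" are illustrative names):

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	mutex_lock(&my_lock);
	obj->id = idr_alloc(&my_idr, NULL, 0, 0, GFP_KERNEL);	/* reserve id */
	mutex_unlock(&my_lock);
	if (obj->id < 0) {
		kfree(obj);
		return NULL;
	}

	/* ... finish initializing obj ... */

	mutex_lock(&my_lock);
	idr_replace(&my_idr, obj, obj->id);			/* publish */
	mutex_unlock(&my_lock);
	return obj;
}
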
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2b6c9b5160705..54ab6335c48d2 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -64,8 +64,6 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
}
sg_free_table(&umem->sg_head);
- return;
-
}
/**
@@ -119,16 +117,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
- /*
- * We ask for writable memory if any of the following
- * access flags are set. "Local write" and "remote write"
- * obviously require write access. "Remote atomic" can do
- * things like fetch and add, which will modify memory, and
- * "MW bind" can change permissions by binding a window.
- */
- umem->writable = !!(access &
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+ umem->writable = ib_access_writable(access);
if (access & IB_ACCESS_ON_DEMAND) {
ret = ib_umem_odp_get(context, umem, access);
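
The open-coded writability test in ib_umem_get() is replaced by ib_access_writable(). The helper's exact definition is not part of this hunk, but from the removed lines it is presumed to wrap the same predicate over the access flags that imply write access:

static inline bool ib_access_writable(int access_flags)
{
	/*
	 * "Local write" and "remote write" obviously require write
	 * access.  "Remote atomic" can do things like fetch and add,
	 * which will modify memory, and "MW bind" can change
	 * permissions by binding a window.
	 */
	return access_flags & (IB_ACCESS_LOCAL_WRITE |
			       IB_ACCESS_REMOTE_WRITE |
			       IB_ACCESS_REMOTE_ATOMIC |
			       IB_ACCESS_MW_BIND);
}
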
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index cfb51618ab7a7..c0d40fc3a53a6 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -263,6 +263,7 @@ struct ib_uverbs_flow_spec {
struct ib_uverbs_flow_spec_action_tag flow_tag;
struct ib_uverbs_flow_spec_action_drop drop;
struct ib_uverbs_flow_spec_action_handle action;
+ struct ib_uverbs_flow_spec_action_count flow_count;
};
};
@@ -287,6 +288,7 @@ extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL);
extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_XRCD);
extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION);
extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DM);
+extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS);
#define IB_UVERBS_DECLARE_CMD(name) \
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e3662a8ee4655..3179a95c6f5ea 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2748,43 +2748,82 @@ out_put:
struct ib_uflow_resources {
size_t max;
size_t num;
- struct ib_flow_action *collection[0];
+ size_t collection_num;
+ size_t counters_num;
+ struct ib_counters **counters;
+ struct ib_flow_action **collection;
};
static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
struct ib_uflow_resources *resources;
- resources =
- kmalloc(struct_size(resources, collection, num_specs),
- GFP_KERNEL);
+ resources = kzalloc(sizeof(*resources), GFP_KERNEL);
if (!resources)
- return NULL;
+ goto err_res;
+
+ resources->counters =
+ kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
+
+ if (!resources->counters)
+ goto err_cnt;
+
+ resources->collection =
+ kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
+
+ if (!resources->collection)
+ goto err_collection;
- resources->num = 0;
resources->max = num_specs;
return resources;
+
+err_collection:
+ kfree(resources->counters);
+err_cnt:
+ kfree(resources);
+err_res:
+ return NULL;
}
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
unsigned int i;
- for (i = 0; i < uflow_res->num; i++)
+ for (i = 0; i < uflow_res->collection_num; i++)
atomic_dec(&uflow_res->collection[i]->usecnt);
+ for (i = 0; i < uflow_res->counters_num; i++)
+ atomic_dec(&uflow_res->counters[i]->usecnt);
+
+ kfree(uflow_res->collection);
+ kfree(uflow_res->counters);
kfree(uflow_res);
}
static void flow_resources_add(struct ib_uflow_resources *uflow_res,
- struct ib_flow_action *action)
+ enum ib_flow_spec_type type,
+ void *ibobj)
{
WARN_ON(uflow_res->num >= uflow_res->max);
- atomic_inc(&action->usecnt);
- uflow_res->collection[uflow_res->num++] = action;
+ switch (type) {
+ case IB_FLOW_SPEC_ACTION_HANDLE:
+ atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
+ uflow_res->collection[uflow_res->collection_num++] =
+ (struct ib_flow_action *)ibobj;
+ break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
+ uflow_res->counters[uflow_res->counters_num++] =
+ (struct ib_counters *)ibobj;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ uflow_res->num++;
}
static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
@@ -2821,9 +2860,29 @@ static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
return -EINVAL;
ib_spec->action.size =
sizeof(struct ib_flow_spec_action_handle);
- flow_resources_add(uflow_res, ib_spec->action.act);
+ flow_resources_add(uflow_res,
+ IB_FLOW_SPEC_ACTION_HANDLE,
+ ib_spec->action.act);
uobj_put_obj_read(ib_spec->action.act);
break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ if (kern_spec->flow_count.size !=
+ sizeof(struct ib_uverbs_flow_spec_action_count))
+ return -EINVAL;
+ ib_spec->flow_count.counters =
+ uobj_get_obj_read(counters,
+ UVERBS_OBJECT_COUNTERS,
+ kern_spec->flow_count.handle,
+ ucontext);
+ if (!ib_spec->flow_count.counters)
+ return -EINVAL;
+ ib_spec->flow_count.size =
+ sizeof(struct ib_flow_spec_action_count);
+ flow_resources_add(uflow_res,
+ IB_FLOW_SPEC_ACTION_COUNT,
+ ib_spec->flow_count.counters);
+ uobj_put_obj_read(ib_spec->flow_count.counters);
+ break;
default:
return -EINVAL;
}
@@ -2948,6 +3007,28 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
break;
+ case IB_FLOW_SPEC_GRE:
+ ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
+ memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
+ break;
+ case IB_FLOW_SPEC_MPLS:
+ ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
+ memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
+ break;
default:
return -EINVAL;
}
@@ -3507,6 +3588,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
uflow_res);
if (err)
goto err_free;
+
flow_attr->size +=
((union ib_flow_spec *) ib_spec)->size;
cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
@@ -3519,11 +3601,16 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
err = -EINVAL;
goto err_free;
}
- flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
+
+ flow_id = qp->device->create_flow(qp, flow_attr,
+ IB_FLOW_DOMAIN_USER, uhw);
+
if (IS_ERR(flow_id)) {
err = PTR_ERR(flow_id);
goto err_free;
}
+ atomic_inc(&qp->usecnt);
+ flow_id->qp = qp;
flow_id->uobject = uobj;
uobj->object = flow_id;
uflow = container_of(uobj, typeof(*uflow), uobject);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 4445d8ee93144..3ae2339dd27a9 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -41,6 +41,8 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
@@ -1090,6 +1092,44 @@ err:
return;
}
+static void ib_uverbs_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct ib_device *ib_dev = ibcontext->device;
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
+ if (!owning_process)
+ return;
+
+ owning_mm = get_task_mm(owning_process);
+ if (!owning_mm) {
+ pr_info("no mm, disassociate ucontext is pending task termination\n");
+ while (1) {
+ put_task_struct(owning_process);
+ usleep_range(1000, 2000);
+ owning_process = get_pid_task(ibcontext->tgid,
+ PIDTYPE_PID);
+ if (!owning_process ||
+ owning_process->state == TASK_DEAD) {
+ pr_info("disassociate ucontext done, task was terminated\n");
+ /* in case the task was dead we need to release
+ * the task struct.
+ */
+ if (owning_process)
+ put_task_struct(owning_process);
+ return;
+ }
+ }
+ }
+
+ down_write(&owning_mm->mmap_sem);
+ ib_dev->disassociate_ucontext(ibcontext);
+ up_write(&owning_mm->mmap_sem);
+ mmput(owning_mm);
+ put_task_struct(owning_process);
+}
+
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
struct ib_device *ib_dev)
{
@@ -1130,7 +1170,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
* (e.g mmput).
*/
ib_uverbs_event_handler(&file->event_handler, &event);
- ib_dev->disassociate_ucontext(ucontext);
+ ib_uverbs_disassociate_ucontext(ucontext);
mutex_lock(&file->cleanup_mutex);
ib_uverbs_cleanup_ucontext(file, ucontext, true);
mutex_unlock(&file->cleanup_mutex);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 569f48bd821e9..b570acbd94af2 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -302,7 +302,8 @@ static DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
&UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL),
&UVERBS_OBJECT(UVERBS_OBJECT_XRCD),
&UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION),
- &UVERBS_OBJECT(UVERBS_OBJECT_DM));
+ &UVERBS_OBJECT(UVERBS_OBJECT_DM),
+ &UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS));
const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
{
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
new file mode 100644
index 0000000000000..03b182a684a64
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "uverbs.h"
+#include <rdma/uverbs_std_types.h>
+
+static int uverbs_free_counters(struct ib_uobject *uobject,
+ enum rdma_remove_reason why)
+{
+ struct ib_counters *counters = uobject->object;
+
+ if (why == RDMA_REMOVE_DESTROY &&
+ atomic_read(&counters->usecnt))
+ return -EBUSY;
+
+ return counters->device->destroy_counters(counters);
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(struct ib_device *ib_dev,
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_counters *counters;
+ struct ib_uobject *uobj;
+ int ret;
+
+ /*
+ * This check should be removed once the infrastructure
+ * has the ability to remove methods from the parse tree once
+ * such a condition is met.
+ */
+ if (!ib_dev->create_counters)
+ return -EOPNOTSUPP;
+
+ uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
+ counters = ib_dev->create_counters(ib_dev, attrs);
+ if (IS_ERR(counters)) {
+ ret = PTR_ERR(counters);
+ goto err_create_counters;
+ }
+
+ counters->device = ib_dev;
+ counters->uobject = uobj;
+ uobj->object = counters;
+ atomic_set(&counters->usecnt, 0);
+
+ return 0;
+
+err_create_counters:
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(struct ib_device *ib_dev,
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_counters_read_attr read_attr = {};
+ const struct uverbs_attr *uattr;
+ struct ib_counters *counters =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE);
+ int ret;
+
+ if (!ib_dev->read_counters)
+ return -EOPNOTSUPP;
+
+ if (!atomic_read(&counters->usecnt))
+ return -EINVAL;
+
+ ret = uverbs_copy_from(&read_attr.flags, attrs,
+ UVERBS_ATTR_READ_COUNTERS_FLAGS);
+ if (ret)
+ return ret;
+
+ uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
+ read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
+ read_attr.counters_buff = kcalloc(read_attr.ncounters,
+ sizeof(u64), GFP_KERNEL);
+ if (!read_attr.counters_buff)
+ return -ENOMEM;
+
+ ret = ib_dev->read_counters(counters,
+ &read_attr,
+ attrs);
+ if (ret)
+ goto err_read;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF,
+ read_attr.counters_buff,
+ read_attr.ncounters * sizeof(u64));
+
+err_read:
+ kfree(read_attr.counters_buff);
+ return ret;
+}
+
+static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_CREATE,
+ &UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_NEW,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+
+static DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(UVERBS_METHOD_COUNTERS_DESTROY,
+ uverbs_destroy_def_handler,
+ &UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_DESTROY,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+
+#define MAX_COUNTERS_BUFF_SIZE USHRT_MAX
+static DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_COUNTERS_READ,
+ &UVERBS_ATTR_IDR(UVERBS_ATTR_READ_COUNTERS_HANDLE,
+ UVERBS_OBJECT_COUNTERS,
+ UVERBS_ACCESS_READ,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_READ_COUNTERS_BUFF,
+ UVERBS_ATTR_SIZE(0, MAX_COUNTERS_BUFF_SIZE),
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &UVERBS_ATTR_PTR_IN(UVERBS_ATTR_READ_COUNTERS_FLAGS,
+ UVERBS_ATTR_TYPE(__u32),
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+
+DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COUNTERS,
+ &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_counters),
+ &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_DESTROY),
+ &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_READ));
+
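
The new counters object above dispatches into three provider hooks: create_counters, destroy_counters and read_counters. A minimal, hedged sketch of the provider side (the "mydrv_counters" structure and its hardware handle are hypothetical; only the callback shapes follow the handlers above):

struct mydrv_counters {
	struct ib_counters	ibcntrs;
	u32			hw_id;	/* hypothetical HW counter-set handle */
};

static struct ib_counters *
mydrv_create_counters(struct ib_device *ibdev, struct uverbs_attr_bundle *attrs)
{
	struct mydrv_counters *cntrs = kzalloc(sizeof(*cntrs), GFP_KERNEL);

	if (!cntrs)
		return ERR_PTR(-ENOMEM);
	/* allocate the HW counter set and record it in cntrs->hw_id */
	return &cntrs->ibcntrs;
}

static int mydrv_destroy_counters(struct ib_counters *counters)
{
	/* release the HW counter set, then free the wrapper */
	kfree(container_of(counters, struct mydrv_counters, ibcntrs));
	return 0;
}

static int mydrv_read_counters(struct ib_counters *counters,
			       struct ib_counters_read_attr *attr,
			       struct uverbs_attr_bundle *attrs)
{
	/* copy up to attr->ncounters u64 values into attr->counters_buff */
	return 0;
}
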
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index b0dbae9dd0d75..3d293d01afea4 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -65,7 +65,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
struct ib_cq_init_attr attr = {};
struct ib_cq *cq;
struct ib_uverbs_completion_event_file *ev_file = NULL;
- const struct uverbs_attr *ev_file_attr;
struct ib_uobject *ev_file_uobj;
if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
@@ -87,10 +86,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
UVERBS_ATTR_CREATE_CQ_FLAGS)))
return -EFAULT;
- ev_file_attr = uverbs_attr_get(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
- if (!IS_ERR(ev_file_attr)) {
- ev_file_uobj = ev_file_attr->obj_attr.uobject;
-
+ ev_file_uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL);
+ if (!IS_ERR(ev_file_uobj)) {
ev_file = container_of(ev_file_uobj,
struct ib_uverbs_completion_event_file,
uobj_file.uobj);
@@ -102,8 +99,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(struct ib_device *ib_dev,
goto err_event_file;
}
- obj = container_of(uverbs_attr_get(attrs,
- UVERBS_ATTR_CREATE_CQ_HANDLE)->obj_attr.uobject,
+ obj = container_of(uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_CQ_HANDLE),
typeof(*obj), uobject);
obj->uverbs_file = ucontext->ufile;
obj->comp_events_reported = 0;
@@ -170,13 +167,17 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(struct ib_device *ib_dev,
struct ib_uverbs_file *file,
struct uverbs_attr_bundle *attrs)
{
- struct ib_uverbs_destroy_cq_resp resp;
struct ib_uobject *uobj =
- uverbs_attr_get(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE)->obj_attr.uobject;
- struct ib_ucq_object *obj = container_of(uobj, struct ib_ucq_object,
- uobject);
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE);
+ struct ib_uverbs_destroy_cq_resp resp;
+ struct ib_ucq_object *obj;
int ret;
+ if (IS_ERR(uobj))
+ return PTR_ERR(uobj);
+
+ obj = container_of(uobj, struct ib_ucq_object, uobject);
+
if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
return -EOPNOTSUPP;
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index b4f016dfa23db..a7be51cf2e42c 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -320,7 +320,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(struct ib_device
return ret;
/* No need to check as this attribute is marked as MANDATORY */
- uobj = uverbs_attr_get(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE)->obj_attr.uobject;
+ uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs);
if (IS_ERR(action))
return PTR_ERR(action);
@@ -350,7 +350,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device
if (ret)
return ret;
- uobj = uverbs_attr_get(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE)->obj_attr.uobject;
+ uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE);
action = uobj->object;
if (action->type != IB_FLOW_ACTION_ESP)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 6ddfb1fade79a..0b56828c1319b 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1983,7 +1983,7 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
if (!qp->device->create_flow)
return ERR_PTR(-EOPNOTSUPP);
- flow_id = qp->device->create_flow(qp, flow_attr, domain);
+ flow_id = qp->device->create_flow(qp, flow_attr, domain, NULL);
if (!IS_ERR(flow_id)) {
atomic_inc(&qp->usecnt);
flow_id->qp = qp;
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 837862287a298..c69bc4f520491 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -162,7 +162,6 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
spin_unlock_irq(&rhp->lock);
idr_preload_end();
- BUG_ON(ret == -ENOSPC);
return ret < 0 ? ret : 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index 0a671a61fc926..e0522a5d5a066 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_CXGB4
tristate "Chelsio T4/T5 RDMA Driver"
depends on CHELSIO_T4 && INET
+ depends on INFINIBAND_ADDR_TRANS
select CHELSIO_LIB
select GENERIC_ALLOCATOR
---help---
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index fa40b685831b3..9edd92023e18f 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -3,4 +3,5 @@ ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
-iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o \
+ restrack.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 4cf17c650c36d..0912fa026327b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3210,6 +3210,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
+ cm_id->provider_data = ep;
ep->com.dev = dev;
ep->com.qp = get_qhp(dev, conn_param->qpn);
if (!ep->com.qp) {
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 8310277171211..870649ff049cd 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -55,6 +55,7 @@
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>
+#include <rdma/restrack.h>
#include "cxgb4.h"
#include "cxgb4_uld.h"
@@ -1082,4 +1083,8 @@ extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
+typedef int c4iw_restrack_func(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
+extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
+
#endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 0b9cc73c3ded4..1feade8bb4b3b 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -551,6 +551,13 @@ static struct net_device *get_netdev(struct ib_device *dev, u8 port)
return ndev;
}
+static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res)
+{
+ return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) &&
+ c4iw_restrack_funcs[res->type]) ?
+ c4iw_restrack_funcs[res->type](msg, res) : 0;
+}
+
void c4iw_register_device(struct work_struct *work)
{
int ret;
@@ -645,6 +652,7 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+ dev->ibdev.res.fill_res_entry = fill_res_entry;
memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
sizeof(dev->ibdev.iwcm->ifname));
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index ae167b6866086..4106eed1b8fbd 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1297,8 +1297,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
- wqe = __skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof *wqe);
+ wqe = __skb_put_zero(skb, sizeof(*wqe));
wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID_V(qhp->ep->hwtid) |
@@ -1421,8 +1420,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- wqe = __skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof *wqe);
+ wqe = __skb_put_zero(skb, sizeof(*wqe));
wqe->op_compl = cpu_to_be32(
FW_WR_OP_V(FW_RI_INIT_WR) |
FW_WR_COMPL_F);
@@ -1487,8 +1485,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
}
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
- wqe = __skb_put(skb, sizeof(*wqe));
- memset(wqe, 0, sizeof *wqe);
+ wqe = __skb_put_zero(skb, sizeof(*wqe));
wqe->op_compl = cpu_to_be32(
FW_WR_OP_V(FW_RI_INIT_WR) |
FW_WR_COMPL_F);
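
The three qp.c hunks above are a straight conversion: __skb_put_zero() reserves the bytes and zeroes them in one call, so the separate memset() is dropped. Both forms appear in the diff and are equivalent:

	wqe = __skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	/* becomes */
	wqe = __skb_put_zero(skb, sizeof(*wqe));
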
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
new file mode 100644
index 0000000000000..9a7520ee41e0f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/rdma_cm.h>
+
+#include "iw_cxgb4.h"
+#include <rdma/restrack.h>
+#include <uapi/rdma/rdma_netlink.h>
+
+static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
+{
+ /* WQ+SQ */
+ if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
+ goto err;
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
+{
+ /* RQ */
+ if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
+ goto err;
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
+ struct t4_swsqe *sqe)
+{
+ if (rdma_nl_put_driver_u32(msg, "idx", idx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
+ goto err;
+ if (sqe->complete &&
+ rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
+ goto err;
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+/*
+ * Dump the first and last pending sqes.
+ */
+static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
+ u16 first_idx, struct t4_swsqe *first_sqe,
+ u16 last_idx, struct t4_swsqe *last_sqe)
+{
+ if (!first_sqe)
+ goto out;
+ if (fill_swsqe(msg, sq, first_idx, first_sqe))
+ goto err;
+ if (!last_sqe)
+ goto out;
+ if (fill_swsqe(msg, sq, last_idx, last_sqe))
+ goto err;
+out:
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_qp_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_qp *ibqp = container_of(res, struct ib_qp, res);
+ struct t4_swsqe *fsp = NULL, *lsp = NULL;
+ struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
+ u16 first_sq_idx = 0, last_sq_idx = 0;
+ struct t4_swsqe first_sqe, last_sqe;
+ struct nlattr *table_attr;
+ struct t4_wq wq;
+
+ /* User qp state is not available, so don't dump user qps */
+ if (qhp->ucontext)
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ /* Get a consistent snapshot */
+ spin_lock_irq(&qhp->lock);
+ wq = qhp->wq;
+
+ /* If there are any pending sqes, copy the first and last */
+ if (wq.sq.cidx != wq.sq.pidx) {
+ first_sq_idx = wq.sq.cidx;
+ first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
+ fsp = &first_sqe;
+ last_sq_idx = wq.sq.pidx;
+ if (last_sq_idx-- == 0)
+ last_sq_idx = wq.sq.size - 1;
+ if (last_sq_idx != first_sq_idx) {
+ last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
+ lsp = &last_sqe;
+ }
+ }
+ spin_unlock_irq(&qhp->lock);
+
+ if (fill_sq(msg, &wq))
+ goto err_cancel_table;
+
+ if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
+ goto err_cancel_table;
+
+ if (fill_rq(msg, &wq))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+union union_ep {
+ struct c4iw_listen_ep lep;
+ struct c4iw_ep ep;
+};
+
+static int fill_res_ep_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct rdma_cm_id *cm_id = rdma_res_to_id(res);
+ struct nlattr *table_attr;
+ struct c4iw_ep_common *epcp;
+ struct c4iw_listen_ep *listen_ep = NULL;
+ struct c4iw_ep *ep = NULL;
+ struct iw_cm_id *iw_cm_id;
+ union union_ep *uep;
+
+ iw_cm_id = rdma_iw_cm_id(cm_id);
+ if (!iw_cm_id)
+ return 0;
+ epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
+ if (!epcp)
+ return 0;
+ uep = kcalloc(1, sizeof(*uep), GFP_KERNEL);
+ if (!uep)
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err_free_uep;
+
+ /* Get a consistent snapshot */
+ mutex_lock(&epcp->mutex);
+ if (epcp->state == LISTEN) {
+ uep->lep = *(struct c4iw_listen_ep *)epcp;
+ mutex_unlock(&epcp->mutex);
+ listen_ep = &uep->lep;
+ epcp = &listen_ep->com;
+ } else {
+ uep->ep = *(struct c4iw_ep *)epcp;
+ mutex_unlock(&epcp->mutex);
+ ep = &uep->ep;
+ epcp = &ep->com;
+ }
+
+ if (rdma_nl_put_driver_u32(msg, "state", epcp->state))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u64_hex(msg, "flags", epcp->flags))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
+ goto err_cancel_table;
+
+ if (epcp->state == LISTEN) {
+ if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
+ goto err_cancel_table;
+ } else {
+ if (rdma_nl_put_driver_u32(msg, "hwtid", ep->hwtid))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "ord", ep->ord))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "ird", ep->ird))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "emss", ep->emss))
+ goto err_cancel_table;
+
+ if (!ep->parent_ep && rdma_nl_put_driver_u32(msg, "atid",
+ ep->atid))
+ goto err_cancel_table;
+ }
+ nla_nest_end(msg, table_attr);
+ kfree(uep);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err_free_uep:
+ kfree(uep);
+ return -EMSGSIZE;
+}
+
+static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)
+{
+ if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "size", cq->size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "vector", cq->vector))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "gen", cq->gen))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "error", cq->error))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
+ be64_to_cpu(cq->bits_type_ts)))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "flags", cq->flags))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx,
+ const char *qstr)
+{
+ if (rdma_nl_put_driver_u32(msg, qstr, idx))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "header",
+ be32_to_cpu(cqe->header)))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len)))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "wrid_hi",
+ be32_to_cpu(cqe->u.gen.wrid_hi)))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "wrid_low",
+ be32_to_cpu(cqe->u.gen.wrid_low)))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
+ be64_to_cpu(cqe->bits_type_ts)))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq,
+ struct t4_cqe *cqes)
+{
+ u16 idx;
+
+ idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1;
+ if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
+ goto err;
+ idx = cq->cidx;
+ if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
+ goto err;
+
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,
+ struct t4_cqe *cqes)
+{
+ u16 idx;
+
+ if (!cq->sw_in_use)
+ return 0;
+
+ idx = cq->sw_cidx;
+ if (fill_cqe(msg, cqes, idx, "swcq_idx"))
+ goto err;
+ if (cq->sw_in_use == 1)
+ goto out;
+ idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1;
+ if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
+ goto err;
+out:
+ return 0;
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_cq_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_cq *ibcq = container_of(res, struct ib_cq, res);
+ struct c4iw_cq *chp = to_c4iw_cq(ibcq);
+ struct nlattr *table_attr;
+ struct t4_cqe hwcqes[2];
+ struct t4_cqe swcqes[2];
+ struct t4_cq cq;
+ u16 idx;
+
+ /* User cq state is not available, so don't dump user cqs */
+ if (ibcq->uobject)
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ /* Get a consistent snapshot */
+ spin_lock_irq(&chp->lock);
+
+ /* t4_cq struct */
+ cq = chp->cq;
+
+ /* get 2 hw cqes: cidx-1, and cidx */
+ idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1;
+ hwcqes[0] = chp->cq.queue[idx];
+
+ idx = cq.cidx;
+ hwcqes[1] = chp->cq.queue[idx];
+
+ /* get first and last sw cqes */
+ if (cq.sw_in_use) {
+ swcqes[0] = chp->cq.sw_queue[cq.sw_cidx];
+ if (cq.sw_in_use > 1) {
+ idx = (cq.sw_pidx > 0) ? cq.sw_pidx - 1 : cq.size - 1;
+ swcqes[1] = chp->cq.sw_queue[idx];
+ }
+ }
+
+ spin_unlock_irq(&chp->lock);
+
+ if (fill_cq(msg, &cq))
+ goto err_cancel_table;
+
+ if (fill_swcqes(msg, &cq, swcqes))
+ goto err_cancel_table;
+
+ if (fill_hwcqes(msg, &cq, hwcqes))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
+ struct c4iw_dev *dev = mhp->rhp;
+ u32 stag = mhp->attr.stag;
+ struct nlattr *table_attr;
+ struct fw_ri_tpte tpte;
+ int ret;
+
+ if (!stag)
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte);
+ if (ret) {
+ dev_err(&dev->rdev.lldi.pdev->dev,
+ "%s cxgb4_read_tpte err %d\n", __func__, ret);
+ return 0;
+ }
+
+ if (rdma_nl_put_driver_u32_hex(msg, "idx", stag >> 8))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "valid",
+ FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32_hex(msg, "key", stag & 0xff))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "state",
+ FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "pdid",
+ FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32_hex(msg, "perm",
+ FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32(msg, "ps",
+ FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid))))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u64(msg, "len",
+ ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo)))
+ goto err_cancel_table;
+ if (rdma_nl_put_driver_u32_hex(msg, "pbl_addr",
+ FW_RI_TPTE_PBLADDR_G(ntohl(tpte.nosnoop_pbladdr))))
+ goto err_cancel_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
+ [RDMA_RESTRACK_QP] = fill_res_qp_entry,
+ [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry,
+ [RDMA_RESTRACK_CQ] = fill_res_cq_entry,
+ [RDMA_RESTRACK_MR] = fill_res_mr_entry,
+};
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index ce4010bad9828..f451ba912f472 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -14,7 +14,15 @@ hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \
uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
verbs_txreq.o vnic_main.o vnic_sdma.o
-hfi1-$(CONFIG_DEBUG_FS) += debugfs.o
+
+ifdef CONFIG_DEBUG_FS
+hfi1-y += debugfs.o
+ifdef CONFIG_FAULT_INJECTION
+ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+hfi1-y += fault.o
+endif
+endif
+endif
CFLAGS_trace.o = -I$(src)
ifdef MVERSION
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index b5fab55cc2750..fbe7198a715a1 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -77,6 +77,58 @@ static inline void init_cpu_mask_set(struct cpu_mask_set *set)
set->gen = 0;
}
+/* Increment generation of CPU set if needed */
+static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
+{
+ if (cpumask_equal(&set->mask, &set->used)) {
+ /*
+ * We've used up all the CPUs, bump up the generation
+ * and reset the 'used' map
+ */
+ set->gen++;
+ cpumask_clear(&set->used);
+ }
+}
+
+static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
+{
+ if (cpumask_empty(&set->used) && set->gen) {
+ set->gen--;
+ cpumask_copy(&set->used, &set->mask);
+ }
+}
+
+/* Get the first CPU from the list of unused CPUs in a CPU set data structure */
+static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
+{
+ int cpu;
+
+ if (!diff || !set)
+ return -EINVAL;
+
+ _cpu_mask_set_gen_inc(set);
+
+ /* Find out CPUs left in CPU mask */
+ cpumask_andnot(diff, &set->mask, &set->used);
+
+ cpu = cpumask_first(diff);
+ if (cpu >= nr_cpu_ids) /* empty */
+ cpu = -EINVAL;
+ else
+ cpumask_set_cpu(cpu, &set->used);
+
+ return cpu;
+}
+
+static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
+{
+ if (!set)
+ return;
+
+ cpumask_clear_cpu(cpu, &set->used);
+ _cpu_mask_set_gen_dec(set);
+}
+
/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
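
The cpu_mask_set helpers introduced above pair a "get first unused CPU" operation, which bumps the generation and marks the CPU used, with a "put" that clears the used bit and rolls the generation back when the set drains. An illustrative pairing, not part of the patch:

static int pick_cpu(struct cpu_mask_set *set)
{
	cpumask_var_t diff;
	int cpu;

	if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
		return -ENOMEM;
	cpu = cpu_mask_set_get_first(set, diff);	/* marks cpu as used */
	free_cpumask_var(diff);
	return cpu;
}

static void drop_cpu(struct cpu_mask_set *set, int cpu)
{
	if (cpu >= 0)
		cpu_mask_set_put(set, cpu);		/* clears the used bit */
}
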
@@ -156,7 +208,13 @@ int node_affinity_init(void)
return 0;
}
-void node_affinity_destroy(void)
+static void node_affinity_destroy(struct hfi1_affinity_node *entry)
+{
+ free_percpu(entry->comp_vect_affinity);
+ kfree(entry);
+}
+
+void node_affinity_destroy_all(void)
{
struct list_head *pos, *q;
struct hfi1_affinity_node *entry;
@@ -166,7 +224,7 @@ void node_affinity_destroy(void)
entry = list_entry(pos, struct hfi1_affinity_node,
list);
list_del(pos);
- kfree(entry);
+ node_affinity_destroy(entry);
}
mutex_unlock(&node_affinity.lock);
kfree(hfi1_per_node_cntr);
@@ -180,6 +238,7 @@ static struct hfi1_affinity_node *node_affinity_allocate(int node)
if (!entry)
return NULL;
entry->node = node;
+ entry->comp_vect_affinity = alloc_percpu(u16);
INIT_LIST_HEAD(&entry->list);
return entry;
@@ -209,6 +268,341 @@ static struct hfi1_affinity_node *node_affinity_lookup(int node)
return NULL;
}
+static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
+ u16 __percpu *comp_vect_affinity)
+{
+ int curr_cpu;
+ u16 cntr;
+ u16 prev_cntr;
+ int ret_cpu;
+
+ if (!possible_cpumask) {
+ ret_cpu = -EINVAL;
+ goto fail;
+ }
+
+ if (!comp_vect_affinity) {
+ ret_cpu = -EINVAL;
+ goto fail;
+ }
+
+ ret_cpu = cpumask_first(possible_cpumask);
+ if (ret_cpu >= nr_cpu_ids) {
+ ret_cpu = -EINVAL;
+ goto fail;
+ }
+
+ prev_cntr = *per_cpu_ptr(comp_vect_affinity, ret_cpu);
+ for_each_cpu(curr_cpu, possible_cpumask) {
+ cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);
+
+ if (cntr < prev_cntr) {
+ ret_cpu = curr_cpu;
+ prev_cntr = cntr;
+ }
+ }
+
+ *per_cpu_ptr(comp_vect_affinity, ret_cpu) += 1;
+
+fail:
+ return ret_cpu;
+}
+
+static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
+ u16 __percpu *comp_vect_affinity)
+{
+ int curr_cpu;
+ int max_cpu;
+ u16 cntr;
+ u16 prev_cntr;
+
+ if (!possible_cpumask)
+ return -EINVAL;
+
+ if (!comp_vect_affinity)
+ return -EINVAL;
+
+ max_cpu = cpumask_first(possible_cpumask);
+ if (max_cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu);
+ for_each_cpu(curr_cpu, possible_cpumask) {
+ cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);
+
+ if (cntr > prev_cntr) {
+ max_cpu = curr_cpu;
+ prev_cntr = cntr;
+ }
+ }
+
+ *per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1;
+
+ return max_cpu;
+}
+
+/*
+ * Non-interrupt CPUs are used first, then interrupt CPUs.
+ * Two already allocated cpu masks must be passed.
+ */
+static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry,
+ cpumask_var_t non_intr_cpus,
+ cpumask_var_t available_cpus)
+ __must_hold(&node_affinity.lock)
+{
+ int cpu;
+ struct cpu_mask_set *set = dd->comp_vect;
+
+ lockdep_assert_held(&node_affinity.lock);
+ if (!non_intr_cpus) {
+ cpu = -1;
+ goto fail;
+ }
+
+ if (!available_cpus) {
+ cpu = -1;
+ goto fail;
+ }
+
+ /* Available CPUs for pinning completion vectors */
+ _cpu_mask_set_gen_inc(set);
+ cpumask_andnot(available_cpus, &set->mask, &set->used);
+
+ /* Available CPUs without SDMA engine interrupts */
+ cpumask_andnot(non_intr_cpus, available_cpus,
+ &entry->def_intr.used);
+
+ /* If there are non-interrupt CPUs available, use them first */
+ if (!cpumask_empty(non_intr_cpus))
+ cpu = cpumask_first(non_intr_cpus);
+ else /* Otherwise, use interrupt CPUs */
+ cpu = cpumask_first(available_cpus);
+
+ if (cpu >= nr_cpu_ids) { /* empty */
+ cpu = -1;
+ goto fail;
+ }
+ cpumask_set_cpu(cpu, &set->used);
+
+fail:
+ return cpu;
+}
+
+static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
+{
+ struct cpu_mask_set *set = dd->comp_vect;
+
+ if (cpu < 0)
+ return;
+
+ cpu_mask_set_put(set, cpu);
+}
+
+/* _dev_comp_vect_mappings_destroy() is reentrant */
+static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
+{
+ int i, cpu;
+
+ if (!dd->comp_vect_mappings)
+ return;
+
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ cpu = dd->comp_vect_mappings[i];
+ _dev_comp_vect_cpu_put(dd, cpu);
+ dd->comp_vect_mappings[i] = -1;
+ hfi1_cdbg(AFFINITY,
+ "[%s] Release CPU %d from completion vector %d",
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
+ }
+
+ kfree(dd->comp_vect_mappings);
+ dd->comp_vect_mappings = NULL;
+}
+
+/*
+ * This function creates the table for looking up CPUs for completion vectors.
+ * num_comp_vectors needs to have been initilized before calling this function.
+ */
+static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry)
+ __must_hold(&node_affinity.lock)
+{
+ int i, cpu, ret;
+ cpumask_var_t non_intr_cpus;
+ cpumask_var_t available_cpus;
+
+ lockdep_assert_held(&node_affinity.lock);
+
+ if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
+ free_cpumask_var(non_intr_cpus);
+ return -ENOMEM;
+ }
+
+ dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus,
+ sizeof(*dd->comp_vect_mappings),
+ GFP_KERNEL);
+ if (!dd->comp_vect_mappings) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++)
+ dd->comp_vect_mappings[i] = -1;
+
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
+ available_cpus);
+ if (cpu < 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dd->comp_vect_mappings[i] = cpu;
+ hfi1_cdbg(AFFINITY,
+ "[%s] Completion Vector %d -> CPU %d",
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
+ }
+
+ return 0;
+
+fail:
+ free_cpumask_var(available_cpus);
+ free_cpumask_var(non_intr_cpus);
+ _dev_comp_vect_mappings_destroy(dd);
+
+ return ret;
+}
+
+int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
+{
+ int ret;
+ struct hfi1_affinity_node *entry;
+
+ mutex_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ if (!entry) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ ret = _dev_comp_vect_mappings_create(dd, entry);
+unlock:
+ mutex_unlock(&node_affinity.lock);
+
+ return ret;
+}
+
+void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
+{
+ _dev_comp_vect_mappings_destroy(dd);
+}
+
+int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
+{
+ struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
+ struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
+
+ if (!dd->comp_vect_mappings)
+ return -EINVAL;
+ if (comp_vect >= dd->comp_vect_possible_cpus)
+ return -EINVAL;
+
+ return dd->comp_vect_mappings[comp_vect];
+}
+
+/*
+ * Computes dd->comp_vect_possible_cpus for this device and reserves that
+ * many CPUs from the node's completion vector CPU mask.
+ */
+static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry,
+ bool first_dev_init)
+ __must_hold(&node_affinity.lock)
+{
+ int i, j, curr_cpu;
+ int possible_cpus_comp_vect = 0;
+ struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask;
+
+ lockdep_assert_held(&node_affinity.lock);
+ /*
+ * If there's only one CPU available for completion vectors, then
+ * there will only be one completion vector available. Otherwise,
+ * the number of completion vectors available will be the number of
+ * available CPUs divided by the number of devices in the
+ * local NUMA node.
+ */
+ if (cpumask_weight(&entry->comp_vect_mask) == 1) {
+ possible_cpus_comp_vect = 1;
+ dd_dev_warn(dd,
+ "Number of kernel receive queues is too large for completion vector affinity to be effective\n");
+ } else {
+ possible_cpus_comp_vect +=
+ cpumask_weight(&entry->comp_vect_mask) /
+ hfi1_per_node_cntr[dd->node];
+
+ /*
+ * If the available completion vector CPUs don't divide
+ * evenly among devices, then the first device to be
+ * initialized gets an extra CPU.
+ */
+ if (first_dev_init &&
+ cpumask_weight(&entry->comp_vect_mask) %
+ hfi1_per_node_cntr[dd->node] != 0)
+ possible_cpus_comp_vect++;
+ }
+
+ dd->comp_vect_possible_cpus = possible_cpus_comp_vect;
+
+ /* Reserving CPUs for device completion vector */
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask,
+ entry->comp_vect_affinity);
+ if (curr_cpu < 0)
+ goto fail;
+
+ cpumask_set_cpu(curr_cpu, dev_comp_vect_mask);
+ }
+
+ hfi1_cdbg(AFFINITY,
+ "[%s] Completion vector affinity CPU set(s) %*pbl",
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi),
+ cpumask_pr_args(dev_comp_vect_mask));
+
+ return 0;
+
+fail:
+ for (j = 0; j < i; j++)
+ per_cpu_affinity_put_max(&entry->comp_vect_mask,
+ entry->comp_vect_affinity);
+
+ return curr_cpu;
+}
+
+/*
+ * It assumes dd->comp_vect_possible_cpus is available.
+ */
+static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
+ struct hfi1_affinity_node *entry)
+ __must_hold(&node_affinity.lock)
+{
+ int i, cpu;
+
+ lockdep_assert_held(&node_affinity.lock);
+ if (!dd->comp_vect_possible_cpus)
+ return;
+
+ for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
+ cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
+ entry->comp_vect_affinity);
+ /* Clearing CPU in device completion vector cpu mask */
+ if (cpu >= 0)
+ cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
+ }
+
+ dd->comp_vect_possible_cpus = 0;
+}
+
/*
* Interrupt affinity.
*
@@ -225,7 +619,8 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
int node = pcibus_to_node(dd->pcidev->bus);
struct hfi1_affinity_node *entry;
const struct cpumask *local_mask;
- int curr_cpu, possible, i;
+ int curr_cpu, possible, i, ret;
+ bool new_entry = false;
if (node < 0)
node = numa_node_id();
@@ -247,11 +642,14 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
if (!entry) {
dd_dev_err(dd,
"Unable to allocate global affinity node\n");
- mutex_unlock(&node_affinity.lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
+ new_entry = true;
+
init_cpu_mask_set(&entry->def_intr);
init_cpu_mask_set(&entry->rcv_intr);
+ cpumask_clear(&entry->comp_vect_mask);
cpumask_clear(&entry->general_intr_mask);
/* Use the "real" cpu mask of this node as the default */
cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
@@ -304,10 +702,64 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
&entry->general_intr_mask);
}
- node_affinity_add_tail(entry);
+ /* Determine completion vector CPUs for the entire node */
+ cpumask_and(&entry->comp_vect_mask,
+ &node_affinity.real_cpu_mask, local_mask);
+ cpumask_andnot(&entry->comp_vect_mask,
+ &entry->comp_vect_mask,
+ &entry->rcv_intr.mask);
+ cpumask_andnot(&entry->comp_vect_mask,
+ &entry->comp_vect_mask,
+ &entry->general_intr_mask);
+
+ /*
+ * If there end up being no CPU cores left over for completion
+ * vectors, use the same CPU core as the general/control
+ * context.
+ */
+ if (cpumask_weight(&entry->comp_vect_mask) == 0)
+ cpumask_copy(&entry->comp_vect_mask,
+ &entry->general_intr_mask);
}
+
+ ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
+ if (ret < 0)
+ goto fail;
+
+ if (new_entry)
+ node_affinity_add_tail(entry);
+
mutex_unlock(&node_affinity.lock);
+
return 0;
+
+fail:
+ if (new_entry)
+ node_affinity_destroy(entry);
+ mutex_unlock(&node_affinity.lock);
+ return ret;
+}
+
+void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
+{
+ struct hfi1_affinity_node *entry;
+
+ if (dd->node < 0)
+ return;
+
+ mutex_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ if (!entry)
+ goto unlock;
+
+ /*
+ * Free device completion vector CPUs to be used by future
+ * completion vectors
+ */
+ _dev_comp_vect_cpu_mask_clean_up(dd, entry);
+unlock:
+ mutex_unlock(&node_affinity.lock);
+ dd->node = -1;
}
/*
@@ -456,17 +908,12 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
return -ENOMEM;
- if (cpumask_equal(&set->mask, &set->used)) {
- /*
- * We've used up all the CPUs, bump up the generation
- * and reset the 'used' map
- */
- set->gen++;
- cpumask_clear(&set->used);
+ cpu = cpu_mask_set_get_first(set, diff);
+ if (cpu < 0) {
+ free_cpumask_var(diff);
+ dd_dev_err(dd, "Failure to obtain CPU for IRQ\n");
+ return cpu;
}
- cpumask_andnot(diff, &set->mask, &set->used);
- cpu = cpumask_first(diff);
- cpumask_set_cpu(cpu, &set->used);
free_cpumask_var(diff);
}
@@ -526,10 +973,7 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
if (set) {
cpumask_andnot(&set->used, &set->used, &msix->mask);
- if (cpumask_empty(&set->used) && set->gen) {
- set->gen--;
- cpumask_copy(&set->used, &set->mask);
- }
+ _cpu_mask_set_gen_dec(set);
}
irq_set_affinity_hint(msix->irq, NULL);
@@ -640,10 +1084,7 @@ int hfi1_get_proc_affinity(int node)
* If we've used all available HW threads, clear the mask and start
* overloading.
*/
- if (cpumask_equal(&set->mask, &set->used)) {
- set->gen++;
- cpumask_clear(&set->used);
- }
+ _cpu_mask_set_gen_inc(set);
/*
* If NUMA node has CPUs used by interrupt handlers, include them in the
@@ -767,11 +1208,7 @@ void hfi1_put_proc_affinity(int cpu)
return;
mutex_lock(&affinity->lock);
- cpumask_clear_cpu(cpu, &set->used);
+ cpu_mask_set_put(set, cpu);
hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
- if (cpumask_empty(&set->used) && set->gen) {
- set->gen--;
- cpumask_copy(&set->used, &set->mask);
- }
mutex_unlock(&affinity->lock);
}
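
The completion vector sizing added above splits the node's completion-vector CPUs evenly across the hfi1 devices on that node, with the first device initialized absorbing any remainder and a single shared CPU as the degenerate case. A minimal standalone sketch of that arithmetic follows; the function name and parameters are invented for illustration and are not part of the driver.

#include <stdbool.h>
#include <stdio.h>

static int comp_vect_cpus_for_device(int node_comp_vect_cpus,
                                     int devs_on_node, bool first_dev)
{
        int share;

        if (node_comp_vect_cpus <= 1 || devs_on_node <= 0)
                return 1;       /* degenerate case: everyone shares one CPU */

        /* Base share: evenly split the node's completion-vector CPUs. */
        share = node_comp_vect_cpus / devs_on_node;
        /* The first device initialized absorbs any remainder. */
        if (first_dev && (node_comp_vect_cpus % devs_on_node))
                share++;
        return share;
}

int main(void)
{
        /* 13 completion-vector CPUs, 2 devices: 7 for the first, 6 for the rest. */
        printf("%d %d\n",
               comp_vect_cpus_for_device(13, 2, true),
               comp_vect_cpus_for_device(13, 2, false));
        return 0;
}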
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 2a1e374169c0a..6a7e6ea4e4260 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -98,9 +98,11 @@ void hfi1_put_proc_affinity(int cpu);
struct hfi1_affinity_node {
int node;
+ u16 __percpu *comp_vect_affinity;
struct cpu_mask_set def_intr;
struct cpu_mask_set rcv_intr;
struct cpumask general_intr_mask;
+ struct cpumask comp_vect_mask;
struct list_head list;
};
@@ -116,7 +118,11 @@ struct hfi1_affinity_node_list {
};
int node_affinity_init(void);
-void node_affinity_destroy(void);
+void node_affinity_destroy_all(void);
extern struct hfi1_affinity_node_list node_affinity;
+void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd);
+int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect);
+int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd);
+void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd);
#endif /* _HFI1_AFFINITY_H */
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e6bdd0c1e80a9..6deb101cdd438 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -65,6 +65,7 @@
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"
+#include "fault.h"
#define NUM_IB_PORTS 1
@@ -1032,8 +1033,8 @@ static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
u8 *remote_tx_rate, u16 *link_widths);
-static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
- u8 *flag_bits, u16 *link_widths);
+static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
+ u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
@@ -6355,6 +6356,18 @@ static void handle_8051_request(struct hfi1_pportdata *ppd)
type);
hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
break;
+ case HREQ_LCB_RESET:
+ /* Put the LCB, RX FPE and TX FPE into reset */
+ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
+ /* Make sure the write completed */
+ (void)read_csr(dd, DCC_CFG_RESET);
+ /* Hold the reset long enough to take effect */
+ udelay(1);
+ /* Take the LCB, RX FPE and TX FPE out of reset */
+ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
+ hreq_response(dd, HREQ_SUCCESS, 0);
+
+ break;
case HREQ_CONFIG_DONE:
hreq_response(dd, HREQ_SUCCESS, 0);
break;
@@ -6465,8 +6478,7 @@ static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
reg = read_csr(dd, DCC_CFG_RESET);
write_csr(dd, DCC_CFG_RESET, reg |
- (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
- (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
+ DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
if (!abort) {
udelay(1); /* must hold for the longer of 16cclks or 20ns */
@@ -6531,7 +6543,7 @@ static void _dc_start(struct hfi1_devdata *dd)
__func__);
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
- write_csr(dd, DCC_CFG_RESET, 0x10);
+ write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
/* lcb_shutdown() with abort=1 does not restore these */
write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
dd->dc_shutdown = 0;
@@ -6829,7 +6841,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
}
rcvmask = HFI1_RCVCTRL_CTXT_ENB;
/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
- rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
+ rcvmask |= rcd->rcvhdrtail_kvaddr ?
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
hfi1_rcvctrl(dd, rcvmask, rcd);
hfi1_rcd_put(rcd);
@@ -7352,7 +7364,7 @@ static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
u8 misc_bits, local_flags;
u16 active_tx, active_rx;
- read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
+ read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
tx = widths >> 12;
rx = (widths >> 8) & 0xf;
@@ -8355,7 +8367,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
u32 tail;
int present;
- if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
+ if (!rcd->rcvhdrtail_kvaddr)
present = (rcd->seq_cnt ==
rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
else /* is RDMA rtail */
@@ -8824,29 +8836,29 @@ static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
GENERAL_CONFIG, frame);
}
-static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
- u8 *flag_bits, u16 *link_widths)
+static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
+ u8 *flag_bits, u16 *link_widths)
{
u32 frame;
- read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
+ read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
&frame);
*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}
-static int write_vc_local_link_width(struct hfi1_devdata *dd,
- u8 misc_bits,
- u8 flag_bits,
- u16 link_widths)
+static int write_vc_local_link_mode(struct hfi1_devdata *dd,
+ u8 misc_bits,
+ u8 flag_bits,
+ u16 link_widths)
{
u32 frame;
frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
| (u32)link_widths << LINK_WIDTH_SHIFT;
- return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
+ return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
frame);
}
@@ -9316,8 +9328,16 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
if (loopback == LOOPBACK_SERDES)
misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
- ret = write_vc_local_link_width(dd, misc_bits, 0,
- opa_to_vc_link_widths(
+ /*
+ * An external device configuration request is used to reset the LCB
+ * in order to retry obtaining operational lanes when the first
+ * attempt is unsuccessful.
+ */
+ if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
+ misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
+
+ ret = write_vc_local_link_mode(dd, misc_bits, 0,
+ opa_to_vc_link_widths(
ppd->link_width_enabled));
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
@@ -10495,9 +10515,9 @@ u32 driver_pstate(struct hfi1_pportdata *ppd)
case HLS_DN_OFFLINE:
return OPA_PORTPHYSSTATE_OFFLINE;
case HLS_VERIFY_CAP:
- return IB_PORTPHYSSTATE_POLLING;
+ return IB_PORTPHYSSTATE_TRAINING;
case HLS_GOING_UP:
- return IB_PORTPHYSSTATE_POLLING;
+ return IB_PORTPHYSSTATE_TRAINING;
case HLS_GOING_OFFLINE:
return OPA_PORTPHYSSTATE_OFFLINE;
case HLS_LINK_COOLDOWN:
@@ -11823,7 +11843,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
/* reset the tail and hdr addresses, and sequence count */
write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
rcd->rcvhdrq_dma);
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
+ if (rcd->rcvhdrtail_kvaddr)
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
rcd->rcvhdrqtailaddr_dma);
rcd->seq_cnt = 1;
@@ -11903,7 +11923,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
+ if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
/* See comment on RcvCtxtCtrl.TailUpd above */
@@ -14620,7 +14640,9 @@ static void init_rxe(struct hfi1_devdata *dd)
/* Have 16 bytes (4DW) of bypass header available in header queue */
val = read_csr(dd, RCV_BYPASS);
- val |= (4ull << 16);
+ val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
+ val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
+ RCV_BYPASS_HDR_SIZE_SHIFT);
write_csr(dd, RCV_BYPASS, val);
}
@@ -15022,13 +15044,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret < 0)
goto bail_cleanup;
- /* verify that reads actually work, save revision for reset check */
- dd->revision = read_csr(dd, CCE_REVISION);
- if (dd->revision == ~(u64)0) {
- dd_dev_err(dd, "cannot read chip CSRs\n");
- ret = -EINVAL;
- goto bail_cleanup;
- }
dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
& CCE_REVISION_CHIP_REV_MAJOR_MASK;
dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
@@ -15224,6 +15239,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_cleanup;
+ ret = hfi1_comp_vectors_set_up(dd);
+ if (ret)
+ goto bail_clear_intr;
+
/* set up LCB access - must be after set_up_interrupts() */
init_lcb_access(dd);
@@ -15266,6 +15285,7 @@ bail_free_rcverr:
bail_free_cntrs:
free_cntrs(dd);
bail_clear_intr:
+ hfi1_comp_vectors_clean_up(dd);
hfi1_clean_up_interrupts(dd);
bail_cleanup:
hfi1_pcie_ddcleanup(dd);
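
The HREQ_LCB_RESET handling above uses a common CSR pulse sequence: write the reset bits, read the register back so the posted write is known to have completed, hold the reset briefly, then write the release value. Below is a small self-contained sketch of that sequence; the register variable and helper names are stand-ins, not kernel or driver APIs.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* A plain variable models the register; a real driver uses CSR accessors. */
static uint64_t fake_reset_reg;

static uint64_t read_reg(void)        { return fake_reset_reg; }
static void     write_reg(uint64_t v) { fake_reset_reg = v; }

#define RESET_ASSERT_BITS  0x17ull  /* e.g. LCB + TX/RX FPE into reset   */
#define RESET_RELEASE_BITS 0x10ull  /* e.g. out of reset, clocks enabled */

/* Pulse a reset: assert, flush the posted write, hold, then release. */
static void pulse_reset(void)
{
        write_reg(RESET_ASSERT_BITS);
        (void)read_reg();      /* read-back ensures the write completed */
        usleep(1);             /* hold the reset long enough to latch   */
        write_reg(RESET_RELEASE_BITS);
}

int main(void)
{
        pulse_reset();
        printf("reset register now 0x%llx\n",
               (unsigned long long)fake_reset_reg);
        return 0;
}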
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index c0d70f2550502..fdf389e46e19c 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -196,6 +196,15 @@
#define LSTATE_ARMED 0x3
#define LSTATE_ACTIVE 0x4
+/* DCC_CFG_RESET reset states */
+#define LCB_RX_FPE_TX_FPE_INTO_RESET (DCC_CFG_RESET_RESET_LCB | \
+ DCC_CFG_RESET_RESET_TX_FPE | \
+ DCC_CFG_RESET_RESET_RX_FPE | \
+ DCC_CFG_RESET_ENABLE_CCLK_BCC)
+ /* 0x17 */
+
+#define LCB_RX_FPE_TX_FPE_OUT_OF_RESET DCC_CFG_RESET_ENABLE_CCLK_BCC /* 0x10 */
+
/* DC8051_STS_CUR_STATE port values (physical link states) */
#define PLS_DISABLED 0x30
#define PLS_OFFLINE 0x90
@@ -283,6 +292,7 @@
#define HREQ_SET_TX_EQ_ABS 0x04
#define HREQ_SET_TX_EQ_REL 0x05
#define HREQ_ENABLE 0x06
+#define HREQ_LCB_RESET 0x07
#define HREQ_CONFIG_DONE 0xfe
#define HREQ_INTERFACE_TEST 0xff
@@ -383,7 +393,7 @@
#define TX_SETTINGS 0x06
#define VERIFY_CAP_LOCAL_PHY 0x07
#define VERIFY_CAP_LOCAL_FABRIC 0x08
-#define VERIFY_CAP_LOCAL_LINK_WIDTH 0x09
+#define VERIFY_CAP_LOCAL_LINK_MODE 0x09
#define LOCAL_DEVICE_ID 0x0a
#define RESERVED_REGISTERS 0x0b
#define LOCAL_LNI_INFO 0x0c
@@ -584,8 +594,9 @@ enum {
#define LOOPBACK_LCB 2
#define LOOPBACK_CABLE 3 /* external cable */
-/* set up serdes bit in MISC_CONFIG_BITS */
+/* set up bits in MISC_CONFIG_BITS */
#define LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT 0
+#define EXT_CFG_LCB_RESET_SUPPORTED_SHIFT 3
/* read and write hardware registers */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 793514f1d15fb..ee6dca5e2a2ff 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -97,8 +97,11 @@
#define DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT 32
#define DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK 0x700000000ull
#define DCC_CFG_RESET (DCC_CSRS + 0x000000000000)
-#define DCC_CFG_RESET_RESET_LCB_SHIFT 0
-#define DCC_CFG_RESET_RESET_RX_FPE_SHIFT 2
+#define DCC_CFG_RESET_RESET_LCB BIT_ULL(0)
+#define DCC_CFG_RESET_RESET_TX_FPE BIT_ULL(1)
+#define DCC_CFG_RESET_RESET_RX_FPE BIT_ULL(2)
+#define DCC_CFG_RESET_RESET_8051 BIT_ULL(3)
+#define DCC_CFG_RESET_ENABLE_CCLK_BCC BIT_ULL(4)
#define DCC_CFG_SC_VL_TABLE_15_0 (DCC_CSRS + 0x000000000028)
#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY0_SHIFT 0
#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY10_SHIFT 40
@@ -635,6 +638,12 @@
#define RCV_BTH_QP_KDETH_QP_MASK 0xFFull
#define RCV_BTH_QP_KDETH_QP_SHIFT 16
#define RCV_BYPASS (RXE + 0x000000000038)
+#define RCV_BYPASS_HDR_SIZE_SHIFT 16
+#define RCV_BYPASS_HDR_SIZE_MASK 0x1Full
+#define RCV_BYPASS_HDR_SIZE_SMASK 0x1F0000ull
+#define RCV_BYPASS_BYPASS_CONTEXT_SHIFT 0
+#define RCV_BYPASS_BYPASS_CONTEXT_MASK 0xFFull
+#define RCV_BYPASS_BYPASS_CONTEXT_SMASK 0xFFull
#define RCV_CONTEXTS (RXE + 0x000000000010)
#define RCV_COUNTER_ARRAY32 (RXE + 0x000000000400)
#define RCV_COUNTER_ARRAY64 (RXE + 0x000000000500)
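
The new RCV_BYPASS_HDR_SIZE defines follow the file's MASK/SHIFT/SMASK convention, where the shifted mask is simply MASK << SHIFT, and init_rxe() uses them for a read-modify-write that leaves the other bits of RCV_BYPASS untouched. A self-contained sketch of that convention, with the same field width but otherwise made-up values:

#include <stdint.h>
#include <stdio.h>

/* Same convention as the driver: MASK is field-width, SMASK is in place. */
#define HDR_SIZE_SHIFT 16
#define HDR_SIZE_MASK  0x1Full
#define HDR_SIZE_SMASK (HDR_SIZE_MASK << HDR_SIZE_SHIFT)   /* 0x1F0000 */

/* Read-modify-write one field of a register image without touching others. */
static uint64_t set_hdr_size(uint64_t reg, uint64_t dwords)
{
        reg &= ~HDR_SIZE_SMASK;                             /* clear old field */
        reg |= (dwords & HDR_SIZE_MASK) << HDR_SIZE_SHIFT;  /* insert new one  */
        return reg;
}

int main(void)
{
        uint64_t reg = 0x12345678ull;        /* pretend register contents */

        printf("0x%llx\n", (unsigned long long)set_hdr_size(reg, 4));
        return 0;
}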
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 852173bf05d0b..9f992ae36c891 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -60,15 +60,13 @@
#include "device.h"
#include "qp.h"
#include "sdma.h"
+#include "fault.h"
static struct dentry *hfi1_dbg_root;
/* wrappers to enforce srcu in seq file */
-static ssize_t hfi1_seq_read(
- struct file *file,
- char __user *buf,
- size_t size,
- loff_t *ppos)
+ssize_t hfi1_seq_read(struct file *file, char __user *buf, size_t size,
+ loff_t *ppos)
{
struct dentry *d = file->f_path.dentry;
ssize_t r;
@@ -81,10 +79,7 @@ static ssize_t hfi1_seq_read(
return r;
}
-static loff_t hfi1_seq_lseek(
- struct file *file,
- loff_t offset,
- int whence)
+loff_t hfi1_seq_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry *d = file->f_path.dentry;
loff_t r;
@@ -100,48 +95,6 @@ static loff_t hfi1_seq_lseek(
#define private2dd(file) (file_inode(file)->i_private)
#define private2ppd(file) (file_inode(file)->i_private)
-#define DEBUGFS_SEQ_FILE_OPS(name) \
-static const struct seq_operations _##name##_seq_ops = { \
- .start = _##name##_seq_start, \
- .next = _##name##_seq_next, \
- .stop = _##name##_seq_stop, \
- .show = _##name##_seq_show \
-}
-
-#define DEBUGFS_SEQ_FILE_OPEN(name) \
-static int _##name##_open(struct inode *inode, struct file *s) \
-{ \
- struct seq_file *seq; \
- int ret; \
- ret = seq_open(s, &_##name##_seq_ops); \
- if (ret) \
- return ret; \
- seq = s->private_data; \
- seq->private = inode->i_private; \
- return 0; \
-}
-
-#define DEBUGFS_FILE_OPS(name) \
-static const struct file_operations _##name##_file_ops = { \
- .owner = THIS_MODULE, \
- .open = _##name##_open, \
- .read = hfi1_seq_read, \
- .llseek = hfi1_seq_lseek, \
- .release = seq_release \
-}
-
-#define DEBUGFS_FILE_CREATE(name, parent, data, ops, mode) \
-do { \
- struct dentry *ent; \
- ent = debugfs_create_file(name, mode, parent, \
- data, ops); \
- if (!ent) \
- pr_warn("create of %s failed\n", name); \
-} while (0)
-
-#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
- DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
-
static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
{
struct hfi1_opcode_stats_perctx *opstats;
@@ -1160,232 +1113,6 @@ DEBUGFS_SEQ_FILE_OPS(sdma_cpu_list);
DEBUGFS_SEQ_FILE_OPEN(sdma_cpu_list)
DEBUGFS_FILE_OPS(sdma_cpu_list);
-#ifdef CONFIG_FAULT_INJECTION
-static void *_fault_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct hfi1_opcode_stats_perctx *opstats;
-
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-static void *_fault_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct hfi1_opcode_stats_perctx *opstats;
-
- ++*pos;
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-static void _fault_stats_seq_stop(struct seq_file *s, void *v)
-{
-}
-
-static int _fault_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos = v;
- loff_t i = *spos, j;
- u64 n_packets = 0, n_bytes = 0;
- struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
- struct hfi1_devdata *dd = dd_from_dev(ibd);
- struct hfi1_ctxtdata *rcd;
-
- for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
- rcd = hfi1_rcd_get_by_index(dd, j);
- if (rcd) {
- n_packets += rcd->opstats->stats[i].n_packets;
- n_bytes += rcd->opstats->stats[i].n_bytes;
- }
- hfi1_rcd_put(rcd);
- }
- for_each_possible_cpu(j) {
- struct hfi1_opcode_stats_perctx *sp =
- per_cpu_ptr(dd->tx_opstats, j);
-
- n_packets += sp->stats[i].n_packets;
- n_bytes += sp->stats[i].n_bytes;
- }
- if (!n_packets && !n_bytes)
- return SEQ_SKIP;
- if (!ibd->fault_opcode->n_rxfaults[i] &&
- !ibd->fault_opcode->n_txfaults[i])
- return SEQ_SKIP;
- seq_printf(s, "%02llx %llu/%llu (faults rx:%llu faults: tx:%llu)\n", i,
- (unsigned long long)n_packets,
- (unsigned long long)n_bytes,
- (unsigned long long)ibd->fault_opcode->n_rxfaults[i],
- (unsigned long long)ibd->fault_opcode->n_txfaults[i]);
- return 0;
-}
-
-DEBUGFS_SEQ_FILE_OPS(fault_stats);
-DEBUGFS_SEQ_FILE_OPEN(fault_stats);
-DEBUGFS_FILE_OPS(fault_stats);
-
-static void fault_exit_opcode_debugfs(struct hfi1_ibdev *ibd)
-{
- debugfs_remove_recursive(ibd->fault_opcode->dir);
- kfree(ibd->fault_opcode);
- ibd->fault_opcode = NULL;
-}
-
-static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd)
-{
- struct dentry *parent = ibd->hfi1_ibdev_dbg;
-
- ibd->fault_opcode = kzalloc(sizeof(*ibd->fault_opcode), GFP_KERNEL);
- if (!ibd->fault_opcode)
- return -ENOMEM;
-
- ibd->fault_opcode->attr.interval = 1;
- ibd->fault_opcode->attr.require_end = ULONG_MAX;
- ibd->fault_opcode->attr.stacktrace_depth = 32;
- ibd->fault_opcode->attr.dname = NULL;
- ibd->fault_opcode->attr.verbose = 0;
- ibd->fault_opcode->fault_by_opcode = false;
- ibd->fault_opcode->opcode = 0;
- ibd->fault_opcode->mask = 0xff;
-
- ibd->fault_opcode->dir =
- fault_create_debugfs_attr("fault_opcode",
- parent,
- &ibd->fault_opcode->attr);
- if (IS_ERR(ibd->fault_opcode->dir)) {
- kfree(ibd->fault_opcode);
- return -ENOENT;
- }
-
- DEBUGFS_SEQ_FILE_CREATE(fault_stats, ibd->fault_opcode->dir, ibd);
- if (!debugfs_create_bool("fault_by_opcode", 0600,
- ibd->fault_opcode->dir,
- &ibd->fault_opcode->fault_by_opcode))
- goto fail;
- if (!debugfs_create_x8("opcode", 0600, ibd->fault_opcode->dir,
- &ibd->fault_opcode->opcode))
- goto fail;
- if (!debugfs_create_x8("mask", 0600, ibd->fault_opcode->dir,
- &ibd->fault_opcode->mask))
- goto fail;
-
- return 0;
-fail:
- fault_exit_opcode_debugfs(ibd);
- return -ENOMEM;
-}
-
-static void fault_exit_packet_debugfs(struct hfi1_ibdev *ibd)
-{
- debugfs_remove_recursive(ibd->fault_packet->dir);
- kfree(ibd->fault_packet);
- ibd->fault_packet = NULL;
-}
-
-static int fault_init_packet_debugfs(struct hfi1_ibdev *ibd)
-{
- struct dentry *parent = ibd->hfi1_ibdev_dbg;
-
- ibd->fault_packet = kzalloc(sizeof(*ibd->fault_packet), GFP_KERNEL);
- if (!ibd->fault_packet)
- return -ENOMEM;
-
- ibd->fault_packet->attr.interval = 1;
- ibd->fault_packet->attr.require_end = ULONG_MAX;
- ibd->fault_packet->attr.stacktrace_depth = 32;
- ibd->fault_packet->attr.dname = NULL;
- ibd->fault_packet->attr.verbose = 0;
- ibd->fault_packet->fault_by_packet = false;
-
- ibd->fault_packet->dir =
- fault_create_debugfs_attr("fault_packet",
- parent,
- &ibd->fault_opcode->attr);
- if (IS_ERR(ibd->fault_packet->dir)) {
- kfree(ibd->fault_packet);
- return -ENOENT;
- }
-
- if (!debugfs_create_bool("fault_by_packet", 0600,
- ibd->fault_packet->dir,
- &ibd->fault_packet->fault_by_packet))
- goto fail;
- if (!debugfs_create_u64("fault_stats", 0400,
- ibd->fault_packet->dir,
- &ibd->fault_packet->n_faults))
- goto fail;
-
- return 0;
-fail:
- fault_exit_packet_debugfs(ibd);
- return -ENOMEM;
-}
-
-static void fault_exit_debugfs(struct hfi1_ibdev *ibd)
-{
- fault_exit_opcode_debugfs(ibd);
- fault_exit_packet_debugfs(ibd);
-}
-
-static int fault_init_debugfs(struct hfi1_ibdev *ibd)
-{
- int ret = 0;
-
- ret = fault_init_opcode_debugfs(ibd);
- if (ret)
- return ret;
-
- ret = fault_init_packet_debugfs(ibd);
- if (ret)
- fault_exit_opcode_debugfs(ibd);
-
- return ret;
-}
-
-bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
-{
- return ibd->fault_suppress_err;
-}
-
-bool hfi1_dbg_fault_opcode(struct rvt_qp *qp, u32 opcode, bool rx)
-{
- bool ret = false;
- struct hfi1_ibdev *ibd = to_idev(qp->ibqp.device);
-
- if (!ibd->fault_opcode || !ibd->fault_opcode->fault_by_opcode)
- return false;
- if (ibd->fault_opcode->opcode != (opcode & ibd->fault_opcode->mask))
- return false;
- ret = should_fail(&ibd->fault_opcode->attr, 1);
- if (ret) {
- trace_hfi1_fault_opcode(qp, opcode);
- if (rx)
- ibd->fault_opcode->n_rxfaults[opcode]++;
- else
- ibd->fault_opcode->n_txfaults[opcode]++;
- }
- return ret;
-}
-
-bool hfi1_dbg_fault_packet(struct hfi1_packet *packet)
-{
- struct rvt_dev_info *rdi = &packet->rcd->ppd->dd->verbs_dev.rdi;
- struct hfi1_ibdev *ibd = dev_from_rdi(rdi);
- bool ret = false;
-
- if (!ibd->fault_packet || !ibd->fault_packet->fault_by_packet)
- return false;
-
- ret = should_fail(&ibd->fault_packet->attr, 1);
- if (ret) {
- ++ibd->fault_packet->n_faults;
- trace_hfi1_fault_packet(packet);
- }
- return ret;
-}
-#endif
-
void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
{
char name[sizeof("port0counters") + 1];
@@ -1438,21 +1165,14 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
S_IRUGO : S_IRUGO | S_IWUSR);
}
-#ifdef CONFIG_FAULT_INJECTION
- debugfs_create_bool("fault_suppress_err", 0600,
- ibd->hfi1_ibdev_dbg,
- &ibd->fault_suppress_err);
- fault_init_debugfs(ibd);
-#endif
+ hfi1_fault_init_debugfs(ibd);
}
void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
{
if (!hfi1_dbg_root)
goto out;
-#ifdef CONFIG_FAULT_INJECTION
- fault_exit_debugfs(ibd);
-#endif
+ hfi1_fault_exit_debugfs(ibd);
debugfs_remove(ibd->hfi1_ibdev_link);
debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
out:
diff --git a/drivers/infiniband/hw/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
index 38c38a98156d6..d5d824459fcc6 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.h
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_DEBUGFS_H
#define _HFI1_DEBUGFS_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015, 2016, 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -48,51 +48,59 @@
*/
struct hfi1_ibdev;
-#ifdef CONFIG_DEBUG_FS
-void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd);
-void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd);
-void hfi1_dbg_init(void);
-void hfi1_dbg_exit(void);
-
-#ifdef CONFIG_FAULT_INJECTION
-#include <linux/fault-inject.h>
-struct fault_opcode {
- struct fault_attr attr;
- struct dentry *dir;
- bool fault_by_opcode;
- u64 n_rxfaults[256];
- u64 n_txfaults[256];
- u8 opcode;
- u8 mask;
-};
-struct fault_packet {
- struct fault_attr attr;
- struct dentry *dir;
- bool fault_by_packet;
- u64 n_faults;
-};
+#define DEBUGFS_FILE_CREATE(name, parent, data, ops, mode) \
+do { \
+ struct dentry *ent; \
+ const char *__name = name; \
+ ent = debugfs_create_file(__name, mode, parent, \
+ data, ops); \
+ if (!ent) \
+ pr_warn("create of %s failed\n", __name); \
+} while (0)
-bool hfi1_dbg_fault_opcode(struct rvt_qp *qp, u32 opcode, bool rx);
-bool hfi1_dbg_fault_packet(struct hfi1_packet *packet);
-bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd);
-#else
-static inline bool hfi1_dbg_fault_packet(struct hfi1_packet *packet)
-{
- return false;
+#define DEBUGFS_SEQ_FILE_OPS(name) \
+static const struct seq_operations _##name##_seq_ops = { \
+ .start = _##name##_seq_start, \
+ .next = _##name##_seq_next, \
+ .stop = _##name##_seq_stop, \
+ .show = _##name##_seq_show \
}
-static inline bool hfi1_dbg_fault_opcode(struct rvt_qp *qp,
- u32 opcode, bool rx)
-{
- return false;
+#define DEBUGFS_SEQ_FILE_OPEN(name) \
+static int _##name##_open(struct inode *inode, struct file *s) \
+{ \
+ struct seq_file *seq; \
+ int ret; \
+ ret = seq_open(s, &_##name##_seq_ops); \
+ if (ret) \
+ return ret; \
+ seq = s->private_data; \
+ seq->private = inode->i_private; \
+ return 0; \
}
-static inline bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
-{
- return false;
+#define DEBUGFS_FILE_OPS(name) \
+static const struct file_operations _##name##_file_ops = { \
+ .owner = THIS_MODULE, \
+ .open = _##name##_open, \
+ .read = hfi1_seq_read, \
+ .llseek = hfi1_seq_lseek, \
+ .release = seq_release \
}
-#endif
+
+#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
+ DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, 0444)
+
+ssize_t hfi1_seq_read(struct file *file, char __user *buf, size_t size,
+ loff_t *ppos);
+loff_t hfi1_seq_lseek(struct file *file, loff_t offset, int whence);
+
+#ifdef CONFIG_DEBUG_FS
+void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd);
+void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd);
+void hfi1_dbg_init(void);
+void hfi1_dbg_exit(void);
#else
static inline void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
@@ -110,22 +118,6 @@ static inline void hfi1_dbg_init(void)
static inline void hfi1_dbg_exit(void)
{
}
-
-static inline bool hfi1_dbg_fault_packet(struct hfi1_packet *packet)
-{
- return false;
-}
-
-static inline bool hfi1_dbg_fault_opcode(struct rvt_qp *qp,
- u32 opcode, bool rx)
-{
- return false;
-}
-
-static inline bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
-{
- return false;
-}
#endif
#endif /* _HFI1_DEBUGFS_H */
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index bd837a048bf49..94dca95db04f4 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -61,6 +61,7 @@
#include "sdma.h"
#include "debugfs.h"
#include "vnic.h"
+#include "fault.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -1482,38 +1483,51 @@ static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
struct hfi1_pportdata *ppd = rcd->ppd;
struct hfi1_ibport *ibp = &ppd->ibport_data;
u8 l4;
- u8 grh_len;
packet->hdr = (struct hfi1_16b_header *)
hfi1_get_16B_header(packet->rcd->dd,
packet->rhf_addr);
- packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
-
l4 = hfi1_16B_get_l4(packet->hdr);
if (l4 == OPA_16B_L4_IB_LOCAL) {
- grh_len = 0;
packet->ohdr = packet->ebuf;
packet->grh = NULL;
+ packet->opcode = ib_bth_get_opcode(packet->ohdr);
+ packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
+ /* hdr_len_by_opcode already has an IB LRH factored in */
+ packet->hlen = hdr_len_by_opcode[packet->opcode] +
+ (LRH_16B_BYTES - LRH_9B_BYTES);
+ packet->migrated = opa_bth_is_migration(packet->ohdr);
} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
u32 vtf;
+ u8 grh_len = sizeof(struct ib_grh);
- grh_len = sizeof(struct ib_grh);
packet->ohdr = packet->ebuf + grh_len;
packet->grh = packet->ebuf;
+ packet->opcode = ib_bth_get_opcode(packet->ohdr);
+ packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
+ /* hdr_len_by_opcode already has an IB LRH factored in */
+ packet->hlen = hdr_len_by_opcode[packet->opcode] +
+ (LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
+ packet->migrated = opa_bth_is_migration(packet->ohdr);
+
if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
goto drop;
vtf = be32_to_cpu(packet->grh->version_tclass_flow);
if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
goto drop;
+ } else if (l4 == OPA_16B_L4_FM) {
+ packet->mgmt = packet->ebuf;
+ packet->ohdr = NULL;
+ packet->grh = NULL;
+ packet->opcode = IB_OPCODE_UD_SEND_ONLY;
+ packet->pad = OPA_16B_L4_FM_PAD;
+ packet->hlen = OPA_16B_L4_FM_HLEN;
+ packet->migrated = false;
} else {
goto drop;
}
/* Query commonly used fields from packet header */
- packet->opcode = ib_bth_get_opcode(packet->ohdr);
- /* hdr_len_by_opcode already has an IB LRH factored in */
- packet->hlen = hdr_len_by_opcode[packet->opcode] +
- (LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
packet->slid = hfi1_16B_get_slid(packet->hdr);
packet->dlid = hfi1_16B_get_dlid(packet->hdr);
@@ -1523,10 +1537,8 @@ static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
16B);
packet->sc = hfi1_16B_get_sc(packet->hdr);
packet->sl = ibp->sc_to_sl[packet->sc];
- packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
packet->extra_byte = SIZE_OF_LT;
packet->pkey = hfi1_16B_get_pkey(packet->hdr);
- packet->migrated = opa_bth_is_migration(packet->ohdr);
if (hfi1_bypass_ingress_pkt_check(packet))
goto drop;
@@ -1565,10 +1577,10 @@ void handle_eflags(struct hfi1_packet *packet)
*/
int process_receive_ib(struct hfi1_packet *packet)
{
- if (unlikely(hfi1_dbg_fault_packet(packet)))
+ if (hfi1_setup_9B_packet(packet))
return RHF_RCV_CONTINUE;
- if (hfi1_setup_9B_packet(packet))
+ if (unlikely(hfi1_dbg_should_fault_rx(packet)))
return RHF_RCV_CONTINUE;
trace_hfi1_rcvhdr(packet);
@@ -1642,7 +1654,8 @@ int process_receive_error(struct hfi1_packet *packet)
/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
if (unlikely(
hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
- rhf_rcv_type_err(packet->rhf) == 3))
+ (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
+ packet->rhf & RHF_DC_ERR)))
return RHF_RCV_CONTINUE;
hfi1_setup_ib_header(packet);
@@ -1657,10 +1670,10 @@ int process_receive_error(struct hfi1_packet *packet)
int kdeth_process_expected(struct hfi1_packet *packet)
{
- if (unlikely(hfi1_dbg_fault_packet(packet)))
+ hfi1_setup_9B_packet(packet);
+ if (unlikely(hfi1_dbg_should_fault_rx(packet)))
return RHF_RCV_CONTINUE;
- hfi1_setup_ib_header(packet);
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
@@ -1671,11 +1684,11 @@ int kdeth_process_expected(struct hfi1_packet *packet)
int kdeth_process_eager(struct hfi1_packet *packet)
{
- hfi1_setup_ib_header(packet);
+ hfi1_setup_9B_packet(packet);
+ if (unlikely(hfi1_dbg_should_fault_rx(packet)))
+ return RHF_RCV_CONTINUE;
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
- if (unlikely(hfi1_dbg_fault_packet(packet)))
- return RHF_RCV_CONTINUE;
dd_dev_err(packet->rcd->dd,
"Unhandled eager packet received. Dropping.\n");
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
index 0af91675acc6d..1be49a0d9c11a 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -52,13 +52,24 @@
* exp_tid_group_init - initialize exp_tid_set
* @set - the set
*/
-void hfi1_exp_tid_group_init(struct exp_tid_set *set)
+static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
{
INIT_LIST_HEAD(&set->list);
set->count = 0;
}
/**
+ * hfi1_exp_tid_group_init - initialize rcd expected receive
+ * @rcd - the rcd
+ */
+void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
+{
+ hfi1_exp_tid_set_init(&rcd->tid_group_list);
+ hfi1_exp_tid_set_init(&rcd->tid_used_list);
+ hfi1_exp_tid_set_init(&rcd->tid_full_list);
+}
+
+/**
* alloc_ctxt_rcv_groups - initialize expected receive groups
* @rcd - the context to add the groupings to
*/
@@ -68,13 +79,17 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
u32 tidbase;
struct tid_group *grp;
int i;
+ u32 ngroups;
+ ngroups = rcd->expected_count / dd->rcv_entries.group_size;
+ rcd->groups =
+ kcalloc_node(ngroups, sizeof(*rcd->groups),
+ GFP_KERNEL, rcd->numa_id);
+ if (!rcd->groups)
+ return -ENOMEM;
tidbase = rcd->expected_base;
- for (i = 0; i < rcd->expected_count /
- dd->rcv_entries.group_size; i++) {
- grp = kzalloc(sizeof(*grp), GFP_KERNEL);
- if (!grp)
- goto bail;
+ for (i = 0; i < ngroups; i++) {
+ grp = &rcd->groups[i];
grp->size = dd->rcv_entries.group_size;
grp->base = tidbase;
tid_group_add_tail(grp, &rcd->tid_group_list);
@@ -82,9 +97,6 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
}
return 0;
-bail:
- hfi1_free_ctxt_rcv_groups(rcd);
- return -ENOMEM;
}
/**
@@ -100,15 +112,12 @@ bail:
*/
void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
{
- struct tid_group *grp, *gptr;
-
WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_full_list));
WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_used_list));
- list_for_each_entry_safe(grp, gptr, &rcd->tid_group_list.list, list) {
- tid_group_remove(grp, &rcd->tid_group_list);
- kfree(grp);
- }
+ kfree(rcd->groups);
+ rcd->groups = NULL;
+ hfi1_exp_tid_group_init(rcd);
hfi1_clear_tids(rcd);
}
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.h b/drivers/infiniband/hw/hfi1/exp_rcv.h
index 08719047628a0..f25362015095b 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.h
@@ -183,8 +183,30 @@ static inline u32 rcventry2tidinfo(u32 rcventry)
EXP_TID_SET(CTRL, 1 << (rcventry - pair));
}
+/**
+ * hfi1_tid_group_to_idx - convert a group pointer to an index
+ * @rcd - the receive context
+ * @grp - the group pointer
+ */
+static inline u16
+hfi1_tid_group_to_idx(struct hfi1_ctxtdata *rcd, struct tid_group *grp)
+{
+ return grp - &rcd->groups[0];
+}
+
+/**
+ * hfi1_idx_to_tid_group - convert an index to a group pointer
+ * @rcd - the receive context
+ * @idx - the index
+ */
+static inline struct tid_group *
+hfi1_idx_to_tid_group(struct hfi1_ctxtdata *rcd, u16 idx)
+{
+ return &rcd->groups[idx];
+}
+
int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
-void hfi1_exp_tid_group_init(struct exp_tid_set *set);
+void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd);
#endif /* _HFI1_EXP_RCV_H */
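
The expected-receive rework above replaces per-group allocations with one contiguous array, so a TID group can be named either by pointer or by index, and the two new helpers are plain pointer arithmetic against the array base. A standalone illustration of that round trip, with invented types:

#include <stdio.h>
#include <stdlib.h>

struct group {                  /* stand-in for struct tid_group        */
        unsigned base;
        unsigned size;
};

struct ctx {                    /* stand-in for the receive context     */
        struct group *groups;   /* one contiguous array, as after the patch */
};

static unsigned short group_to_idx(struct ctx *c, struct group *g)
{
        return (unsigned short)(g - &c->groups[0]);   /* pointer difference */
}

static struct group *idx_to_group(struct ctx *c, unsigned short idx)
{
        return &c->groups[idx];                       /* index back to pointer */
}

int main(void)
{
        struct ctx c;
        struct group *g;

        c.groups = calloc(8, sizeof(*c.groups));
        if (!c.groups)
                return 1;
        g = idx_to_group(&c, 5);
        printf("round trip: %u\n", group_to_idx(&c, g));   /* prints 5 */
        free(c.groups);
        return 0;
}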
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
new file mode 100644
index 0000000000000..e2290f32c8d90
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#include "debugfs.h"
+#include "fault.h"
+#include "trace.h"
+
+#define HFI1_FAULT_DIR_TX BIT(0)
+#define HFI1_FAULT_DIR_RX BIT(1)
+#define HFI1_FAULT_DIR_TXRX (HFI1_FAULT_DIR_TX | HFI1_FAULT_DIR_RX)
+
+static void *_fault_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void *_fault_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_opcode_stats_perctx *opstats;
+
+ ++*pos;
+ if (*pos >= ARRAY_SIZE(opstats->stats))
+ return NULL;
+ return pos;
+}
+
+static void _fault_stats_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _fault_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ loff_t i = *spos, j;
+ u64 n_packets = 0, n_bytes = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
+
+ for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
+ rcd = hfi1_rcd_get_by_index(dd, j);
+ if (rcd) {
+ n_packets += rcd->opstats->stats[i].n_packets;
+ n_bytes += rcd->opstats->stats[i].n_bytes;
+ }
+ hfi1_rcd_put(rcd);
+ }
+ for_each_possible_cpu(j) {
+ struct hfi1_opcode_stats_perctx *sp =
+ per_cpu_ptr(dd->tx_opstats, j);
+
+ n_packets += sp->stats[i].n_packets;
+ n_bytes += sp->stats[i].n_bytes;
+ }
+ if (!n_packets && !n_bytes)
+ return SEQ_SKIP;
+ if (!ibd->fault->n_rxfaults[i] && !ibd->fault->n_txfaults[i])
+ return SEQ_SKIP;
+ seq_printf(s, "%02llx %llu/%llu (faults rx:%llu faults: tx:%llu)\n", i,
+ (unsigned long long)n_packets,
+ (unsigned long long)n_bytes,
+ (unsigned long long)ibd->fault->n_rxfaults[i],
+ (unsigned long long)ibd->fault->n_txfaults[i]);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(fault_stats);
+DEBUGFS_SEQ_FILE_OPEN(fault_stats);
+DEBUGFS_FILE_OPS(fault_stats);
+
+static int fault_opcodes_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ ssize_t ret = 0;
+ /* 1280 = 256 opcodes * 4 chars/opcode + 255 commas + NULL */
+ size_t copy, datalen = 1280;
+ char *data, *token, *ptr, *end;
+ struct fault *fault = file->private_data;
+
+ data = kcalloc(datalen, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ copy = min(len, datalen - 1);
+ if (copy_from_user(data, buf, copy)) {
+ kfree(data);
+ return -EFAULT;
+ }
+
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret)) {
+ kfree(data);
+ return ret;
+ }
+ ptr = data;
+ token = ptr;
+ for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
+ char *dash;
+ unsigned long range_start, range_end, i;
+ bool remove = false;
+
+ end = strchr(ptr, ',');
+ if (end)
+ *end = '\0';
+ if (token[0] == '-') {
+ remove = true;
+ token++;
+ }
+ dash = strchr(token, '-');
+ if (dash)
+ *dash = '\0';
+ if (kstrtoul(token, 0, &range_start))
+ break;
+ if (dash) {
+ token = dash + 1;
+ if (kstrtoul(token, 0, &range_end))
+ break;
+ } else {
+ range_end = range_start;
+ }
+ if (range_start == range_end && range_start == -1UL) {
+ bitmap_zero(fault->opcodes, sizeof(fault->opcodes) *
+ BITS_PER_BYTE);
+ break;
+ }
+ for (i = range_start; i <= range_end; i++) {
+ if (remove)
+ clear_bit(i, fault->opcodes);
+ else
+ set_bit(i, fault->opcodes);
+ }
+ if (!end)
+ break;
+ }
+ ret = len;
+
+ debugfs_file_put(file->f_path.dentry);
+ kfree(data);
+ return ret;
+}
+
+static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ ssize_t ret = 0;
+ char *data;
+ size_t datalen = 1280, size = 0; /* see fault_opcodes_write() */
+ unsigned long bit = 0, zero = 0;
+ struct fault *fault = file->private_data;
+ size_t bitsize = sizeof(fault->opcodes) * BITS_PER_BYTE;
+
+ data = kcalloc(datalen, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret)) {
+ kfree(data);
+ return ret;
+ }
+ bit = find_first_bit(fault->opcodes, bitsize);
+ while (bit < bitsize) {
+ zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
+ if (zero - 1 != bit)
+ size += snprintf(data + size,
+ datalen - size - 1,
+ "0x%lx-0x%lx,", bit, zero - 1);
+ else
+ size += snprintf(data + size,
+ datalen - size - 1, "0x%lx,",
+ bit);
+ bit = find_next_bit(fault->opcodes, bitsize, zero);
+ }
+ debugfs_file_put(file->f_path.dentry);
+ if (size)
+ data[size - 1] = '\n';
+ data[size] = '\0';
+ ret = simple_read_from_buffer(buf, len, pos, data, size);
+ kfree(data);
+ return ret;
+}
+
+static const struct file_operations __fault_opcodes_fops = {
+ .owner = THIS_MODULE,
+ .open = fault_opcodes_open,
+ .read = fault_opcodes_read,
+ .write = fault_opcodes_write,
+ .llseek = no_llseek
+};
+
+void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
+{
+ if (ibd->fault)
+ debugfs_remove_recursive(ibd->fault->dir);
+ kfree(ibd->fault);
+ ibd->fault = NULL;
+}
+
+int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
+{
+ struct dentry *parent = ibd->hfi1_ibdev_dbg;
+
+ ibd->fault = kzalloc(sizeof(*ibd->fault), GFP_KERNEL);
+ if (!ibd->fault)
+ return -ENOMEM;
+
+ ibd->fault->attr.interval = 1;
+ ibd->fault->attr.require_end = ULONG_MAX;
+ ibd->fault->attr.stacktrace_depth = 32;
+ ibd->fault->attr.dname = NULL;
+ ibd->fault->attr.verbose = 0;
+ ibd->fault->enable = false;
+ ibd->fault->opcode = false;
+ ibd->fault->fault_skip = 0;
+ ibd->fault->skip = 0;
+ ibd->fault->direction = HFI1_FAULT_DIR_TXRX;
+ ibd->fault->suppress_err = false;
+ bitmap_zero(ibd->fault->opcodes,
+ sizeof(ibd->fault->opcodes) * BITS_PER_BYTE);
+
+ ibd->fault->dir =
+ fault_create_debugfs_attr("fault", parent,
+ &ibd->fault->attr);
+ if (IS_ERR(ibd->fault->dir)) {
+ kfree(ibd->fault);
+ ibd->fault = NULL;
+ return -ENOENT;
+ }
+
+ DEBUGFS_SEQ_FILE_CREATE(fault_stats, ibd->fault->dir, ibd);
+ if (!debugfs_create_bool("enable", 0600, ibd->fault->dir,
+ &ibd->fault->enable))
+ goto fail;
+ if (!debugfs_create_bool("suppress_err", 0600,
+ ibd->fault->dir,
+ &ibd->fault->suppress_err))
+ goto fail;
+ if (!debugfs_create_bool("opcode_mode", 0600, ibd->fault->dir,
+ &ibd->fault->opcode))
+ goto fail;
+ if (!debugfs_create_file("opcodes", 0600, ibd->fault->dir,
+ ibd->fault, &__fault_opcodes_fops))
+ goto fail;
+ if (!debugfs_create_u64("skip_pkts", 0600,
+ ibd->fault->dir,
+ &ibd->fault->fault_skip))
+ goto fail;
+ if (!debugfs_create_u64("skip_usec", 0600,
+ ibd->fault->dir,
+ &ibd->fault->fault_skip_usec))
+ goto fail;
+ if (!debugfs_create_u8("direction", 0600, ibd->fault->dir,
+ &ibd->fault->direction))
+ goto fail;
+
+ return 0;
+fail:
+ hfi1_fault_exit_debugfs(ibd);
+ return -ENOMEM;
+}
+
+bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
+{
+ if (ibd->fault)
+ return ibd->fault->suppress_err;
+ return false;
+}
+
+static bool __hfi1_should_fault(struct hfi1_ibdev *ibd, u32 opcode,
+ u8 direction)
+{
+ bool ret = false;
+
+ if (!ibd->fault || !ibd->fault->enable)
+ return false;
+ if (!(ibd->fault->direction & direction))
+ return false;
+ if (ibd->fault->opcode) {
+ if (bitmap_empty(ibd->fault->opcodes,
+ (sizeof(ibd->fault->opcodes) *
+ BITS_PER_BYTE)))
+ return false;
+ if (!(test_bit(opcode, ibd->fault->opcodes)))
+ return false;
+ }
+ if (ibd->fault->fault_skip_usec &&
+ time_before(jiffies, ibd->fault->skip_usec))
+ return false;
+ if (ibd->fault->fault_skip && ibd->fault->skip) {
+ ibd->fault->skip--;
+ return false;
+ }
+ ret = should_fail(&ibd->fault->attr, 1);
+ if (ret) {
+ ibd->fault->skip = ibd->fault->fault_skip;
+ ibd->fault->skip_usec = jiffies +
+ usecs_to_jiffies(ibd->fault->fault_skip_usec);
+ }
+ return ret;
+}
+
+bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp, u32 opcode)
+{
+ struct hfi1_ibdev *ibd = to_idev(qp->ibqp.device);
+
+ if (__hfi1_should_fault(ibd, opcode, HFI1_FAULT_DIR_TX)) {
+ trace_hfi1_fault_opcode(qp, opcode);
+ ibd->fault->n_txfaults[opcode]++;
+ return true;
+ }
+ return false;
+}
+
+bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet)
+{
+ struct hfi1_ibdev *ibd = &packet->rcd->dd->verbs_dev;
+
+ if (__hfi1_should_fault(ibd, packet->opcode, HFI1_FAULT_DIR_RX)) {
+ trace_hfi1_fault_packet(packet);
+ ibd->fault->n_rxfaults[packet->opcode]++;
+ return true;
+ }
+ return false;
+}
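
Besides the opcode filter, __hfi1_should_fault() throttles injection: whenever a fault fires it re-arms a packet-count skip and a time window, and candidates inside either window pass through untouched. The sketch below reproduces only that throttle; the probabilistic should_fail() step is omitted, the struct and function names are invented, and now_us stands in for jiffies.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct throttle {
        uint64_t skip_pkts;    /* configured packets to skip after a fault */
        uint64_t pkts_left;    /* packets still to skip                    */
        uint64_t skip_usec;    /* configured quiet time after a fault      */
        uint64_t quiet_until;  /* absolute time the quiet window ends      */
};

/* Decide whether this packet should be faulted; re-arm the windows if so. */
static bool should_fault(struct throttle *t, uint64_t now_us)
{
        if (t->skip_usec && now_us < t->quiet_until)
                return false;                 /* still in the time window   */
        if (t->skip_pkts && t->pkts_left) {
                t->pkts_left--;               /* still in the packet window */
                return false;
        }
        /* Fault fires: re-arm both windows for the next candidates. */
        t->pkts_left = t->skip_pkts;
        t->quiet_until = now_us + t->skip_usec;
        return true;
}

int main(void)
{
        struct throttle t = { .skip_pkts = 2, .skip_usec = 0 };
        uint64_t pkt;

        for (pkt = 0; pkt < 6; pkt++)
                printf("pkt %llu -> %s\n", (unsigned long long)pkt,
                       should_fault(&t, pkt) ? "fault" : "pass");
        return 0;
}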
diff --git a/drivers/infiniband/hw/hfi1/fault.h b/drivers/infiniband/hw/hfi1/fault.h
new file mode 100644
index 0000000000000..a83382700a7c0
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/fault.h
@@ -0,0 +1,109 @@
+#ifndef _HFI1_FAULT_H
+#define _HFI1_FAULT_H
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/fault-inject.h>
+#include <linux/dcache.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <rdma/rdma_vt.h>
+
+#include "hfi.h"
+
+struct hfi1_ibdev;
+
+#if defined(CONFIG_FAULT_INJECTION) && defined(CONFIG_FAULT_INJECTION_DEBUG_FS)
+struct fault {
+ struct fault_attr attr;
+ struct dentry *dir;
+ u64 n_rxfaults[(1U << BITS_PER_BYTE)];
+ u64 n_txfaults[(1U << BITS_PER_BYTE)];
+ u64 fault_skip;
+ u64 skip;
+ u64 fault_skip_usec;
+ unsigned long skip_usec;
+ unsigned long opcodes[(1U << BITS_PER_BYTE) / BITS_PER_LONG];
+ bool enable;
+ bool suppress_err;
+ bool opcode;
+ u8 direction;
+};
+
+int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd);
+bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp, u32 opcode);
+bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet);
+bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd);
+void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd);
+
+#else
+
+static inline int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
+{
+ return 0;
+}
+
+static inline bool hfi1_dbg_should_fault_rx(struct hfi1_packet *packet)
+{
+ return false;
+}
+
+static inline bool hfi1_dbg_should_fault_tx(struct rvt_qp *qp,
+ u32 opcode)
+{
+ return false;
+}
+
+static inline bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
+{
+ return false;
+}
+
+static inline void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
+{
+}
+#endif
+#endif /* _HFI1_FAULT_H */
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index da4aa1a95b110..0fc4aa9455c32 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -110,7 +110,7 @@ static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
unsigned long arg);
-static int vma_fault(struct vm_fault *vmf);
+static vm_fault_t vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg);
@@ -505,7 +505,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EINVAL;
goto done;
}
- if (flags & VM_WRITE) {
+ if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
ret = -EPERM;
goto done;
}
@@ -591,7 +591,7 @@ done:
* Local (non-chip) user memory is not mapped right away but as it is
* accessed by the user-level code.
*/
-static int vma_fault(struct vm_fault *vmf)
+static vm_fault_t vma_fault(struct vm_fault *vmf)
{
struct page *page;
@@ -689,8 +689,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
* checks to default and disable the send context.
*/
if (uctxt->sc) {
- set_pio_integrity(uctxt->sc);
sc_disable(uctxt->sc);
+ set_pio_integrity(uctxt->sc);
}
hfi1_free_ctxt_rcv_groups(uctxt);
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index cac2c62bc42d6..4ab8b5bfbed13 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -231,20 +231,22 @@ struct hfi1_ctxtdata {
/* job key */
u16 jkey;
/* number of RcvArray groups for this context. */
- u32 rcv_array_groups;
+ u16 rcv_array_groups;
/* index of first eager TID entry. */
- u32 eager_base;
+ u16 eager_base;
/* number of expected TID entries */
- u32 expected_count;
+ u16 expected_count;
/* index of first expected TID entry. */
- u32 expected_base;
+ u16 expected_base;
+ /* array of tid_groups */
+ struct tid_group *groups;
struct exp_tid_set tid_group_list;
struct exp_tid_set tid_used_list;
struct exp_tid_set tid_full_list;
- /* lock protecting all Expected TID data */
- struct mutex exp_lock;
+ /* lock protecting all Expected TID data of user contexts */
+ struct mutex exp_mutex;
/* per-context configuration flags */
unsigned long flags;
/* per-context event flags for fileops/intr communication */
@@ -282,7 +284,7 @@ struct hfi1_ctxtdata {
/* interrupt handling */
u64 imask; /* clear interrupt mask */
int ireg; /* clear interrupt register */
- unsigned numa_id; /* numa node of this context */
+ int numa_id; /* numa node of this context */
/* verbs rx_stats per rcd */
struct hfi1_opcode_stats_perctx *opstats;
@@ -333,6 +335,7 @@ struct hfi1_packet {
struct rvt_qp *qp;
struct ib_other_headers *ohdr;
struct ib_grh *grh;
+ struct opa_16b_mgmt *mgmt;
u64 rhf;
u32 maxcnt;
u32 rhqoff;
@@ -392,10 +395,17 @@ struct hfi1_packet {
*/
#define OPA_16B_L4_9B 0x00
#define OPA_16B_L2_TYPE 0x02
+#define OPA_16B_L4_FM 0x08
#define OPA_16B_L4_IB_LOCAL 0x09
#define OPA_16B_L4_IB_GLOBAL 0x0A
#define OPA_16B_L4_ETHR OPA_VNIC_L4_ETHR
+/*
+ * OPA 16B Management
+ */
+#define OPA_16B_L4_FM_PAD 3 /* fixed 3B pad */
+#define OPA_16B_L4_FM_HLEN 24 /* 16B(16) + L4_FM(8) */
+
static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
{
return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
@@ -472,6 +482,27 @@ static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
OPA_16B_BTH_PAD_MASK);
}
+/*
+ * 16B Management
+ */
+#define OPA_16B_MGMT_QPN_MASK 0xFFFFFF
+static inline u32 hfi1_16B_get_dest_qpn(struct opa_16b_mgmt *mgmt)
+{
+ return be32_to_cpu(mgmt->dest_qpn) & OPA_16B_MGMT_QPN_MASK;
+}
+
+static inline u32 hfi1_16B_get_src_qpn(struct opa_16b_mgmt *mgmt)
+{
+ return be32_to_cpu(mgmt->src_qpn) & OPA_16B_MGMT_QPN_MASK;
+}
+
+static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
+ u32 dest_qp, u32 src_qp)
+{
+ mgmt->dest_qpn = cpu_to_be32(dest_qp & OPA_16B_MGMT_QPN_MASK);
+ mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
+}
+
struct rvt_sge_state;
/*
@@ -880,9 +911,9 @@ typedef void (*hfi1_make_req)(struct rvt_qp *qp,
#define RHF_RCV_REPROCESS 2 /* stop. retain this packet */
struct rcv_array_data {
- u8 group_size;
u16 ngroups;
u16 nctxt_extra;
+ u8 group_size;
};
struct per_vl_data {
@@ -1263,6 +1294,9 @@ struct hfi1_devdata {
/* Save the enabled LCB error bits */
u64 lcb_err_en;
+ struct cpu_mask_set *comp_vect;
+ int *comp_vect_mappings;
+ u32 comp_vect_possible_cpus;
/*
* Capability to have different send engines simply by changing a
@@ -1856,6 +1890,7 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
#define HFI1_HAS_SDMA_TIMEOUT 0x8
#define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
#define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
+#define HFI1_SHUTDOWN 0x100 /* device is shutting down */
/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
@@ -2048,7 +2083,9 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
+#ifndef CONFIG_FAULT_INJECTION
| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
+#endif
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
@@ -2061,7 +2098,11 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
if (ctxt_type == SC_USER)
- base_sc_integrity |= HFI1_PKT_USER_SC_INTEGRITY;
+ base_sc_integrity |=
+#ifndef CONFIG_FAULT_INJECTION
+ SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK |
+#endif
+ HFI1_PKT_USER_SC_INTEGRITY;
else
base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
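(Aside, not part of the patch: the new 16B management helpers above only mask the queue pair numbers to 24 bits (OPA_16B_MGMT_QPN_MASK) and store them big-endian in the two-word L4_FM header. A self-contained sketch of the same masking, where htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() and the names are hypothetical:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define QPN_MASK 0xFFFFFFu              /* 24-bit QPN, upper 8 bits reserved */

struct mgmt_hdr {                       /* mirrors struct opa_16b_mgmt */
	uint32_t dest_qpn;              /* big-endian on the wire */
	uint32_t src_qpn;
};

static void mgmt_set_qpn(struct mgmt_hdr *m, uint32_t dest, uint32_t src)
{
	m->dest_qpn = htonl(dest & QPN_MASK);
	m->src_qpn = htonl(src & QPN_MASK);
}

static uint32_t mgmt_get_dest_qpn(const struct mgmt_hdr *m)
{
	return ntohl(m->dest_qpn) & QPN_MASK;
}

int main(void)
{
	struct mgmt_hdr m;

	mgmt_set_qpn(&m, 0x1234567, 1);                 /* top byte silently dropped */
	printf("dest_qpn = 0x%x\n", mgmt_get_dest_qpn(&m));  /* prints 0x234567 */
	return 0;
}
)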
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 6309edf811df6..f110842b91f56 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -113,8 +113,8 @@ module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");
static uint hfi1_hdrq_entsize = 32;
-module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
-MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");
+module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
+MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
@@ -361,16 +361,14 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
}
INIT_LIST_HEAD(&rcd->qp_wait_list);
- hfi1_exp_tid_group_init(&rcd->tid_group_list);
- hfi1_exp_tid_group_init(&rcd->tid_used_list);
- hfi1_exp_tid_group_init(&rcd->tid_full_list);
+ hfi1_exp_tid_group_init(rcd);
rcd->ppd = ppd;
rcd->dd = dd;
__set_bit(0, rcd->in_use_ctxts);
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
- mutex_init(&rcd->exp_lock);
+ mutex_init(&rcd->exp_mutex);
hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
@@ -1058,6 +1056,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
unsigned pidx;
int i;
+ if (dd->flags & HFI1_SHUTDOWN)
+ return;
+ dd->flags |= HFI1_SHUTDOWN;
+
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1240,6 +1242,8 @@ static void hfi1_clean_devdata(struct hfi1_devdata *dd)
dd->rcv_limit = NULL;
dd->send_schedule = NULL;
dd->tx_opstats = NULL;
+ kfree(dd->comp_vect);
+ dd->comp_vect = NULL;
sdma_clean(dd, dd->num_sdma);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1296,6 +1300,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
dd->unit = ret;
list_add(&dd->list, &hfi1_dev_list);
}
+ dd->node = -1;
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
idr_preload_end();
@@ -1348,6 +1353,12 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
goto bail;
}
+ dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
+ if (!dd->comp_vect) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
kobject_init(&dd->kobj, &hfi1_devdata_type);
return dd;
@@ -1391,6 +1402,7 @@ void hfi1_disable_after_error(struct hfi1_devdata *dd)
static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
+static void shutdown_one(struct pci_dev *);
#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "
@@ -1407,6 +1419,7 @@ static struct pci_driver hfi1_pci_driver = {
.name = DRIVER_NAME,
.probe = init_one,
.remove = remove_one,
+ .shutdown = shutdown_one,
.id_table = hfi1_pci_tbl,
.err_handler = &hfi1_pci_err_handler,
};
@@ -1515,7 +1528,7 @@ module_init(hfi1_mod_init);
static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
- node_affinity_destroy();
+ node_affinity_destroy_all();
hfi1_wss_exit();
hfi1_dbg_exit();
@@ -1599,6 +1612,8 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
static void postinit_cleanup(struct hfi1_devdata *dd)
{
hfi1_start_cleanup(dd);
+ hfi1_comp_vectors_clean_up(dd);
+ hfi1_dev_affinity_clean_up(dd);
hfi1_pcie_ddcleanup(dd);
hfi1_pcie_cleanup(dd->pcidev);
@@ -1816,6 +1831,13 @@ static void remove_one(struct pci_dev *pdev)
postinit_cleanup(dd);
}
+static void shutdown_one(struct pci_dev *pdev)
+{
+ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+
+ shutdown_device(dd);
+}
+
/**
* hfi1_create_rcvhdrq - create a receive header queue
* @dd: the hfi1_ib device
@@ -1831,7 +1853,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
u64 reg;
if (!rcd->rcvhdrq) {
- dma_addr_t dma_hdrqtail;
gfp_t gfp_flags;
/*
@@ -1856,13 +1877,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
goto bail;
}
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
+ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
+ HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
- gfp_flags);
+ &dd->pcidev->dev, PAGE_SIZE,
+ &rcd->rcvhdrqtailaddr_dma, gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
- rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
}
rcd->rcvhdrq_size = amt;
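(Aside, not part of the patch: the shutdown_device()/shutdown_one() hunks above make device teardown idempotent by guarding it with the new HFI1_SHUTDOWN flag, so the PCI .shutdown callback and the later remove path cannot quiesce the hardware twice. The guard is the usual test-then-set-a-flag idiom; a minimal single-threaded sketch with hypothetical names:

#include <stdio.h>

#define DEV_SHUTDOWN 0x100              /* mirrors HFI1_SHUTDOWN */

struct dev {
	unsigned long flags;
};

static void shutdown_device(struct dev *d)
{
	if (d->flags & DEV_SHUTDOWN)    /* already done, later callers are no-ops */
		return;
	d->flags |= DEV_SHUTDOWN;

	printf("quiescing ports, disabling interrupts...\n");
}

int main(void)
{
	struct dev d = { 0 };

	shutdown_device(&d);            /* e.g. from the PCI .shutdown hook */
	shutdown_device(&d);            /* e.g. from remove_one(); prints nothing */
	return 0;
}
)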
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index e9962c65c68f9..0307405491e01 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1238,7 +1238,7 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
}
static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
- u32 logical_state, u32 phys_state)
+ u32 logical_state, u32 phys_state, int local_mad)
{
struct hfi1_devdata *dd = ppd->dd;
u32 link_state;
@@ -1314,7 +1314,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
* Don't send a reply if the response would be sent
* through the disabled port.
*/
- if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
+ if (link_state == HLS_DN_DISABLE && !local_mad)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
break;
case IB_PORT_ARMED:
@@ -1350,7 +1350,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
*/
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len, u32 max_len)
+ u32 *resp_len, u32 max_len, int local_mad)
{
struct opa_port_info *pi = (struct opa_port_info *)data;
struct ib_event event;
@@ -1634,7 +1634,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
*/
if (!invalid) {
- ret = set_port_states(ppd, smp, ls_new, ps_new);
+ ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
if (ret)
return ret;
}
@@ -2085,7 +2085,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len, u32 max_len)
+ u32 *resp_len, u32 max_len, int local_mad)
{
u32 nports = OPA_AM_NPORT(am);
u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
@@ -2122,7 +2122,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
}
if (!invalid) {
- ret = set_port_states(ppd, smp, ls_new, ps_new);
+ ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
if (ret)
return ret;
}
@@ -3424,6 +3424,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)pmp);
}
+ rsp->port_number = port;
/* PortRcvErrorInfo */
rsp->port_rcv_ei.status_and_code =
@@ -4190,7 +4191,7 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev, u8 port,
- u32 *resp_len, u32 max_len)
+ u32 *resp_len, u32 max_len, int local_mad)
{
int ret;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -4198,7 +4199,7 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
switch (attr_id) {
case IB_SMP_ATTR_PORT_INFO:
ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
- resp_len, max_len);
+ resp_len, max_len, local_mad);
break;
case IB_SMP_ATTR_PKEY_TABLE:
ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
@@ -4222,7 +4223,7 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
break;
case OPA_ATTRIB_ID_PORT_STATE_INFO:
ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
- resp_len, max_len);
+ resp_len, max_len, local_mad);
break;
case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
@@ -4314,7 +4315,7 @@ static int subn_get_opa_aggregate(struct opa_smp *smp,
static int subn_set_opa_aggregate(struct opa_smp *smp,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, int local_mad)
{
int i;
u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
@@ -4344,7 +4345,9 @@ static int subn_set_opa_aggregate(struct opa_smp *smp,
}
(void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
- ibdev, port, NULL, (u32)agg_data_len);
+ ibdev, port, NULL, (u32)agg_data_len,
+ local_mad);
+
if (smp->status & IB_SMP_INVALID_FIELD)
break;
if (smp->status & ~IB_SMP_DIRECTION) {
@@ -4519,7 +4522,7 @@ static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
u8 port, const struct opa_mad *in_mad,
struct opa_mad *out_mad,
- u32 *resp_len)
+ u32 *resp_len, int local_mad)
{
struct opa_smp *smp = (struct opa_smp *)out_mad;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -4588,11 +4591,11 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
default:
ret = subn_set_opa_sma(attr_id, smp, am, data,
ibdev, port, resp_len,
- data_size);
+ data_size, local_mad);
break;
case OPA_ATTRIB_ID_AGGREGATE:
ret = subn_set_opa_aggregate(smp, ibdev, port,
- resp_len);
+ resp_len, local_mad);
break;
}
break;
@@ -4832,6 +4835,7 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
{
int ret;
int pkey_idx;
+ int local_mad = 0;
u32 resp_len = 0;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -4846,13 +4850,14 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- if (is_local_mad(ibp, in_mad, in_wc)) {
+ local_mad = is_local_mad(ibp, in_mad, in_wc);
+ if (local_mad) {
ret = opa_local_smp_check(ibp, in_wc);
if (ret)
return IB_MAD_RESULT_FAILURE;
}
ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
- out_mad, &resp_len);
+ out_mad, &resp_len, local_mad);
goto bail;
case IB_MGMT_CLASS_PERF_MGMT:
ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index c1c982908b4bb..4d4371bf2c7c8 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -56,11 +56,6 @@
#include "chip_registers.h"
#include "aspm.h"
-/* link speed vector for Gen3 speed - not in Linux headers */
-#define GEN1_SPEED_VECTOR 0x1
-#define GEN2_SPEED_VECTOR 0x2
-#define GEN3_SPEED_VECTOR 0x3
-
/*
* This file contains PCIe utility routines.
*/
@@ -183,6 +178,14 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
return -ENOMEM;
}
dd_dev_info(dd, "UC base1: %p for %x\n", dd->kregbase1, RCV_ARRAY);
+
+ /* verify that reads actually work, save revision for reset check */
+ dd->revision = readq(dd->kregbase1 + CCE_REVISION);
+ if (dd->revision == ~(u64)0) {
+ dd_dev_err(dd, "Cannot read chip CSRs\n");
+ goto nomem;
+ }
+
dd->chip_rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
dd_dev_info(dd, "RcvArray count: %u\n", dd->chip_rcv_array_count);
dd->base2_start = RCV_ARRAY + dd->chip_rcv_array_count * 8;
@@ -262,7 +265,7 @@ static u32 extract_speed(u16 linkstat)
case PCI_EXP_LNKSTA_CLS_5_0GB:
speed = 5000; /* Gen 2, 5GHz */
break;
- case GEN3_SPEED_VECTOR:
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
speed = 8000; /* Gen 3, 8GHz */
break;
}
@@ -317,7 +320,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
return ret;
}
- if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) {
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB) {
dd_dev_info(dd,
"This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
linkcap & PCI_EXP_LNKCAP_SLS);
@@ -694,9 +697,6 @@ const struct pci_error_handlers hfi1_pci_err_handler = {
/* gasket block secondary bus reset delay */
#define SBR_DELAY_US 200000 /* 200ms */
-/* mask for PCIe capability register lnkctl2 target link speed */
-#define LNKCTL2_TARGET_LINK_SPEED_MASK 0xf
-
static uint pcie_target = 3;
module_param(pcie_target, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");
@@ -1045,13 +1045,13 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
return 0;
if (pcie_target == 1) { /* target Gen1 */
- target_vector = GEN1_SPEED_VECTOR;
+ target_vector = PCI_EXP_LNKCTL2_TLS_2_5GT;
target_speed = 2500;
} else if (pcie_target == 2) { /* target Gen2 */
- target_vector = GEN2_SPEED_VECTOR;
+ target_vector = PCI_EXP_LNKCTL2_TLS_5_0GT;
target_speed = 5000;
} else if (pcie_target == 3) { /* target Gen3 */
- target_vector = GEN3_SPEED_VECTOR;
+ target_vector = PCI_EXP_LNKCTL2_TLS_8_0GT;
target_speed = 8000;
} else {
/* off or invalid target - skip */
@@ -1290,8 +1290,8 @@ retry:
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
/* only write to parent if target is not as high as ours */
- if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) {
- lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+ if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
+ lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
@@ -1316,7 +1316,7 @@ retry:
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
- lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
+ lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
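(Aside, not part of the patch: the pcie.c hunks drop the driver-private GEN*_SPEED_VECTOR and LNKCTL2_TARGET_LINK_SPEED_MASK definitions in favour of the standard PCI_EXP_LNKCTL2_TLS_* and PCI_EXP_LNKSTA_CLS_* constants; the register update itself stays an ordinary read-modify-write of the 4-bit target-link-speed field. A rough sketch with locally defined stand-ins for those constants and a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define LNKCTL2_TLS_MASK  0x000f        /* target link speed field */
#define LNKCTL2_TLS_8_0GT 0x0003        /* Gen3 encoding */

int main(void)
{
	uint16_t lnkctl2 = 0x0062;      /* pretend value read from config space */

	/* only raise the target speed, as the driver does for the parent port */
	if ((lnkctl2 & LNKCTL2_TLS_MASK) < LNKCTL2_TLS_8_0GT) {
		lnkctl2 &= ~LNKCTL2_TLS_MASK;
		lnkctl2 |= LNKCTL2_TLS_8_0GT;
	}
	printf("new link control2: 0x%04x\n", lnkctl2);  /* prints 0x0063 */
	return 0;
}
)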
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 40dac4d16eb89..9cac15d10c4ff 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -50,8 +50,6 @@
#include "qp.h"
#include "trace.h"
-#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
-
#define SC(name) SEND_CTXT_##name
/*
* Send Context functions
@@ -961,15 +959,40 @@ void sc_disable(struct send_context *sc)
}
/* return SendEgressCtxtStatus.PacketOccupancy */
-#define packet_occupancy(r) \
- (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
- >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
+static u64 packet_occupancy(u64 reg)
+{
+ return (reg &
+ SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
+ >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
+}
/* is egress halted on the context? */
-#define egress_halted(r) \
- ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
+static bool egress_halted(u64 reg)
+{
+ return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
+}
-/* wait for packet egress, optionally pause for credit return */
+/* is the send context halted? */
+static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
+{
+ return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
+ SC(STATUS_CTXT_HALTED_SMASK));
+}
+
+/**
+ * sc_wait_for_packet_egress
+ * @sc: valid send context
+ * @pause: wait for credit return
+ *
+ * Wait for packet egress, optionally pause for credit return
+ *
+ * Egress halt and Context halt are not necessarily the same thing, so
+ * check for both.
+ *
+ * NOTE: The context halt bit may not be set immediately. Because of this,
+ * it is necessary to check the SW SCF_HALTED bit (set in the IRQ) and the HW
+ * context halt bit to determine if the context is halted.
 + * it is necessary to check the SW SCF_HALTED bit (set in the IRQ) and the HW
+ */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
struct hfi1_devdata *dd = sc->dd;
@@ -981,8 +1004,9 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
reg_prev = reg;
reg = read_csr(dd, sc->hw_context * 8 +
SEND_EGRESS_CTXT_STATUS);
- /* done if egress is stopped */
- if (egress_halted(reg))
+ /* done if any halt bits, SW or HW are set */
+ if (sc->flags & SCF_HALTED ||
+ is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
break;
reg = packet_occupancy(reg);
if (reg == 0)
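(Aside, not part of the patch: the pio.c change converts the packet_occupancy() and egress_halted() macros into static inline functions; the mask-and-shift logic is unchanged, but the inlines gain type checking and avoid re-evaluating their argument. A generic sketch of the pattern with a made-up register layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OCCUPANCY_SHIFT 8
#define OCCUPANCY_MASK  (0xffull << OCCUPANCY_SHIFT)    /* hypothetical layout */
#define HALT_BIT        (1ull << 0)

static inline uint64_t packet_occupancy(uint64_t reg)
{
	return (reg & OCCUPANCY_MASK) >> OCCUPANCY_SHIFT;
}

static inline bool egress_halted(uint64_t reg)
{
	return !!(reg & HALT_BIT);
}

int main(void)
{
	uint64_t reg = (5ull << OCCUPANCY_SHIFT) | HALT_BIT;

	printf("occupancy=%llu halted=%d\n",
	       (unsigned long long)packet_occupancy(reg), egress_halted(reg));
	return 0;
}
)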
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index da58046a02eaa..1a1a47ac53c6f 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2012,7 +2012,7 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
unsigned long nsec = 1024 * ccti_timer;
hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_PINNED);
}
spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
@@ -2123,7 +2123,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
/* OK, process the packet. */
switch (opcode) {
case OP(SEND_FIRST):
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2149,7 +2149,7 @@ send_middle:
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
/* consume RWQE */
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2159,7 +2159,7 @@ send_middle:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
case OP(SEND_ONLY_WITH_INVALIDATE):
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2271,7 +2271,7 @@ send_last:
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
goto no_immediate_data;
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret) {
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index c0071ca4147ae..ef4c566e206f9 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -53,156 +53,6 @@
#include "verbs_txreq.h"
#include "trace.h"
-/*
- * Validate a RWQE and fill in the SGE state.
- * Return 1 if OK.
- */
-static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
-{
- int i, j, ret;
- struct ib_wc wc;
- struct rvt_lkey_table *rkt;
- struct rvt_pd *pd;
- struct rvt_sge_state *ss;
-
- rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
- pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
- ss = &qp->r_sge;
- ss->sg_list = qp->r_sg_list;
- qp->r_len = 0;
- for (i = j = 0; i < wqe->num_sge; i++) {
- if (wqe->sg_list[i].length == 0)
- continue;
- /* Check LKEY */
- ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- NULL, &wqe->sg_list[i],
- IB_ACCESS_LOCAL_WRITE);
- if (unlikely(ret <= 0))
- goto bad_lkey;
- qp->r_len += wqe->sg_list[i].length;
- j++;
- }
- ss->num_sge = j;
- ss->total_len = qp->r_len;
- ret = 1;
- goto bail;
-
-bad_lkey:
- while (j) {
- struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
-
- rvt_put_mr(sge->mr);
- }
- ss->num_sge = 0;
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr_id;
- wc.status = IB_WC_LOC_PROT_ERR;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- /* Signal solicited completion event. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return -1 if there is a local error, 0 if no RWQE is available,
- * otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
-{
- unsigned long flags;
- struct rvt_rq *rq;
- struct rvt_rwq *wq;
- struct rvt_srq *srq;
- struct rvt_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- u32 tail;
- int ret;
-
- if (qp->ibqp.srq) {
- srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ret = 0;
- goto unlock;
- }
-
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- if (unlikely(tail == wq->head)) {
- ret = 0;
- goto unlock;
- }
- /* Make sure entry is read after head index is read. */
- smp_rmb();
- wqe = rvt_get_rwqe_ptr(rq, tail);
- /*
- * Even though we update the tail index in memory, the verbs
- * consumer is not supposed to post more entries until a
- * completion is generated.
- */
- if (++tail >= rq->size)
- tail = 0;
- wq->tail = tail;
- if (!wr_id_only && !init_sge(qp, wqe)) {
- ret = -1;
- goto unlock;
- }
- qp->r_wr_id = wqe->wr_id;
-
- ret = 1;
- set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
- if (handler) {
- u32 n;
-
- /*
- * Validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- goto bail;
- }
- }
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
-bail:
- return ret;
-}
-
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
return (gid->global.interface_id == id &&
@@ -423,7 +273,7 @@ again:
/* FALLTHROUGH */
case IB_WR_SEND:
send:
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -435,7 +285,7 @@ send:
goto inv_err;
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 1f203309cf24b..298e0e3fc0c9f 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -923,9 +923,10 @@ ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
cpumask_var_t mask, new_mask;
unsigned long cpu;
int ret, vl, sz;
+ struct sdma_rht_node *rht_node;
vl = sdma_engine_get_vl(sde);
- if (unlikely(vl < 0))
+ if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
return -EINVAL;
ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
@@ -953,19 +954,12 @@ ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
mutex_lock(&process_to_sde_mutex);
for_each_cpu(cpu, mask) {
- struct sdma_rht_node *rht_node;
-
/* Check if we have this already mapped */
if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
cpumask_set_cpu(cpu, new_mask);
continue;
}
- if (vl >= ARRAY_SIZE(rht_node->map)) {
- ret = -EINVAL;
- goto out;
- }
-
rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
sdma_rht_params);
if (!rht_node) {
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 89bd9851065b8..7c8aed0ffc07e 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -63,13 +63,20 @@ static u8 __get_ib_hdr_len(struct ib_header *hdr)
static u8 __get_16b_hdr_len(struct hfi1_16b_header *hdr)
{
- struct ib_other_headers *ohdr;
+ struct ib_other_headers *ohdr = NULL;
u8 opcode;
+ u8 l4 = hfi1_16B_get_l4(hdr);
+
+ if (l4 == OPA_16B_L4_FM) {
+ opcode = IB_OPCODE_UD_SEND_ONLY;
+ return (8 + 8); /* No BTH */
+ }
- if (hfi1_16B_get_l4(hdr) == OPA_16B_L4_IB_LOCAL)
+ if (l4 == OPA_16B_L4_IB_LOCAL)
ohdr = &hdr->u.oth;
else
ohdr = &hdr->u.l.oth;
+
opcode = ib_bth_get_opcode(ohdr);
return hdr_len_by_opcode[opcode] == 0 ?
0 : hdr_len_by_opcode[opcode] - (12 + 8 + 8);
@@ -234,17 +241,24 @@ const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
#define BTH_16B_PRN \
"op:0x%.2x,%s se:%d m:%d pad:%d tver:%d " \
"qpn:0x%.6x a:%d psn:0x%.8x"
-const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
- u8 ack, bool becn, bool fecn, u8 mig,
- u8 se, u8 pad, u8 opcode, const char *opname,
- u8 tver, u16 pkey, u32 psn, u32 qpn)
+#define L4_FM_16B_PRN \
+ "op:0x%.2x,%s dest_qpn:0x%.6x src_qpn:0x%.6x"
+const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
+ u8 ack, bool becn, bool fecn, u8 mig,
+ u8 se, u8 pad, u8 opcode, const char *opname,
+ u8 tver, u16 pkey, u32 psn, u32 qpn,
+ u32 dest_qpn, u32 src_qpn)
{
const char *ret = trace_seq_buffer_ptr(p);
if (bypass)
- trace_seq_printf(p, BTH_16B_PRN,
- opcode, opname,
- se, mig, pad, tver, qpn, ack, psn);
+ if (l4 == OPA_16B_L4_FM)
+ trace_seq_printf(p, L4_FM_16B_PRN,
+ opcode, opname, dest_qpn, src_qpn);
+ else
+ trace_seq_printf(p, BTH_16B_PRN,
+ opcode, opname,
+ se, mig, pad, tver, qpn, ack, psn);
else
trace_seq_printf(p, BTH_9B_PRN,
@@ -258,12 +272,17 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
const char *parse_everbs_hdrs(
struct trace_seq *p,
- u8 opcode,
+ u8 opcode, u8 l4, u32 dest_qpn, u32 src_qpn,
void *ehdrs)
{
union ib_ehdrs *eh = ehdrs;
const char *ret = trace_seq_buffer_ptr(p);
+ if (l4 == OPA_16B_L4_FM) {
+ trace_seq_printf(p, "mgmt pkt");
+ goto out;
+ }
+
switch (opcode) {
/* imm */
case OP(RC, SEND_LAST_WITH_IMMEDIATE):
@@ -334,6 +353,7 @@ const char *parse_everbs_hdrs(
be32_to_cpu(eh->ieth));
break;
}
+out:
trace_seq_putc(p, 0);
return ret;
}
@@ -374,6 +394,7 @@ const char *print_u32_array(
return ret;
}
+__hfi1_trace_fn(AFFINITY);
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
__hfi1_trace_fn(SDMA);
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
index 0e7d929530c5b..e62171fb7379f 100644
--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -1,5 +1,5 @@
/*
-* Copyright(c) 2015, 2016 Intel Corporation.
+* Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -113,6 +113,7 @@ void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
* hfi1_cdbg(LVL, fmt, ...); as well as take care of all
* the debugfs stuff.
*/
+__hfi1_trace_def(AFFINITY);
__hfi1_trace_def(PKT);
__hfi1_trace_def(PROC);
__hfi1_trace_def(SDMA);
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
index 2847626d3819e..1dc2c28fc96e7 100644
--- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -96,7 +96,9 @@ __print_symbolic(opcode, \
ib_opcode_name(CNP))
u8 ibhdr_exhdr_len(struct ib_header *hdr);
-const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
+const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode,
+ u8 l4, u32 dest_qpn, u32 src_qpn,
+ void *ehdrs);
u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opah);
u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet);
const char *hfi1_trace_get_packet_l4_str(u8 l4);
@@ -123,14 +125,16 @@ const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
u8 rc, u8 sc, u8 sl, u16 entropy,
u16 len, u16 pkey, u32 dlid, u32 slid);
-const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
- u8 ack, bool becn, bool fecn, u8 mig,
- u8 se, u8 pad, u8 opcode, const char *opname,
- u8 tver, u16 pkey, u32 psn, u32 qpn);
+const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
+ u8 ack, bool becn, bool fecn, u8 mig,
+ u8 se, u8 pad, u8 opcode, const char *opname,
+ u8 tver, u16 pkey, u32 psn, u32 qpn,
+ u32 dest_qpn, u32 src_qpn);
const char *hfi1_trace_get_packet_l2_str(u8 l2);
-#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
+#define __parse_ib_ehdrs(op, l4, dest_qpn, src_qpn, ehdrs) \
+ parse_everbs_hdrs(p, op, l4, dest_qpn, src_qpn, ehdrs)
#define lrh_name(lrh) { HFI1_##lrh, #lrh }
#define show_lnh(lrh) \
@@ -169,6 +173,8 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
__field(u32, psn)
__field(u32, qpn)
__field(u32, slid)
+ __field(u32, dest_qpn)
+ __field(u32, src_qpn)
/* extended headers */
__dynamic_array(u8, ehdrs,
hfi1_trace_packet_hdr_len(packet))
@@ -178,6 +184,8 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
__entry->etype = packet->etype;
__entry->l2 = hfi1_16B_get_l2(packet->hdr);
+ __entry->dest_qpn = 0;
+ __entry->src_qpn = 0;
if (__entry->etype == RHF_RCV_TYPE_BYPASS) {
hfi1_trace_parse_16b_hdr(packet->hdr,
&__entry->age,
@@ -192,16 +200,23 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
&__entry->dlid,
&__entry->slid);
- hfi1_trace_parse_16b_bth(packet->ohdr,
- &__entry->ack,
- &__entry->mig,
- &__entry->opcode,
- &__entry->pad,
- &__entry->se,
- &__entry->tver,
- &__entry->psn,
- &__entry->qpn);
+ if (__entry->l4 == OPA_16B_L4_FM) {
+ __entry->opcode = IB_OPCODE_UD_SEND_ONLY;
+ __entry->dest_qpn = hfi1_16B_get_dest_qpn(packet->mgmt);
+ __entry->src_qpn = hfi1_16B_get_src_qpn(packet->mgmt);
+ } else {
+ hfi1_trace_parse_16b_bth(packet->ohdr,
+ &__entry->ack,
+ &__entry->mig,
+ &__entry->opcode,
+ &__entry->pad,
+ &__entry->se,
+ &__entry->tver,
+ &__entry->psn,
+ &__entry->qpn);
+ }
} else {
+ __entry->l4 = OPA_16B_L4_9B;
hfi1_trace_parse_9b_hdr(packet->hdr, sc5,
&__entry->lnh,
&__entry->lver,
@@ -223,8 +238,9 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
&__entry->pkey,
&__entry->psn,
&__entry->qpn);
- }
- /* extended headers */
+ }
+ /* extended headers */
+ if (__entry->l4 != OPA_16B_L4_FM)
memcpy(__get_dynamic_array(ehdrs),
&packet->ohdr->u,
__get_dynamic_array_len(ehdrs));
@@ -253,25 +269,31 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
__entry->pkey,
__entry->dlid,
__entry->slid),
- hfi1_trace_fmt_bth(p,
- __entry->etype ==
+ hfi1_trace_fmt_rest(p,
+ __entry->etype ==
RHF_RCV_TYPE_BYPASS,
- __entry->ack,
- __entry->becn,
- __entry->fecn,
- __entry->mig,
- __entry->se,
- __entry->pad,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->tver,
- __entry->pkey,
- __entry->psn,
- __entry->qpn),
+ __entry->l4,
+ __entry->ack,
+ __entry->becn,
+ __entry->fecn,
+ __entry->mig,
+ __entry->se,
+ __entry->pad,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->tver,
+ __entry->pkey,
+ __entry->psn,
+ __entry->qpn,
+ __entry->dest_qpn,
+ __entry->src_qpn),
/* extended headers */
__get_dynamic_array_len(ehdrs),
__parse_ib_ehdrs(
__entry->opcode,
+ __entry->l4,
+ __entry->dest_qpn,
+ __entry->src_qpn,
(void *)__get_dynamic_array(ehdrs))
)
);
@@ -310,6 +332,8 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
__field(u32, psn)
__field(u32, qpn)
__field(u32, slid)
+ __field(u32, dest_qpn)
+ __field(u32, src_qpn)
/* extended headers */
__dynamic_array(u8, ehdrs,
hfi1_trace_opa_hdr_len(opah))
@@ -320,6 +344,8 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
DD_DEV_ASSIGN(dd);
__entry->hdr_type = opah->hdr_type;
+ __entry->dest_qpn = 0;
+ __entry->src_qpn = 0;
if (__entry->hdr_type) {
hfi1_trace_parse_16b_hdr(&opah->opah,
&__entry->age,
@@ -334,19 +360,26 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
&__entry->dlid,
&__entry->slid);
- if (__entry->l4 == OPA_16B_L4_IB_LOCAL)
- ohdr = &opah->opah.u.oth;
- else
- ohdr = &opah->opah.u.l.oth;
- hfi1_trace_parse_16b_bth(ohdr,
- &__entry->ack,
- &__entry->mig,
- &__entry->opcode,
- &__entry->pad,
- &__entry->se,
- &__entry->tver,
- &__entry->psn,
- &__entry->qpn);
+ if (__entry->l4 == OPA_16B_L4_FM) {
+ ohdr = NULL;
+ __entry->opcode = IB_OPCODE_UD_SEND_ONLY;
+ __entry->dest_qpn = hfi1_16B_get_dest_qpn(&opah->opah.u.mgmt);
+ __entry->src_qpn = hfi1_16B_get_src_qpn(&opah->opah.u.mgmt);
+ } else {
+ if (__entry->l4 == OPA_16B_L4_IB_LOCAL)
+ ohdr = &opah->opah.u.oth;
+ else
+ ohdr = &opah->opah.u.l.oth;
+ hfi1_trace_parse_16b_bth(ohdr,
+ &__entry->ack,
+ &__entry->mig,
+ &__entry->opcode,
+ &__entry->pad,
+ &__entry->se,
+ &__entry->tver,
+ &__entry->psn,
+ &__entry->qpn);
+ }
} else {
__entry->l4 = OPA_16B_L4_9B;
hfi1_trace_parse_9b_hdr(&opah->ibh, sc5,
@@ -376,8 +409,9 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
}
/* extended headers */
- memcpy(__get_dynamic_array(ehdrs),
- &ohdr->u, __get_dynamic_array_len(ehdrs));
+ if (__entry->l4 != OPA_16B_L4_FM)
+ memcpy(__get_dynamic_array(ehdrs),
+ &ohdr->u, __get_dynamic_array_len(ehdrs));
),
TP_printk("[%s] (%s) %s %s hlen:%d %s",
__get_str(dev),
@@ -399,24 +433,30 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
__entry->pkey,
__entry->dlid,
__entry->slid),
- hfi1_trace_fmt_bth(p,
- !!__entry->hdr_type,
- __entry->ack,
- __entry->becn,
- __entry->fecn,
- __entry->mig,
- __entry->se,
- __entry->pad,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->tver,
- __entry->pkey,
- __entry->psn,
- __entry->qpn),
+ hfi1_trace_fmt_rest(p,
+ !!__entry->hdr_type,
+ __entry->l4,
+ __entry->ack,
+ __entry->becn,
+ __entry->fecn,
+ __entry->mig,
+ __entry->se,
+ __entry->pad,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->tver,
+ __entry->pkey,
+ __entry->psn,
+ __entry->qpn,
+ __entry->dest_qpn,
+ __entry->src_qpn),
/* extended headers */
__get_dynamic_array_len(ehdrs),
__parse_ib_ehdrs(
__entry->opcode,
+ __entry->l4,
+ __entry->dest_qpn,
+ __entry->src_qpn,
(void *)__get_dynamic_array(ehdrs))
)
);
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 9d7a3110c14c6..b7b671017e594 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -397,7 +397,7 @@ send_first:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
qp->r_sge = qp->s_rdma_read_sge;
} else {
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -542,7 +542,7 @@ rdma_last_imm:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
rvt_put_ss(&qp->s_rdma_read_sge);
} else {
- ret = hfi1_rvt_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 69c17a5ef0387..1ab332f1866e8 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -163,7 +163,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
} else {
int ret;
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock;
@@ -399,16 +399,30 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
struct hfi1_pportdata *ppd;
struct hfi1_ibport *ibp;
u32 dlid, slid, nwords, extra_bytes;
+ u32 dest_qp = wqe->ud_wr.remote_qpn;
+ u32 src_qp = qp->ibqp.qp_num;
u16 len, pkey;
u8 l4, sc5;
+ bool is_mgmt = false;
ibp = to_iport(qp->ibqp.device, qp->port_num);
ppd = ppd_from_ibp(ibp);
ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
- /* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
- ps->s_txreq->hdr_dwords = 9;
- if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
- ps->s_txreq->hdr_dwords++;
+
+ /*
+ * Build 16B Management Packet if either the destination
+ * or source queue pair number is 0 or 1.
+ */
+ if (dest_qp == 0 || src_qp == 0 || dest_qp == 1 || src_qp == 1) {
+ /* header size in dwords 16B LRH+L4_FM = (16+8)/4. */
+ ps->s_txreq->hdr_dwords = 6;
+ is_mgmt = true;
+ } else {
+ /* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
+ ps->s_txreq->hdr_dwords = 9;
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ ps->s_txreq->hdr_dwords++;
+ }
/* SW provides space for CRC and LT for bypass packets. */
extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2),
@@ -453,7 +467,14 @@ void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
((1 << ppd->lmc) - 1));
- hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
+ if (is_mgmt) {
+ l4 = OPA_16B_L4_FM;
+ pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
+ hfi1_16B_set_qpn(&ps->s_txreq->phdr.hdr.opah.u.mgmt,
+ dest_qp, src_qp);
+ } else {
+ hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
+ }
/* Convert dwords to flits */
len = (ps->s_txreq->hdr_dwords + nwords) >> 1;
@@ -845,10 +866,8 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
*/
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
- struct ib_other_headers *ohdr = packet->ohdr;
u32 hdrsize = packet->hlen;
struct ib_wc wc;
- u32 qkey;
u32 src_qp;
u16 pkey;
int mgmt_pkey_idx = -1;
@@ -864,27 +883,35 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
u32 dlid = packet->dlid;
u32 slid = packet->slid;
u8 extra_bytes;
+ u8 l4 = 0;
bool dlid_is_permissive;
bool slid_is_permissive;
+ bool solicited = false;
extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);
- qkey = ib_get_qkey(ohdr);
- src_qp = ib_get_sqpn(ohdr);
if (packet->etype == RHF_RCV_TYPE_BYPASS) {
u32 permissive_lid =
opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);
+ l4 = hfi1_16B_get_l4(packet->hdr);
pkey = hfi1_16B_get_pkey(packet->hdr);
dlid_is_permissive = (dlid == permissive_lid);
slid_is_permissive = (slid == permissive_lid);
} else {
- pkey = ib_bth_get_pkey(ohdr);
+ pkey = ib_bth_get_pkey(packet->ohdr);
dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
}
sl_from_sc = ibp->sc_to_sl[sc5];
+ if (likely(l4 != OPA_16B_L4_FM)) {
+ src_qp = ib_get_sqpn(packet->ohdr);
+ solicited = ib_bth_is_solicited(packet->ohdr);
+ } else {
+ src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
+ }
+
process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
/*
* Get the number of bytes the message was padded by
@@ -922,8 +949,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (mgmt_pkey_idx < 0)
goto drop;
}
- if (unlikely(qkey != qp->qkey)) /* Silent drop */
- return;
+ if (unlikely(l4 != OPA_16B_L4_FM &&
+ ib_get_qkey(packet->ohdr) != qp->qkey))
+ return; /* Silent drop */
/* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 &&
@@ -950,7 +978,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (qp->ibqp.qp_num > 1 &&
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
- wc.ex.imm_data = ohdr->u.ud.imm_data;
+ wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
@@ -974,7 +1002,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
} else {
int ret;
- ret = hfi1_rvt_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
@@ -1047,8 +1075,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- ib_bth_is_solicited(ohdr));
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, solicited);
return;
drop:
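(Aside, not part of the patch: hfi1_make_ud_req_16B() above switches to a 16B management (L4_FM) packet whenever either end of the UD exchange is QP0 or QP1, and the header shrinks from LRH+BTH+DETH = (16+12+8)/4 = 9 dwords to LRH+L4_FM = (16+8)/4 = 6 dwords. The selection test is just a check on the two QPNs; a trivial sketch with a hypothetical helper name:

#include <stdbool.h>
#include <stdio.h>

/* 16B management packets carry SM/GSI traffic, i.e. QP0 or QP1 at either end */
static bool use_16b_mgmt(unsigned int dest_qp, unsigned int src_qp)
{
	return dest_qp == 0 || src_qp == 0 || dest_qp == 1 || src_qp == 1;
}

int main(void)
{
	/* header dwords: LRH+L4_FM = 6, otherwise LRH+BTH+DETH = 9 */
	printf("QP1 -> QP7: %s\n", use_16b_mgmt(7, 1) ? "mgmt (6 dwords)" : "bth (9 dwords)");
	printf("QP5 -> QP7: %s\n", use_16b_mgmt(7, 5) ? "mgmt (6 dwords)" : "bth (9 dwords)");
	return 0;
}
)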
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 0d5330b7353d2..dbe7d14a5c76d 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -375,7 +375,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
* From this point on, we are going to be using shared (between master
* and subcontexts) context resources. We need to take the lock.
*/
- mutex_lock(&uctxt->exp_lock);
+ mutex_lock(&uctxt->exp_mutex);
/*
* The first step is to program the RcvArray entries which are complete
* groups.
@@ -437,7 +437,6 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
hfi1_cdbg(TID,
"Failed to program RcvArray entries %d",
ret);
- ret = -EFAULT;
goto unlock;
} else if (ret > 0) {
if (grp->used == grp->size)
@@ -462,7 +461,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
}
}
unlock:
- mutex_unlock(&uctxt->exp_lock);
+ mutex_unlock(&uctxt->exp_mutex);
nomem:
hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
mapped_pages, ret);
@@ -518,7 +517,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
if (IS_ERR(tidinfo))
return PTR_ERR(tidinfo);
- mutex_lock(&uctxt->exp_lock);
+ mutex_lock(&uctxt->exp_mutex);
for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
if (ret) {
@@ -531,7 +530,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
fd->tid_used -= tididx;
spin_unlock(&fd->tid_lock);
tinfo->tidcnt = tididx;
- mutex_unlock(&uctxt->exp_lock);
+ mutex_unlock(&uctxt->exp_mutex);
kfree(tidinfo);
return ret;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index a3d1924243448..d2bc77f75253f 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_USER_SDMA_H
#define _HFI1_USER_SDMA_H
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -122,8 +122,6 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
(req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
##__VA_ARGS__)
-extern uint extended_psn;
-
struct hfi1_user_sdma_pkt_q {
u16 ctxt;
u16 subctxt;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index c8cf4d4984d34..08991874c0e2c 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -63,6 +63,8 @@
#include "verbs_txreq.h"
#include "debugfs.h"
#include "vnic.h"
+#include "fault.h"
+#include "affinity.h"
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
@@ -615,7 +617,12 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
wake_up(&mcast->wait);
} else {
/* Get the destination QP number. */
- qp_num = ib_bth_get_qpn(packet->ohdr);
+ if (packet->etype == RHF_RCV_TYPE_BYPASS &&
+ hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM)
+ qp_num = hfi1_16B_get_dest_qpn(packet->mgmt);
+ else
+ qp_num = ib_bth_get_qpn(packet->ohdr);
+
rcu_read_lock();
packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
if (!packet->qp)
@@ -624,10 +631,6 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
if (hfi1_do_pkey_check(packet))
goto unlock_drop;
- if (unlikely(hfi1_dbg_fault_opcode(packet->qp, packet->opcode,
- true)))
- goto unlock_drop;
-
spin_lock_irqsave(&packet->qp->r_lock, flags);
packet_handler = qp_ok(packet);
if (likely(packet_handler))
@@ -934,8 +937,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
- if (unlikely(hfi1_dbg_fault_opcode(qp, ps->opcode,
- false)))
+ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd,
pbc,
@@ -1088,7 +1090,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
- if (unlikely(hfi1_dbg_fault_opcode(qp, ps->opcode, false)))
+
+ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
}
@@ -1310,21 +1313,23 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_qp_priv *priv = qp->priv;
- struct ib_other_headers *ohdr;
+ struct ib_other_headers *ohdr = NULL;
send_routine sr;
int ret;
u16 pkey;
u32 slid;
+ u8 l4 = 0;
/* locate the pkey within the headers */
if (ps->s_txreq->phdr.hdr.hdr_type) {
struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;
- u8 l4 = hfi1_16B_get_l4(hdr);
- if (l4 == OPA_16B_L4_IB_GLOBAL)
- ohdr = &hdr->u.l.oth;
- else
+ l4 = hfi1_16B_get_l4(hdr);
+ if (l4 == OPA_16B_L4_IB_LOCAL)
ohdr = &hdr->u.oth;
+ else if (l4 == OPA_16B_L4_IB_GLOBAL)
+ ohdr = &hdr->u.l.oth;
+
slid = hfi1_16B_get_slid(hdr);
pkey = hfi1_16B_get_pkey(hdr);
} else {
@@ -1339,7 +1344,11 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
pkey = ib_bth_get_pkey(ohdr);
}
- ps->opcode = ib_bth_get_opcode(ohdr);
+ if (likely(l4 != OPA_16B_L4_FM))
+ ps->opcode = ib_bth_get_opcode(ohdr);
+ else
+ ps->opcode = IB_OPCODE_UD_SEND_ONLY;
+
sr = get_send_routine(qp, ps);
ret = egress_pkey_check(dd->pport, slid, pkey,
priv->s_sc, qp->s_pkey_index);
@@ -1937,11 +1946,11 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
+ hfi1_comp_vect_mappings_lookup;
/* completion queue */
- snprintf(dd->verbs_dev.rdi.dparms.cq_name,
- sizeof(dd->verbs_dev.rdi.dparms.cq_name),
- "hfi1_cq%d", dd->unit);
+ dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus;
dd->verbs_dev.rdi.dparms.node = dd->node;
/* misc settings */
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 2d787b8346cab..a4d06502f06df 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -110,6 +110,12 @@ enum {
#define LRH_9B_BYTES (FIELD_SIZEOF(struct ib_header, lrh))
#define LRH_9B_DWORDS (LRH_9B_BYTES / sizeof(u32))
+/* 24 bits for qpn, upper 8 bits reserved */
+struct opa_16b_mgmt {
+ __be32 dest_qpn;
+ __be32 src_qpn;
+};
+
struct hfi1_16b_header {
u32 lrh[4];
union {
@@ -118,6 +124,7 @@ struct hfi1_16b_header {
struct ib_other_headers oth;
} l;
struct ib_other_headers oth;
+ struct opa_16b_mgmt mgmt;
} u;
} __packed;
@@ -227,9 +234,7 @@ struct hfi1_ibdev {
/* per HFI symlinks to above */
struct dentry *hfi1_ibdev_link;
#ifdef CONFIG_FAULT_INJECTION
- struct fault_opcode *fault_opcode;
- struct fault_packet *fault_packet;
- bool fault_suppress_err;
+ struct fault *fault;
#endif
#endif
};
@@ -330,8 +335,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet);
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
-int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-
void hfi1_migrate_qp(struct rvt_qp *qp);
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index a40ec939ece58..46f65f9f59d0a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -197,7 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
buf->npages = 1 << order;
buf->page_shift = page_shift;
/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
- buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
+ buf->direct.buf = dma_zalloc_coherent(dev,
+ size, &t, GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
@@ -207,8 +208,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
--buf->page_shift;
buf->npages *= 2;
}
-
- memset(buf->direct.buf, 0, size);
} else {
buf->nbufs = (size + page_size - 1) / page_size;
buf->npages = buf->nbufs;
@@ -220,7 +219,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
- buf->page_list[i].buf = dma_alloc_coherent(dev,
+ buf->page_list[i].buf = dma_zalloc_coherent(dev,
page_size, &t,
GFP_KERNEL);
@@ -228,7 +227,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
goto err_free;
buf->page_list[i].map = t;
- memset(buf->page_list[i].buf, 0, page_size);
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 9ebe839d8b242..a0ba19d4a10ec 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -176,6 +176,9 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
unsigned long in_modifier, u8 op_modifier, u16 op,
unsigned long timeout)
{
+ if (hr_dev->is_reset)
+ return 0;
+
if (hr_dev->cmd.use_events)
return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
in_modifier, op_modifier, op,
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index fb305b7f99a8b..31221d506d9ab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -100,6 +100,9 @@
#define SERV_TYPE_UC 2
#define SERV_TYPE_UD 3
+/* Offset added to page-size shifts written to HW when PAGE_SIZE is larger than 4KB */
+#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
+
#define PAGES_SHIFT_8 8
#define PAGES_SHIFT_16 16
#define PAGES_SHIFT_24 24
@@ -211,6 +214,13 @@ enum {
struct hns_roce_uar {
u64 pfn;
unsigned long index;
+ unsigned long logic_idx;
+};
+
+struct hns_roce_vma_data {
+ struct list_head list;
+ struct vm_area_struct *vma;
+ struct mutex *vma_list_mutex;
};
struct hns_roce_ucontext {
@@ -218,6 +228,8 @@ struct hns_roce_ucontext {
struct hns_roce_uar uar;
struct list_head page_list;
struct mutex page_mutex;
+ struct list_head vma_list;
+ struct mutex vma_list_mutex;
};
struct hns_roce_pd {
@@ -770,6 +782,8 @@ struct hns_roce_dev {
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
spinlock_t bt_cmd_lock;
+ bool active;
+ bool is_reset;
struct hns_roce_ib_iboe iboe;
struct list_head pgdir_list;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 1f0965bb64eed..0e8dad68910ab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_umem.h>
@@ -52,6 +53,53 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
+static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
+ unsigned int *sge_ind)
+{
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct ib_sge *sg;
+ int num_in_wqe = 0;
+ int extend_sge_num;
+ int fi_sge_num;
+ int se_sge_num;
+ int shift;
+ int i;
+
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+ num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
+ extend_sge_num = wr->num_sge - num_in_wqe;
+ sg = wr->sg_list + num_in_wqe;
+ shift = qp->hr_buf.page_shift;
+
+ /*
+ * Check whether all wr->num_sge sges fit in the same page. If not,
+ * work out how many sges fall in the first page and how many in the
+ * second page.
+ */
+ dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
+ fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
+ (uintptr_t)dseg) /
+ sizeof(struct hns_roce_v2_wqe_data_seg);
+ if (extend_sge_num > fi_sge_num) {
+ se_sge_num = extend_sge_num - fi_sge_num;
+ for (i = 0; i < fi_sge_num; i++) {
+ set_data_seg_v2(dseg++, sg + i);
+ (*sge_ind)++;
+ }
+ dseg = get_send_extend_sge(qp,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+ for (i = 0; i < se_sge_num; i++) {
+ set_data_seg_v2(dseg++, sg + fi_sge_num + i);
+ (*sge_ind)++;
+ }
+ } else {
+ for (i = 0; i < extend_sge_num; i++) {
+ set_data_seg_v2(dseg++, sg + i);
+ (*sge_ind)++;
+ }
+ }
+}
+
static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
@@ -85,7 +133,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
1);
} else {
- if (wr->num_sge <= 2) {
+ if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
for (i = 0; i < wr->num_sge; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
@@ -98,24 +146,14 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
(*sge_ind) & (qp->sge.sge_cnt - 1));
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
}
}
- dseg = get_send_extend_sge(qp,
- (*sge_ind) & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < wr->num_sge - 2; i++) {
- if (likely(wr->sg_list[i + 2].length)) {
- set_data_seg_v2(dseg,
- wr->sg_list + 2 + i);
- dseg++;
- (*sge_ind)++;
- }
- }
+ set_extend_sge(qp, wr, sge_ind);
}
roce_set_field(rc_sq_wqe->byte_16,
@@ -319,13 +357,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
GID_LEN_V2);
- dseg = get_send_extend_sge(qp,
- sge_ind & (qp->sge.sge_cnt - 1));
- for (i = 0; i < wr->num_sge; i++) {
- set_data_seg_v2(dseg + i, wr->sg_list + i);
- sge_ind++;
- }
-
+ set_extend_sge(qp, wr, &sge_ind);
ind++;
} else if (ibqp->qp_type == IB_QPT_RC) {
rc_sq_wqe = wqe;
@@ -481,8 +513,8 @@ out:
V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
- roce_set_field(sq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
- V2_DB_PARAMETER_CONS_IDX_S,
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
+ V2_DB_PARAMETER_IDX_S,
qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
V2_DB_PARAMETER_SL_S, qp->sl);
@@ -775,6 +807,9 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
int ret = 0;
int ntc;
+ if (hr_dev->is_reset)
+ return 0;
+
spin_lock_bh(&csq->lock);
if (num > hns_roce_cmq_space(csq)) {
@@ -1031,40 +1066,40 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
- hr_dev->caps.qpc_ba_pg_sz);
+ hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
- hr_dev->caps.qpc_buf_pg_sz);
+ hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
- hr_dev->caps.srqc_ba_pg_sz);
+ hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
- hr_dev->caps.srqc_buf_pg_sz);
+ hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
- hr_dev->caps.cqc_ba_pg_sz);
+ hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
- hr_dev->caps.cqc_buf_pg_sz);
+ hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
- hr_dev->caps.mpt_ba_pg_sz);
+ hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
- hr_dev->caps.mpt_buf_pg_sz);
+ hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
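
Every page-size field written to the hardware base-address tables now has PG_SHIFT_OFFSET added. The caps values (qpc_ba_pg_sz and friends) are shifts relative to the kernel page size, while the hardware field appears to be encoded relative to a fixed 4 KiB unit, so the constant (presumably PAGE_SHIFT - 12, defined elsewhere in this series) compensates when PAGE_SIZE is larger than 4 KiB; this is also what allows the "Just support 4K page size" check to be dropped from hns_roce_mr.c further down. A hedged sketch of that encoding, with the offset treated as an assumption:

#include <stdio.h>

/* Assumption for illustration: the hw field counts log2(page size / 4K). */
#define HW_BASE_SHIFT 12

static unsigned int hw_pg_sz_field(unsigned int cap_pg_sz, unsigned int page_shift)
{
	/* cap_pg_sz is "extra shift on top of the kernel page size". */
	unsigned int pg_shift_offset = page_shift - HW_BASE_SHIFT;

	return cap_pg_sz + pg_shift_offset;
}

int main(void)
{
	/* 4K kernel pages leave the field unchanged; 64K pages add 4. */
	printf("4K kernel page, cap 0 -> %u\n", hw_pg_sz_field(0, 12));
	printf("64K kernel page, cap 0 -> %u\n", hw_pg_sz_field(0, 16));
	return 0;
}
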
@@ -1359,7 +1394,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
- V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, mr->pbl_ba_pg_sz);
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+ mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
@@ -1435,7 +1471,8 @@ found:
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, mr->pbl_buf_pg_sz);
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
return 0;
@@ -1616,11 +1653,11 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
- hr_dev->caps.cqe_ba_pg_sz);
+ hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
- hr_dev->caps.cqe_buf_pg_sz);
+ hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
cq_context->cqe_ba = (u32)(dma_handle >> 3);
@@ -2719,7 +2756,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
- hr_dev->caps.mtt_ba_pg_sz);
+ hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
@@ -2727,7 +2764,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
- hr_dev->caps.mtt_buf_pg_sz);
+ hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
@@ -4161,12 +4198,14 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set eqe_ba_pg_sz */
roce_set_field(eqc->byte_8,
HNS_ROCE_EQC_BA_PG_SZ_M,
- HNS_ROCE_EQC_BA_PG_SZ_S, eq->eqe_ba_pg_sz);
+ HNS_ROCE_EQC_BA_PG_SZ_S,
+ eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
/* set eqe_buf_pg_sz */
roce_set_field(eqc->byte_8,
HNS_ROCE_EQC_BUF_PG_SZ_M,
- HNS_ROCE_EQC_BUF_PG_SZ_S, eq->eqe_buf_pg_sz);
+ HNS_ROCE_EQC_BUF_PG_SZ_S,
+ eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
/* set eq_producer_idx */
roce_set_field(eqc->byte_8,
@@ -4800,14 +4839,87 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
{
struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ if (!hr_dev)
+ return;
+
hns_roce_exit(hr_dev);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
}
+static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+{
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct ib_event event;
+
+ if (!hr_dev) {
+ dev_err(&handle->pdev->dev,
+ "Input parameter handle->priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ hr_dev->active = false;
+ hr_dev->is_reset = true;
+
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &hr_dev->ib_dev;
+ event.element.port_num = 1;
+ ib_dispatch_event(&event);
+
+ return 0;
+}
+
+static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
+{
+ int ret;
+
+ ret = hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ /* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE engine
+ * is reinitialized in this callback. If the reinit fails, clear
+ * handle->priv so the NIC driver can tell that it failed.
+ */
+ handle->priv = NULL;
+ dev_err(&handle->pdev->dev,
+ "In reset process RoCE reinit failed %d.\n", ret);
+ }
+
+ return ret;
+}
+
+static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
+{
+ msleep(100);
+ hns_roce_hw_v2_uninit_instance(handle, false);
+ return 0;
+}
+
+static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case HNAE3_DOWN_CLIENT:
+ ret = hns_roce_hw_v2_reset_notify_down(handle);
+ break;
+ case HNAE3_INIT_CLIENT:
+ ret = hns_roce_hw_v2_reset_notify_init(handle);
+ break;
+ case HNAE3_UNINIT_CLIENT:
+ ret = hns_roce_hw_v2_reset_notify_uninit(handle);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
.init_instance = hns_roce_hw_v2_init_instance,
.uninit_instance = hns_roce_hw_v2_uninit_instance,
+ .reset_notify = hns_roce_hw_v2_reset_notify,
};
static struct hnae3_client hns_roce_hw_v2_client = {
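
The new reset_notify hook wires the RoCE client into the hnae3 reset sequence: HNAE3_DOWN_CLIENT marks the device inactive and in reset and dispatches IB_EVENT_DEVICE_FATAL so consumers can detach, HNAE3_UNINIT_CLIENT tears the instance down after a short delay, and HNAE3_INIT_CLIENT rebuilds it, clearing handle->priv on failure. A minimal user-space sketch of the same three-phase dispatch; the types and names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

enum notify { DOWN_CLIENT, UNINIT_CLIENT, INIT_CLIENT };

struct dev_state {
	bool active;       /* gates new ucontext allocation */
	bool is_reset;     /* makes command-queue sends no-ops */
	bool instantiated;
};

static int reset_notify(struct dev_state *d, enum notify type)
{
	switch (type) {
	case DOWN_CLIENT:
		d->active = false;
		d->is_reset = true;
		puts("dispatched fatal event");
		return 0;
	case UNINIT_CLIENT:
		d->instantiated = false;
		return 0;
	case INIT_CLIENT:
		d->instantiated = true; /* re-init; on failure clear the handle */
		d->active = true;
		d->is_reset = false;
		return 0;
	}
	return 0;
}

int main(void)
{
	struct dev_state d = { .active = true, .instantiated = true };

	reset_notify(&d, DOWN_CLIENT);
	reset_notify(&d, UNINIT_CLIENT);
	reset_notify(&d, INIT_CLIENT);
	printf("active=%d is_reset=%d instantiated=%d\n",
	       d.active, d.is_reset, d.instantiated);
	return 0;
}
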
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 182b6726f7838..d47675f365c70 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -76,7 +76,8 @@
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
-#define HNS_ROCE_CMQ_TX_TIMEOUT 200
+#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
+#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_MTT_HOP_NUM 1
@@ -897,8 +898,8 @@ struct hns_roce_v2_mpt_entry {
#define V2_DB_BYTE_4_CMD_S 24
#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
-#define V2_DB_PARAMETER_CONS_IDX_S 0
-#define V2_DB_PARAMETER_CONS_IDX_M GENMASK(15, 0)
+#define V2_DB_PARAMETER_IDX_S 0
+#define V2_DB_PARAMETER_IDX_M GENMASK(15, 0)
#define V2_DB_PARAMETER_SL_S 16
#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 96fb6a9ed93c4..21b901cfa2d65 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -99,7 +99,6 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
struct ib_gid_attr zattr = { };
- union ib_gid zgid = { {0} };
u8 port = attr->port_num - 1;
unsigned long flags;
int ret;
@@ -333,6 +332,9 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
struct hns_roce_ib_alloc_ucontext_resp resp = {};
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ if (!hr_dev->active)
+ return ERR_PTR(-EAGAIN);
+
resp.qp_tab_size = hr_dev->caps.num_qps;
context = kmalloc(sizeof(*context), GFP_KERNEL);
@@ -343,6 +345,8 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
if (ret)
goto error_fail_uar_alloc;
+ INIT_LIST_HEAD(&context->vma_list);
+ mutex_init(&context->vma_list_mutex);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
mutex_init(&context->page_mutex);
@@ -373,6 +377,50 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
+static void hns_roce_vma_open(struct vm_area_struct *vma)
+{
+ vma->vm_ops = NULL;
+}
+
+static void hns_roce_vma_close(struct vm_area_struct *vma)
+{
+ struct hns_roce_vma_data *vma_data;
+
+ vma_data = (struct hns_roce_vma_data *)vma->vm_private_data;
+ vma_data->vma = NULL;
+ mutex_lock(vma_data->vma_list_mutex);
+ list_del(&vma_data->list);
+ mutex_unlock(vma_data->vma_list_mutex);
+ kfree(vma_data);
+}
+
+static const struct vm_operations_struct hns_roce_vm_ops = {
+ .open = hns_roce_vma_open,
+ .close = hns_roce_vma_close,
+};
+
+static int hns_roce_set_vma_data(struct vm_area_struct *vma,
+ struct hns_roce_ucontext *context)
+{
+ struct list_head *vma_head = &context->vma_list;
+ struct hns_roce_vma_data *vma_data;
+
+ vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL);
+ if (!vma_data)
+ return -ENOMEM;
+
+ vma_data->vma = vma;
+ vma_data->vma_list_mutex = &context->vma_list_mutex;
+ vma->vm_private_data = vma_data;
+ vma->vm_ops = &hns_roce_vm_ops;
+
+ mutex_lock(&context->vma_list_mutex);
+ list_add(&vma_data->list, vma_head);
+ mutex_unlock(&context->vma_list_mutex);
+
+ return 0;
+}
+
static int hns_roce_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
@@ -398,7 +446,7 @@ static int hns_roce_mmap(struct ib_ucontext *context,
} else
return -EINVAL;
- return 0;
+ return hns_roce_set_vma_data(vma, to_hr_ucontext(context));
}
static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
@@ -422,10 +470,30 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
return 0;
}
+static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+ struct hns_roce_vma_data *vma_data, *n;
+ struct vm_area_struct *vma;
+
+ mutex_lock(&context->vma_list_mutex);
+ list_for_each_entry_safe(vma_data, n, &context->vma_list, list) {
+ vma = vma_data->vma;
+ zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
+
+ vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
+ vma->vm_ops = NULL;
+ list_del(&vma_data->list);
+ kfree(vma_data);
+ }
+ mutex_unlock(&context->vma_list_mutex);
+}
+
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
+ hr_dev->active = false;
unregister_netdevice_notifier(&iboe->nb);
ib_unregister_device(&hr_dev->ib_dev);
}
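
To support disassociate_ucontext (so a device reset does not have to wait for userspace to unmap its doorbell pages), the driver now remembers every VMA created through hns_roce_mmap() on a per-ucontext list, drops entries in hns_roce_vma_close(), and on disassociate walks the list, zaps the PTEs and clears vm_ops. A stripped-down user-space sketch of that bookkeeping pattern, with hypothetical types and a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vma_data {
	struct vma_data *next;
	unsigned long start;	/* stand-in for the tracked mapping */
};

struct ucontext {
	struct vma_data *vma_list;
	pthread_mutex_t vma_list_mutex;
};

/* mmap path: remember the mapping so it can be revoked later. */
static int track_vma(struct ucontext *ctx, unsigned long start)
{
	struct vma_data *v = calloc(1, sizeof(*v));

	if (!v)
		return -1;
	v->start = start;
	pthread_mutex_lock(&ctx->vma_list_mutex);
	v->next = ctx->vma_list;
	ctx->vma_list = v;
	pthread_mutex_unlock(&ctx->vma_list_mutex);
	return 0;
}

/* disassociate path: revoke ("zap") every tracked mapping. */
static void disassociate(struct ucontext *ctx)
{
	pthread_mutex_lock(&ctx->vma_list_mutex);
	while (ctx->vma_list) {
		struct vma_data *v = ctx->vma_list;

		ctx->vma_list = v->next;
		printf("zapping mapping at 0x%lx\n", v->start);
		free(v);
	}
	pthread_mutex_unlock(&ctx->vma_list_mutex);
}

int main(void)
{
	struct ucontext ctx = { .vma_list_mutex = PTHREAD_MUTEX_INITIALIZER };

	track_vma(&ctx, 0x7f0000001000);
	track_vma(&ctx, 0x7f0000002000);
	disassociate(&ctx);
	return 0;
}
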
@@ -516,6 +584,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
+ ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
ib_dev->driver_id = RDMA_DRIVER_HNS;
ret = ib_register_device(ib_dev, NULL);
@@ -537,6 +606,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
goto error_failed_setup_mtu_mac;
}
+ hr_dev->active = true;
return 0;
error_failed_setup_mtu_mac:
@@ -729,6 +799,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
return ret;
}
}
+ hr_dev->is_reset = false;
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
@@ -828,6 +899,7 @@ EXPORT_SYMBOL_GPL(hns_roce_init);
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
hns_roce_unregister_device(hr_dev);
+
if (hr_dev->hw->hw_exit)
hr_dev->hw->hw_exit(hr_dev);
hns_roce_cleanup_bitmap(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index f7256d88d38f9..d1fe0e7957e36 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -1007,12 +1007,6 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
n = ib_umem_page_count(mr->umem);
- if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
- dev_err(dev, "Just support 4K page size but is 0x%lx now!\n",
- BIT(mr->umem->page_shift));
- ret = -EINVAL;
- goto err_umem;
- }
if (!hr_dev->caps.pbl_hop_num) {
if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 4b41e041799ca..b9f2c871ff9a6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -107,13 +107,15 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
int ret = 0;
/* Using bitmap to manager UAR index */
- ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->index);
+ ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
if (ret == -1)
return -ENOMEM;
- if (uar->index > 0)
- uar->index = (uar->index - 1) %
+ if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
+ uar->index = (uar->logic_idx - 1) %
(hr_dev->caps.phy_num_uars - 1) + 1;
+ else
+ uar->index = 0;
if (!dev_is_pci(hr_dev->dev)) {
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
@@ -132,7 +134,7 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
- hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index,
+ hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->logic_idx,
BITMAP_NO_RR);
}
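
hns_roce_uar_alloc() now keeps the raw bitmap value in uar->logic_idx and derives the programmed uar->index from it: the old `(index - 1) % (phy_num_uars - 1)` expression divided by zero whenever caps.phy_num_uars was 1, and the free path released the already-remapped index instead of the bitmap slot. A tiny sketch of the corrected mapping:

#include <stdio.h>

static unsigned long uar_index(unsigned long logic_idx, unsigned int phy_num_uars)
{
	/* Map logical UAR 0 to physical 0; spread the rest over 1..phy-1. */
	if (logic_idx > 0 && phy_num_uars > 1)
		return (logic_idx - 1) % (phy_num_uars - 1) + 1;
	return 0;
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       uar_index(0, 4),   /* 0 */
	       uar_index(5, 4),   /* 2 */
	       uar_index(3, 1));  /* 0: guarded, no divide-by-zero */
	return 0;
}
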
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index f7c6fd9ff6e2c..7b2655128b9f7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1519,18 +1519,13 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
/**
* i40iw_find_port - find port that matches reference port
- * @port: port number
+ * @hte: ptr to accelerated or non-accelerated list
* @accelerated_list: flag for accelerated vs non-accelerated list
*/
-static bool i40iw_find_port(struct i40iw_cm_core *cm_core, u16 port,
- bool accelerated_list)
+static bool i40iw_find_port(struct list_head *hte, u16 port)
{
- struct list_head *hte;
struct i40iw_cm_node *cm_node;
- hte = accelerated_list ?
- &cm_core->accelerated_list : &cm_core->non_accelerated_list;
-
list_for_each_entry(cm_node, hte, list) {
if (cm_node->loc_port == port)
return true;
@@ -1540,35 +1535,32 @@ static bool i40iw_find_port(struct i40iw_cm_core *cm_core, u16 port,
/**
* i40iw_port_in_use - determine if port is in use
+ * @cm_core: cm's core
* @port: port number
- * @active_side: flag for listener side vs active side
*/
-static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side)
+bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
{
struct i40iw_cm_listener *listen_node;
unsigned long flags;
- bool ret = false;
- if (active_side) {
- spin_lock_irqsave(&cm_core->ht_lock, flags);
- ret = i40iw_find_port(cm_core, port, true);
- if (!ret)
- ret = i40iw_find_port(cm_core, port, false);
- if (!ret)
- clear_bit(port, cm_core->active_side_ports);
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (i40iw_find_port(&cm_core->accelerated_list, port) ||
+ i40iw_find_port(&cm_core->non_accelerated_list, port)) {
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- } else {
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
- if (listen_node->loc_port == port) {
- ret = true;
- break;
- }
+ return true;
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ if (listen_node->loc_port == port) {
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return true;
}
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
}
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- return ret;
+ return false;
}
/**
@@ -1788,7 +1780,7 @@ static enum i40iw_status_code i40iw_add_mqh_4(
&ifa->ifa_address,
rdma_vlan_dev_vlan_id(dev),
dev->dev_addr);
- child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
@@ -1917,7 +1909,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
if (listener->iwdev) {
- if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false))
+ if (apbvt_del)
i40iw_manage_apbvt(listener->iwdev,
listener->loc_port,
I40IW_MANAGE_APBVT_DEL);
@@ -2298,7 +2290,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
if (cm_node->listener) {
i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
} else {
- if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) {
+ if (cm_node->apbvt_set) {
i40iw_manage_apbvt(cm_node->iwdev,
cm_node->loc_port,
I40IW_MANAGE_APBVT_DEL);
@@ -2872,7 +2864,7 @@ static struct i40iw_cm_listener *i40iw_make_listen_node(
if (!listener) {
/* create a CM listen node (1/2 node to compare incoming traffic to) */
- listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
if (!listener)
return NULL;
cm_core->stats_listen_nodes_created++;
@@ -3244,6 +3236,7 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
spin_lock_init(&cm_core->ht_lock);
spin_lock_init(&cm_core->listen_list_lock);
+ spin_lock_init(&cm_core->apbvt_lock);
cm_core->event_wq = alloc_ordered_workqueue("iwewq",
WQ_MEM_RECLAIM);
@@ -3811,7 +3804,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in6 *laddr6;
struct sockaddr_in6 *raddr6;
int ret = 0;
- unsigned long flags;
ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
@@ -3882,15 +3874,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->qhash_set = true;
}
- spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
- if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) {
- spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
- if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) {
- ret = -EINVAL;
- goto err;
- }
- } else {
- spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ if (i40iw_manage_apbvt(iwdev, cm_info.loc_port,
+ I40IW_MANAGE_APBVT_ADD)) {
+ ret = -EINVAL;
+ goto err;
}
cm_node->apbvt_set = true;
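
i40iw_port_in_use() is now exported and checks every place a port can live: the accelerated and non-accelerated connection lists under ht_lock, and the listener list under listen_list_lock, replacing the old split between "active side" and "listener side" checks. Callers no longer combine the check with APBVT management; that decision moves into i40iw_manage_apbvt() itself (see the i40iw_hw.c hunk below). A compact user-space sketch of the consolidated check, with hypothetical node types and pthread locks:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node { struct node *next; unsigned short port; };

static pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t listen_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *accelerated, *non_accelerated, *listeners;

static bool list_has_port(struct node *head, unsigned short port)
{
	for (; head; head = head->next)
		if (head->port == port)
			return true;
	return false;
}

static bool port_in_use(unsigned short port)
{
	bool ret;

	pthread_mutex_lock(&conn_lock);
	ret = list_has_port(accelerated, port) ||
	      list_has_port(non_accelerated, port);
	pthread_mutex_unlock(&conn_lock);
	if (ret)
		return true;

	pthread_mutex_lock(&listen_lock);
	ret = list_has_port(listeners, port);
	pthread_mutex_unlock(&listen_lock);
	return ret;
}

int main(void)
{
	static struct node l = { .port = 4791 };

	listeners = &l;
	printf("4791 in use: %d, 1234 in use: %d\n",
	       port_in_use(4791), port_in_use(1234));
	return 0;
}
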
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 78ba36ae2bbe1..66dc1ba033890 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -413,8 +413,9 @@ struct i40iw_cm_core {
spinlock_t ht_lock; /* manage hash table */
spinlock_t listen_list_lock; /* listen list */
+ spinlock_t apbvt_lock; /* manage apbvt entries */
- unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)];
+ unsigned long ports_in_use[BITS_TO_LONGS(MAX_PORTS)];
u64 stats_nodes_created;
u64 stats_nodes_destroyed;
@@ -457,4 +458,5 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
struct i40iw_cm_info *nfo,
bool disconnect_all);
+bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port);
#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index c9f62ca7643c8..2836c5420d602 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -443,13 +443,37 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port)
{
struct i40iw_apbvt_info *info;
- enum i40iw_status_code status;
struct i40iw_cqp_request *cqp_request;
struct cqp_commands_info *cqp_info;
+ unsigned long flags;
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ enum i40iw_status_code status = 0;
+ bool in_use;
+
+ /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
+ * protect against race where add APBVT CQP can race ahead of the delete
+ * APBVT for same port.
+ */
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+
+ if (!add_port) {
+ in_use = i40iw_port_in_use(cm_core, accel_local_port);
+ if (in_use)
+ goto exit;
+ clear_bit(accel_local_port, cm_core->ports_in_use);
+ } else {
+ in_use = test_and_set_bit(accel_local_port,
+ cm_core->ports_in_use);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ if (in_use)
+ return 0;
+ }
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
- if (!cqp_request)
- return -ENOMEM;
+ if (!cqp_request) {
+ status = -ENOMEM;
+ goto exit;
+ }
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_apbvt_entry.info;
@@ -465,6 +489,10 @@ int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool ad
status = i40iw_handle_cqp_op(iwdev, cqp_request);
if (status)
i40iw_pr_err("CQP-OP Manage APBVT entry fail");
+exit:
+ if (!add_port)
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+
return status;
}
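
i40iw_manage_apbvt() now owns the port bookkeeping. An add does test_and_set_bit() on the new ports_in_use bitmap and skips the CQP command if the bit was already set; a delete re-checks i40iw_port_in_use() and keeps apbvt_lock held across the (non-waiting) delete CQP so a concurrent add for the same port cannot overtake it. A user-space sketch of that asymmetric locking pattern, with a plain bitset and mutex standing in for the kernel primitives:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_PORTS 65536

static unsigned char ports_in_use[MAX_PORTS / 8];
static pthread_mutex_t apbvt_lock = PTHREAD_MUTEX_INITIALIZER;

static bool test_and_set(unsigned int port)
{
	bool old = ports_in_use[port / 8] & (1 << (port % 8));

	ports_in_use[port / 8] |= 1 << (port % 8);
	return old;
}

static void clear_port(unsigned int port)
{
	ports_in_use[port / 8] &= ~(1 << (port % 8));
}

static int manage_apbvt(unsigned int port, bool add, bool still_referenced)
{
	pthread_mutex_lock(&apbvt_lock);
	if (add) {
		bool already = test_and_set(port);

		/* The add command itself may run unlocked. */
		pthread_mutex_unlock(&apbvt_lock);
		if (!already)
			printf("CQP: add APBVT entry %u\n", port);
		return 0;
	}
	if (still_referenced) {		/* another user still holds the port */
		pthread_mutex_unlock(&apbvt_lock);
		return 0;
	}
	clear_port(port);
	/* Delete is issued with the lock held so an add cannot overtake it. */
	printf("CQP: delete APBVT entry %u\n", port);
	pthread_mutex_unlock(&apbvt_lock);
	return 0;
}

int main(void)
{
	manage_apbvt(4791, true, false);
	manage_apbvt(4791, true, false);	/* duplicate add: no CQP issued */
	manage_apbvt(4791, false, false);
	return 0;
}
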
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 05001e6da1f8f..68095f00d08f7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1757,7 +1757,7 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli
return;
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
return;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0793a21d76f4c..d604b3d5aa3e4 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1934,7 +1934,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
"buf:%lld\n", wc.wr_id);
break;
default:
- BUG_ON(1);
break;
}
} else {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 5b70744f414ac..f839bf3b1497a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -276,7 +276,7 @@ static int mlx4_ib_add_gid(const union ib_gid *gid,
found = i;
break;
}
- if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
+ if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
free = i; /* HW has space */
}
@@ -345,7 +345,8 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
if (!ctx->refcount) {
unsigned int real_index = ctx->real_index;
- memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
+ memset(&port_gid_table->gids[real_index].gid, 0,
+ sizeof(port_gid_table->gids[real_index].gid));
kfree(port_gid_table->gids[real_index].ctx);
port_gid_table->gids[real_index].ctx = NULL;
hw_update = 1;
@@ -1185,65 +1186,25 @@ static const struct vm_operations_struct mlx4_ib_vm_ops = {
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
int i;
- int ret = 0;
struct vm_area_struct *vma;
struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- /* make sure that task is dead before returning, it may
- * prevent a rare case of module down in parallel to a
- * call to mlx4_ib_vma_close.
- */
- put_task_struct(owning_process);
- usleep_range(1000, 2000);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the task struct */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
/* need to protect from a race on closing the vma as part of
* mlx4_ib_vma_close().
*/
- down_write(&owning_mm->mmap_sem);
for (i = 0; i < HW_BAR_COUNT; i++) {
vma = context->hw_bar_info[i].vma;
if (!vma)
continue;
- ret = zap_vma_ptes(context->hw_bar_info[i].vma,
- context->hw_bar_info[i].vma->vm_start,
- PAGE_SIZE);
- if (ret) {
- pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
- BUG_ON(1);
- }
+ zap_vma_ptes(context->hw_bar_info[i].vma,
+ context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
context->hw_bar_info[i].vma->vm_flags &=
~(VM_SHARED | VM_MAYSHARE);
/* context going to be destroyed, should not access ops any more */
context->hw_bar_info[i].vma->vm_ops = NULL;
}
-
- up_write(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
}
static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
@@ -1847,7 +1808,7 @@ static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
- int domain)
+ int domain, struct ib_udata *udata)
{
int err = 0, i = 0, j = 0;
struct mlx4_ib_flow *mflow;
@@ -1865,6 +1826,10 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
(flow_attr->type != IB_FLOW_ATTR_NORMAL))
return ERR_PTR(-EOPNOTSUPP);
+ if (udata &&
+ udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
+ return ERR_PTR(-EOPNOTSUPP);
+
memset(type, 0, sizeof(type));
mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
@@ -3050,7 +3015,10 @@ void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
return;
- BUG_ON(qpn < dev->steer_qpn_base);
+ if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
+ qpn, dev->steer_qpn_base))
+ /* not supposed to be here */
+ return;
bitmap_release_region(dev->ib_uc_qpns_bitmap,
qpn - dev->steer_qpn_base,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 61d8b06375bb8..ed1f253faf977 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -367,6 +367,40 @@ end:
return block_shift;
}
+static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
+ u64 length, u64 virt_addr,
+ int access_flags)
+{
+ /*
+ * Force registering the memory as writable if the underlying pages
+ * are writable. This is so rereg can change the access permissions
+ * from readable to writable without having to run through ib_umem_get
+ * again
+ */
+ if (!ib_access_writable(access_flags)) {
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ /*
+ * FIXME: Ideally this would iterate over all the vmas that
+ * cover the memory, but for now it requires a single vma to
+ * entirely cover the MR to support RO mappings.
+ */
+ vma = find_vma(current->mm, start);
+ if (vma && vma->vm_end >= start + length &&
+ vma->vm_start <= start) {
+ if (vma->vm_flags & VM_WRITE)
+ access_flags |= IB_ACCESS_LOCAL_WRITE;
+ } else {
+ access_flags |= IB_ACCESS_LOCAL_WRITE;
+ }
+
+ up_read(&current->mm->mmap_sem);
+ }
+
+ return ib_umem_get(context, start, length, access_flags, 0);
+}
+
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@@ -381,10 +415,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- /* Force registering the memory as writable. */
- /* Used for memory re-registeration. HCA protects the access */
- mr->umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags | IB_ACCESS_LOCAL_WRITE, 0);
+ mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
+ virt_addr, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -454,6 +486,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
}
if (flags & IB_MR_REREG_ACCESS) {
+ if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
+ return -EPERM;
+
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
convert_access(mr_access_flags));
@@ -467,10 +502,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
- mmr->umem = ib_umem_get(mr->uobject->context, start, length,
- mr_access_flags |
- IB_ACCESS_LOCAL_WRITE,
- 0);
+ mmr->umem =
+ mlx4_get_umem_mr(mr->uobject->context, start, length,
+ virt_addr, mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
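
mlx4_get_umem_mr() stops unconditionally forcing IB_ACCESS_LOCAL_WRITE: the flag is only added when the caller did not request writable access and the single VMA backing the range is itself writable (or no covering VMA is found), which keeps a later rereg-to-writable cheap while allowing genuinely read-only mappings to be registered. mlx4_ib_rereg_user_mr() correspondingly rejects upgrading a non-writable umem to writable access. A hedged user-space sketch of that flag decision, with the VMA lookup mocked:

#include <stdbool.h>
#include <stdio.h>

#define ACCESS_LOCAL_WRITE  (1 << 0)
#define ACCESS_REMOTE_WRITE (1 << 1)

static bool access_writable(int flags)
{
	return flags & (ACCESS_LOCAL_WRITE | ACCESS_REMOTE_WRITE);
}

/* vma_writable mocks "the single VMA covering the range has VM_WRITE". */
static int mr_access_flags(int requested, bool vma_found, bool vma_writable)
{
	if (access_writable(requested))
		return requested;	/* caller asked for write access anyway */
	if (!vma_found || vma_writable)
		return requested | ACCESS_LOCAL_WRITE;	/* keep it upgradable */
	return requested;		/* genuinely read-only registration */
}

int main(void)
{
	printf("%#x %#x %#x\n",
	       mr_access_flags(0, true, true),	/* RO request over RW pages */
	       mr_access_flags(0, true, false),	/* RO request over RO pages */
	       mr_access_flags(ACCESS_REMOTE_WRITE, true, false));
	return 0;
}
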
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 199648adac749..cd2c08c453345 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -3078,7 +3078,7 @@ static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid));
*gid_type = port_gid_table->gids[index].gid_type;
spin_unlock_irqrestore(&iboe->lock, flags);
- if (!memcmp(gid, &zgid, sizeof(*gid)))
+ if (rdma_is_zero_gid(gid))
return -ENOENT;
return 0;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 6d52ea03574e5..ad39d64b81080 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -637,7 +637,7 @@ repoll:
}
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
- struct ib_wc *wc)
+ struct ib_wc *wc, bool is_fatal_err)
{
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
struct mlx5_ib_wc *soft_wc, *next;
@@ -650,6 +650,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
cq->mcq.cqn);
+ if (unlikely(is_fatal_err)) {
+ soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
+ soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ }
wc[npolled++] = soft_wc->wc;
list_del(&soft_wc->list);
kfree(soft_wc);
@@ -670,12 +674,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+ /* make sure no soft wqe's are waiting */
+ if (unlikely(!list_empty(&cq->wc_list)))
+ soft_polled = poll_soft_wc(cq, num_entries, wc, true);
+
+ mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
+ wc + soft_polled, &npolled);
goto out;
}
if (unlikely(!list_empty(&cq->wc_list)))
- soft_polled = poll_soft_wc(cq, num_entries, wc);
+ soft_polled = poll_soft_wc(cq, num_entries, wc, false);
for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
@@ -742,6 +751,28 @@ static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
return 0;
}
+enum {
+ MLX5_CQE_RES_FORMAT_HASH = 0,
+ MLX5_CQE_RES_FORMAT_CSUM = 1,
+ MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
+};
+
+static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
+{
+ switch (format) {
+ case MLX5_IB_CQE_RES_FORMAT_HASH:
+ return MLX5_CQE_RES_FORMAT_HASH;
+ case MLX5_IB_CQE_RES_FORMAT_CSUM:
+ return MLX5_CQE_RES_FORMAT_CSUM;
+ case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
+ if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+ return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, u32 **cqb,
@@ -807,6 +838,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*index = to_mucontext(context)->bfregi.sys_pages[0];
if (ucmd.cqe_comp_en == 1) {
+ int mini_cqe_format;
+
if (!((*cqe_size == 128 &&
MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
(*cqe_size == 64 &&
@@ -817,20 +850,18 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
goto err_cqb;
}
- if (unlikely(!ucmd.cqe_comp_res_format ||
- !(ucmd.cqe_comp_res_format <
- MLX5_IB_CQE_RES_RESERVED) ||
- (ucmd.cqe_comp_res_format &
- (ucmd.cqe_comp_res_format - 1)))) {
- err = -EOPNOTSUPP;
- mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
- ucmd.cqe_comp_res_format);
+ mini_cqe_format =
+ mini_cqe_res_format_to_hw(dev,
+ ucmd.cqe_comp_res_format);
+ if (mini_cqe_format < 0) {
+ err = mini_cqe_format;
+ mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
+ ucmd.cqe_comp_res_format, err);
goto err_cqb;
}
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
- MLX5_SET(cqc, cqc, mini_cqe_res_format,
- ilog2(ucmd.cqe_comp_res_format));
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
}
if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
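
The CQE-compression response format requested by userspace is no longer just ilog2()'d into the CQ context. mini_cqe_res_format_to_hw() translates each advertised flag to its hardware encoding (0, 1 or 3, per the new enum above) and gates the checksum-with-stride-index format on the mini_cqe_resp_stride_index capability, returning -EOPNOTSUPP or -EINVAL otherwise. A user-space sketch of that mapping; the request flag values here are illustrative assumptions, not the uapi definitions:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the uapi request flags. */
#define REQ_FORMAT_HASH		(1 << 0)
#define REQ_FORMAT_CSUM		(1 << 1)
#define REQ_FORMAT_CSUM_STRIDX	(1 << 2)

/* Hardware encodings used when writing the CQ context. */
enum { HW_HASH = 0, HW_CSUM = 1, HW_CSUM_STRIDX = 3 };

static int res_format_to_hw(unsigned int req, bool stride_index_cap)
{
	switch (req) {
	case REQ_FORMAT_HASH:
		return HW_HASH;
	case REQ_FORMAT_CSUM:
		return HW_CSUM;
	case REQ_FORMAT_CSUM_STRIDX:
		return stride_index_cap ? HW_CSUM_STRIDX : -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       res_format_to_hw(REQ_FORMAT_CSUM, false),
	       res_format_to_hw(REQ_FORMAT_CSUM_STRIDX, false),
	       res_format_to_hw(REQ_FORMAT_CSUM_STRIDX, true));
	return 0;
}
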
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 0e04fdddf6701..35a0e04c38f28 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*/
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 046fd942fd469..2ba73636a2fb6 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*/
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 69716a7ea9934..e52dd21519b45 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -982,13 +982,21 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
- resp.cqe_comp_caps.max_num =
- MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
- MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
- resp.cqe_comp_caps.supported_format =
- MLX5_IB_CQE_RES_FORMAT_HASH |
- MLX5_IB_CQE_RES_FORMAT_CSUM;
resp.response_length += sizeof(resp.cqe_comp_caps);
+
+ if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
+ resp.cqe_comp_caps.max_num =
+ MLX5_CAP_GEN(dev->mdev,
+ cqe_compression_max_num);
+
+ resp.cqe_comp_caps.supported_format =
+ MLX5_IB_CQE_RES_FORMAT_HASH |
+ MLX5_IB_CQE_RES_FORMAT_CSUM;
+
+ if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+ resp.cqe_comp_caps.supported_format |=
+ MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
+ }
}
if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
@@ -1084,6 +1092,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_GRE;
+ if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_CW_MPLS_GRE)
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
+ if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_CW_MPLS_UDP)
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
if (uhw->outlen) {
@@ -1953,49 +1969,15 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- int ret;
struct vm_area_struct *vma;
struct mlx5_ib_vma_private_data *vma_private, *n;
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- put_task_struct(owning_process);
- usleep_range(1000, 2000);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the
- * task struct.
- */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
-
- /* need to protect from a race on closing the vma as part of
- * mlx5_ib_vma_close.
- */
- down_write(&owning_mm->mmap_sem);
mutex_lock(&context->vma_private_list_mutex);
list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
list) {
vma = vma_private->vma;
- ret = zap_vma_ptes(vma, vma->vm_start,
- PAGE_SIZE);
- WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
+ zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
/* context going to be destroyed, should
* not access ops any more.
*/
@@ -2005,9 +1987,6 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
kfree(vma_private);
}
mutex_unlock(&context->vma_private_list_mutex);
- up_write(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
}
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
@@ -2051,10 +2030,6 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
if (err)
return err;
- mlx5_ib_dbg(dev, "mapped clock info at 0x%lx, PA 0x%llx\n",
- vma->vm_start,
- (unsigned long long)pfn << PAGE_SHIFT);
-
return mlx5_ib_set_vma_data(vma, context);
}
@@ -2149,15 +2124,14 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
err = io_remap_pfn_range(vma, vma->vm_start, pfn,
PAGE_SIZE, vma->vm_page_prot);
if (err) {
- mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
- err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
+ mlx5_ib_err(dev,
+ "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n",
+ err, mmap_cmd2str(cmd));
err = -EAGAIN;
goto err;
}
pa = pfn << PAGE_SHIFT;
- mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
- vma->vm_start, &pa);
err = mlx5_ib_set_vma_data(vma, context);
if (err)
@@ -2243,10 +2217,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
if (io_remap_pfn_range(vma, vma->vm_start, pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
-
- mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
- vma->vm_start,
- (unsigned long long)pfn << PAGE_SHIFT);
break;
case MLX5_IB_MMAP_CLOCK_INFO:
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
@@ -2386,7 +2356,8 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
enum {
MATCH_CRITERIA_ENABLE_OUTER_BIT,
MATCH_CRITERIA_ENABLE_MISC_BIT,
- MATCH_CRITERIA_ENABLE_INNER_BIT
+ MATCH_CRITERIA_ENABLE_INNER_BIT,
+ MATCH_CRITERIA_ENABLE_MISC2_BIT
};
#define HEADER_IS_ZERO(match_criteria, headers) \
@@ -2406,6 +2377,9 @@ static u8 get_match_criteria_enable(u32 *match_criteria)
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
MATCH_CRITERIA_ENABLE_INNER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
+ MATCH_CRITERIA_ENABLE_MISC2_BIT;
return match_criteria_enable;
}
@@ -2440,6 +2414,27 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}
+static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
+{
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
@@ -2448,6 +2443,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id
#define LAST_DROP_FIELD size
+#define LAST_COUNTERS_FIELD counters
/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
@@ -2479,12 +2475,16 @@ static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
u32 *match_v, const union ib_flow_spec *ib_spec,
const struct ib_flow_attr *flow_attr,
- struct mlx5_flow_act *action)
+ struct mlx5_flow_act *action, u32 prev_type)
{
void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
misc_parameters);
void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
misc_parameters);
+ void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters_2);
+ void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ misc_parameters_2);
void *headers_c;
void *headers_v;
int match_ipv;
@@ -2689,6 +2689,93 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
+ case IB_FLOW_SPEC_GRE:
+ if (ib_spec->gre.mask.c_ks_res0_ver)
+ return -EOPNOTSUPP;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ IPPROTO_GRE);
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
+ ntohs(ib_spec->gre.val.protocol));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
+ gre_key_h),
+ &ib_spec->gre.mask.key,
+ sizeof(ib_spec->gre.mask.key));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
+ gre_key_h),
+ &ib_spec->gre.val.key,
+ sizeof(ib_spec->gre.val.key));
+ break;
+ case IB_FLOW_SPEC_MPLS:
+ switch (prev_type) {
+ case IB_FLOW_SPEC_UDP:
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls_over_udp),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls_over_udp),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls_over_udp),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ break;
+ case IB_FLOW_SPEC_GRE:
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls_over_gre),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls_over_gre),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls_over_gre),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ break;
+ default:
+ if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_first_mpls),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ inner_first_mpls),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ inner_first_mpls),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ } else {
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ }
+ }
+ break;
case IB_FLOW_SPEC_VXLAN_TUNNEL:
if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
LAST_TUNNEL_FIELD))
@@ -2720,6 +2807,18 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
if (ret)
return ret;
break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
+ LAST_COUNTERS_FIELD))
+ return -EOPNOTSUPP;
+
+ /* for now support only one counters spec per flow */
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ return -EINVAL;
+
+ action->counters = ib_spec->flow_count.counters;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ break;
default:
return -EINVAL;
}
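
IB_FLOW_SPEC_MPLS matching is context sensitive, which is why parse_flow_attr() now receives prev_type: a preceding UDP spec selects outer_first_mpls_over_udp, a preceding GRE spec selects outer_first_mpls_over_gre, and anything else falls back to inner_first_mpls or outer_first_mpls depending on the IB_FLOW_SPEC_INNER bit, each gated by the matching ft_field_support capability. A compact sketch of the dispatch with the field locations reduced to an enum:

#include <stdio.h>

enum prev_spec { PREV_NONE, PREV_UDP, PREV_GRE };
enum mpls_loc  { OUTER_MPLS, INNER_MPLS, MPLS_OVER_UDP, MPLS_OVER_GRE };

static enum mpls_loc mpls_location(enum prev_spec prev, int inner)
{
	switch (prev) {
	case PREV_UDP:
		return MPLS_OVER_UDP;
	case PREV_GRE:
		return MPLS_OVER_GRE;
	default:
		return inner ? INNER_MPLS : OUTER_MPLS;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       mpls_location(PREV_UDP, 0),
	       mpls_location(PREV_GRE, 0),
	       mpls_location(PREV_NONE, 1));
	return 0;
}
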
@@ -2867,6 +2966,17 @@ static void put_flow_table(struct mlx5_ib_dev *dev,
}
}
+static void counters_clear_description(struct ib_counters *counters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ kfree(mcounters->counters_data);
+ mcounters->counters_data = NULL;
+ mcounters->cntrs_max_index = 0;
+ mutex_unlock(&mcounters->mcntrs_mutex);
+}
+
static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
@@ -2886,8 +2996,11 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
mlx5_del_flow_rules(handler->rule);
put_flow_table(dev, handler->prio, true);
- mutex_unlock(&dev->flow_db->lock);
+ if (handler->ibcounters &&
+ atomic_read(&handler->ibcounters->usecnt) == 1)
+ counters_clear_description(handler->ibcounters);
+ mutex_unlock(&dev->flow_db->lock);
kfree(handler);
return 0;
@@ -3007,21 +3120,143 @@ static void set_underlay_qp(struct mlx5_ib_dev *dev,
}
}
+static int read_flow_counters(struct ib_device *ibdev,
+ struct mlx5_read_counters_attr *read_attr)
+{
+ struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ return mlx5_fc_query(dev->mdev, fc,
+ &read_attr->out[IB_COUNTER_PACKETS],
+ &read_attr->out[IB_COUNTER_BYTES]);
+}
+
+/* flow counters currently expose two counters: packets and bytes */
+#define FLOW_COUNTERS_NUM 2
+static int counters_set_description(struct ib_counters *counters,
+ enum mlx5_ib_counters_type counters_type,
+ struct mlx5_ib_flow_counters_desc *desc_data,
+ u32 ncounters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+ u32 cntrs_max_index = 0;
+ int i;
+
+ if (counters_type != MLX5_IB_COUNTERS_FLOW)
+ return -EINVAL;
+
+ /* init the fields for the object */
+ mcounters->type = counters_type;
+ mcounters->read_counters = read_flow_counters;
+ mcounters->counters_num = FLOW_COUNTERS_NUM;
+ mcounters->ncounters = ncounters;
+ /* each counter entry has both a description and an index */
+ for (i = 0; i < ncounters; i++) {
+ if (desc_data[i].description > IB_COUNTER_BYTES)
+ return -EINVAL;
+
+ if (cntrs_max_index <= desc_data[i].index)
+ cntrs_max_index = desc_data[i].index + 1;
+ }
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ mcounters->counters_data = desc_data;
+ mcounters->cntrs_max_index = cntrs_max_index;
+ mutex_unlock(&mcounters->mcntrs_mutex);
+
+ return 0;
+}
+
+#define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
+static int flow_counters_set_data(struct ib_counters *ibcounters,
+ struct mlx5_ib_create_flow *ucmd)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
+ struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
+ struct mlx5_ib_flow_counters_desc *desc_data = NULL;
+ bool hw_hndl = false;
+ int ret = 0;
+
+ if (ucmd && ucmd->ncounters_data != 0) {
+ cntrs_data = ucmd->data;
+ if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
+ return -EINVAL;
+
+ desc_data = kcalloc(cntrs_data->ncounters,
+ sizeof(*desc_data),
+ GFP_KERNEL);
+ if (!desc_data)
+ return -ENOMEM;
+
+ if (copy_from_user(desc_data,
+ u64_to_user_ptr(cntrs_data->counters_data),
+ sizeof(*desc_data) * cntrs_data->ncounters)) {
+ ret = -EFAULT;
+ goto free;
+ }
+ }
+
+ if (!mcounters->hw_cntrs_hndl) {
+ mcounters->hw_cntrs_hndl = mlx5_fc_create(
+ to_mdev(ibcounters->device)->mdev, false);
+ if (!mcounters->hw_cntrs_hndl) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ hw_hndl = true;
+ }
+
+ if (desc_data) {
+ /* counters already bound to at least one flow */
+ if (mcounters->cntrs_max_index) {
+ ret = -EINVAL;
+ goto free_hndl;
+ }
+
+ ret = counters_set_description(ibcounters,
+ MLX5_IB_COUNTERS_FLOW,
+ desc_data,
+ cntrs_data->ncounters);
+ if (ret)
+ goto free_hndl;
+
+ } else if (!mcounters->cntrs_max_index) {
+ /* counters not bound yet, must have udata passed */
+ ret = -EINVAL;
+ goto free_hndl;
+ }
+
+ return 0;
+
+free_hndl:
+ if (hw_hndl) {
+ mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
+ mcounters->hw_cntrs_hndl);
+ mcounters->hw_cntrs_hndl = NULL;
+ }
+free:
+ kfree(desc_data);
+ return ret;
+}
+
static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
const struct ib_flow_attr *flow_attr,
struct mlx5_flow_destination *dst,
- u32 underlay_qpn)
+ u32 underlay_qpn,
+ struct mlx5_ib_create_flow *ucmd)
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
struct mlx5_flow_spec *spec;
- struct mlx5_flow_destination *rule_dst = dst;
+ struct mlx5_flow_destination dest_arr[2] = {};
+ struct mlx5_flow_destination *rule_dst = dest_arr;
const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
+ u32 prev_type = 0;
int err = 0;
- int dest_num = 1;
+ int dest_num = 0;
bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
if (!is_valid_attr(dev->mdev, flow_attr))
@@ -3035,14 +3270,20 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
}
INIT_LIST_HEAD(&handler->list);
+ if (dst) {
+ memcpy(&dest_arr[0], dst, sizeof(*dst));
+ dest_num++;
+ }
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
err = parse_flow_attr(dev->mdev, spec->match_criteria,
spec->match_value,
- ib_flow, flow_attr, &flow_act);
+ ib_flow, flow_attr, &flow_act,
+ prev_type);
if (err < 0)
goto free;
+ prev_type = ((union ib_flow_spec *)ib_flow)->type;
ib_flow += ((union ib_flow_spec *)ib_flow)->size;
}
@@ -3069,15 +3310,30 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
goto free;
}
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ err = flow_counters_set_data(flow_act.counters, ucmd);
+ if (err)
+ goto free;
+
+ handler->ibcounters = flow_act.counters;
+ dest_arr[dest_num].type =
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest_arr[dest_num].counter =
+ to_mcounters(flow_act.counters)->hw_cntrs_hndl;
+ dest_num++;
+ }
+
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
- rule_dst = NULL;
- dest_num = 0;
+ if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
+ rule_dst = NULL;
+ dest_num = 0;
+ }
} else {
if (is_egress)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
else
flow_act.action |=
- dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+ dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
}
@@ -3103,8 +3359,12 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
ft_prio->flow_table = ft;
free:
- if (err)
+ if (err && handler) {
+ if (handler->ibcounters &&
+ atomic_read(&handler->ibcounters->usecnt) == 1)
+ counters_clear_description(handler->ibcounters);
kfree(handler);
+ }
kvfree(spec);
return err ? ERR_PTR(err) : handler;
}
@@ -3114,7 +3374,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
const struct ib_flow_attr *flow_attr,
struct mlx5_flow_destination *dst)
{
- return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0);
+ return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}
static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
@@ -3244,7 +3504,8 @@ err:
static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
- int domain)
+ int domain,
+ struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
@@ -3253,9 +3514,44 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
struct mlx5_ib_flow_prio *ft_prio;
bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
+ struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
+ size_t min_ucmd_sz, required_ucmd_sz;
int err;
int underlay_qpn;
+ if (udata && udata->inlen) {
+ min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
+ sizeof(ucmd_hdr.reserved);
+ if (udata->inlen < min_ucmd_sz)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
+ if (err)
+ return ERR_PTR(err);
+
+ /* currently supports only one counters data */
+ if (ucmd_hdr.ncounters_data > 1)
+ return ERR_PTR(-EINVAL);
+
+ required_ucmd_sz = min_ucmd_sz +
+ sizeof(struct mlx5_ib_flow_counters_data) *
+ ucmd_hdr.ncounters_data;
+ if (udata->inlen > required_ucmd_sz &&
+ !ib_is_udata_cleared(udata, required_ucmd_sz,
+ udata->inlen - required_ucmd_sz))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
+ if (!ucmd)
+ return ERR_PTR(-ENOMEM);
+
+ err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
+ if (err) {
+ kfree(ucmd);
+ return ERR_PTR(err);
+ }
+ }
+
if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
return ERR_PTR(-ENOMEM);
@@ -3309,7 +3605,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
mqp->underlay_qpn : 0;
handler = _create_flow_rule(dev, ft_prio, flow_attr,
- dst, underlay_qpn);
+ dst, underlay_qpn, ucmd);
}
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
@@ -3330,6 +3626,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
mutex_unlock(&dev->flow_db->lock);
kfree(dst);
+ kfree(ucmd);
return &handler->ibflow;
@@ -3340,6 +3637,7 @@ destroy_ft:
unlock:
mutex_unlock(&dev->flow_db->lock);
kfree(dst);
+ kfree(ucmd);
kfree(handler);
return ERR_PTR(err);
}
@@ -5000,6 +5298,76 @@ static void depopulate_specs_root(struct mlx5_ib_dev *dev)
uverbs_free_spec_tree(dev->ib_dev.specs_root);
}
+static int mlx5_ib_read_counters(struct ib_counters *counters,
+ struct ib_counters_read_attr *read_attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+ struct mlx5_read_counters_attr mread_attr = {};
+ struct mlx5_ib_flow_counters_desc *desc;
+ int ret, i;
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ if (mcounters->cntrs_max_index > read_attr->ncounters) {
+ ret = -EINVAL;
+ goto err_bound;
+ }
+
+ mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
+ GFP_KERNEL);
+ if (!mread_attr.out) {
+ ret = -ENOMEM;
+ goto err_bound;
+ }
+
+ mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
+ mread_attr.flags = read_attr->flags;
+ ret = mcounters->read_counters(counters->device, &mread_attr);
+ if (ret)
+ goto err_read;
+
+ /* do the pass over the counters data array to assign according to the
+ * descriptions and indexing pairs
+ */
+ desc = mcounters->counters_data;
+ for (i = 0; i < mcounters->ncounters; i++)
+ read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
+
+err_read:
+ kfree(mread_attr.out);
+err_bound:
+ mutex_unlock(&mcounters->mcntrs_mutex);
+ return ret;
+}
+
+static int mlx5_ib_destroy_counters(struct ib_counters *counters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+
+ counters_clear_description(counters);
+ if (mcounters->hw_cntrs_hndl)
+ mlx5_fc_destroy(to_mdev(counters->device)->mdev,
+ mcounters->hw_cntrs_hndl);
+
+ kfree(mcounters);
+
+ return 0;
+}
+
+static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_mcounters *mcounters;
+
+ mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
+ if (!mcounters)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&mcounters->mcntrs_mutex);
+
+ return &mcounters->ibcntrs;
+}
+
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
@@ -5243,6 +5611,9 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action;
dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp;
dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
+ dev->ib_dev.create_counters = mlx5_ib_create_counters;
+ dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters;
+ dev->ib_dev.read_counters = mlx5_ib_read_counters;
err = init_node_data(dev);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 49a1aa0ff429c..d89c8fe626f6b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -175,6 +175,7 @@ struct mlx5_ib_flow_handler {
struct ib_flow ibflow;
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_handle *rule;
+ struct ib_counters *ibcounters;
};
struct mlx5_ib_flow_db {
@@ -813,6 +814,41 @@ struct mlx5_memic {
DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};
+struct mlx5_read_counters_attr {
+ struct mlx5_fc *hw_cntrs_hndl;
+ u64 *out;
+ u32 flags;
+};
+
+enum mlx5_ib_counters_type {
+ MLX5_IB_COUNTERS_FLOW,
+};
+
+struct mlx5_ib_mcounters {
+ struct ib_counters ibcntrs;
+ enum mlx5_ib_counters_type type;
+ /* number of counters supported for this counters type */
+ u32 counters_num;
+ struct mlx5_fc *hw_cntrs_hndl;
+ /* read function for this counters type */
+ int (*read_counters)(struct ib_device *ibdev,
+ struct mlx5_read_counters_attr *read_attr);
+ /* max index set as part of create_flow */
+ u32 cntrs_max_index;
+ /* number of counters data entries (<description,index> pair) */
+ u32 ncounters;
+ /* counters data array for descriptions and indexes */
+ struct mlx5_ib_flow_counters_desc *counters_data;
+ /* protects access to mcounters internal data */
+ struct mutex mcntrs_mutex;
+};
+
+static inline struct mlx5_ib_mcounters *
+to_mcounters(struct ib_counters *ibcntrs)
+{
+ return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
+}
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
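
to_mcounters() above is the usual container_of() wrapper: the core layer hands the driver a pointer to the embedded struct ib_counters, and the driver recovers its own enclosing structure from it. A standalone sketch of the idiom, using stub type names rather than the real ones:

#include <stddef.h>

/* Recover the wrapping structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_counters_stub { int unused; };

struct mcounters_stub {
	struct ib_counters_stub ibcntrs;	/* embedded core object */
	unsigned int ncounters;			/* driver-private state */
};

static inline struct mcounters_stub *
to_mcounters_stub(struct ib_counters_stub *ibcntrs)
{
	return container_of(ibcntrs, struct mcounters_stub, ibcntrs);
}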
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2193dc1765fb2..a4f1f638509fe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -54,6 +54,7 @@ enum {
enum {
MLX5_IB_SQ_STRIDE = 6,
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};
static const u32 mlx5_ib_opcode[] = {
@@ -302,7 +303,9 @@ static int sq_overhead(struct ib_qp_init_attr *attr)
max(sizeof(struct mlx5_wqe_atomic_seg) +
sizeof(struct mlx5_wqe_raddr_seg),
sizeof(struct mlx5_wqe_umr_ctrl_seg) +
- sizeof(struct mlx5_mkey_seg));
+ sizeof(struct mlx5_mkey_seg) +
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
+ MLX5_IB_UMR_OCTOWORD);
break;
case IB_QPT_XRC_TGT:
@@ -3641,13 +3644,15 @@ static __be64 sig_mkey_mask(void)
}
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr)
+ struct mlx5_ib_mr *mr, bool umr_inline)
{
int size = mr->ndescs * mr->desc_size;
memset(umr, 0, sizeof(*umr));
umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ if (umr_inline)
+ umr->flags |= MLX5_UMR_INLINE;
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
umr->mkey_mask = frwr_mkey_mask();
}
@@ -3831,6 +3836,24 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}
+static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp,
+ struct mlx5_ib_mr *mr, int mr_list_size)
+{
+ void *qend = qp->sq.qend;
+ void *addr = mr->descs;
+ int copy;
+
+ if (unlikely(seg + mr_list_size > qend)) {
+ copy = qend - seg;
+ memcpy(seg, addr, copy);
+ addr += copy;
+ mr_list_size -= copy;
+ seg = mlx5_get_send_wqe(qp, 0);
+ }
+ memcpy(seg, addr, mr_list_size);
+ seg += mr_list_size;
+}
+
static __be32 send_ieth(struct ib_send_wr *wr)
{
switch (wr->opcode) {
@@ -4225,6 +4248,8 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ int mr_list_size = mr->ndescs * mr->desc_size;
+ bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
mlx5_ib_warn(to_mdev(qp->ibqp.device),
@@ -4232,7 +4257,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
return -EINVAL;
}
- set_reg_umr_seg(*seg, mr);
+ set_reg_umr_seg(*seg, mr, umr_inline);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((*seg == qp->sq.qend)))
@@ -4244,10 +4269,14 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
if (unlikely((*seg == qp->sq.qend)))
*seg = mlx5_get_send_wqe(qp, 0);
- set_reg_data_seg(*seg, mr, pd);
- *seg += sizeof(struct mlx5_wqe_data_seg);
- *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
-
+ if (umr_inline) {
+ set_reg_umr_inline_seg(*seg, qp, mr, mr_list_size);
+ *size += get_xlt_octo(mr_list_size);
+ } else {
+ set_reg_data_seg(*seg, mr, pd);
+ *seg += sizeof(struct mlx5_wqe_data_seg);
+ *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+ }
return 0;
}
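
set_reg_umr_inline_seg() above copies the MR descriptor list directly into the send queue, which is a circular buffer, so a list that would run past the end of the queue is split into two copies, the second starting again at WQE 0. A minimal sketch of that wrap-around copy with generic names:

#include <stdint.h>
#include <string.h>

/* Copy len bytes into a circular buffer of ring_size bytes starting at
 * offset, wrapping to the start of the buffer if the copy would run
 * past the end. */
static void copy_to_ring(uint8_t *ring_base, size_t ring_size,
			 size_t offset, const void *src, size_t len)
{
	const uint8_t *addr = src;

	if (offset + len > ring_size) {
		size_t first = ring_size - offset;

		memcpy(ring_base + offset, addr, first);
		addr += first;
		len -= first;
		offset = 0;	/* wrap to the start of the queue */
	}
	memcpy(ring_base + offset, addr, len);
}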
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index e2caabb8a9266..710032f1fad7e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -414,7 +414,7 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
DP_ERR(dev,
- "failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n",
+ "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
(void *)vma->vm_start, (void *)vma->vm_end);
return -EINVAL;
}
@@ -2577,7 +2577,7 @@ static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
u32 pbes_in_page;
if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
- DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages);
+ DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 46072455130c9..3461df002f819 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1228,6 +1228,7 @@ static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
#define QIB_BADINTR 0x8000 /* severe interrupt problems */
#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
+#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
/*
* values for ppd->lflags (_ib_port_ related flags)
@@ -1423,8 +1424,7 @@ u64 qib_sps_ints(void);
/*
* dma_addr wrappers - all 0's invalid for hw
*/
-dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
- size_t, int);
+int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
/*
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 6a8800b65047f..98e1ce14fa2ab 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
goto done;
}
for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
+ dma_addr_t daddr;
+
for (; ntids--; tid++) {
if (tid == tidcnt)
tid = 0;
@@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
ret = -ENOMEM;
break;
}
+ ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
+ if (ret)
+ break;
+
tidlist[i] = tid + tidoff;
/* we "know" system pages and TID pages are same size */
dd->pageshadow[ctxttid + tid] = pagep[i];
- dd->physshadow[ctxttid + tid] =
- qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dd->physshadow[ctxttid + tid] = daddr;
/*
* don't need atomic or it's overhead
*/
@@ -868,7 +872,7 @@ bail:
/*
* qib_file_vma_fault - handle a VMA page fault.
*/
-static int qib_file_vma_fault(struct vm_fault *vmf)
+static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf)
{
struct page *page;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 6c68f8a97018c..0155202897352 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -841,6 +841,10 @@ static void qib_shutdown_device(struct qib_devdata *dd)
struct qib_pportdata *ppd;
unsigned pidx;
+ if (dd->flags & QIB_SHUTDOWN)
+ return;
+ dd->flags |= QIB_SHUTDOWN;
+
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1182,6 +1186,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
+static void qib_shutdown_one(struct pci_dev *);
#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
@@ -1199,6 +1204,7 @@ static struct pci_driver qib_driver = {
.name = QIB_DRV_NAME,
.probe = qib_init_one,
.remove = qib_remove_one,
+ .shutdown = qib_shutdown_one,
.id_table = qib_pci_tbl,
.err_handler = &qib_pci_err_handler,
};
@@ -1549,6 +1555,13 @@ static void qib_remove_one(struct pci_dev *pdev)
qib_postinit_cleanup(dd);
}
+static void qib_shutdown_one(struct pci_dev *pdev)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+
+ qib_shutdown_device(dd);
+}
+
/**
* qib_create_rcvhdrq - create a receive header queue
* @dd: the qlogic_ib device
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index c9955d48c50f4..f35fdeb143479 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1828,7 +1828,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
/* OK, process the packet. */
switch (opcode) {
case OP(SEND_FIRST):
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -1849,7 +1849,7 @@ send_middle:
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
/* consume RWQE */
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -1858,7 +1858,7 @@ send_middle:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -1949,7 +1949,7 @@ send_last:
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
goto no_immediate_data;
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto nack_op_err;
if (!ret) {
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 4662cc7bde929..f8a7de795beb6 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -38,156 +38,6 @@
#include "qib_mad.h"
/*
- * Validate a RWQE and fill in the SGE state.
- * Return 1 if OK.
- */
-static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
-{
- int i, j, ret;
- struct ib_wc wc;
- struct rvt_lkey_table *rkt;
- struct rvt_pd *pd;
- struct rvt_sge_state *ss;
-
- rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
- pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
- ss = &qp->r_sge;
- ss->sg_list = qp->r_sg_list;
- qp->r_len = 0;
- for (i = j = 0; i < wqe->num_sge; i++) {
- if (wqe->sg_list[i].length == 0)
- continue;
- /* Check LKEY */
- ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- NULL, &wqe->sg_list[i],
- IB_ACCESS_LOCAL_WRITE);
- if (unlikely(ret <= 0))
- goto bad_lkey;
- qp->r_len += wqe->sg_list[i].length;
- j++;
- }
- ss->num_sge = j;
- ss->total_len = qp->r_len;
- ret = 1;
- goto bail;
-
-bad_lkey:
- while (j) {
- struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
-
- rvt_put_mr(sge->mr);
- }
- ss->num_sge = 0;
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr_id;
- wc.status = IB_WC_LOC_PROT_ERR;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- /* Signal solicited completion event. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * qib_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return -1 if there is a local error, 0 if no RWQE is available,
- * otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
-{
- unsigned long flags;
- struct rvt_rq *rq;
- struct rvt_rwq *wq;
- struct rvt_srq *srq;
- struct rvt_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- u32 tail;
- int ret;
-
- if (qp->ibqp.srq) {
- srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ret = 0;
- goto unlock;
- }
-
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- if (unlikely(tail == wq->head)) {
- ret = 0;
- goto unlock;
- }
- /* Make sure entry is read after head index is read. */
- smp_rmb();
- wqe = rvt_get_rwqe_ptr(rq, tail);
- /*
- * Even though we update the tail index in memory, the verbs
- * consumer is not supposed to post more entries until a
- * completion is generated.
- */
- if (++tail >= rq->size)
- tail = 0;
- wq->tail = tail;
- if (!wr_id_only && !qib_init_sge(qp, wqe)) {
- ret = -1;
- goto unlock;
- }
- qp->r_wr_id = wqe->wr_id;
-
- ret = 1;
- set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
- if (handler) {
- u32 n;
-
- /*
- * Validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- goto bail;
- }
- }
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
-bail:
- return ret;
-}
-
-/*
* Switch to alternate path.
* The QP s_lock should be held and interrupts disabled.
*/
@@ -419,7 +269,7 @@ again:
wc.ex.imm_data = wqe->wr.ex.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -431,7 +281,7 @@ again:
goto inv_err;
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 840eec6ebc33b..3e54bc11e0ae2 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -335,7 +335,7 @@ send_first:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
qp->r_sge = qp->s_rdma_read_sge;
else {
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
if (!ret)
@@ -471,7 +471,7 @@ rdma_last_imm:
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
rvt_put_ss(&qp->s_rdma_read_sge);
else {
- ret = qib_get_rwqe(qp, 1);
+ ret = rvt_get_rwqe(qp, true);
if (ret < 0)
goto op_err;
if (!ret)
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 3e4ff77260c2a..f8d029a2390ff 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -139,7 +139,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
else {
int ret;
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock;
@@ -534,7 +534,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
else {
int ret;
- ret = qib_get_rwqe(qp, 0);
+ ret = rvt_get_rwqe(qp, false);
if (ret < 0) {
rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index ce83ba9a12eff..16543d5e80c3a 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -99,23 +99,27 @@ bail:
*
* I'm sure we won't be so lucky with other iommu's, so FIXME.
*/
-dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
+int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
{
dma_addr_t phys;
- phys = pci_map_page(hwdev, page, offset, size, direction);
+ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(hwdev, phys))
+ return -ENOMEM;
- if (phys == 0) {
- pci_unmap_page(hwdev, phys, size, direction);
- phys = pci_map_page(hwdev, page, offset, size, direction);
+ if (!phys) {
+ pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(hwdev, phys))
+ return -ENOMEM;
/*
* FIXME: If we get 0 again, we should keep this page,
* map another, then free the 0 page.
*/
}
-
- return phys;
+ *daddr = phys;
+ return 0;
}
/**
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 3977abbc83ad6..14b4057a2b8f9 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -1631,10 +1631,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
- snprintf(dd->verbs_dev.rdi.dparms.cq_name,
- sizeof(dd->verbs_dev.rdi.dparms.cq_name),
- "qib_cq%d", dd->unit);
-
qib_fill_device_attr(dd);
ppd = dd->pport;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index f887737ac1421..f9a46768a19a4 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -321,8 +321,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
void mr_rcu_callback(struct rcu_head *list);
-int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-
void qib_migrate_qp(struct rvt_qp *qp);
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index 2b5513da7e839..98e798007f75c 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
- depends on 64BIT
+ depends on 64BIT && ARCH_DMA_ADDR_T_64BIT
depends on PCI
select DMA_VIRT_OPS
---help---
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index fb52b669bfce5..4f1544ad4affb 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -47,11 +47,12 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/kthread.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"
+static struct workqueue_struct *comp_vector_wq;
+
/**
* rvt_cq_enter - add a new entry to the completion queue
* @cq: completion queue
@@ -124,20 +125,17 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
* This will cause send_complete() to be called in
* another thread.
*/
- spin_lock(&cq->rdi->n_cqs_lock);
- if (likely(cq->rdi->worker)) {
- cq->notify = RVT_CQ_NONE;
- cq->triggered++;
- kthread_queue_work(cq->rdi->worker, &cq->comptask);
- }
- spin_unlock(&cq->rdi->n_cqs_lock);
+ cq->notify = RVT_CQ_NONE;
+ cq->triggered++;
+ queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
+ &cq->comptask);
}
spin_unlock_irqrestore(&cq->lock, flags);
}
EXPORT_SYMBOL(rvt_cq_enter);
-static void send_complete(struct kthread_work *work)
+static void send_complete(struct work_struct *work)
{
struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
@@ -189,6 +187,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
struct ib_cq *ret;
u32 sz;
unsigned int entries = attr->cqe;
+ int comp_vector = attr->comp_vector;
if (attr->flags)
return ERR_PTR(-EINVAL);
@@ -196,6 +195,11 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
if (entries < 1 || entries > rdi->dparms.props.max_cqe)
return ERR_PTR(-EINVAL);
+ if (comp_vector < 0)
+ comp_vector = 0;
+
+ comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;
+
/* Allocate the completion queue structure. */
cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
if (!cq)
@@ -264,14 +268,22 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
* an error.
*/
cq->rdi = rdi;
+ if (rdi->driver_f.comp_vect_cpu_lookup)
+ cq->comp_vector_cpu =
+ rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
+ else
+ cq->comp_vector_cpu =
+ cpumask_first(cpumask_of_node(rdi->dparms.node));
+
cq->ibcq.cqe = entries;
cq->notify = RVT_CQ_NONE;
spin_lock_init(&cq->lock);
- kthread_init_work(&cq->comptask, send_complete);
+ INIT_WORK(&cq->comptask, send_complete);
cq->queue = wc;
ret = &cq->ibcq;
+ trace_rvt_create_cq(cq, attr);
goto done;
bail_ip:
@@ -297,7 +309,7 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
struct rvt_dev_info *rdi = cq->rdi;
- kthread_flush_work(&cq->comptask);
+ flush_work(&cq->comptask);
spin_lock_irq(&rdi->n_cqs_lock);
rdi->n_cqs_allocated--;
spin_unlock_irq(&rdi->n_cqs_lock);
@@ -507,24 +519,13 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
*
* Return: 0 on success
*/
-int rvt_driver_cq_init(struct rvt_dev_info *rdi)
+int rvt_driver_cq_init(void)
{
- int cpu;
- struct kthread_worker *worker;
-
- if (rdi->worker)
- return 0;
-
- spin_lock_init(&rdi->n_cqs_lock);
-
- cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
- worker = kthread_create_worker_on_cpu(cpu, 0,
- "%s", rdi->dparms.cq_name);
- if (IS_ERR(worker))
- return PTR_ERR(worker);
+ comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+ 0, "rdmavt_cq");
+ if (!comp_vector_wq)
+ return -ENOMEM;
- set_user_nice(worker->task, MIN_NICE);
- rdi->worker = worker;
return 0;
}
@@ -532,19 +533,8 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
* rvt_cq_exit - tear down cq resources
* @rdi: rvt dev structure
*/
-void rvt_cq_exit(struct rvt_dev_info *rdi)
+void rvt_cq_exit(void)
{
- struct kthread_worker *worker;
-
- /* block future queuing from send_complete() */
- spin_lock_irq(&rdi->n_cqs_lock);
- worker = rdi->worker;
- if (!worker) {
- spin_unlock_irq(&rdi->n_cqs_lock);
- return;
- }
- rdi->worker = NULL;
- spin_unlock_irq(&rdi->n_cqs_lock);
-
- kthread_destroy_worker(worker);
+ destroy_workqueue(comp_vector_wq);
+ comp_vector_wq = NULL;
}
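
The rdmavt change above replaces the per-device kthread worker with one module-wide workqueue and queues each completion on the CPU looked up for its completion vector. A kernel-style sketch of that shape with made-up names; it assumes only the standard workqueue API (alloc_workqueue(), INIT_WORK(), queue_work_on(), flush_work(), destroy_workqueue()):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_comp_wq;

struct example_cq {
	struct work_struct comptask;
	int comp_vector_cpu;
};

static void example_send_complete(struct work_struct *work)
{
	struct example_cq *cq = container_of(work, struct example_cq, comptask);

	/* run the consumer's completion handler for cq here */
	(void)cq;
}

static int example_comp_init(void)
{
	example_comp_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					  0, "example_cq");
	return example_comp_wq ? 0 : -ENOMEM;
}

static void example_cq_setup(struct example_cq *cq, int cpu)
{
	cq->comp_vector_cpu = cpu;
	INIT_WORK(&cq->comptask, example_send_complete);
}

static void example_cq_trigger(struct example_cq *cq)
{
	/* run completions on the CPU tied to this CQ's completion vector */
	queue_work_on(cq->comp_vector_cpu, example_comp_wq, &cq->comptask);
}

static void example_comp_exit(void)
{
	flush_work(NULL);	/* per-CQ teardown would flush_work(&cq->comptask) first */
	destroy_workqueue(example_comp_wq);
	example_comp_wq = NULL;
}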
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
index 6182c29eff66b..72184b1c176ba 100644
--- a/drivers/infiniband/sw/rdmavt/cq.h
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -2,7 +2,7 @@
#define DEF_RVTCQ_H
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -59,6 +59,6 @@ int rvt_destroy_cq(struct ib_cq *ibcq);
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-int rvt_driver_cq_init(struct rvt_dev_info *rdi);
-void rvt_cq_exit(struct rvt_dev_info *rdi);
+int rvt_driver_cq_init(void);
+void rvt_cq_exit(void);
#endif /* DEF_RVTCQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index c82e6bb3d77cf..40046135c5099 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1987,6 +1987,155 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
return 0;
}
+/*
+ * Validate a RWQE and fill in the SGE state.
+ * Return 1 if OK.
+ */
+static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
+{
+ int i, j, ret;
+ struct ib_wc wc;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_sge_state *ss;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ rkt = &rdi->lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ ss = &qp->r_sge;
+ ss->sg_list = qp->r_sg_list;
+ qp->r_len = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ NULL, &wqe->sg_list[i],
+ IB_ACCESS_LOCAL_WRITE);
+ if (unlikely(ret <= 0))
+ goto bad_lkey;
+ qp->r_len += wqe->sg_list[i].length;
+ j++;
+ }
+ ss->num_sge = j;
+ ss->total_len = qp->r_len;
+ return 1;
+
+bad_lkey:
+ while (j) {
+ struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+
+ rvt_put_mr(sge->mr);
+ }
+ ss->num_sge = 0;
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ /* Signal solicited completion event. */
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ return 0;
+}
+
+/**
+ * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
+ * @qp: the QP
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
+ *
+ * Return -1 if there is a local error, 0 if no RWQE is available,
+ * otherwise return 1.
+ *
+ * Can be called from interrupt level.
+ */
+int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
+{
+ unsigned long flags;
+ struct rvt_rq *rq;
+ struct rvt_rwq *wq;
+ struct rvt_srq *srq;
+ struct rvt_rwqe *wqe;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ int ret;
+
+ if (qp->ibqp.srq) {
+ srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ handler = NULL;
+ rq = &qp->r_rq;
+ }
+
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ wq = rq->wq;
+ tail = wq->tail;
+ /* Validate tail before using it since it is user writable. */
+ if (tail >= rq->size)
+ tail = 0;
+ if (unlikely(tail == wq->head)) {
+ ret = 0;
+ goto unlock;
+ }
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
+ wqe = rvt_get_rwqe_ptr(rq, tail);
+ /*
+ * Even though we update the tail index in memory, the verbs
+ * consumer is not supposed to post more entries until a
+ * completion is generated.
+ */
+ if (++tail >= rq->size)
+ tail = 0;
+ wq->tail = tail;
+ if (!wr_id_only && !init_sge(qp, wqe)) {
+ ret = -1;
+ goto unlock;
+ }
+ qp->r_wr_id = wqe->wr_id;
+
+ ret = 1;
+ set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
+ if (handler) {
+ u32 n;
+
+ /*
+ * Validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ n = wq->head;
+ if (n >= rq->size)
+ n = 0;
+ if (n < tail)
+ n += rq->size - tail;
+ else
+ n -= tail;
+ if (n < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ handler(&ev, srq->ibsrq.srq_context);
+ goto bail;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rvt_get_rwqe);
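
rvt_get_rwqe() consumes receive WQEs from a ring whose head and tail indices live in memory that userspace can also write, so the tail is clamped before it is used and smp_rmb() orders the WQE read after the head read. A simplified userspace analogue of that consume step, using a C11 acquire load in place of the kernel barrier (the layout and names are assumptions):

#include <stdatomic.h>
#include <stdint.h>

struct ring {
	_Atomic uint32_t head;		/* advanced by the producer */
	uint32_t tail;			/* advanced by this consumer */
	uint32_t size;
	uint64_t entries[];
};

/* Pop one entry; returns 1 on success, 0 if the ring is empty. */
static int ring_consume(struct ring *rq, uint64_t *out)
{
	uint32_t tail = rq->tail;
	uint32_t head;

	/* tail is shared (and untrusted), so clamp it before indexing */
	if (tail >= rq->size)
		tail = 0;

	head = atomic_load_explicit(&rq->head, memory_order_acquire);
	if (tail == head)
		return 0;

	/* the acquire above orders this read after the head read */
	*out = rq->entries[tail];

	if (++tail >= rq->size)
		tail = 0;
	rq->tail = tail;
	return 1;
}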
+
/**
* qp_comm_est - handle trap with QP established
* @qp: the QP
@@ -2076,7 +2225,7 @@ void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
to = rvt_aeth_to_usec(aeth);
trace_rvt_rnrnak_add(qp, to);
hrtimer_start(&qp->s_rnr_timer,
- ns_to_ktime(1000 * to), HRTIMER_MODE_REL);
+ ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
diff --git a/drivers/infiniband/sw/rdmavt/trace_cq.h b/drivers/infiniband/sw/rdmavt/trace_cq.h
index a315850aa9bbd..df8e1adbef9df 100644
--- a/drivers/infiniband/sw/rdmavt/trace_cq.h
+++ b/drivers/infiniband/sw/rdmavt/trace_cq.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -71,6 +71,39 @@ __print_symbolic(opcode, \
wc_opcode_name(RECV), \
wc_opcode_name(RECV_RDMA_WITH_IMM))
+#define CQ_ATTR_PRINT \
+"[%s] user cq %s cqe %u comp_vector %d comp_vector_cpu %d flags %x"
+
+DECLARE_EVENT_CLASS(rvt_cq_template,
+ TP_PROTO(struct rvt_cq *cq,
+ const struct ib_cq_init_attr *attr),
+ TP_ARGS(cq, attr),
+ TP_STRUCT__entry(RDI_DEV_ENTRY(cq->rdi)
+ __field(struct rvt_mmap_info *, ip)
+ __field(unsigned int, cqe)
+ __field(int, comp_vector)
+ __field(int, comp_vector_cpu)
+ __field(u32, flags)
+ ),
+ TP_fast_assign(RDI_DEV_ASSIGN(cq->rdi)
+ __entry->ip = cq->ip;
+ __entry->cqe = attr->cqe;
+ __entry->comp_vector = attr->comp_vector;
+ __entry->comp_vector_cpu =
+ cq->comp_vector_cpu;
+ __entry->flags = attr->flags;
+ ),
+ TP_printk(CQ_ATTR_PRINT, __get_str(dev),
+ __entry->ip ? "true" : "false", __entry->cqe,
+ __entry->comp_vector, __entry->comp_vector_cpu,
+ __entry->flags
+ )
+);
+
+DEFINE_EVENT(rvt_cq_template, rvt_create_cq,
+ TP_PROTO(struct rvt_cq *cq, const struct ib_cq_init_attr *attr),
+ TP_ARGS(cq, attr));
+
#define CQ_PRN \
"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x"
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 434199d0bc96c..17e4abc067afa 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -49,6 +49,7 @@
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include "vt.h"
+#include "cq.h"
#include "trace.h"
#define RVT_UVERBS_ABI_VERSION 2
@@ -58,21 +59,18 @@ MODULE_DESCRIPTION("RDMA Verbs Transport Library");
static int rvt_init(void)
{
- /*
- * rdmavt does not need to do anything special when it starts up. All it
- * needs to do is sit and wait until a driver attempts registration.
- */
- return 0;
+ int ret = rvt_driver_cq_init();
+
+ if (ret)
+ pr_err("Error in driver CQ init.\n");
+
+ return ret;
}
module_init(rvt_init);
static void rvt_cleanup(void)
{
- /*
- * Nothing to do at exit time either. The module won't be able to be
- * removed until all drivers are gone which means all the dev structs
- * are gone so there is really nothing to do.
- */
+ rvt_cq_exit();
}
module_exit(rvt_cleanup);
@@ -777,11 +775,7 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
}
/* Completion queues */
- ret = rvt_driver_cq_init(rdi);
- if (ret) {
- pr_err("Error in driver CQ init.\n");
- goto bail_mr;
- }
+ spin_lock_init(&rdi->n_cqs_lock);
/* DMA Operations */
rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? : &dma_virt_ops;
@@ -829,14 +823,15 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
rdi->ibdev.node_type = RDMA_NODE_IB_CA;
- rdi->ibdev.num_comp_vectors = 1;
+ if (!rdi->ibdev.num_comp_vectors)
+ rdi->ibdev.num_comp_vectors = 1;
rdi->ibdev.driver_id = driver_id;
/* We are now good to announce we exist */
ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
if (ret) {
rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
- goto bail_cq;
+ goto bail_mr;
}
rvt_create_mad_agents(rdi);
@@ -844,9 +839,6 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
rvt_pr_info(rdi, "Registration with rdmavt done.\n");
return ret;
-bail_cq:
- rvt_cq_exit(rdi);
-
bail_mr:
rvt_mr_exit(rdi);
@@ -870,7 +862,6 @@ void rvt_unregister_device(struct rvt_dev_info *rdi)
rvt_free_mad_agents(rdi);
ib_unregister_device(&rdi->ibdev);
- rvt_cq_exit(rdi);
rvt_mr_exit(rdi);
rvt_qp_exit(rdi);
}
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index bad4a576d7cf1..67ae960ab523b 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,6 +1,7 @@
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
+ depends on !64BIT || ARCH_DMA_ADDR_T_64BIT
select NET_UDP_TUNNEL
select CRYPTO_CRC32
select DMA_VIRT_OPS
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index e493fdbd61c62..7121e1b1eb898 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -291,7 +291,7 @@ err1:
return err;
}
-int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
+void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
{
struct rxe_port *port = &rxe->port;
enum ib_mtu mtu;
@@ -303,10 +303,7 @@ int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
port->attr.active_mtu = mtu;
port->mtu_cap = ib_mtu_enum_to_int(mtu);
-
- return 0;
}
-EXPORT_SYMBOL(rxe_set_mtu);
/* called by ifc layer to create new rxe device.
* The caller should allocate memory for rxe by calling ib_alloc_device.
@@ -321,9 +318,7 @@ int rxe_add(struct rxe_dev *rxe, unsigned int mtu)
if (err)
goto err1;
- err = rxe_set_mtu(rxe, mtu);
- if (err)
- goto err1;
+ rxe_set_mtu(rxe, mtu);
err = rxe_register_device(rxe);
if (err)
@@ -335,7 +330,6 @@ err1:
rxe_dev_put(rxe);
return err;
}
-EXPORT_SYMBOL(rxe_add);
/* called by the ifc layer to remove a device */
void rxe_remove(struct rxe_dev *rxe)
@@ -344,7 +338,6 @@ void rxe_remove(struct rxe_dev *rxe)
rxe_dev_put(rxe);
}
-EXPORT_SYMBOL(rxe_remove);
static int __init rxe_module_init(void)
{
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 561ad307c6eca..d9ec2de687386 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -92,13 +92,13 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
return retval;
}
-int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu);
void rxe_remove(struct rxe_dev *rxe);
void rxe_remove_all(void);
-int rxe_rcv(struct sk_buff *skb);
+void rxe_rcv(struct sk_buff *skb);
static inline void rxe_dev_put(struct rxe_dev *rxe)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 6cdc40ed8a9f8..98d470d1f3fc2 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -355,10 +355,9 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
int ret;
- ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
payload_size(pkt), to_mem_obj, NULL);
if (ret)
@@ -374,12 +373,11 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
int ret;
u64 atomic_orig = atmack_orig(pkt);
- ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
sizeof(u64), to_mem_obj, NULL);
if (ret)
@@ -661,7 +659,6 @@ int rxe_completer(void *arg)
qp->qp_timeout_jiffies)
mod_timer(&qp->retrans_timer,
jiffies + qp->qp_timeout_jiffies);
- WARN_ON_ONCE(skb);
goto exit;
case COMPST_ERROR_RETRY:
@@ -675,7 +672,6 @@ int rxe_completer(void *arg)
/* there is nothing to retry in this case */
if (!wqe || (wqe->state == wqe_state_posted)) {
- WARN_ON_ONCE(skb);
goto exit;
}
@@ -704,7 +700,6 @@ int rxe_completer(void *arg)
skb = NULL;
}
- WARN_ON_ONCE(skb);
goto exit;
} else {
@@ -748,7 +743,6 @@ int rxe_completer(void *arg)
skb = NULL;
}
- WARN_ON_ONCE(skb);
goto exit;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index b71023c1c58bc..a51ece596c430 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -106,20 +106,20 @@ enum copy_direction {
from_mem_obj,
};
-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
int access, struct rxe_mem *mem);
-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
u64 length, u64 iova, int access, struct ib_udata *udata,
struct rxe_mem *mr);
-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
int max_pages, struct rxe_mem *mem);
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
int length, enum copy_direction dir, u32 *crcp);
-int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
+int copy_data(struct rxe_pd *pd, int access,
struct rxe_dma_info *dma, void *addr, int length,
enum copy_direction dir, u32 *crcp);
@@ -143,7 +143,7 @@ void rxe_mem_cleanup(struct rxe_pool_entry *arg);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
/* rxe_net.c */
-int rxe_loopback(struct sk_buff *skb);
+void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt);
@@ -268,7 +268,8 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
if (pkt->mask & RXE_LOOPBACK_MASK) {
memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
- err = rxe_loopback(skb);
+ rxe_loopback(skb);
+ err = 0;
} else {
err = rxe_send(pkt, skb);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 5c2684bf430f8..dff605fdf60fa 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -107,7 +107,7 @@ void rxe_mem_cleanup(struct rxe_pool_entry *arg)
}
}
-static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
+static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
{
int i;
int num_map;
@@ -145,7 +145,7 @@ err1:
return -ENOMEM;
}
-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
int access, struct rxe_mem *mem)
{
rxe_mem_init(access, mem);
@@ -158,7 +158,7 @@ int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
return 0;
}
-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
u64 length, u64 iova, int access, struct ib_udata *udata,
struct rxe_mem *mem)
{
@@ -184,7 +184,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
rxe_mem_init(access, mem);
- err = rxe_mem_alloc(rxe, mem, num_buf);
+ err = rxe_mem_alloc(mem, num_buf);
if (err) {
pr_warn("err %d from rxe_mem_alloc\n", err);
ib_umem_release(umem);
@@ -236,7 +236,7 @@ err1:
return err;
}
-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
int max_pages, struct rxe_mem *mem)
{
int err;
@@ -246,7 +246,7 @@ int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
/* In fastreg, we also set the rkey */
mem->ibmr.rkey = mem->ibmr.lkey;
- err = rxe_mem_alloc(rxe, mem, max_pages);
+ err = rxe_mem_alloc(mem, max_pages);
if (err)
goto err1;
@@ -434,7 +434,6 @@ err1:
* under the control of a dma descriptor
*/
int copy_data(
- struct rxe_dev *rxe,
struct rxe_pd *pd,
int access,
struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 9da6e37fb70c7..59ec6d918ed47 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -276,9 +276,12 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
pkt->mask = RXE_GRH_MASK;
pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
- return rxe_rcv(skb);
+ rxe_rcv(skb);
+
+ return 0;
drop:
kfree_skb(skb);
+
return 0;
}
@@ -315,7 +318,7 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
return sock;
}
-void rxe_release_udp_tunnel(struct socket *sk)
+static void rxe_release_udp_tunnel(struct socket *sk)
{
if (sk)
udp_tunnel_sock_release(sk);
@@ -517,9 +520,9 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return 0;
}
-int rxe_loopback(struct sk_buff *skb)
+void rxe_loopback(struct sk_buff *skb)
{
- return rxe_rcv(skb);
+ rxe_rcv(skb);
}
static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
@@ -562,11 +565,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
pkt->rxe = rxe;
pkt->port_num = port_num;
- pkt->hdr = skb_put(skb, paylen);
+ pkt->hdr = skb_put_zero(skb, paylen);
pkt->mask |= RXE_GRH_MASK;
- memset(pkt->hdr, 0, paylen);
-
dev_put(ndev);
return skb;
}
@@ -622,7 +623,6 @@ void rxe_remove_all(void)
}
spin_unlock_bh(&dev_list_lock);
}
-EXPORT_SYMBOL(rxe_remove_all);
static void rxe_port_event(struct rxe_dev *rxe,
enum ib_event_type event)
@@ -707,7 +707,7 @@ out:
return NOTIFY_OK;
}
-struct notifier_block rxe_net_notifier = {
+static struct notifier_block rxe_net_notifier = {
.notifier_call = rxe_notify,
};
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 728d8c71b36a5..106c586dbb268 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -43,9 +43,6 @@ struct rxe_recv_sockets {
struct socket *sk6;
};
-extern struct notifier_block rxe_net_notifier;
-void rxe_release_udp_tunnel(struct socket *sk);
-
struct rxe_dev *rxe_net_add(struct net_device *ndev);
int rxe_net_init(void);
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index dd80c7d9074ad..dfba44a40f0b4 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -311,7 +311,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
* increase the users of the skb then post to the next qp
*/
if (mce->qp_list.next != &mcg->qp_list)
- refcount_inc(&skb->users);
+ skb_get(skb);
pkt->qp = qp;
rxe_add_ref(qp);
@@ -345,7 +345,7 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
}
/* rxe_rcv is called from the interface driver */
-int rxe_rcv(struct sk_buff *skb)
+void rxe_rcv(struct sk_buff *skb)
{
int err;
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
@@ -403,12 +403,11 @@ int rxe_rcv(struct sk_buff *skb)
else
rxe_rcv_pkt(rxe, pkt, skb);
- return 0;
+ return;
drop:
if (pkt->qp)
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
- return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 7851999904570..f30eeba3f772c 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -490,7 +490,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
wqe->dma.resid -= paylen;
wqe->dma.sge_offset += paylen;
} else {
- err = copy_data(rxe, qp->pd, 0, &wqe->dma,
+ err = copy_data(qp->pd, 0, &wqe->dma,
payload_addr(pkt), paylen,
from_mem_obj,
&crc);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 955ff3b6da9c6..5b57de30dee4b 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -511,9 +511,8 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
int data_len)
{
int err;
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
+ err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
data_addr, data_len, to_mem_obj, NULL);
if (unlikely(err))
return (err == -ENOSPC) ? RESPST_ERR_LENGTH
@@ -987,7 +986,7 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
sizeof(skb->cb) - sizeof(ack_pkt));
- refcount_inc(&skb->users);
+ skb_get(skb);
res->type = RXE_ATOMIC_MASK;
res->atomic.skb = skb;
res->first_psn = ack_pkt.psn;
@@ -1121,23 +1120,12 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
/* Find the operation in our list of responder resources. */
res = find_resource(qp, pkt->psn);
if (res) {
- struct sk_buff *skb_copy;
-
- skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
- if (skb_copy) {
- rxe_add_ref(qp); /* for the new SKB */
- } else {
- pr_warn("Couldn't clone atomic resp\n");
- rc = RESPST_CLEANUP;
- goto out;
- }
-
+ skb_get(res->atomic.skb);
/* Resend the result. */
rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
- pkt, skb_copy);
+ pkt, res->atomic.skb);
if (rc) {
pr_err("Failed resending result. This flow is not handled - skb ignored\n");
- rxe_drop_ref(qp);
rc = RESPST_CLEANUP;
goto out;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 73a00a1c06f62..9deafc3aa6af6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1003,7 +1003,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_add_ref(pd);
- err = rxe_mem_init_dma(rxe, pd, access, mr);
+ err = rxe_mem_init_dma(pd, access, mr);
if (err)
goto err2;
@@ -1038,7 +1038,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);
- err = rxe_mem_init_user(rxe, pd, start, length, iova,
+ err = rxe_mem_init_user(pd, start, length, iova,
access, udata, mr);
if (err)
goto err3;
@@ -1086,7 +1086,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);
- err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
+ err = rxe_mem_init_fast(pd, max_num_sg, mr);
if (err)
goto err2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 308e0ce492892..a50b062ed13e0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -415,6 +415,7 @@ struct ipoib_ah {
struct list_head list;
struct kref ref;
unsigned last_send;
+ int valid;
};
struct ipoib_path {
@@ -431,7 +432,6 @@ struct ipoib_path {
struct rb_node rb_node;
struct list_head list;
- int valid;
};
struct ipoib_neigh {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cf291f90b58fd..2ce40a7ff6040 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -697,7 +697,8 @@ void ipoib_mark_paths_invalid(struct net_device *dev)
ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
path->pathrec.dgid.raw);
- path->valid = 0;
+ if (path->ah)
+ path->ah->valid = 0;
}
spin_unlock_irq(&priv->lock);
@@ -833,7 +834,7 @@ static void path_rec_completion(int status,
while ((skb = __skb_dequeue(&neigh->queue)))
__skb_queue_tail(&skqueue, skb);
}
- path->valid = 1;
+ path->ah->valid = 1;
}
path->query = NULL;
@@ -926,6 +927,24 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
+static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_path *path;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ path = __path_find(dev, daddr + 4);
+ if (!path)
+ goto out;
+ if (!path->query)
+ path_rec_start(dev, path);
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
struct net_device *dev)
{
@@ -963,7 +982,7 @@ static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
list_add_tail(&neigh->list, &path->neigh_list);
- if (path->ah) {
+ if (path->ah && path->ah->valid) {
kref_get(&path->ah->ref);
neigh->ah = path->ah;
@@ -1034,63 +1053,43 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
goto drop_and_unlock;
path = __path_find(dev, phdr->hwaddr + 4);
- if (!path || !path->valid) {
- int new_path = 0;
-
+ if (!path || !path->ah || !path->ah->valid) {
if (!path) {
path = path_rec_create(dev, phdr->hwaddr + 4);
- new_path = 1;
+ if (!path)
+ goto drop_and_unlock;
+ __path_add(dev, path);
+ } else {
+ /*
+ * make sure there are no changes in the existing
+ * path record
+ */
+ init_path_rec(priv, path, phdr->hwaddr + 4);
+ }
+ if (!path->query && path_rec_start(dev, path)) {
+ goto drop_and_unlock;
}
- if (path) {
- if (!new_path)
- /* make sure there is no changes in the existing path record */
- init_path_rec(priv, path, phdr->hwaddr + 4);
-
- if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- push_pseudo_header(skb, phdr->hwaddr);
- __skb_queue_tail(&path->queue, skb);
- } else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
- }
- if (!path->query && path_rec_start(dev, path)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- if (new_path)
- path_free(dev, path);
- return;
- } else
- __path_add(dev, path);
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ push_pseudo_header(skb, phdr->hwaddr);
+ __skb_queue_tail(&path->queue, skb);
+ goto unlock;
} else {
goto drop_and_unlock;
}
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return;
- }
-
- if (path->ah) {
- ipoib_dbg(priv, "Send unicast ARP to %08x\n",
- be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
-
- spin_unlock_irqrestore(&priv->lock, flags);
- path->ah->last_send = rn->send(dev, skb, path->ah->ah,
- IPOIB_QPN(phdr->hwaddr));
- return;
- } else if ((path->query || !path_rec_start(dev, path)) &&
- skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- push_pseudo_header(skb, phdr->hwaddr);
- __skb_queue_tail(&path->queue, skb);
- } else {
- goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_dbg(priv, "Send unicast ARP to %08x\n",
+ be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
+ path->ah->last_send = rn->send(dev, skb, path->ah->ah,
+ IPOIB_QPN(phdr->hwaddr));
return;
drop_and_unlock:
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
+unlock:
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1161,10 +1160,12 @@ send_using_neigh:
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
goto unref;
}
- } else if (neigh->ah) {
+ } else if (neigh->ah && neigh->ah->valid) {
neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
IPOIB_QPN(phdr->hwaddr));
goto unref;
+ } else if (neigh->ah) {
+ neigh_refresh_path(neigh, phdr->hwaddr, dev);
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 9b3f47ae20160..6709328d90f8b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -886,7 +886,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
struct netdev_hw_addr *ha;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
- unsigned long flags;
struct ib_sa_mcmember_rec rec;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
@@ -898,9 +897,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
- local_irq_save(flags);
- netif_addr_lock(dev);
- spin_lock(&priv->lock);
+ netif_addr_lock_bh(dev);
+ spin_lock_irq(&priv->lock);
/*
* Unfortunately, the networking core only gives us a list of all of
@@ -978,9 +976,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
}
}
- spin_unlock(&priv->lock);
- netif_addr_unlock(dev);
- local_irq_restore(flags);
+ spin_unlock_irq(&priv->lock);
+ netif_addr_unlock_bh(dev);
ipoib_mcast_remove_list(&remove_list);
@@ -988,9 +985,9 @@ void ipoib_mcast_restart_task(struct work_struct *work)
* Double check that we are still up
*/
if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
}
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0336643c2ed65..9a6434c31db2a 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -665,19 +665,17 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
goto free_host;
}
- /*
- * FRs or FMRs can only map up to a (device) page per entry, but if the
- * first entry is misaligned we'll end up using using two entries
- * (head and tail) for a single page worth data, so we have to drop
- * one segment from the calculation.
- */
- max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
+ max_fr_sectors = (shost->sg_tablesize * PAGE_SIZE) >> 9;
shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
iser_conn, shost->sg_tablesize,
shost->max_sectors);
+ if (shost->max_sectors < iser_max_sectors)
+ iser_warn("max_sectors was reduced from %u to %u\n",
+ iser_max_sectors, shost->max_sectors);
+
if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
cmds_max, max_cmds);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index c1ae4aeae2f90..120b408295603 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -383,10 +383,6 @@ struct iser_device {
bool remote_inv_sup;
};
-#define ISER_CHECK_GUARD 0xc0
-#define ISER_CHECK_REFTAG 0x0f
-#define ISER_CHECK_APPTAG 0x30
-
/**
* struct iser_reg_resources - Fast registration recources
*
@@ -498,6 +494,7 @@ struct ib_conn {
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
+ * @pages_per_mr: maximum pages available for registration
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -520,6 +517,7 @@ struct iser_conn {
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
unsigned short scsi_sg_tablesize;
+ unsigned short pages_per_mr;
bool snd_w_inv;
};
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index df49c4eb67f7b..ca858d6bd37af 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -251,7 +251,7 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
- iser_conn->scsi_sg_tablesize))
+ iser_conn->pages_per_mr))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 322209d5ff582..ca844a926e6ad 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -362,9 +362,9 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
*mask = 0;
if (sc->prot_flags & SCSI_PROT_REF_CHECK)
- *mask |= ISER_CHECK_REFTAG;
+ *mask |= IB_SIG_CHECK_REFTAG;
if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
- *mask |= ISER_CHECK_GUARD;
+ *mask |= IB_SIG_CHECK_GUARD;
}
static inline void
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 56b7240a3fc30..616d978cbf2b4 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -703,19 +703,34 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
unsigned int max_sectors)
{
struct iser_device *device = iser_conn->ib_conn.device;
+ struct ib_device_attr *attr = &device->ib_device->attrs;
unsigned short sg_tablesize, sup_sg_tablesize;
+ unsigned short reserved_mr_pages;
+
+ /*
+ * FRs without SG_GAPS or FMRs can only map up to a (device) page per
+ * entry, but if the first entry is misaligned we'll end up using two
+ * entries (head and tail) for a single page worth data, so one
+ * additional entry is required.
+ */
+ if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) &&
+ (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ reserved_mr_pages = 0;
+ else
+ reserved_mr_pages = 1;
sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
- if (device->ib_device->attrs.device_cap_flags &
- IB_DEVICE_MEM_MGT_EXTENSIONS)
+ if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
sup_sg_tablesize =
min_t(
uint, ISCSI_ISER_MAX_SG_TABLESIZE,
- device->ib_device->attrs.max_fast_reg_page_list_len);
+ attr->max_fast_reg_page_list_len - reserved_mr_pages);
else
sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
+ iser_conn->pages_per_mr =
+ iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}
/**
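To illustrate the arithmetic above, here is a small user-space sketch of the pages_per_mr calculation; max_sectors, the device's max_fast_reg_page_list_len and the ISER_MAX_SG cap below are assumed stand-in values, not the kernel's constants:

	/* Sketch only: reproduces the iser_calc_scsi_params() math with assumed limits. */
	#include <stdio.h>

	#define SIZE_4K		4096
	#define ISER_MAX_SG	128	/* assumed stand-in for ISCSI_ISER_MAX_SG_TABLESIZE */

	static unsigned int div_round_up(unsigned int n, unsigned int d)
	{
		return (n + d - 1) / d;
	}

	int main(void)
	{
		unsigned int max_sectors = 1024;	/* assumed SCSI host limit */
		unsigned int frpl_len = 256;		/* assumed max_fast_reg_page_list_len */
		unsigned int reserved_mr_pages = 1;	/* device lacks SG_GAPS support */
		unsigned int sg_tablesize = div_round_up(max_sectors * 512, SIZE_4K);
		unsigned int sup_sg_tablesize = frpl_len - reserved_mr_pages;

		if (sup_sg_tablesize > ISER_MAX_SG)
			sup_sg_tablesize = ISER_MAX_SG;
		if (sg_tablesize > sup_sg_tablesize)
			sg_tablesize = sup_sg_tablesize;

		/* the MR still needs one spare page for a misaligned first entry */
		printf("scsi_sg_tablesize=%u pages_per_mr=%u\n",
		       sg_tablesize, sg_tablesize + reserved_mr_pages);
		return 0;
	}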
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index fff40b0979477..f2f9318e1f498 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -886,15 +886,9 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des
}
static void
-isert_create_send_desc(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd,
- struct iser_tx_desc *tx_desc)
+__isert_create_send_desc(struct isert_device *device,
+ struct iser_tx_desc *tx_desc)
{
- struct isert_device *device = isert_conn->device;
- struct ib_device *ib_dev = device->ib_device;
-
- ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
- ISER_HEADERS_LEN, DMA_TO_DEVICE);
memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
tx_desc->iser_header.flags = ISCSI_CTRL;
@@ -907,6 +901,20 @@ isert_create_send_desc(struct isert_conn *isert_conn,
}
}
+static void
+isert_create_send_desc(struct isert_conn *isert_conn,
+ struct isert_cmd *isert_cmd,
+ struct iser_tx_desc *tx_desc)
+{
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+
+ ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+
+ __isert_create_send_desc(device, tx_desc);
+}
+
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
struct iser_tx_desc *tx_desc)
@@ -994,7 +1002,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
int ret;
- isert_create_send_desc(isert_conn, NULL, tx_desc);
+ __isert_create_send_desc(device, tx_desc);
memcpy(&tx_desc->iscsi_header, &login->rsp[0],
sizeof(struct iscsi_hdr));
@@ -2106,10 +2114,13 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
return -EINVAL;
}
- sig_attrs->check_mask =
- (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
- (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
- (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
+ sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
+ sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
+ if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
+ sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
+
return 0;
}
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8fb8c737fffef..d60c7dc62905d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -354,6 +354,9 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev)
};
int i, pos;
+ if (pci_ats_disabled())
+ return false;
+
for (i = 0; i < 3; ++i) {
pos = pci_find_ext_capability(pdev, caps[i]);
if (pos == 0)
@@ -3523,9 +3526,11 @@ int amd_iommu_device_info(struct pci_dev *pdev,
memset(info, 0, sizeof(*info));
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
- if (pos)
- info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+ if (!pci_ats_disabled()) {
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
+ if (pos)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+ }
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (pos)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 749d8f2353466..772b404a6604b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2459,7 +2459,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
- if (ecap_dev_iotlb_support(iommu->ecap) &&
+ if (!pci_ats_disabled() &&
+ ecap_dev_iotlb_support(iommu->ecap) &&
pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
dmar_find_matched_atsr_unit(pdev))
info->ats_supported = 1;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index a2bb27446dce4..e63d29a95e76b 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -109,16 +109,20 @@ config TI_MESSAGE_MANAGER
platform has support for the hardware block.
config HI3660_MBOX
- tristate "Hi3660 Mailbox"
- depends on ARCH_HISI && OF
+ tristate "Hi3660 Mailbox" if EXPERT
+ depends on (ARCH_HISI || COMPILE_TEST)
+ depends on OF
+ default ARCH_HISI
help
An implementation of the hi3660 mailbox. It is used to send messages
between application processors and other processors/MCU/DSP. Select
Y here if you want to use the Hi3660 mailbox controller.
config HI6220_MBOX
- tristate "Hi6220 Mailbox"
- depends on ARCH_HISI
+ tristate "Hi6220 Mailbox" if EXPERT
+ depends on (ARCH_HISI || COMPILE_TEST)
+ depends on OF
+ default ARCH_HISI
help
An implementation of the hi6220 mailbox. It is used to send messages
between application processors and MCU. Say Y here if you want to
@@ -162,7 +166,6 @@ config XGENE_SLIMPRO_MBOX
config BCM_PDC_MBOX
tristate "Broadcom FlexSparx DMA Mailbox"
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on HAS_DMA
help
Mailbox implementation for the Broadcom FlexSparx DMA ring manager,
which provides access to various offload engines on Broadcom
@@ -172,11 +175,18 @@ config BCM_FLEXRM_MBOX
tristate "Broadcom FlexRM Mailbox"
depends on ARM64
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on HAS_DMA
select GENERIC_MSI_IRQ_DOMAIN
default m if ARCH_BCM_IPROC
help
Mailbox implementation of the Broadcom FlexRM ring manager,
which provides access to various offload engines on Broadcom
SoCs. Say Y here if you want to use the Broadcom FlexRM.
+
+config STM32_IPCC
+ tristate "STM32 IPCC Mailbox"
+ depends on MACH_STM32MP157
+ help
+ Mailbox implementation for STMicroelectronics STM32 family chips
+ with hardware for Inter-Processor Communication Controller (IPCC)
+ between processors. Say Y here if you want to have this support.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index cc23c3a43fcdf..4d501bea78635 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -38,3 +38,5 @@ obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
+
+obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index cfb4b4496dd9f..e92bbc533821a 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -134,7 +134,7 @@ static struct mbox_chan *bcm2835_mbox_index_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *sp)
{
if (sp->args_count != 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
return &mbox->chans[0];
}
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 57bde0dfd12fe..333ed4a9d4b8f 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -125,6 +125,8 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
+ { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
new file mode 100644
index 0000000000000..533b0da5235d4
--- /dev/null
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
+ * Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+
+#define IPCC_XCR 0x000
+#define XCR_RXOIE BIT(0)
+#define XCR_TXOIE BIT(16)
+
+#define IPCC_XMR 0x004
+#define IPCC_XSCR 0x008
+#define IPCC_XTOYSR 0x00c
+
+#define IPCC_PROC_OFFST 0x010
+
+#define IPCC_HWCFGR 0x3f0
+#define IPCFGR_CHAN_MASK GENMASK(7, 0)
+
+#define IPCC_VER 0x3f4
+#define VER_MINREV_MASK GENMASK(3, 0)
+#define VER_MAJREV_MASK GENMASK(7, 4)
+
+#define RX_BIT_MASK GENMASK(15, 0)
+#define RX_BIT_CHAN(chan) BIT(chan)
+#define TX_BIT_SHIFT 16
+#define TX_BIT_MASK GENMASK(31, 16)
+#define TX_BIT_CHAN(chan) BIT(TX_BIT_SHIFT + (chan))
+
+#define STM32_MAX_PROCS 2
+
+enum {
+ IPCC_IRQ_RX,
+ IPCC_IRQ_TX,
+ IPCC_IRQ_NUM,
+};
+
+struct stm32_ipcc {
+ struct mbox_controller controller;
+ void __iomem *reg_base;
+ void __iomem *reg_proc;
+ struct clk *clk;
+ int irqs[IPCC_IRQ_NUM];
+ int wkp;
+ u32 proc_id;
+ u32 n_chans;
+ u32 xcr;
+ u32 xmr;
+};
+
+static inline void stm32_ipcc_set_bits(void __iomem *reg, u32 mask)
+{
+ writel_relaxed(readl_relaxed(reg) | mask, reg);
+}
+
+static inline void stm32_ipcc_clr_bits(void __iomem *reg, u32 mask)
+{
+ writel_relaxed(readl_relaxed(reg) & ~mask, reg);
+}
+
+static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
+{
+ struct stm32_ipcc *ipcc = data;
+ struct device *dev = ipcc->controller.dev;
+ u32 status, mr, tosr, chan;
+ irqreturn_t ret = IRQ_NONE;
+ int proc_offset;
+
+ /* read 'channel occupied' status from other proc */
+ proc_offset = ipcc->proc_id ? -IPCC_PROC_OFFST : IPCC_PROC_OFFST;
+ tosr = readl_relaxed(ipcc->reg_proc + proc_offset + IPCC_XTOYSR);
+ mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+
+ /* search for unmasked 'channel occupied' */
+ status = tosr & FIELD_GET(RX_BIT_MASK, ~mr);
+
+ for (chan = 0; chan < ipcc->n_chans; chan++) {
+ if (!(status & (1 << chan)))
+ continue;
+
+ dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);
+
+ mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);
+
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR,
+ RX_BIT_CHAN(chan));
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
+{
+ struct stm32_ipcc *ipcc = data;
+ struct device *dev = ipcc->controller.dev;
+ u32 status, mr, tosr, chan;
+ irqreturn_t ret = IRQ_NONE;
+
+ tosr = readl_relaxed(ipcc->reg_proc + IPCC_XTOYSR);
+ mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+
+ /* search for unmasked 'channel free' */
+ status = ~tosr & FIELD_GET(TX_BIT_MASK, ~mr);
+
+ for (chan = 0; chan < ipcc->n_chans ; chan++) {
+ if (!(status & (1 << chan)))
+ continue;
+
+ dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan);
+
+ /* mask 'tx channel free' interrupt */
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
+ TX_BIT_CHAN(chan));
+
+ mbox_chan_txdone(&ipcc->controller.chans[chan], 0);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
+{
+ unsigned int chan = (unsigned int)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+
+ dev_dbg(ipcc->controller.dev, "%s: chan:%d\n", __func__, chan);
+
+ /* set channel n occupied */
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XSCR, TX_BIT_CHAN(chan));
+
+ /* unmask 'tx channel free' interrupt */
+ stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, TX_BIT_CHAN(chan));
+
+ return 0;
+}
+
+static int stm32_ipcc_startup(struct mbox_chan *link)
+{
+ unsigned int chan = (unsigned int)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+ int ret;
+
+ ret = clk_prepare_enable(ipcc->clk);
+ if (ret) {
+ dev_err(ipcc->controller.dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ /* unmask 'rx channel occupied' interrupt */
+ stm32_ipcc_clr_bits(ipcc->reg_proc + IPCC_XMR, RX_BIT_CHAN(chan));
+
+ return 0;
+}
+
+static void stm32_ipcc_shutdown(struct mbox_chan *link)
+{
+ unsigned int chan = (unsigned int)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+
+ /* mask rx/tx interrupt */
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan));
+
+ clk_disable_unprepare(ipcc->clk);
+}
+
+static const struct mbox_chan_ops stm32_ipcc_ops = {
+ .send_data = stm32_ipcc_send_data,
+ .startup = stm32_ipcc_startup,
+ .shutdown = stm32_ipcc_shutdown,
+};
+
+static int stm32_ipcc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct stm32_ipcc *ipcc;
+ struct resource *res;
+ unsigned int i;
+ int ret;
+ u32 ip_ver;
+ static const char * const irq_name[] = {"rx", "tx"};
+ irq_handler_t irq_thread[] = {stm32_ipcc_rx_irq, stm32_ipcc_tx_irq};
+
+ if (!np) {
+ dev_err(dev, "No DT found\n");
+ return -ENODEV;
+ }
+
+ ipcc = devm_kzalloc(dev, sizeof(*ipcc), GFP_KERNEL);
+ if (!ipcc)
+ return -ENOMEM;
+
+ /* proc_id */
+ if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) {
+ dev_err(dev, "Missing st,proc-id\n");
+ return -ENODEV;
+ }
+
+ if (ipcc->proc_id >= STM32_MAX_PROCS) {
+ dev_err(dev, "Invalid proc_id (%d)\n", ipcc->proc_id);
+ return -EINVAL;
+ }
+
+ /* regs */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ipcc->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ipcc->reg_base))
+ return PTR_ERR(ipcc->reg_base);
+
+ ipcc->reg_proc = ipcc->reg_base + ipcc->proc_id * IPCC_PROC_OFFST;
+
+ /* clock */
+ ipcc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ipcc->clk))
+ return PTR_ERR(ipcc->clk);
+
+ ret = clk_prepare_enable(ipcc->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ /* irq */
+ for (i = 0; i < IPCC_IRQ_NUM; i++) {
+ ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]);
+ if (ipcc->irqs[i] < 0) {
+ dev_err(dev, "no IRQ specified %s\n", irq_name[i]);
+ ret = ipcc->irqs[i];
+ goto err_clk;
+ }
+
+ ret = devm_request_threaded_irq(dev, ipcc->irqs[i], NULL,
+ irq_thread[i], IRQF_ONESHOT,
+ dev_name(dev), ipcc);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d (%d)\n", i, ret);
+ goto err_clk;
+ }
+ }
+
+ /* mask and enable rx/tx irq */
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_MASK | TX_BIT_MASK);
+ stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XCR, XCR_RXOIE | XCR_TXOIE);
+
+ /* wakeup */
+ if (of_property_read_bool(np, "wakeup-source")) {
+ ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup");
+ if (ipcc->wkp < 0) {
+ dev_err(dev, "could not get wakeup IRQ\n");
+ ret = ipcc->wkp;
+ goto err_clk;
+ }
+
+ device_init_wakeup(dev, true);
+ ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp);
+ if (ret) {
+ dev_err(dev, "Failed to set wake up irq\n");
+ goto err_init_wkp;
+ }
+ } else {
+ device_init_wakeup(dev, false);
+ }
+
+ /* mailbox controller */
+ ipcc->n_chans = readl_relaxed(ipcc->reg_base + IPCC_HWCFGR);
+ ipcc->n_chans &= IPCFGR_CHAN_MASK;
+
+ ipcc->controller.dev = dev;
+ ipcc->controller.txdone_irq = true;
+ ipcc->controller.ops = &stm32_ipcc_ops;
+ ipcc->controller.num_chans = ipcc->n_chans;
+ ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
+ sizeof(*ipcc->controller.chans),
+ GFP_KERNEL);
+ if (!ipcc->controller.chans) {
+ ret = -ENOMEM;
+ goto err_irq_wkp;
+ }
+
+ for (i = 0; i < ipcc->controller.num_chans; i++)
+ ipcc->controller.chans[i].con_priv = (void *)i;
+
+ ret = mbox_controller_register(&ipcc->controller);
+ if (ret)
+ goto err_irq_wkp;
+
+ platform_set_drvdata(pdev, ipcc);
+
+ ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);
+
+ dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
+ FIELD_GET(VER_MAJREV_MASK, ip_ver),
+ FIELD_GET(VER_MINREV_MASK, ip_ver),
+ ipcc->controller.num_chans, ipcc->proc_id);
+
+ clk_disable_unprepare(ipcc->clk);
+ return 0;
+
+err_irq_wkp:
+ if (ipcc->wkp)
+ dev_pm_clear_wake_irq(dev);
+err_init_wkp:
+ device_init_wakeup(dev, false);
+err_clk:
+ clk_disable_unprepare(ipcc->clk);
+ return ret;
+}
+
+static int stm32_ipcc_remove(struct platform_device *pdev)
+{
+ struct stm32_ipcc *ipcc = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&ipcc->controller);
+
+ if (ipcc->wkp)
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ device_init_wakeup(&pdev->dev, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void stm32_ipcc_set_irq_wake(struct device *dev, bool enable)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+ unsigned int i;
+
+ if (device_may_wakeup(dev))
+ for (i = 0; i < IPCC_IRQ_NUM; i++)
+ irq_set_irq_wake(ipcc->irqs[i], enable);
+}
+
+static int stm32_ipcc_suspend(struct device *dev)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+
+ ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+ ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);
+
+ stm32_ipcc_set_irq_wake(dev, true);
+
+ return 0;
+}
+
+static int stm32_ipcc_resume(struct device *dev)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+
+ stm32_ipcc_set_irq_wake(dev, false);
+
+ writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
+ writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stm32_ipcc_pm_ops,
+ stm32_ipcc_suspend, stm32_ipcc_resume);
+
+static const struct of_device_id stm32_ipcc_of_match[] = {
+ { .compatible = "st,stm32mp1-ipcc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_ipcc_of_match);
+
+static struct platform_driver stm32_ipcc_driver = {
+ .driver = {
+ .name = "stm32-ipcc",
+ .pm = &stm32_ipcc_pm_ops,
+ .of_match_table = stm32_ipcc_of_match,
+ },
+ .probe = stm32_ipcc_probe,
+ .remove = stm32_ipcc_remove,
+};
+
+module_platform_driver(stm32_ipcc_driver);
+
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STM32 IPCC driver");
+MODULE_LICENSE("GPL v2");
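For context, the sketch below shows how a client driver might ring one of these doorbell channels through the generic mailbox API; the function and device names are assumptions, not part of this patch:

	#include <linux/mailbox_client.h>

	/* Sketch: IPCC channels carry no payload, so the message pointer is unused;
	 * tx completion comes from the controller's 'channel free' interrupt
	 * (txdone_irq = true above). */
	static int example_ring_doorbell(struct device *dev)
	{
		struct mbox_client cl = {
			.dev		= dev,
			.tx_block	= true,
			.tx_tout	= 100,	/* ms */
		};
		struct mbox_chan *chan;
		int ret;

		chan = mbox_request_channel(&cl, 0);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = mbox_send_message(chan, NULL);
		mbox_free_channel(chan);

		return ret < 0 ? ret : 0;
	}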
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index fe8897e64635c..7b370466a227f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -203,7 +203,7 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
if (!val)
return false;
- if (test->last_irq - pdev->irq == msi_num - 1)
+ if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
return true;
return false;
@@ -233,7 +233,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
orig_src_addr = dma_alloc_coherent(dev, size + alignment,
&orig_src_phys_addr, GFP_KERNEL);
if (!orig_src_addr) {
- dev_err(dev, "failed to allocate source buffer\n");
+ dev_err(dev, "Failed to allocate source buffer\n");
ret = false;
goto err;
}
@@ -259,7 +259,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
&orig_dst_phys_addr, GFP_KERNEL);
if (!orig_dst_addr) {
- dev_err(dev, "failed to allocate destination address\n");
+ dev_err(dev, "Failed to allocate destination address\n");
ret = false;
goto err_orig_src_addr;
}
@@ -321,7 +321,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
- dev_err(dev, "failed to allocate address\n");
+ dev_err(dev, "Failed to allocate address\n");
ret = false;
goto err;
}
@@ -382,7 +382,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
- dev_err(dev, "failed to allocate destination address\n");
+ dev_err(dev, "Failed to allocate destination address\n");
ret = false;
goto err;
}
@@ -513,31 +513,31 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
if (!no_msi) {
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (irq < 0)
- dev_err(dev, "failed to get MSI interrupts\n");
+ dev_err(dev, "Failed to get MSI interrupts\n");
test->num_irqs = irq;
}
err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, test);
if (err) {
- dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
+ dev_err(dev, "Failed to request IRQ %d\n", pdev->irq);
goto err_disable_msi;
}
for (i = 1; i < irq; i++) {
- err = devm_request_irq(dev, pdev->irq + i,
+ err = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, test);
if (err)
dev_err(dev, "failed to request IRQ %d for MSI %d\n",
- pdev->irq + i, i + 1);
+ pci_irq_vector(pdev, i), i + 1);
}
for (bar = BAR_0; bar <= BAR_5; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
base = pci_ioremap_bar(pdev, bar);
if (!base) {
- dev_err(dev, "failed to read BAR%d\n", bar);
+ dev_err(dev, "Failed to read BAR%d\n", bar);
WARN_ON(bar == test_reg_bar);
}
test->bar[bar] = base;
@@ -557,7 +557,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
err = id;
- dev_err(dev, "unable to get id\n");
+ dev_err(dev, "Unable to get id\n");
goto err_iounmap;
}
@@ -573,7 +573,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
err = misc_register(misc_device);
if (err) {
- dev_err(dev, "failed to register device\n");
+ dev_err(dev, "Failed to register device\n");
goto err_kfree_name;
}
@@ -592,7 +592,7 @@ err_iounmap:
}
for (i = 0; i < irq; i++)
- devm_free_irq(dev, pdev->irq + i, test);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
err_disable_msi:
pci_disable_msi(pdev);
@@ -625,7 +625,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_iounmap(pdev, test->bar[bar]);
}
for (i = 0; i < test->num_irqs; i++)
- devm_free_irq(&pdev->dev, pdev->irq + i, test);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
pci_disable_msi(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -634,6 +634,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
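The switch from pdev->irq + i to pci_irq_vector() matters because MSI vectors are not guaranteed to map to consecutive Linux IRQ numbers; a minimal sketch of the intended pattern (handler and names are placeholders):

	/* Sketch: request one handler per allocated MSI vector. */
	static int example_setup_msi(struct pci_dev *pdev, irq_handler_t handler,
				     void *priv)
	{
		int i, nvec, err;

		nvec = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			/* pci_irq_vector() translates vector index to Linux IRQ */
			err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
					       handler, IRQF_SHARED,
					       "example", priv);
			if (err)
				return err;
		}

		return 0;
	}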
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a822e70c2af35..f2af87d70594f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3386,32 +3386,6 @@ err_disable_device:
}
/*****************************************************************************/
-static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
-{
- int rc;
-
- if (numvfs > 0) {
- rc = pci_enable_sriov(dev, numvfs);
- if (rc != 0) {
- dev_err(&dev->dev,
- "pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
- numvfs, rc);
- return rc;
- }
-
- return numvfs;
- }
-
- if (numvfs == 0) {
- pci_disable_sriov(dev);
- return 0;
- }
-
- return -EINVAL;
-}
-
-/*****************************************************************************/
-/*****************************************************************************/
/* ena_remove - Device Removal Routine
* @pdev: PCI device information struct
@@ -3526,7 +3500,7 @@ static struct pci_driver ena_pci_driver = {
.suspend = ena_suspend,
.resume = ena_resume,
#endif
- .sriov_configure = ena_sriov_configure,
+ .sriov_configure = pci_sriov_configure_simple,
};
static int __init ena_init(void)
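pci_sriov_configure_simple() is the new PCI core helper that the removed ena_sriov_configure() (and similar helpers in other drivers below) collapse into; roughly, it behaves like this sketch (not the actual implementation):

	/* Sketch only: approximate behavior of pci_sriov_configure_simple(). */
	static int sriov_configure_simple_sketch(struct pci_dev *dev, int nr_virtfn)
	{
		int rc;

		if (!nr_virtfn) {
			if (pci_vfs_assigned(dev))
				return -EPERM;	/* VFs still in use by guests */
			pci_disable_sriov(dev);
			return 0;
		}

		rc = pci_enable_sriov(dev, nr_virtfn);
		return rc ? rc : nr_virtfn;
	}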
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c766ae23bc74f..5b1ed240bf18b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13922,8 +13922,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
{
struct net_device *dev = NULL;
struct bnx2x *bp;
- enum pcie_link_width pcie_width;
- enum pci_bus_speed pcie_speed;
int rc, max_non_def_sbs;
int rx_count, tx_count, rss_count, doorbell_size;
int max_cos_est;
@@ -14091,21 +14089,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
}
- if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
- pcie_speed == PCI_SPEED_UNKNOWN ||
- pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
- BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
- else
- BNX2X_DEV_INFO(
- "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
- board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width,
- pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
- pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
- pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
- "Unknown",
- dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ pcie_print_link_status(bp->pdev);
bnx2x_register_phc(bp);
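pcie_print_link_status() is the core helper that replaces the open-coded bandwidth reporting removed here and in the bnxt, cxgb4 and ixgbe hunks below; a minimal probe-time usage sketch (driver names are placeholders):

	/* Sketch: print the negotiated PCIe speed/width to the kernel log. */
	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		int err;

		err = pci_enable_device(pdev);
		if (err)
			return err;

		pcie_print_link_status(pdev);

		return 0;
	}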
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dfa0839f66561..176fc9f4d7def 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -8685,22 +8685,6 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
-static void bnxt_parse_log_pcie_link(struct bnxt *bp)
-{
- enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
- enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
-
- if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
- speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
- netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
- else
- netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
- speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
- speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
- speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
- "Unknown", width);
-}
-
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -8915,8 +8899,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
(long)pci_resource_start(pdev, 0), dev->dev_addr);
-
- bnxt_parse_log_pcie_link(bp);
+ pcie_print_link_status(pdev);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0efae2030e71b..35cb3ae4f7b67 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5066,79 +5066,6 @@ static int init_rss(struct adapter *adap)
return 0;
}
-static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
- enum pci_bus_speed *speed,
- enum pcie_link_width *width)
-{
- u32 lnkcap1, lnkcap2;
- int err1, err2;
-
-#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
-
- *speed = PCI_SPEED_UNKNOWN;
- *width = PCIE_LNK_WIDTH_UNKNOWN;
-
- err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
- &lnkcap1);
- err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
- &lnkcap2);
- if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
- *speed = PCIE_SPEED_8_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
- *speed = PCIE_SPEED_5_0GT;
- else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
- *speed = PCIE_SPEED_2_5GT;
- }
- if (!err1) {
- *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
- if (!lnkcap2) { /* pre-r3.0 */
- if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
- *speed = PCIE_SPEED_5_0GT;
- else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
- *speed = PCIE_SPEED_2_5GT;
- }
- }
-
- if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
- return err1 ? err1 : err2 ? err2 : -EINVAL;
- return 0;
-}
-
-static void cxgb4_check_pcie_caps(struct adapter *adap)
-{
- enum pcie_link_width width, width_cap;
- enum pci_bus_speed speed, speed_cap;
-
-#define PCIE_SPEED_STR(speed) \
- (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
- speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
- speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
- "Unknown")
-
- if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
- dev_warn(adap->pdev_dev,
- "Unable to determine PCIe device BW capabilities\n");
- return;
- }
-
- if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
- speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
- dev_warn(adap->pdev_dev,
- "Unable to determine PCI Express bandwidth.\n");
- return;
- }
-
- dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
- PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
- dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
- width, width_cap);
- if (speed < speed_cap || width < width_cap)
- dev_info(adap->pdev_dev,
- "A slot with more lanes and/or higher speed is "
- "suggested for optimal performance.\n");
-}
-
/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
@@ -5798,7 +5725,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* check for PCI Express bandwidth capabilities */
- cxgb4_check_pcie_caps(adapter);
+ pcie_print_link_status(pdev);
err = init_rss(adapter);
if (err)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 38b4e48994904..4929f72655985 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -245,9 +245,6 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
int expected_gts)
{
struct ixgbe_hw *hw = &adapter->hw;
- int max_gts = 0;
- enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
- enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
struct pci_dev *pdev;
/* Some devices are not connected over PCIe and thus do not negotiate
@@ -263,49 +260,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
else
pdev = adapter->pdev;
- if (pcie_get_minimum_link(pdev, &speed, &width) ||
- speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
- e_dev_warn("Unable to determine PCI Express bandwidth.\n");
- return;
- }
-
- switch (speed) {
- case PCIE_SPEED_2_5GT:
- /* 8b/10b encoding reduces max throughput by 20% */
- max_gts = 2 * width;
- break;
- case PCIE_SPEED_5_0GT:
- /* 8b/10b encoding reduces max throughput by 20% */
- max_gts = 4 * width;
- break;
- case PCIE_SPEED_8_0GT:
- /* 128b/130b encoding reduces throughput by less than 2% */
- max_gts = 8 * width;
- break;
- default:
- e_dev_warn("Unable to determine PCI Express bandwidth.\n");
- return;
- }
-
- e_dev_info("PCI Express bandwidth of %dGT/s available\n",
- max_gts);
- e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
- (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
- speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
- speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
- "Unknown"),
- width,
- (speed == PCIE_SPEED_2_5GT ? "20%" :
- speed == PCIE_SPEED_5_0GT ? "20%" :
- speed == PCIE_SPEED_8_0GT ? "<2%" :
- "Unknown"));
-
- if (max_gts < expected_gts) {
- e_dev_warn("This is not sufficient for optimal performance of this card.\n");
- e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
- expected_gts);
- e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
- }
+ pcie_print_link_status(pdev);
}
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6cab1dd66d1b7..f63dfbcd29fea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2104,21 +2104,18 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
struct mlx5_vport *vport = &esw->vports[vport_idx];
u64 rx_discard_vport_down, tx_discard_vport_down;
u64 bytes = 0;
- u16 idx = 0;
int err = 0;
if (!vport->enabled || esw->mode != SRIOV_LEGACY)
return 0;
- if (vport->egress.drop_counter) {
- idx = vport->egress.drop_counter->id;
- mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
- }
+ if (vport->egress.drop_counter)
+ mlx5_fc_query(dev, vport->egress.drop_counter,
+ &stats->rx_dropped, &bytes);
- if (vport->ingress.drop_counter) {
- idx = vport->ingress.drop_counter->id;
- mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
- }
+ if (vport->ingress.drop_counter)
+ mlx5_fc_query(dev, vport->ingress.drop_counter,
+ &stats->tx_dropped, &bytes);
if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
!MLX5_CAP_GEN(dev, transmit_discard_vport_down))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e1b609c61d593..49a75d31185ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -325,7 +325,8 @@ static bool check_valid_mask(u8 match_criteria_enable, const u32 *match_criteria
if (match_criteria_enable & ~(
(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) |
(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) |
- (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS)))
+ (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) |
+ (1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2)))
return false;
if (!(match_criteria_enable &
@@ -361,6 +362,17 @@ static bool check_valid_mask(u8 match_criteria_enable, const u32 *match_criteria
return false;
}
+ if (!(match_criteria_enable &
+ 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2)) {
+ char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
+ match_criteria, misc_parameters_2);
+
+ if (fg_type_mask[0] ||
+ memcmp(fg_type_mask, fg_type_mask + 1,
+ MLX5_ST_SZ_BYTES(fte_match_set_misc2) - 1))
+ return false;
+ }
+
return check_last_reserved(match_criteria);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index e26d3e9d5f9f9..32070e5d993d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -159,7 +159,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn;
};
-#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_600
+#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_800
/* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last.
*/
@@ -233,8 +233,6 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
-int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
- u64 *packets, u64 *bytes);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index b7ab929d5f8e1..58af6be13dfa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -243,6 +243,7 @@ err_out:
return ERR_PTR(err);
}
+EXPORT_SYMBOL(mlx5_fc_create);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
@@ -260,6 +261,7 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
mlx5_cmd_fc_free(dev, counter->id);
kfree(counter);
}
+EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
@@ -312,11 +314,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
}
}
-int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes)
{
- return mlx5_cmd_fc_query(dev, id, packets, bytes);
+ return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
+EXPORT_SYMBOL(mlx5_fc_query);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e526437bacbf7..d234de5505ea2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2630,24 +2630,6 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_put_ctrl(&dev->ctrl);
}
-static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
-{
- int ret = 0;
-
- if (numvfs == 0) {
- if (pci_vfs_assigned(pdev)) {
- dev_warn(&pdev->dev,
- "Cannot disable SR-IOV VFs while assigned\n");
- return -EPERM;
- }
- pci_disable_sriov(pdev);
- return 0;
- }
-
- ret = pci_enable_sriov(pdev, numvfs);
- return ret ? ret : numvfs;
-}
-
#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
@@ -2774,7 +2756,7 @@ static struct pci_driver nvme_driver = {
.driver = {
.pm = &nvme_dev_pm_ops,
},
- .sriov_configure = nvme_pci_sriov_configure,
+ .sriov_configure = pci_sriov_configure_simple,
.err_handler = &nvme_err_handler,
};
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index f9d5480a4ae53..27d9b4bba535c 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -177,7 +177,6 @@ int of_node_to_nid(struct device_node *device)
return NUMA_NO_NODE;
}
-EXPORT_SYMBOL(of_node_to_nid);
int __init of_numa_init(void)
{
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index c00d81dfac0b8..0b49a62b38a38 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -32,6 +32,11 @@ const struct of_device_id of_default_bus_match_table[] = {
{} /* Empty terminated list */
};
+static const struct of_device_id of_skipped_node_table[] = {
+ { .compatible = "operating-points-v2", },
+ {} /* Empty terminated list */
+};
+
static int of_dev_node_match(struct device *dev, void *data)
{
return dev->of_node == data;
@@ -356,6 +361,12 @@ static int of_platform_bus_create(struct device_node *bus,
return 0;
}
+ /* Skip nodes for which we don't want to create devices */
+ if (unlikely(of_match_node(of_skipped_node_table, bus))) {
+ pr_debug("%s() - skipping %pOF node\n", __func__, bus);
+ return 0;
+ }
+
if (of_node_check_flag(bus, OF_POPULATED_BUS)) {
pr_debug("%s() - skipping %pOF, already populated\n",
__func__, bus);
@@ -537,6 +548,9 @@ int of_platform_device_destroy(struct device *dev, void *data)
if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS))
device_for_each_child(dev, NULL, of_platform_device_destroy);
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+ of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
+
if (dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
#ifdef CONFIG_ARM_AMBA
@@ -544,8 +558,6 @@ int of_platform_device_destroy(struct device *dev, void *data)
amba_device_unregister(to_amba_device(dev));
#endif
- of_node_clear_flag(dev->of_node, OF_POPULATED);
- of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0;
}
EXPORT_SYMBOL_GPL(of_platform_device_destroy);
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 65d0b7adfcd4b..7edfac6f19144 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -122,6 +122,11 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
goto err_fail;
}
+ if (offset < 0 || offset + sizeof(__be32) > prop->length) {
+ err = -EINVAL;
+ goto err_fail;
+ }
+
*(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
}
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 6bb37c18292a8..ecee50d10d149 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -165,20 +165,20 @@ static void __init of_unittest_dynamic(void)
/* Add a new property - should pass*/
prop->name = "new-property";
prop->value = "new-property-data";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
/* Try to add an existing property - should fail */
prop++;
prop->name = "new-property";
prop->value = "new-property-data-should-fail";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) != 0,
"Adding an existing property should have failed\n");
/* Try to modify an existing property - should pass */
prop->value = "modify-property-data-should-pass";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating an existing property should have passed\n");
@@ -186,7 +186,7 @@ static void __init of_unittest_dynamic(void)
prop++;
prop->name = "modify-property";
prop->value = "modify-missing-property-data-should-pass";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating a missing property should have passed\n");
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 29a487f31daea..b2f07635e94d4 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -67,6 +67,18 @@ config PCI_STUB
When in doubt, say N.
+config PCI_PF_STUB
+ tristate "PCI PF Stub driver"
+ depends on PCI
+ depends on PCI_IOV
+ help
+ Say Y or M here if you want to enable support for devices that
+ require SR-IOV support, while at the same time the PF itself is
+ not providing any actual services on the host itself such as
+ storage or networking.
+
+ When in doubt, say N.
+
config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend"
depends on PCI && X86 && XEN
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 952addc7bacfd..84c9eef6b1c3e 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PCI_LABEL) += pci-label.o
obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
+obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 89305b569d3d8..4923a2a8e14b5 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -20,6 +20,9 @@ void pci_ats_init(struct pci_dev *dev)
{
int pos;
+ if (pci_ats_disabled())
+ return;
+
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
if (!pos)
return;
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index 2f3f5c50aa487..16f52c626b4bd 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -1,13 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
menu "DesignWare PCI Core Support"
+ depends on PCI
config PCIE_DW
bool
config PCIE_DW_HOST
bool
- depends on PCI
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
@@ -22,7 +22,7 @@ config PCI_DRA7XX
config PCI_DRA7XX_HOST
bool "TI DRA7xx PCIe controller Host Mode"
depends on SOC_DRA7XX || COMPILE_TEST
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI_IRQ_DOMAIN
depends on OF && HAS_IOMEM && TI_PIPE3
select PCIE_DW_HOST
select PCI_DRA7XX
@@ -51,50 +51,62 @@ config PCI_DRA7XX_EP
This uses the DesignWare core.
config PCIE_DW_PLAT
- bool "Platform bus based DesignWare PCIe Controller"
- depends on PCI
- depends on PCI_MSI_IRQ_DOMAIN
- select PCIE_DW_HOST
- ---help---
- This selects the DesignWare PCIe controller support. Select this if
- you have a PCIe controller on Platform bus.
+ bool
- If you have a controller with this interface, say Y or M here.
+config PCIE_DW_PLAT_HOST
+ bool "Platform bus based DesignWare PCIe Controller - Host mode"
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW_HOST
+ select PCIE_DW_PLAT
+ default y
+ help
+ Enables support for the PCIe controller in the Designware IP to
+ work in host mode. There are two instances of PCIe controller in
+ Designware IP.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
+ order to enable device-specific features PCIE_DW_PLAT_EP must be
+ selected.
- If unsure, say N.
+config PCIE_DW_PLAT_EP
+ bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
+ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_DW_PLAT
+ help
+ Enables support for the PCIe controller in the Designware IP to
+ work in endpoint mode. There are two instances of PCIe controller
+ in Designware IP.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCIE_DW_PLAT_HOST must be selected and in
+ order to enable device-specific features PCIE_DW_PLAT_EP must be
+ selected.
config PCI_EXYNOS
bool "Samsung Exynos PCIe controller"
- depends on PCI
- depends on SOC_EXYNOS5440
+ depends on SOC_EXYNOS5440 || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
config PCI_IMX6
bool "Freescale i.MX6 PCIe controller"
- depends on PCI
- depends on SOC_IMX6Q
+ depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
- depends on PCI
- depends on ARCH_SPEAR13XX
+ depends on ARCH_SPEAR13XX || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want PCIe support on SPEAr13XX SoCs.
config PCI_KEYSTONE
bool "TI Keystone PCIe controller"
- depends on PCI
- depends on ARCH_KEYSTONE
+ depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want to enable PCI controller support on Keystone
@@ -104,8 +116,7 @@ config PCI_KEYSTONE
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
- depends on PCI
- depends on OF && (ARM || ARCH_LAYERSCAPE)
+ depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select MFD_SYSCON
select PCIE_DW_HOST
@@ -113,11 +124,9 @@ config PCI_LAYERSCAPE
Say Y here if you want PCIe controller support on Layerscape SoCs.
config PCI_HISI
- depends on OF && ARM64
+ depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
- depends on PCI
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
select PCI_HOST_COMMON
help
@@ -126,10 +135,8 @@ config PCI_HISI
config PCIE_QCOM
bool "Qualcomm PCIe controller"
- depends on PCI
- depends on ARCH_QCOM && OF
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
@@ -138,10 +145,8 @@ config PCIE_QCOM
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
- depends on PCI
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want to enable PCIe controller support on
@@ -154,9 +159,8 @@ config PCIE_ARTPEC6
config PCIE_ARTPEC6_HOST
bool "Axis ARTPEC-6 PCIe controller Host Mode"
- depends on MACH_ARTPEC6
- depends on PCI && PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
+ depends on MACH_ARTPEC6 || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCIE_ARTPEC6
help
@@ -165,7 +169,7 @@ config PCIE_ARTPEC6_HOST
config PCIE_ARTPEC6_EP
bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
- depends on MACH_ARTPEC6
+ depends on MACH_ARTPEC6 || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCIE_ARTPEC6
@@ -174,11 +178,9 @@ config PCIE_ARTPEC6_EP
endpoint mode. This uses the DesignWare core.
config PCIE_KIRIN
- depends on OF && ARM64
+ depends on OF && (ARM64 || COMPILE_TEST)
bool "HiSilicon Kirin series SoCs PCIe controllers"
depends on PCI_MSI_IRQ_DOMAIN
- depends on PCI
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support
@@ -186,10 +188,8 @@ config PCIE_KIRIN
config PCIE_HISI_STB
bool "HiSilicon STB SoCs PCIe controllers"
- depends on ARCH_HISI
- depends on PCI
+ depends on ARCH_HISI || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
- select PCIEPORTBUS
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support on HiSilicon STB SoCs
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index ed8558d638e5c..f688204e50c5f 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -27,6 +27,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "../pci.h"
#include "pcie-designware.h"
/* PCIe controller wrapper DRA7XX configuration registers */
@@ -406,14 +407,14 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
ep->ops = &pcie_ep_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
- pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base)
- return -ENOMEM;
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
- pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base2)
- return -ENOMEM;
+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
@@ -459,9 +460,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
- pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base)
- return -ENOMEM;
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
pp->ops = &dra7xx_pcie_host_ops;
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 4818ef875f8ab..80f6046027834 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -338,7 +338,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
- case IMX6QP: /* FALLTHROUGH */
+ case IMX6QP: /* FALLTHROUGH */
case IMX6Q:
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index d55ae0716adf3..3722a5f31e5e6 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -89,7 +89,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pci)) {
- dev_err(dev, "Link already up\n");
+ dev_info(dev, "Link already up\n");
return 0;
}
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index b587352f8b9fa..072fd7ecc29f2 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -28,6 +28,7 @@
struct armada8k_pcie {
struct dw_pcie *pci;
struct clk *clk;
+ struct clk *clk_reg;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -229,26 +230,38 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
+ pcie->clk_reg = devm_clk_get(dev, "reg");
+ if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto fail;
+ }
+ if (!IS_ERR(pcie->clk_reg)) {
+ ret = clk_prepare_enable(pcie->clk_reg);
+ if (ret)
+ goto fail_clkreg;
+ }
+
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
if (IS_ERR(pci->dbi_base)) {
dev_err(dev, "couldn't remap regs base %p\n", base);
ret = PTR_ERR(pci->dbi_base);
- goto fail;
+ goto fail_clkreg;
}
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
- goto fail;
+ goto fail_clkreg;
return 0;
+fail_clkreg:
+ clk_disable_unprepare(pcie->clk_reg);
fail:
- if (!IS_ERR(pcie->clk))
- clk_disable_unprepare(pcie->clk);
+ clk_disable_unprepare(pcie->clk);
return ret;
}
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index e66cede2b5b7d..321b56cfd5d0a 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -463,9 +463,9 @@ static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
ep->ops = &pcie_ep_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
- if (!pci->dbi_base2)
- return -ENOMEM;
+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
index f07678bf7cfc8..1eec4415a77f0 100644
--- a/drivers/pci/dwc/pcie-designware-ep.c
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -75,7 +75,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
if (free_win >= ep->num_ib_windows) {
- dev_err(pci->dev, "no free inbound window\n");
+ dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
@@ -100,7 +100,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
if (free_win >= ep->num_ob_windows) {
- dev_err(pci->dev, "no free outbound window\n");
+ dev_err(pci->dev, "No free outbound window\n");
return -EINVAL;
}
@@ -204,7 +204,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
if (ret) {
- dev_err(pci->dev, "failed to enable address\n");
+ dev_err(pci->dev, "Failed to enable address\n");
return ret;
}
@@ -348,21 +348,21 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
if (ret < 0) {
- dev_err(dev, "unable to read *num-ib-windows* property\n");
+ dev_err(dev, "Unable to read *num-ib-windows* property\n");
return ret;
}
if (ep->num_ib_windows > MAX_IATU_IN) {
- dev_err(dev, "invalid *num-ib-windows*\n");
+ dev_err(dev, "Invalid *num-ib-windows*\n");
return -EINVAL;
}
ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
if (ret < 0) {
- dev_err(dev, "unable to read *num-ob-windows* property\n");
+ dev_err(dev, "Unable to read *num-ob-windows* property\n");
return ret;
}
if (ep->num_ob_windows > MAX_IATU_OUT) {
- dev_err(dev, "invalid *num-ob-windows*\n");
+ dev_err(dev, "Invalid *num-ob-windows*\n");
return -EINVAL;
}
@@ -389,7 +389,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
- dev_err(dev, "failed to create epc device\n");
+ dev_err(dev, "Failed to create epc device\n");
return PTR_ERR(epc);
}
@@ -411,6 +411,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return -ENOMEM;
}
+ epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
+ EPC_FEATURE_SET_BAR(epc->features, BAR_0);
+
ep->epc = epc;
epc_set_drvdata(epc, ep);
dw_pcie_setup(pci);
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 6c409079d5143..cba1432e395da 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -15,6 +15,7 @@
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
+#include "../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
@@ -83,18 +84,23 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
for (i = 0; i < num_ctrls; i++) {
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
- &val);
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, &val);
if (!val)
continue;
ret = IRQ_HANDLED;
pos = 0;
- while ((pos = find_next_bit((unsigned long *) &val, 32,
- pos)) != 32) {
- irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+ while ((pos = find_next_bit((unsigned long *) &val,
+ MAX_MSI_IRQS_PER_CTRL,
+ pos)) != MAX_MSI_IRQS_PER_CTRL) {
+ irq = irq_find_mapping(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
generic_handle_irq(irq);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE),
4, 1 << pos);
pos++;
}
@@ -157,9 +163,9 @@ static void dw_pci_bottom_mask(struct irq_data *data)
if (pp->ops->msi_clear_irq) {
pp->ops->msi_clear_irq(pp, data->hwirq);
} else {
- ctrl = data->hwirq / 32;
- res = ctrl * 12;
- bit = data->hwirq % 32;
+ ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] &= ~(1 << bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -180,9 +186,9 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
if (pp->ops->msi_set_irq) {
pp->ops->msi_set_irq(pp, data->hwirq);
} else {
- ctrl = data->hwirq / 32;
- res = ctrl * 12;
- bit = data->hwirq % 32;
+ ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] |= 1 << bit;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
@@ -248,8 +254,10 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
+
bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
order_base_2(nr_irqs));
+
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -266,7 +274,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
&dw_pcie_msi_domain_ops, pp);
if (!pp->irq_domain) {
- dev_err(pci->dev, "failed to create IRQ domain\n");
+ dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -274,7 +282,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
&dw_pcie_msi_domain_info,
pp->irq_domain);
if (!pp->msi_domain) {
- dev_err(pci->dev, "failed to create MSI domain\n");
+ dev_err(pci->dev, "Failed to create MSI domain\n");
irq_domain_remove(pp->irq_domain);
return -ENOMEM;
}
@@ -301,13 +309,13 @@ void dw_pcie_msi_init(struct pcie_port *pp)
page = alloc_page(GFP_KERNEL);
pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, pp->msi_data)) {
- dev_err(dev, "failed to map MSI data\n");
+ dev_err(dev, "Failed to map MSI data\n");
__free_page(page);
return;
}
msi_target = (u64)pp->msi_data;
- /* program the msi_data */
+ /* Program the msi_data */
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
lower_32_bits(msi_target));
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
@@ -330,19 +338,19 @@ int dw_pcie_host_init(struct pcie_port *pp)
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res) / 2;
- pp->cfg1_size = resource_size(cfg_res) / 2;
+ pp->cfg0_size = resource_size(cfg_res) >> 1;
+ pp->cfg1_size = resource_size(cfg_res) >> 1;
pp->cfg0_base = cfg_res->start;
pp->cfg1_base = cfg_res->start + pp->cfg0_size;
} else if (!pp->va_cfg0_base) {
- dev_err(dev, "missing *config* reg space\n");
+ dev_err(dev, "Missing *config* reg space\n");
}
bridge = pci_alloc_host_bridge(0);
if (!bridge)
return -ENOMEM;
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&bridge->windows, &pp->io_base);
if (ret)
return ret;
@@ -357,7 +365,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
case IORESOURCE_IO:
ret = pci_remap_iospace(win->res, pp->io_base);
if (ret) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
+ dev_warn(dev, "Error %d: failed to map resource %pR\n",
ret, win->res);
resource_list_destroy_entry(win);
} else {
@@ -375,8 +383,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
break;
case 0:
pp->cfg = win->res;
- pp->cfg0_size = resource_size(pp->cfg) / 2;
- pp->cfg1_size = resource_size(pp->cfg) / 2;
+ pp->cfg0_size = resource_size(pp->cfg) >> 1;
+ pp->cfg1_size = resource_size(pp->cfg) >> 1;
pp->cfg0_base = pp->cfg->start;
pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
break;
@@ -391,7 +399,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg->start,
resource_size(pp->cfg));
if (!pci->dbi_base) {
- dev_err(dev, "error with ioremap\n");
+ dev_err(dev, "Error with ioremap\n");
ret = -ENOMEM;
goto error;
}
@@ -403,7 +411,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
pp->cfg0_base, pp->cfg0_size);
if (!pp->va_cfg0_base) {
- dev_err(dev, "error with ioremap in function\n");
+ dev_err(dev, "Error with ioremap in function\n");
ret = -ENOMEM;
goto error;
}
@@ -414,7 +422,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_base,
pp->cfg1_size);
if (!pp->va_cfg1_base) {
- dev_err(dev, "error with ioremap\n");
+ dev_err(dev, "Error with ioremap\n");
ret = -ENOMEM;
goto error;
}
@@ -586,7 +594,7 @@ static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
return 0;
}
- /* access only one slot on each root port */
+ /* Access only one slot on each root port */
if (bus->number == pp->root_bus_nr && dev > 0)
return 0;
@@ -650,13 +658,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++)
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4,
- &pp->irq_status[ctrl]);
- /* setup RC BARs */
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ 4, &pp->irq_status[ctrl]);
+
+ /* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
- /* setup interrupt pins */
+ /* Setup interrupt pins */
dw_pcie_dbi_ro_wr_en(pci);
val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
@@ -664,13 +674,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
dw_pcie_dbi_ro_wr_dis(pci);
- /* setup bus numbers */
+ /* Setup bus numbers */
val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00ff0100;
dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
- /* setup command register */
+ /* Setup command register */
val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
@@ -683,7 +693,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
- /* get iATU unroll support */
+ /* Get iATU unroll support */
pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
dev_dbg(pci->dev, "iATU unroll: %s\n",
pci->iatu_unroll_enabled ? "enabled" : "disabled");
@@ -701,7 +711,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* Enable write permission for the DBI read-only register */
dw_pcie_dbi_ro_wr_en(pci);
- /* program correct class for RC */
+ /* Program correct class for RC */
dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
/* Better disable write permission right after the update */
dw_pcie_dbi_ro_wr_dis(pci);
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 5416aa8a07a5e..5937fed4c938e 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -12,19 +12,29 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
+#include <linux/regmap.h>
#include "pcie-designware.h"
struct dw_plat_pcie {
- struct dw_pcie *pci;
+ struct dw_pcie *pci;
+ struct regmap *regmap;
+ enum dw_pcie_device_mode mode;
};
+struct dw_plat_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[];
+
static int dw_plat_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -38,13 +48,63 @@ static int dw_plat_pcie_host_init(struct pcie_port *pp)
return 0;
}
+static void dw_plat_set_num_vectors(struct pcie_port *pp)
+{
+ pp->num_vectors = MAX_MSI_IRQS;
+}
+
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
.host_init = dw_plat_pcie_host_init,
+ .set_num_vectors = dw_plat_set_num_vectors,
+};
+
+static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = dw_plat_pcie_establish_link,
};
-static int dw_plat_add_pcie_port(struct pcie_port *pp,
+static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u8 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+ return -EINVAL;
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = dw_plat_pcie_ep_init,
+ .raise_irq = dw_plat_pcie_ep_raise_irq,
+};
+
+static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
struct platform_device *pdev)
{
+ struct dw_pcie *pci = dw_plat_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -63,15 +123,44 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(dev, "failed to initialize host\n");
+ dev_err(dev, "Failed to initialize host\n");
return ret;
}
return 0;
}
-static const struct dw_pcie_ops dw_pcie_ops = {
-};
+static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = dw_plat_pcie->pci;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize endpoint\n");
+ return ret;
+ }
+ return 0;
+}
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
@@ -80,6 +169,16 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct resource *res; /* Resource from DT */
int ret;
+ const struct of_device_id *match;
+ const struct dw_plat_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+
+ match = of_match_device(dw_plat_pcie_of_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct dw_plat_pcie_of_data *)match->data;
+ mode = (enum dw_pcie_device_mode)data->mode;
dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
if (!dw_plat_pcie)
@@ -93,23 +192,59 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
+ dw_plat_pcie->mode = mode;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pci->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
platform_set_drvdata(pdev, dw_plat_pcie);
- ret = dw_plat_add_pcie_port(&pci->pp, pdev);
- if (ret < 0)
- return ret;
+ switch (dw_plat_pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
+ return -ENODEV;
+
+ ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
+ if (ret < 0)
+ return ret;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
+ return -ENODEV;
+
+ ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ }
return 0;
}
+static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id dw_plat_pcie_of_match[] = {
- { .compatible = "snps,dw-pcie", },
+ {
+ .compatible = "snps,dw-pcie",
+ .data = &dw_plat_pcie_rc_of_data,
+ },
+ {
+ .compatible = "snps,dw-pcie-ep",
+ .data = &dw_plat_pcie_ep_of_data,
+ },
{},
};
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 1b7282e5b4946..778c4f76a8843 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -69,7 +69,7 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
ret = dw_pcie_read(base + reg, size, &val);
if (ret)
- dev_err(pci->dev, "read DBI address failed\n");
+ dev_err(pci->dev, "Read DBI address failed\n");
return val;
}
@@ -86,7 +86,7 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
ret = dw_pcie_write(base + reg, size, val);
if (ret)
- dev_err(pci->dev, "write DBI address failed\n");
+ dev_err(pci->dev, "Write DBI address failed\n");
}
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -137,7 +137,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "outbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
@@ -180,7 +180,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "outbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
@@ -238,7 +238,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "inbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
return -EBUSY;
}
@@ -284,7 +284,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "inbound iATU is not being enabled\n");
+ dev_err(pci->dev, "Inbound iATU is not being enabled\n");
return -EBUSY;
}
@@ -313,16 +313,16 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
int retries;
- /* check if the link is up or not */
+ /* Check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "link up\n");
+ dev_info(pci->dev, "Link up\n");
return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(pci->dev, "phy link never came up\n");
+ dev_err(pci->dev, "Phy link never came up\n");
return -ETIMEDOUT;
}
@@ -351,7 +351,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
if (ret)
lanes = 0;
- /* set the number of lanes */
+ /* Set the number of lanes */
val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
switch (lanes) {
@@ -373,7 +373,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
}
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
- /* set link width speed control register */
+ /* Set link width speed control register */
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
switch (lanes) {
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index fe811dbc12cf7..bee4e2535a612 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -110,6 +110,7 @@
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_REG_CTRL_BLOCK_SIZE 12
#define MSI_DEF_NUM_VECTORS 32
/* Maximum number of inbound/outbound iATUs */
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 5897af7d3355a..a1d0198081a64 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -10,7 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -19,6 +19,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
@@ -869,7 +870,7 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= !BIT(0);
+ val &= ~BIT(0);
writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
/* change DBI base address */
@@ -1088,6 +1089,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
int ret;
+ pm_runtime_get_sync(pci->dev);
qcom_ep_reset_assert(pcie);
ret = pcie->ops->init(pcie);
@@ -1124,6 +1126,7 @@ err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
pcie->ops->deinit(pcie);
+ pm_runtime_put(pci->dev);
return ret;
}
@@ -1212,6 +1215,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ pm_runtime_enable(dev);
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1257,14 +1261,17 @@ static int qcom_pcie_probe(struct platform_device *pdev)
}
ret = phy_init(pcie->phy);
- if (ret)
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
return ret;
+ }
platform_set_drvdata(pdev, pcie);
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
+ pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 7cef851243256..63ed706445b9f 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -87,7 +87,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
if (!src_addr) {
- dev_err(dev, "failed to allocate source address\n");
+ dev_err(dev, "Failed to allocate source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
ret = -ENOMEM;
goto err;
@@ -96,14 +96,14 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map source address\n");
+ dev_err(dev, "Failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
goto err_src_addr;
}
dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
if (!dst_addr) {
- dev_err(dev, "failed to allocate destination address\n");
+ dev_err(dev, "Failed to allocate destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
ret = -ENOMEM;
goto err_src_map_addr;
@@ -112,7 +112,7 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map destination address\n");
+ dev_err(dev, "Failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
goto err_dst_addr;
}
@@ -149,7 +149,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
if (!src_addr) {
- dev_err(dev, "failed to allocate address\n");
+ dev_err(dev, "Failed to allocate address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
ret = -ENOMEM;
goto err;
@@ -158,7 +158,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map address\n");
+ dev_err(dev, "Failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
goto err_addr;
}
@@ -201,7 +201,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
if (!dst_addr) {
- dev_err(dev, "failed to allocate address\n");
+ dev_err(dev, "Failed to allocate address\n");
reg->status = STATUS_DST_ADDR_INVALID;
ret = -ENOMEM;
goto err;
@@ -210,7 +210,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
reg->size);
if (ret) {
- dev_err(dev, "failed to map address\n");
+ dev_err(dev, "Failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
goto err_addr;
}
@@ -230,7 +230,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
* wait 1ms in order for the write to complete. Without this delay, L3
* error is observed in the host system.
*/
- mdelay(1);
+ usleep_range(1000, 2000);
kfree(buf);
@@ -379,7 +379,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar);
- dev_err(dev, "failed to set BAR%d\n", bar);
+ dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
}
@@ -406,7 +406,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
test_reg_bar);
if (!base) {
- dev_err(dev, "failed to allocated register space\n");
+ dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
}
epf_test->reg[test_reg_bar] = base;
@@ -416,7 +416,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
continue;
base = pci_epf_alloc_space(epf, bar_size[bar], bar);
if (!base)
- dev_err(dev, "failed to allocate space for BAR%d\n",
+ dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
epf_test->reg[bar] = base;
}
@@ -435,9 +435,16 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (WARN_ON_ONCE(!epc))
return -EINVAL;
+ if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
+ epf_test->linkup_notifier = false;
+ else
+ epf_test->linkup_notifier = true;
+
+ epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
+
ret = pci_epc_write_header(epc, epf->func_no, header);
if (ret) {
- dev_err(dev, "configuration header write failed\n");
+ dev_err(dev, "Configuration header write failed\n");
return ret;
}
@@ -519,7 +526,7 @@ static int __init pci_epf_test_init(void)
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
ret = pci_epf_register_driver(&test_driver);
if (ret) {
- pr_err("failed to register pci epf test driver --> %d\n", ret);
+ pr_err("Failed to register pci epf test driver --> %d\n", ret);
return ret;
}
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 465b5f058b6df..523a8cab3bfba 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -15,6 +15,8 @@
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>
+static DEFINE_MUTEX(pci_epf_mutex);
+
static struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
@@ -143,7 +145,13 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
*/
void pci_epf_unregister_driver(struct pci_epf_driver *driver)
{
- pci_ep_cfs_remove_epf_group(driver->group);
+ struct config_group *group;
+
+ mutex_lock(&pci_epf_mutex);
+ list_for_each_entry(group, &driver->epf_group, group_entry)
+ pci_ep_cfs_remove_epf_group(group);
+ list_del(&driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
@@ -159,6 +167,8 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner)
{
int ret;
+ struct config_group *group;
+ const struct pci_epf_device_id *id;
if (!driver->ops)
return -EINVAL;
@@ -173,7 +183,16 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
if (ret)
return ret;
- driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
+ INIT_LIST_HEAD(&driver->epf_group);
+
+ id = driver->id_table;
+ while (id->name[0]) {
+ group = pci_ep_cfs_add_epf_group(id->name);
+ mutex_lock(&pci_epf_mutex);
+ list_add_tail(&group->group_entry, &driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
+ id++;
+ }
return 0;
}
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 0d0177ce436cc..a96e23bda6640 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -5,13 +5,14 @@ menu "PCI host controller drivers"
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
- depends on ARCH_MVEBU || ARCH_DOVE
+ depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
+ depends on MVEBU_MBUS
depends on ARM
depends on OF
config PCI_AARDVARK
bool "Aardvark PCIe controller"
- depends on ARCH_MVEBU && ARM64
+ depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
help
@@ -21,7 +22,7 @@ config PCI_AARDVARK
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
- depends on ARCH_ZYNQMP
+ depends on ARCH_ZYNQMP || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say 'Y' here if you want kernel support for Xilinx
@@ -32,12 +33,11 @@ config PCIE_XILINX_NWL
config PCI_FTPCI100
bool "Faraday Technology FTPCI100 PCI controller"
depends on OF
- depends on ARM
default ARCH_GEMINI
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want support for the PCIe host controller found
@@ -45,8 +45,8 @@ config PCI_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARM
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on ARM
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
@@ -54,7 +54,7 @@ config PCI_RCAR_GEN2
config PCIE_RCAR
bool "Renesas R-Car PCIe controller"
- depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe controller support on R-Car SoCs.
@@ -65,25 +65,25 @@ config PCI_HOST_COMMON
config PCI_HOST_GENERIC
bool "Generic PCI host controller"
- depends on (ARM || ARM64) && OF
+ depends on OF
select PCI_HOST_COMMON
select IRQ_DOMAIN
+ select PCI_DOMAINS
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
- depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC)
+ depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
help
Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
Host Bridge driver.
config PCI_XGENE
bool "X-Gene PCIe controller"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
- select PCIEPORTBUS
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
There are 5 internal PCIe ports available. Each port is GEN3 capable
@@ -101,7 +101,7 @@ config PCI_XGENE_MSI
config PCI_V3_SEMI
bool "V3 Semiconductor PCI controller"
depends on OF
- depends on ARM
+ depends on ARM || COMPILE_TEST
default ARCH_INTEGRATOR_AP
config PCI_VERSATILE
@@ -147,8 +147,7 @@ config PCIE_IPROC_MSI
config PCIE_ALTERA
bool "Altera PCIe controller"
- depends on ARM || NIOS2
- depends on OF_PCI
+ depends on ARM || NIOS2 || COMPILE_TEST
select PCI_DOMAINS
help
Say Y here if you want to enable PCIe controller support on Altera
@@ -164,7 +163,7 @@ config PCIE_ALTERA_MSI
config PCI_HOST_THUNDER_PEM
bool "Cavium Thunder PCIe controller to off-chip devices"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
@@ -172,29 +171,45 @@ config PCI_HOST_THUNDER_PEM
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
config PCIE_ROCKCHIP
- tristate "Rockchip PCIe controller"
+ bool
+ depends on PCI
+
+config PCIE_ROCKCHIP_HOST
+ tristate "Rockchip PCIe host controller"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
select MFD_SYSCON
+ select PCIE_ROCKCHIP
help
Say Y here if you want internal PCI support on Rockchip SoC.
There is 1 internal PCIe port available to support GEN2 with
4 slots.
+config PCIE_ROCKCHIP_EP
+ bool "Rockchip PCIe endpoint controller"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select MFD_SYSCON
+ select PCIE_ROCKCHIP
+ help
+ Say Y here if you want to support Rockchip PCIe controller in
+ endpoint mode on Rockchip SoC. There is 1 internal PCIe port
+ available to support GEN2 with 4 slots.
+
config PCIE_MEDIATEK
bool "MediaTek PCIe controller"
- depends on (ARM || ARM64) && (ARCH_MEDIATEK || COMPILE_TEST)
+ depends on ARCH_MEDIATEK || COMPILE_TEST
depends on OF
- depends on PCI
- select PCIEPORTBUS
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 3b10591908675..11d21b026d373 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
+obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 9abf549631b4d..d3172d5d3d352 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -19,6 +19,8 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include "../pci.h"
+
/* PCIe core registers */
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
@@ -822,14 +824,13 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct resource_entry *win, *tmp;
resource_size_t iobase;
INIT_LIST_HEAD(&pcie->resources);
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
- &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, &iobase);
if (err)
return err;
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 5008fd87956a9..a1ebe9ed441f0 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -28,6 +28,8 @@
#include <linux/irq.h>
#include <linux/clk.h>
+#include "../pci.h"
+
/*
* Special configuration registers directly in the first few words
* in I/O space.
@@ -476,8 +478,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
- &res, &io_base);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &res, &io_base);
if (ret)
return ret;
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 5d028f53fdcdb..d8f10451f2730 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -101,5 +101,18 @@ int pci_host_common_probe(struct platform_device *pdev,
return ret;
}
+ platform_set_drvdata(pdev, bridge->bus);
+ return 0;
+}
+
+int pci_host_common_remove(struct platform_device *pdev)
+{
+ struct pci_bus *bus = platform_get_drvdata(pdev);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bus);
+ pci_remove_root_bus(bus);
+ pci_unlock_rescan_remove();
+
return 0;
}
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 45319ee3b484b..dea3ec7592a23 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -95,5 +95,6 @@ static struct platform_driver gen_pci_driver = {
.suppress_bind_attrs = true,
},
.probe = gen_pci_probe,
+ .remove = pci_host_common_remove,
};
builtin_platform_driver(gen_pci_driver);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 50cdefe3f6d3c..6cc5036ac83cf 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -433,7 +433,7 @@ enum hv_pcibus_state {
struct hv_pcibus_device {
struct pci_sysdata sysdata;
enum hv_pcibus_state state;
- atomic_t remove_lock;
+ refcount_t remove_lock;
struct hv_device *hdev;
resource_size_t low_mmio_space;
resource_size_t high_mmio_space;
@@ -488,17 +488,6 @@ enum hv_pcichild_state {
hv_pcichild_maximum
};
-enum hv_pcidev_ref_reason {
- hv_pcidev_ref_invalid = 0,
- hv_pcidev_ref_initial,
- hv_pcidev_ref_by_slot,
- hv_pcidev_ref_packet,
- hv_pcidev_ref_pnp,
- hv_pcidev_ref_childlist,
- hv_pcidev_irqdata,
- hv_pcidev_ref_max
-};
-
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
@@ -548,14 +537,41 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp,
static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
u32 wslot);
-static void get_pcichild(struct hv_pci_dev *hv_pcidev,
- enum hv_pcidev_ref_reason reason);
-static void put_pcichild(struct hv_pci_dev *hv_pcidev,
- enum hv_pcidev_ref_reason reason);
+
+static void get_pcichild(struct hv_pci_dev *hpdev)
+{
+ refcount_inc(&hpdev->refs);
+}
+
+static void put_pcichild(struct hv_pci_dev *hpdev)
+{
+ if (refcount_dec_and_test(&hpdev->refs))
+ kfree(hpdev);
+}
static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+/*
+ * There is no good way to get notified from vmbus_onoffer_rescind(),
+ * so let's use polling here, since this is not a hot path.
+ */
+static int wait_for_response(struct hv_device *hdev,
+ struct completion *comp)
+{
+ while (true) {
+ if (hdev->channel->rescind) {
+ dev_warn_once(&hdev->device, "The device is gone.\n");
+ return -ENODEV;
+ }
+
+ if (wait_for_completion_timeout(comp, HZ / 10))
+ break;
+ }
+
+ return 0;
+}
+
/**
* devfn_to_wslot() - Convert from Linux PCI slot to Windows
* @devfn: The Linux representation of PCI slot
@@ -762,7 +778,7 @@ static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
_hv_pcifront_read_config(hpdev, where, size, val);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return PCIBIOS_SUCCESSFUL;
}
@@ -790,7 +806,7 @@ static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
_hv_pcifront_write_config(hpdev, where, size, val);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return PCIBIOS_SUCCESSFUL;
}
@@ -856,7 +872,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
}
hv_int_desc_free(hpdev, int_desc);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
}
static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
@@ -1186,13 +1202,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_lo = comp.int_desc.address & 0xffffffff;
msg->data = comp.int_desc.data;
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return;
free_int_desc:
kfree(int_desc);
drop_reference:
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return_null_message:
msg->address_hi = 0;
msg->address_lo = 0;
@@ -1283,7 +1299,6 @@ static u64 get_bar_size(u64 bar_val)
*/
static void survey_child_resources(struct hv_pcibus_device *hbus)
{
- struct list_head *iter;
struct hv_pci_dev *hpdev;
resource_size_t bar_size = 0;
unsigned long flags;
@@ -1309,8 +1324,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
* for a child device are a power of 2 in size and aligned in memory,
* so it's sufficient to just add them up without tracking alignment.
*/
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev, list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
for (i = 0; i < 6; i++) {
if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
dev_err(&hbus->hdev->device,
@@ -1363,7 +1377,6 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
resource_size_t low_base = 0;
resource_size_t bar_size;
struct hv_pci_dev *hpdev;
- struct list_head *iter;
unsigned long flags;
u64 bar_val;
u32 command;
@@ -1385,9 +1398,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
/* Pick addresses for the BARs. */
do {
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
for (i = 0; i < 6; i++) {
bar_val = hpdev->probed_bar[i];
if (bar_val == 0)
@@ -1508,19 +1519,6 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
complete(&completion->host_event);
}
-static void get_pcichild(struct hv_pci_dev *hpdev,
- enum hv_pcidev_ref_reason reason)
-{
- refcount_inc(&hpdev->refs);
-}
-
-static void put_pcichild(struct hv_pci_dev *hpdev,
- enum hv_pcidev_ref_reason reason)
-{
- if (refcount_dec_and_test(&hpdev->refs))
- kfree(hpdev);
-}
-
/**
* new_pcichild_device() - Create a new child device
* @hbus: The internal struct tracking this root PCI bus.
@@ -1568,24 +1566,14 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
if (ret)
goto error;
- wait_for_completion(&comp_pkt.host_event);
+ if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
+ goto error;
hpdev->desc = *desc;
refcount_set(&hpdev->refs, 1);
- get_pcichild(hpdev, hv_pcidev_ref_childlist);
+ get_pcichild(hpdev);
spin_lock_irqsave(&hbus->device_list_lock, flags);
- /*
- * When a device is being added to the bus, we set the PCI domain
- * number to be the device serial number, which is non-zero and
- * unique on the same VM. The serial numbers start with 1, and
- * increase by 1 for each device. So device names including this
- * can have shorter names than based on the bus instance UUID.
- * Only the first device serial number is used for domain, so the
- * domain number will not change after the first device is added.
- */
- if (list_empty(&hbus->children))
- hbus->sysdata.domain = desc->ser;
list_add_tail(&hpdev->list_entry, &hbus->children);
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
return hpdev;
@@ -1618,7 +1606,7 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
list_for_each_entry(iter, &hbus->children, list_entry) {
if (iter->desc.win_slot.slot == wslot) {
hpdev = iter;
- get_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ get_pcichild(hpdev);
break;
}
}
@@ -1654,7 +1642,6 @@ static void pci_devices_present_work(struct work_struct *work)
{
u32 child_no;
bool found;
- struct list_head *iter;
struct pci_function_description *new_desc;
struct hv_pci_dev *hpdev;
struct hv_pcibus_device *hbus;
@@ -1691,10 +1678,8 @@ static void pci_devices_present_work(struct work_struct *work)
/* First, mark all existing children as reported missing. */
spin_lock_irqsave(&hbus->device_list_lock, flags);
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
- hpdev->reported_missing = true;
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ hpdev->reported_missing = true;
}
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
@@ -1704,11 +1689,8 @@ static void pci_devices_present_work(struct work_struct *work)
new_desc = &dr->func[child_no];
spin_lock_irqsave(&hbus->device_list_lock, flags);
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
- if ((hpdev->desc.win_slot.slot ==
- new_desc->win_slot.slot) &&
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
(hpdev->desc.v_id == new_desc->v_id) &&
(hpdev->desc.d_id == new_desc->d_id) &&
(hpdev->desc.ser == new_desc->ser)) {
@@ -1730,12 +1712,10 @@ static void pci_devices_present_work(struct work_struct *work)
spin_lock_irqsave(&hbus->device_list_lock, flags);
do {
found = false;
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
if (hpdev->reported_missing) {
found = true;
- put_pcichild(hpdev, hv_pcidev_ref_childlist);
+ put_pcichild(hpdev);
list_move_tail(&hpdev->list_entry, &removed);
break;
}
@@ -1748,7 +1728,7 @@ static void pci_devices_present_work(struct work_struct *work)
hpdev = list_first_entry(&removed, struct hv_pci_dev,
list_entry);
list_del(&hpdev->list_entry);
- put_pcichild(hpdev, hv_pcidev_ref_initial);
+ put_pcichild(hpdev);
}
switch (hbus->state) {
@@ -1883,8 +1863,8 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
- put_pcichild(hpdev, hv_pcidev_ref_childlist);
- put_pcichild(hpdev, hv_pcidev_ref_pnp);
+ put_pcichild(hpdev);
+ put_pcichild(hpdev);
put_hvpcibus(hpdev->hbus);
}
@@ -1899,7 +1879,7 @@ static void hv_eject_device_work(struct work_struct *work)
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
hpdev->state = hv_pcichild_ejecting;
- get_pcichild(hpdev, hv_pcidev_ref_pnp);
+ get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
get_hvpcibus(hpdev->hbus);
queue_work(hpdev->hbus->wq, &hpdev->wrk);
@@ -1999,8 +1979,7 @@ static void hv_pci_onchannelcallback(void *context)
dev_message->wslot.slot);
if (hpdev) {
hv_pci_eject_device(hpdev);
- put_pcichild(hpdev,
- hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
}
break;
@@ -2069,15 +2048,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
sizeof(struct pci_version_request),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
if (ret) {
dev_err(&hdev->device,
- "PCI Pass-through VSP failed sending version reqquest: %#x",
+ "PCI Pass-through VSP failed to request version: %d",
ret);
goto exit;
}
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status >= 0) {
pci_protocol_version = pci_protocol_versions[i];
dev_info(&hdev->device,
@@ -2286,11 +2266,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
if (ret)
goto exit;
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -2330,11 +2311,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
0, VM_PKT_DATA_INBAND, 0);
- if (ret)
- return ret;
+ if (!ret)
+ ret = wait_for_response(hdev, &comp);
- wait_for_completion(&comp);
- return 0;
+ return ret;
}
/**
@@ -2398,17 +2378,17 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
ret = vmbus_sendpacket(hdev->channel, &pkt->message,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
if (ret)
break;
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status < 0) {
ret = -EPROTO;
dev_err(&hdev->device,
@@ -2446,7 +2426,7 @@ static int hv_send_resources_released(struct hv_device *hdev)
pkt.message_type.type = PCI_RESOURCES_RELEASED;
pkt.wslot.slot = hpdev->desc.win_slot.slot;
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
VM_PKT_DATA_INBAND, 0);
@@ -2459,12 +2439,12 @@ static int hv_send_resources_released(struct hv_device *hdev)
static void get_hvpcibus(struct hv_pcibus_device *hbus)
{
- atomic_inc(&hbus->remove_lock);
+ refcount_inc(&hbus->remove_lock);
}
static void put_hvpcibus(struct hv_pcibus_device *hbus)
{
- if (atomic_dec_and_test(&hbus->remove_lock))
+ if (refcount_dec_and_test(&hbus->remove_lock))
complete(&hbus->remove_event);
}
@@ -2508,7 +2488,7 @@ static int hv_pci_probe(struct hv_device *hdev,
hdev->dev_instance.b[8] << 8;
hbus->hdev = hdev;
- atomic_inc(&hbus->remove_lock);
+ refcount_set(&hbus->remove_lock, 1);
INIT_LIST_HEAD(&hbus->children);
INIT_LIST_HEAD(&hbus->dr_list);
INIT_LIST_HEAD(&hbus->resources_for_children);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 5d4dccfc9d813..23e270839e6a8 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -21,6 +21,8 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include "../pci.h"
+
/*
* PCIe unit register offsets.
*/
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index dd4f1a6b57c56..326171cb1a978 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -21,6 +21,8 @@
#include <linux/sizes.h>
#include <linux/slab.h>
+#include "../pci.h"
+
/* AHB-PCI Bridge PCI communication registers */
#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 389e74be846ce..f4f53d092e005 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -40,6 +40,8 @@
#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>
+#include "../pci.h"
+
#define INT_PCI_MSI_NR (8 * 32)
/* register definitions */
diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c
index 0a4dea7966638..68b8bfbdb867d 100644
--- a/drivers/pci/host/pci-v3-semi.c
+++ b/drivers/pci/host/pci-v3-semi.c
@@ -33,6 +33,8 @@
#include <linux/regmap.h>
#include <linux/clk.h>
+#include "../pci.h"
+
#define V3_PCI_VENDOR 0x00000000
#define V3_PCI_DEVICE 0x00000002
#define V3_PCI_CMD 0x00000004
@@ -791,7 +793,8 @@ static int v3_pci_probe(struct platform_device *pdev)
if (IS_ERR(v3->config_base))
return PTR_ERR(v3->config_base);
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &io_base);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &io_base);
if (ret)
return ret;
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 5b3876f5312bd..994f32061b325 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -15,6 +15,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
static void __iomem *versatile_pci_base;
static void __iomem *versatile_cfg_base[2];
@@ -64,11 +66,10 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *res)
{
int err, mem = 1, res_valid = 0;
- struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win, *tmp;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
if (err)
return err;
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 0a0d7ee6d3c92..d854d67e873cc 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -22,6 +22,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define PCIECORE_CTLANDSTATUS 0x50
#define PIM1_1L 0x80
#define IBAR2 0x98
@@ -632,7 +634,8 @@ static int xgene_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (ret)
return ret;
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index a6af62e0256dc..7d05e51205b38 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -17,6 +17,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define RP_TX_REG0 0x2000
#define RP_TX_REG1 0x2004
#define RP_TX_CNTRL 0x2008
@@ -488,11 +490,10 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct resource_entry *win;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
- NULL);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, NULL);
if (err)
return err;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index e764a2a2693ce..f30f5f3fb5c1e 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -16,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
+#include "../pci.h"
#include "pcie-iproc.h"
static const struct of_device_id iproc_pcie_of_match_table[] = {
@@ -99,8 +100,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
pcie->phy = NULL;
}
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
- &iobase);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
+ &iobase);
if (ret) {
dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
index a8b20c5012a98..0baabe30858fd 100644
--- a/drivers/pci/host/pcie-mediatek.c
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -11,8 +11,10 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
@@ -22,6 +24,8 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include "../pci.h"
+
/* PCIe shared registers */
#define PCIE_SYS_CFG 0x00
#define PCIE_INT_ENABLE 0x0c
@@ -66,6 +70,10 @@
/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR 0x0c0
+
+#define PCIE_CONF_VEND_ID 0x100
+#define PCIE_CONF_CLASS_ID 0x106
+
#define PCIE_INT_MASK 0x420
#define INTX_MASK GENMASK(19, 16)
#define INTX_SHIFT 16
@@ -125,13 +133,13 @@ struct mtk_pcie_port;
/**
* struct mtk_pcie_soc - differentiate between host generations
- * @has_msi: whether this host supports MSI interrupts or not
+ * @need_fix_class_id: whether this host's class ID needs to be fixed or not
* @ops: pointer to configuration access functions
* @startup: pointer to controller setting functions
* @setup_irq: pointer to initialize IRQ functions
*/
struct mtk_pcie_soc {
- bool has_msi;
+ bool need_fix_class_id;
struct pci_ops *ops;
int (*startup)(struct mtk_pcie_port *port);
int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
@@ -155,7 +163,9 @@ struct mtk_pcie_soc {
* @lane: lane count
* @slot: port slot
* @irq_domain: legacy INTx IRQ domain
+ * @inner_domain: inner IRQ domain
* @msi_domain: MSI IRQ domain
+ * @lock: protect the msi_irq_in_use bitmap
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
struct mtk_pcie_port {
@@ -173,7 +183,9 @@ struct mtk_pcie_port {
u32 lane;
u32 slot;
struct irq_domain *irq_domain;
+ struct irq_domain *inner_domain;
struct irq_domain *msi_domain;
+ struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
@@ -375,6 +387,7 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
struct resource *mem = &pcie->mem;
+ const struct mtk_pcie_soc *soc = port->pcie->soc;
u32 val;
size_t size;
int err;
@@ -403,6 +416,15 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
PCIE_MAC_SRSTB | PCIE_CRSTB;
writel(val, port->base + PCIE_RST_CTRL);
+ /* Set up vendor ID and class code */
+ if (soc->need_fix_class_id) {
+ val = PCI_VENDOR_ID_MEDIATEK;
+ writew(val, port->base + PCIE_CONF_VEND_ID);
+
+ val = PCI_CLASS_BRIDGE_HOST;
+ writew(val, port->base + PCIE_CONF_CLASS_ID);
+ }
+
/* 100ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
!!(val & PCIE_PORT_LINKUP_V2), 20,
@@ -430,103 +452,130 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
return 0;
}
-static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port)
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- int msi;
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr;
- msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
- if (msi < MTK_MSI_IRQS_NUM)
- set_bit(msi, port->msi_irq_in_use);
- else
- return -ENOSPC;
+ /* MT2712/MT7622 only support 32-bit MSI addresses */
+ addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+ msg->address_hi = 0;
+ msg->address_lo = lower_32_bits(addr);
+
+ msg->data = data->hwirq;
- return msi;
+ dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq)
+static int mtk_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- clear_bit(hwirq, port->msi_irq_in_use);
+ return -EINVAL;
}
-static int mtk_pcie_msi_setup_irq(struct msi_controller *chip,
- struct pci_dev *pdev, struct msi_desc *desc)
+static void mtk_msi_ack_irq(struct irq_data *data)
{
- struct mtk_pcie_port *port;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
- phys_addr_t msg_addr;
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ u32 hwirq = data->hwirq;
- port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
- if (!port)
- return -EINVAL;
+ writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
+}
- hwirq = mtk_pcie_msi_alloc(port);
- if (hwirq < 0)
- return hwirq;
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+ .name = "MTK MSI",
+ .irq_compose_msi_msg = mtk_compose_msi_msg,
+ .irq_set_affinity = mtk_msi_set_affinity,
+ .irq_ack = mtk_msi_ack_irq,
+};
- irq = irq_create_mapping(port->msi_domain, hwirq);
- if (!irq) {
- mtk_pcie_msi_free(port, hwirq);
- return -EINVAL;
- }
+static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct mtk_pcie_port *port = domain->host_data;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&port->lock);
- chip->dev = &pdev->dev;
+ bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+ if (bit >= MTK_MSI_IRQS_NUM) {
+ mutex_unlock(&port->lock);
+ return -ENOSPC;
+ }
- irq_set_msi_desc(irq, desc);
+ __set_bit(bit, port->msi_irq_in_use);
- /* MT2712/MT7622 only support 32-bit MSI addresses */
- msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
- msg.address_hi = 0;
- msg.address_lo = lower_32_bits(msg_addr);
- msg.data = hwirq;
+ mutex_unlock(&port->lock);
- pci_write_msi_msg(irq, &msg);
+ irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq,
+ NULL, NULL);
return 0;
}
-static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
- struct pci_dev *pdev = to_pci_dev(chip->dev);
- struct irq_data *d = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct mtk_pcie_port *port;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
- port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
- if (!port)
- return;
+ mutex_lock(&port->lock);
- irq_dispose_mapping(irq);
- mtk_pcie_msi_free(port, hwirq);
+ if (!test_bit(d->hwirq, port->msi_irq_in_use))
+ dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ else
+ __clear_bit(d->hwirq, port->msi_irq_in_use);
+
+ mutex_unlock(&port->lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static struct msi_controller mtk_pcie_msi_chip = {
- .setup_irq = mtk_pcie_msi_setup_irq,
- .teardown_irq = mtk_msi_teardown_irq,
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mtk_pcie_irq_domain_alloc,
+ .free = mtk_pcie_irq_domain_free,
};
static struct irq_chip mtk_msi_irq_chip = {
- .name = "MTK PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
+ .name = "MTK PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
};
-static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static struct msi_domain_info mtk_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &mtk_msi_irq_chip,
+};
+
+static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+
+ mutex_init(&port->lock);
+
+ port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
+ &msi_domain_ops, port);
+ if (!port->inner_domain) {
+ dev_err(port->pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
+ port->inner_domain);
+ if (!port->msi_domain) {
+ dev_err(port->pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(port->inner_domain);
+ return -ENOMEM;
+ }
return 0;
}
-static const struct irq_domain_ops msi_domain_ops = {
- .map = mtk_pcie_msi_map,
-};
-
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
u32 val;
@@ -559,6 +608,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
{
struct device *dev = port->pcie->dev;
struct device_node *pcie_intc_node;
+ int ret;
/* Setup INTx */
pcie_intc_node = of_get_next_child(node, NULL);
@@ -575,27 +625,28 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM,
- &msi_domain_ops,
- &mtk_pcie_msi_chip);
- if (!port->msi_domain) {
- dev_err(dev, "failed to create MSI IRQ domain\n");
- return -ENODEV;
- }
+ ret = mtk_pcie_allocate_msi_domains(port);
+ if (ret)
+ return ret;
+
mtk_pcie_enable_msi(port);
}
return 0;
}
-static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
+static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
- struct mtk_pcie_port *port = (struct mtk_pcie_port *)data;
+ struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
u32 virq;
u32 bit = INTX_SHIFT;
- while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) {
+ chained_irq_enter(irqchip, desc);
+
+ status = readl(port->base + PCIE_INT_STATUS);
+ if (status & INTX_MASK) {
for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
/* Clear the INTx */
writel(1 << bit, port->base + PCIE_INT_STATUS);
@@ -606,14 +657,12 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) {
+ if (status & MSI_STATUS) {
unsigned long imsi_status;
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
- /* Clear the MSI */
- writel(1 << bit, port->base + PCIE_IMSI_STATUS);
- virq = irq_find_mapping(port->msi_domain, bit);
+ virq = irq_find_mapping(port->inner_domain, bit);
generic_handle_irq(virq);
}
}
@@ -622,7 +671,9 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
}
}
- return IRQ_HANDLED;
+ chained_irq_exit(irqchip, desc);
+
+ return;
}
static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
@@ -633,20 +684,15 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
struct platform_device *pdev = to_platform_device(dev);
int err, irq;
- irq = platform_get_irq(pdev, port->slot);
- err = devm_request_irq(dev, irq, mtk_pcie_intr_handler,
- IRQF_SHARED, "mtk-pcie", port);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", irq);
- return err;
- }
-
err = mtk_pcie_init_irq_domain(port, node);
if (err) {
dev_err(dev, "failed to init PCIe IRQ domain\n");
return err;
}
+ irq = platform_get_irq(pdev, port->slot);
+ irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+
return 0;
}
@@ -1080,8 +1126,6 @@ static int mtk_pcie_register_host(struct pci_host_bridge *host)
host->map_irq = of_irq_parse_and_map_pci;
host->swizzle_irq = pci_common_swizzle;
host->sysdata = pcie;
- if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi)
- host->msi = &mtk_pcie_msi_chip;
err = pci_scan_root_bus_bridge(host);
if (err < 0)
@@ -1142,8 +1186,14 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
.startup = mtk_pcie_startup_port,
};
-static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
- .has_msi = true,
+static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_v2,
+ .setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
+ .need_fix_class_id = true,
.ops = &mtk_pcie_ops_v2,
.startup = mtk_pcie_startup_port_v2,
.setup_irq = mtk_pcie_setup_irq,
@@ -1152,8 +1202,8 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
static const struct of_device_id mtk_pcie_ids[] = {
{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
- { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 },
- { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 },
+ { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
+ { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
{},
};
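Reviewer note, for illustration only (not part of the patch): the per-port hwirq allocation introduced above in mtk_pcie_irq_domain_alloc()/mtk_pcie_irq_domain_free() is a mutex-protected first-fit search over a fixed-size bitmap. The stand-alone sketch below shows that allocation policy in plain C; MSI_IRQS_NUM is assumed to be 32 here purely for illustration (the driver uses MTK_MSI_IRQS_NUM and protects the bitmap with port->lock).

/*
 * Stand-alone sketch of the first-fit hwirq allocator used by
 * mtk_pcie_irq_domain_alloc()/_free().  MSI_IRQS_NUM = 32 is an
 * assumption for this example only.
 */
#include <stdio.h>
#include <stdint.h>

#define MSI_IRQS_NUM 32

static uint32_t msi_irq_in_use;

static int msi_alloc(void)
{
        int bit;

        for (bit = 0; bit < MSI_IRQS_NUM; bit++) {
                if (!(msi_irq_in_use & (1u << bit))) {
                        msi_irq_in_use |= 1u << bit;    /* __set_bit() */
                        return bit;                     /* hwirq number */
                }
        }
        return -1;                                      /* -ENOSPC */
}

static void msi_free(int bit)
{
        if (!(msi_irq_in_use & (1u << bit)))
                fprintf(stderr, "trying to free unused MSI#%d\n", bit);
        else
                msi_irq_in_use &= ~(1u << bit);         /* __clear_bit() */
}

int main(void)
{
        int a = msi_alloc(), b = msi_alloc();

        printf("allocated hwirqs %d and %d\n", a, b);   /* 0 and 1 */
        msi_free(a);
        printf("next allocation reuses %d\n", msi_alloc());  /* 0 again */
        return 0;
}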
diff --git a/drivers/pci/host/pcie-mobiveil.c b/drivers/pci/host/pcie-mobiveil.c
new file mode 100644
index 0000000000000..4d6c20e47bed4
--- /dev/null
+++ b/drivers/pci/host/pcie-mobiveil.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* register offsets and bit positions */
+
+/*
+ * translation tables are grouped into windows; each window's registers
+ * are grouped into blocks of 4 or 16 registers
+ */
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
+
+#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
+
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_EN 0xc00
+#define PAGE_SEL_OFFSET_SHIFT 10
+
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
+
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
+
+#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_INTX_MASK 0x01e0
+#define PAB_INTP_MSI_MASK 0x8
+
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+
+#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
+#define AXI_WINDOW_ALIGN_MASK 3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS 0x474
+
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START 5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI 16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+struct mobiveil_msi { /* MSI information */
+ struct mutex lock; /* protect bitmap variable */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ phys_addr_t msi_pages_phys;
+ int num_of_vectors;
+ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_pcie {
+ struct platform_device *pdev;
+ struct list_head resources;
+ void __iomem *config_axi_slave_base; /* endpoint config base */
+ void __iomem *csr_axi_slave_base; /* root port config base */
+ void __iomem *apb_csr_base; /* MSI register base */
+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
+ struct irq_domain *intx_domain;
+ raw_spinlock_t intx_mask_lock;
+ int irq;
+ int apio_wins;
+ int ppio_wins;
+ int ob_wins_configured; /* configured outbound windows */
+ int ib_wins_configured; /* configured inbound windows */
+ struct resource *ob_io_res;
+ char root_bus_nr;
+ struct mobiveil_msi msi;
+};
+
+static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->csr_axi_slave_base + reg);
+}
+
+static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->csr_axi_slave_base + reg);
+}
+
+static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+ return (csr_readl(pcie, LTSSM_STATUS) &
+ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+
+ /* Only one device down on each root port */
+ if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
+ return false;
+
+ /*
+ * Do not read more than one device on the bus directly
+ * attached to RC
+ */
+ if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
+ return false;
+
+ return true;
+}
+
+/*
+ * mobiveil_pcie_map_bus - routine to get the configuration base of either
+ * root port or endpoint
+ */
+static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+
+ if (!mobiveil_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ if (bus->number == pcie->root_bus_nr) {
+ /* RC config access */
+ return pcie->csr_axi_slave_base + where;
+ }
+
+ /*
+ * EP config access (in Config/APIO space)
+ * Program PEX Address base (31..16 bits) with appropriate value
+ * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
+ * Relies on pci_lock serialization
+ */
+ csr_writel(pcie, bus->number << PAB_BUS_SHIFT |
+ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
+ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT,
+ PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ return pcie->config_axi_slave_base + where;
+}
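Reviewer note, for illustration only (not part of the patch): before an endpoint config access, mobiveil_pcie_map_bus() programs the target BDF into PAB_AXI_AMAP_PEX_WIN_L(0) using the shifts defined above. The stand-alone sketch below shows the resulting register value; the example bus/device/function numbers are arbitrary.

/* Stand-alone sketch of the BDF encoding written to
 * PAB_AXI_AMAP_PEX_WIN_L(0); bus 1, device 0, function 0 is an
 * arbitrary example. */
#include <stdio.h>
#include <stdint.h>

#define PAB_BUS_SHIFT      24
#define PAB_DEVICE_SHIFT   19
#define PAB_FUNCTION_SHIFT 16

int main(void)
{
        uint32_t bus = 1, dev = 0, fn = 0;
        uint32_t win = bus << PAB_BUS_SHIFT |
                       dev << PAB_DEVICE_SHIFT |
                       fn << PAB_FUNCTION_SHIFT;

        printf("PAB_AXI_AMAP_PEX_WIN_L0 = 0x%08x\n", win); /* 0x01000000 */
        return 0;
}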
+
+static struct pci_ops mobiveil_pcie_ops = {
+ .map_bus = mobiveil_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void mobiveil_pcie_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct device *dev = &pcie->pdev->dev;
+ struct mobiveil_msi *msi = &pcie->msi;
+ u32 msi_data, msi_addr_lo, msi_addr_hi;
+ u32 intr_status, msi_status;
+ unsigned long shifted_status;
+ u32 bit, virq, val, mask;
+
+ /*
+ * The core provides a single interrupt for both INTx/MSI messages.
+ * So we'll read both INTx and MSI status
+ */
+
+ chained_irq_enter(chip, desc);
+
+ /* read INTx status */
+ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ intr_status = val & mask;
+
+ /* Handle INTx */
+ if (intr_status & PAB_INTP_INTX_MASK) {
+ shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >>
+ PAB_INTX_START;
+ do {
+ for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
+ virq = irq_find_mapping(pcie->intx_domain,
+ bit + 1);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err_ratelimited(dev,
+ "unexpected IRQ, INT%d\n", bit);
+
+ /* clear interrupt */
+ csr_writel(pcie,
+ shifted_status << PAB_INTX_START,
+ PAB_INTP_AMBA_MISC_STAT);
+ }
+ } while ((shifted_status >> PAB_INTX_START) != 0);
+ }
+
+ /* read extra MSI status register */
+ msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
+
+ /* handle MSI interrupts */
+ while (msi_status & 1) {
+ msi_data = readl_relaxed(pcie->apb_csr_base
+ + MSI_DATA_OFFSET);
+
+ /*
+ * The MSI_STATUS_OFFSET register is cleared only after both the
+ * MSI data and the MSI address have been popped from the hardware
+ * FIFO, hence the two dummy address reads below.
+ */
+ msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_L_OFFSET);
+ msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_H_OFFSET);
+ dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
+ msi_data, msi_addr_hi, msi_addr_lo);
+
+ virq = irq_find_mapping(msi->dev_domain, msi_data);
+ if (virq)
+ generic_handle_irq(virq);
+
+ msi_status = readl_relaxed(pcie->apb_csr_base +
+ MSI_STATUS_OFFSET);
+ }
+
+ /* Clear the interrupt status */
+ csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+ chained_irq_exit(chip, desc);
+}
+
+static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct platform_device *pdev = pcie->pdev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ const char *type;
+
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ /* map config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "config_axi_slave");
+ pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->config_axi_slave_base))
+ return PTR_ERR(pcie->config_axi_slave_base);
+ pcie->ob_io_res = res;
+
+ /* map csr resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "csr_axi_slave");
+ pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->csr_axi_slave_base))
+ return PTR_ERR(pcie->csr_axi_slave_base);
+ pcie->pcie_reg_base = res->start;
+
+ /* map MSI config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
+ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->apb_csr_base))
+ return PTR_ERR(pcie->apb_csr_base);
+
+ /* read the number of windows requested */
+ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
+ pcie->apio_wins = MAX_PIO_WINDOWS;
+
+ if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
+ pcie->ppio_wins = MAX_PIO_WINDOWS;
+
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq <= 0) {
+ dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
+
+ return 0;
+}
+
+/*
+ * select_paged_register - routine to access paged register of root complex
+ *
+ * The root complex registers are paged: the upper six bits of the
+ * register offset are written to the pg_sel field of the PAB_CTRL
+ * register, and the lower ten bits, combined with PAGE_SEL_EN, are
+ * used as the offset within the selected page.
+ */
+static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+ int pab_ctrl_dw, pg_sel;
+
+ /* clear pg_sel field */
+ pab_ctrl_dw = csr_readl(pcie, PAB_CTRL);
+ pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT));
+
+ /* set pg_sel field */
+ pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
+ pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT));
+ csr_writel(pcie, pab_ctrl_dw, PAB_CTRL);
+}
+
+static void write_paged_register(struct mobiveil_pcie *pcie,
+ u32 val, u32 offset)
+{
+ u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+ select_paged_register(pcie, offset);
+ csr_writel(pcie, val, off);
+}
+
+static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+ u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+ select_paged_register(pcie, offset);
+ return csr_readl(pcie, off);
+}
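Reviewer note, for illustration only (not part of the patch): the paged addressing implemented by select_paged_register()/write_paged_register()/read_paged_register() above splits every RC register offset into a page-select value and an in-page offset. The stand-alone sketch below walks through that split for PAB_PEX_AMAP_CTRL(0) = 0x4ba0.

/* Stand-alone sketch of the paged CSR addressing: the upper six offset
 * bits select the page, the low ten bits (with PAGE_SEL_EN set) form
 * the in-page offset.  0x4ba0 is used as an example offset. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SEL_MASK         0x3f
#define PAGE_LO_MASK          0x3ff
#define PAGE_SEL_EN           0xc00
#define PAGE_SEL_OFFSET_SHIFT 10

int main(void)
{
        uint32_t offset = 0x4ba0;
        uint32_t pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
        uint32_t off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;

        /* prints "page 0x12, in-page offset 0xfa0" */
        printf("page 0x%x, in-page offset 0x%x\n", pg_sel, off);
        return 0;
}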
+
+static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+ int pci_addr, u32 type, u64 size)
+{
+ int pio_ctrl_val;
+ int amap_ctrl_dw;
+ u64 size64 = ~(size - 1);
+
+ if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max inbound windows reached !\n");
+ return;
+ }
+
+ pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ csr_writel(pcie,
+ pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL);
+ amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT));
+ amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT));
+
+ write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64),
+ PAB_PEX_AMAP_CTRL(win_num));
+
+ write_paged_register(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+ write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
+ write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num));
+}
+
+/*
+ * routine to program the outbound windows
+ */
+static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size)
+{
+
+ u32 value, type;
+ u64 size64 = ~(size - 1);
+
+ if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max outbound windows reached !\n");
+ return;
+ }
+
+ /*
+ * program the window Enable bit, the window Type and the lower
+ * 32 bits of the encoded window size in the PAB_AXI_AMAP_CTRL register
+ */
+ type = config_io_bit;
+ value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num));
+
+ write_paged_register(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
+
+ /*
+ * program AXI window base with appropriate value in
+ * PAB_AXI_AMAP_AXI_WIN0 register
+ */
+ value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num));
+ csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+
+ value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ob_wins_configured++;
+}
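Reviewer note, for illustration only (not part of the patch): both program_ib_windows() and program_ob_windows() above encode the window size as ~(size - 1), i.e. a 64-bit mask whose clear low bits span the window, split into lower/upper 32-bit halves. The stand-alone sketch below shows the encoding for a 256 MB window.

/* Stand-alone sketch of the ~(size - 1) window size encoding used by
 * program_ib_windows()/program_ob_windows(); a 256 MB window is used
 * as an example. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t size = 256ULL * 1024 * 1024;
        uint64_t size64 = ~(size - 1);

        /* prints "lower 0xf0000000 upper 0xffffffff" */
        printf("lower 0x%x upper 0x%x\n",
               (uint32_t)(size64 & 0xffffffff), (uint32_t)(size64 >> 32));
        return 0;
}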
+
+static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (mobiveil_pcie_link_up(pcie))
+ return 0;
+
+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+ }
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+ return -ETIMEDOUT;
+}
+
+static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
+{
+ phys_addr_t msg_addr = pcie->pcie_reg_base;
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ pcie->msi.num_of_vectors = PCI_NUM_MSI;
+ msi->msi_pages_phys = (phys_addr_t)msg_addr;
+
+ writel_relaxed(lower_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
+ writel_relaxed(upper_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
+ writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
+ writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
+}
+
+static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+{
+ u32 value, pab_ctrl, type = 0;
+ int err;
+ struct resource_entry *win, *tmp;
+
+ err = mobiveil_bringup_link(pcie);
+ if (err) {
+ dev_info(&pcie->pdev->dev, "link bring-up failed\n");
+ return err;
+ }
+
+ /*
+ * enable I/O, memory and bus-master access in the Command register
+ * of the PAB config space
+ */
+ value = csr_readl(pcie, PCI_COMMAND);
+ csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER, PCI_COMMAND);
+
+ /*
+ * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
+ * register
+ */
+ pab_ctrl = csr_readl(pcie, PAB_CTRL);
+ csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) |
+ (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL);
+
+ csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
+
+ /*
+ * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
+ * PAB_AXI_PIO_CTRL Register
+ */
+ value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL);
+
+ /*
+ * program one outbound window for config reads and a default
+ * inbound window for all upstream traffic; the remaining outbound
+ * windows are configured according to the "ranges" property in the
+ * device tree
+ */
+
+ /* config outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE,
+ resource_size(pcie->ob_io_res));
+
+ /* memory inbound translation window */
+ program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
+ type = 0;
+ if (resource_type(win->res) == IORESOURCE_MEM)
+ type = MEM_WINDOW_TYPE;
+ if (resource_type(win->res) == IORESOURCE_IO)
+ type = IO_WINDOW_TYPE;
+ if (type) {
+ /* configure outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ win->res->start, 0, type,
+ resource_size(win->res));
+ }
+ }
+
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
+
+ return err;
+}
+
+static void mobiveil_mask_intx_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct mobiveil_pcie *pcie;
+ unsigned long flags;
+ u32 mask, shifted_val;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static void mobiveil_unmask_intx_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct mobiveil_pcie *pcie;
+ unsigned long flags;
+ u32 shifted_val, mask;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static struct irq_chip intx_irq_chip = {
+ .name = "mobiveil_pcie:intx",
+ .irq_enable = mobiveil_unmask_intx_irq,
+ .irq_disable = mobiveil_mask_intx_irq,
+ .irq_mask = mobiveil_mask_intx_irq,
+ .irq_unmask = mobiveil_unmask_intx_irq,
+};
+
+/* routine to setup the INTx related data */
+static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ return 0;
+}
+
+/* INTx domain operations structure */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mobiveil_pcie_intx_map,
+};
+
+static struct irq_chip mobiveil_msi_irq_chip = {
+ .name = "Mobiveil PCIe MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mobiveil_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .chip = &mobiveil_msi_irq_chip,
+};
+
+static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip mobiveil_msi_bottom_irq_chip = {
+ .name = "Mobiveil MSI",
+ .irq_compose_msi_msg = mobiveil_compose_msi_msg,
+ .irq_set_affinity = mobiveil_msi_set_affinity,
+};
+
+static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs, void *args)
+{
+ struct mobiveil_pcie *pcie = domain->host_data;
+ struct mobiveil_msi *msi = &pcie->msi;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&msi->lock);
+
+ bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
+ if (bit >= msi->num_of_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->msi_irq_in_use);
+
+ mutex_unlock(&msi->lock);
+
+ irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
+ domain->host_data, handle_level_irq,
+ NULL, NULL);
+ return 0;
+}
+
+static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (!test_bit(d->hwirq, msi->msi_irq_in_use)) {
+ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ } else {
+ __clear_bit(d->hwirq, msi->msi_irq_in_use);
+ }
+
+ mutex_unlock(&msi->lock);
+}
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mobiveil_irq_msi_domain_alloc,
+ .free = mobiveil_irq_msi_domain_free,
+};
+
+static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ mutex_init(&pcie->msi.lock);
+ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, pcie);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &mobiveil_msi_domain_info, msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ /* setup INTx */
+ pcie->intx_domain = irq_domain_add_linear(node,
+ PCI_NUM_INTX, &intx_domain_ops, pcie);
+
+ if (!pcie->intx_domain) {
+ dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ raw_spin_lock_init(&pcie->intx_mask_lock);
+
+ /* setup MSI */
+ ret = mobiveil_allocate_msi_domains(pcie);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+ struct mobiveil_pcie *pcie;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+ resource_size_t iobase;
+ int ret;
+
+ /* allocate the PCIe port */
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENODEV;
+
+ pcie = pci_host_bridge_priv(bridge);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+
+ ret = mobiveil_pcie_parse_dt(pcie);
+ if (ret) {
+ dev_err(dev, "Parsing DT failed, ret: %d\n", ret);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&pcie->resources);
+
+ /* parse the host bridge base addresses from the device tree file */
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, &iobase);
+ if (ret) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * configure all inbound and outbound windows and prepare the RC for
+ * config access
+ */
+ ret = mobiveil_host_init(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to initialize host\n");
+ goto error;
+ }
+
+ /* fixup for PCIe class register */
+ csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
+
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ goto error;
+ }
+
+ ret = devm_request_pci_bus_resources(dev, &pcie->resources);
+ if (ret)
+ goto error;
+
+ /* Initialize bridge */
+ list_splice_init(&pcie->resources, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = pcie->root_bus_nr;
+ bridge->ops = &mobiveil_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ /* setup the kernel resources for the newly added PCIe root bus */
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret)
+ goto error;
+
+ bus = bridge->bus;
+
+ pci_assign_unassigned_bus_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bus);
+
+ return 0;
+error:
+ pci_free_resource_list(&pcie->resources);
+ return ret;
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+ {.compatible = "mbvl,gpex40-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+ .probe = mobiveil_pcie_probe,
+ .driver = {
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 6ab28f29ac6a0..874d75c9ee4ac 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -11,6 +11,7 @@
* Author: Phil Edworthy <phil.edworthy@renesas.com>
*/
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -24,18 +25,23 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
-#define CONFIG_SEND_ENABLE (1 << 31)
+#define CONFIG_SEND_ENABLE BIT(31)
#define TYPE0 (0 << 8)
-#define TYPE1 (1 << 8)
+#define TYPE1 BIT(8)
#define PCIECDR 0x000020
#define PCIEMSR 0x000028
#define PCIEINTXR 0x000400
+#define PCIEPHYSR 0x0007f0
+#define PHYRDY BIT(0)
#define PCIEMSITXR 0x000840
/* Transfer control */
@@ -44,7 +50,7 @@
#define PCIETSTR 0x02004
#define DATA_LINK_ACTIVE 1
#define PCIEERRFR 0x02020
-#define UNSUPPORTED_REQUEST (1 << 4)
+#define UNSUPPORTED_REQUEST BIT(4)
#define PCIEMSIFR 0x02044
#define PCIEMSIALR 0x02048
#define MSIFE 1
@@ -57,17 +63,17 @@
/* local address reg & mask */
#define PCIELAR(x) (0x02200 + ((x) * 0x20))
#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
-#define LAM_PREFETCH (1 << 3)
-#define LAM_64BIT (1 << 2)
-#define LAR_ENABLE (1 << 1)
+#define LAM_PREFETCH BIT(3)
+#define LAM_64BIT BIT(2)
+#define LAR_ENABLE BIT(1)
/* PCIe address reg & mask */
#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
-#define PAR_ENABLE (1 << 31)
-#define IO_SPACE (1 << 8)
+#define PAR_ENABLE BIT(31)
+#define IO_SPACE BIT(8)
/* Configuration */
#define PCICONF(x) (0x010000 + ((x) * 0x4))
@@ -79,47 +85,46 @@
#define IDSETR1 0x011004
#define TLCTLR 0x011048
#define MACSR 0x011054
-#define SPCHGFIN (1 << 4)
-#define SPCHGFAIL (1 << 6)
-#define SPCHGSUC (1 << 7)
+#define SPCHGFIN BIT(4)
+#define SPCHGFAIL BIT(6)
+#define SPCHGSUC BIT(7)
#define LINK_SPEED (0xf << 16)
#define LINK_SPEED_2_5GTS (1 << 16)
#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
-#define SPEED_CHANGE (1 << 24)
-#define SCRAMBLE_DISABLE (1 << 27)
+#define SPEED_CHANGE BIT(24)
+#define SCRAMBLE_DISABLE BIT(27)
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
-#define SPCNGRSN (1 << 31)
+#define SPCNGRSN BIT(31)
/* R-Car H1 PHY */
#define H1_PCIEPHYADRR 0x04000c
-#define WRITE_CMD (1 << 16)
-#define PHY_ACK (1 << 24)
+#define WRITE_CMD BIT(16)
+#define PHY_ACK BIT(24)
#define RATE_POS 12
#define LANE_POS 8
#define ADR_POS 0
#define H1_PCIEPHYDOUTR 0x040014
-#define H1_PCIEPHYSR 0x040018
/* R-Car Gen2 PHY */
#define GEN2_PCIEPHYADDR 0x780
#define GEN2_PCIEPHYDATA 0x784
#define GEN2_PCIEPHYCTRL 0x78c
-#define INT_PCI_MSI_NR 32
+#define INT_PCI_MSI_NR 32
-#define RCONF(x) (PCICONF(0)+(x))
-#define RPMCAP(x) (PMCAP(0)+(x))
-#define REXPCAP(x) (EXPCAP(0)+(x))
-#define RVCCAP(x) (VCCAP(0)+(x))
+#define RCONF(x) (PCICONF(0) + (x))
+#define RPMCAP(x) (PMCAP(0) + (x))
+#define REXPCAP(x) (EXPCAP(0) + (x))
+#define RVCCAP(x) (VCCAP(0) + (x))
-#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
-#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
-#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
-#define RCAR_PCI_MAX_RESOURCES 4
-#define MAX_NR_INBOUND_MAPS 6
+#define RCAR_PCI_MAX_RESOURCES 4
+#define MAX_NR_INBOUND_MAPS 6
struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
@@ -139,10 +144,10 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
/* Structure representing the PCIe interface */
struct rcar_pcie {
struct device *dev;
+ struct phy *phy;
void __iomem *base;
struct list_head resources;
int root_bus_nr;
- struct clk *clk;
struct clk *bus_clk;
struct rcar_msi msi;
};
@@ -527,12 +532,12 @@ static void phy_write_reg(struct rcar_pcie *pcie,
phy_wait_for_ack(pcie);
}
-static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
{
unsigned int timeout = 10;
while (timeout--) {
- if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
return 0;
msleep(5);
@@ -541,6 +546,21 @@ static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
return -ETIMEDOUT;
}
+static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10000;
+
+ while (timeout--) {
+ if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ return 0;
+
+ udelay(5);
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
int err;
@@ -551,6 +571,10 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
/* Set mode */
rcar_pci_write_reg(pcie, 1, PCIEMSR);
+ err = rcar_pcie_wait_for_phyrdy(pcie);
+ if (err)
+ return err;
+
/*
* Initial header for port config space is type 1, set the device
* class to match. Hardware takes care of propagating the IDSETR
@@ -605,10 +629,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
return 0;
}
-static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
+static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
{
- unsigned int timeout = 10;
-
/* Initialize the phy */
phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
@@ -627,17 +649,10 @@ static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
- while (timeout--) {
- if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
- return rcar_pcie_hw_init(pcie);
-
- msleep(5);
- }
-
- return -ETIMEDOUT;
+ return 0;
}
-static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
+static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie)
{
/*
* These settings come from the R-Car Series, 2nd Generation User's
@@ -654,7 +669,18 @@ static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
- return rcar_pcie_hw_init(pcie);
+ return 0;
+}
+
+static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
+{
+ int err;
+
+ err = phy_init(pcie->phy);
+ if (err)
+ return err;
+
+ return phy_power_on(pcie->phy);
}
static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -842,6 +868,20 @@ static const struct irq_domain_ops msi_domain_ops = {
.map = rcar_msi_map,
};
+static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie)
+{
+ struct rcar_msi *msi = &pcie->msi;
+ int i, irq;
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++) {
+ irq = irq_find_mapping(msi->domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(msi->domain);
+}
+
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -896,16 +936,35 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
return 0;
err:
- irq_domain_remove(msi->domain);
+ rcar_pcie_unmap_msi(pcie);
return err;
}
+static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
+{
+ struct rcar_msi *msi = &pcie->msi;
+
+ /* Disable all MSI interrupts */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+ /* Disable address decoding of the MSI interrupt, MSIFE */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
+
+ free_pages(msi->pages, 0);
+
+ rcar_pcie_unmap_msi(pcie);
+}
+
static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct resource res;
int err, i;
+ pcie->phy = devm_phy_optional_get(dev, "pcie");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
return err;
@@ -914,30 +973,17 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- pcie->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(pcie->clk)) {
- dev_err(dev, "cannot get platform clock\n");
- return PTR_ERR(pcie->clk);
- }
- err = clk_prepare_enable(pcie->clk);
- if (err)
- return err;
-
pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(pcie->bus_clk)) {
dev_err(dev, "cannot get pcie bus clock\n");
- err = PTR_ERR(pcie->bus_clk);
- goto fail_clk;
+ return PTR_ERR(pcie->bus_clk);
}
- err = clk_prepare_enable(pcie->bus_clk);
- if (err)
- goto fail_clk;
i = irq_of_parse_and_map(dev->of_node, 0);
if (!i) {
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
- goto err_map_reg;
+ goto err_irq1;
}
pcie->msi.irq1 = i;
@@ -945,17 +991,15 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
if (!i) {
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
- goto err_map_reg;
+ goto err_irq2;
}
pcie->msi.irq2 = i;
return 0;
-err_map_reg:
- clk_disable_unprepare(pcie->bus_clk);
-fail_clk:
- clk_disable_unprepare(pcie->clk);
-
+err_irq2:
+ irq_dispose_mapping(pcie->msi.irq1);
+err_irq1:
return err;
}
@@ -1051,63 +1095,28 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
}
static const struct of_device_id rcar_pcie_of_match[] = {
- { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
+ { .compatible = "renesas,pcie-r8a7779",
+ .data = rcar_pcie_phy_init_h1 },
{ .compatible = "renesas,pcie-r8a7790",
- .data = rcar_pcie_hw_init_gen2 },
+ .data = rcar_pcie_phy_init_gen2 },
{ .compatible = "renesas,pcie-r8a7791",
- .data = rcar_pcie_hw_init_gen2 },
+ .data = rcar_pcie_phy_init_gen2 },
{ .compatible = "renesas,pcie-rcar-gen2",
- .data = rcar_pcie_hw_init_gen2 },
- { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
- { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7795",
+ .data = rcar_pcie_phy_init_gen3 },
+ { .compatible = "renesas,pcie-rcar-gen3",
+ .data = rcar_pcie_phy_init_gen3 },
{},
};
-static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
-{
- int err;
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- resource_size_t iobase;
- struct resource_entry *win, *tmp;
-
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
- &iobase);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, &pci->resources);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
- struct resource *res = win->res;
-
- if (resource_type(res) == IORESOURCE_IO) {
- err = pci_remap_iospace(res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
-
- resource_list_destroy_entry(win);
- }
- }
- }
-
- return 0;
-
-out_release_res:
- pci_free_resource_list(&pci->resources);
- return err;
-}
-
static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_pcie *pcie;
unsigned int data;
int err;
- int (*hw_init_fn)(struct rcar_pcie *);
+ int (*phy_init_fn)(struct rcar_pcie *);
struct pci_host_bridge *bridge;
bridge = pci_alloc_host_bridge(sizeof(*pcie));
@@ -1118,36 +1127,45 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
- INIT_LIST_HEAD(&pcie->resources);
-
- err = rcar_pcie_parse_request_of_pci_ranges(pcie);
+ err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
if (err)
goto err_free_bridge;
+ pm_runtime_enable(pcie->dev);
+ err = pm_runtime_get_sync(pcie->dev);
+ if (err < 0) {
+ dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+ goto err_pm_disable;
+ }
+
err = rcar_pcie_get_resources(pcie);
if (err < 0) {
dev_err(dev, "failed to request resources: %d\n", err);
- goto err_free_resource_list;
+ goto err_pm_put;
+ }
+
+ err = clk_prepare_enable(pcie->bus_clk);
+ if (err) {
+ dev_err(dev, "failed to enable bus clock: %d\n", err);
+ goto err_unmap_msi_irqs;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
- goto err_free_resource_list;
+ goto err_clk_disable;
- pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
- if (err < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
- goto err_pm_disable;
+ phy_init_fn = of_device_get_match_data(dev);
+ err = phy_init_fn(pcie);
+ if (err) {
+ dev_err(dev, "failed to init PCIe PHY\n");
+ goto err_clk_disable;
}
/* Failure to get a link might just be that no cards are inserted */
- hw_init_fn = of_device_get_match_data(dev);
- err = hw_init_fn(pcie);
- if (err) {
+ if (rcar_pcie_hw_init(pcie)) {
dev_info(dev, "PCIe link down\n");
err = -ENODEV;
- goto err_pm_put;
+ goto err_clk_disable;
}
data = rcar_pci_read_reg(pcie, MACSR);
@@ -1159,24 +1177,34 @@ static int rcar_pcie_probe(struct platform_device *pdev)
dev_err(dev,
"failed to enable MSI support: %d\n",
err);
- goto err_pm_put;
+ goto err_clk_disable;
}
}
err = rcar_pcie_enable(pcie);
if (err)
- goto err_pm_put;
+ goto err_msi_teardown;
return 0;
+err_msi_teardown:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pcie_teardown_msi(pcie);
+
+err_clk_disable:
+ clk_disable_unprepare(pcie->bus_clk);
+
+err_unmap_msi_irqs:
+ irq_dispose_mapping(pcie->msi.irq2);
+ irq_dispose_mapping(pcie->msi.irq1);
+
err_pm_put:
pm_runtime_put(dev);
err_pm_disable:
pm_runtime_disable(dev);
-
-err_free_resource_list:
pci_free_resource_list(&pcie->resources);
+
err_free_bridge:
pci_free_host_bridge(bridge);
diff --git a/drivers/pci/host/pcie-rockchip-ep.c b/drivers/pci/host/pcie-rockchip-ep.c
new file mode 100644
index 0000000000000..fc267a49a932e
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-ep.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe endpoint controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/configfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epf.h>
+#include <linux/sizes.h>
+
+#include "pcie-rockchip.h"
+
+/**
+ * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
+ * @rockchip: Rockchip PCIe controller
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * dedicated outbound region is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ * the sending of a memory write (MSI) / normal message (legacy
+ * IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ * the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct rockchip_pcie_ep {
+ struct rockchip_pcie rockchip;
+ struct pci_epc *epc;
+ u32 max_regions;
+ unsigned long ob_region_map;
+ phys_addr_t *ob_addr;
+ phys_addr_t irq_phys_addr;
+ void __iomem *irq_cpu_addr;
+ u64 irq_pci_addr;
+ u8 irq_pci_fn;
+ u8 irq_pending;
+};
+
+static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
+ u32 region)
+{
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region));
+}
+
+static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
+ u32 r, u32 type, u64 cpu_addr,
+ u64 pci_addr, size_t size)
+{
+ u64 sz = 1ULL << fls64(size - 1);
+ int num_pass_bits = ilog2(sz);
+ u32 addr0, addr1, desc0, desc1;
+ bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG);
+
+ /* The minimal region size is 1MB */
+ if (num_pass_bits < 8)
+ num_pass_bits = 8;
+
+ cpu_addr -= rockchip->mem_res->start;
+ addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) &
+ PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+ (lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+ addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr);
+ desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type;
+ desc1 = 0;
+
+ if (is_nor_msg) {
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+ rockchip_pcie_write(rockchip, desc0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+ rockchip_pcie_write(rockchip, desc1,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+ } else {
+ /* PCI bus address region */
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+ rockchip_pcie_write(rockchip, desc0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+ rockchip_pcie_write(rockchip, desc1,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+
+ addr0 =
+ ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+ (lower_32_bits(cpu_addr) &
+ PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+ addr1 = upper_32_bits(cpu_addr);
+ }
+
+ /* CPU bus address region */
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
+}
+
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+ struct pci_epf_header *hdr)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ /* All functions share the same vendor ID with function 0 */
+ if (fn == 0) {
+ u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
+ (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
+
+ rockchip_pcie_write(rockchip, vid_regs,
+ PCIE_CORE_CONFIG_VENDOR);
+ }
+
+ rockchip_pcie_write(rockchip, hdr->deviceid << 16,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID);
+
+ rockchip_pcie_write(rockchip,
+ hdr->revid |
+ hdr->progif_code << 8 |
+ hdr->subclass_code << 16 |
+ hdr->baseclass_code << 24,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
+ rockchip_pcie_write(rockchip, hdr->cache_line_size,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_CACHE_LINE_SIZE);
+ rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_SUBSYSTEM_VENDOR_ID);
+ rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_INTERRUPT_LINE);
+
+ return 0;
+}
+
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ dma_addr_t bar_phys = epf_bar->phys_addr;
+ enum pci_barno bar = epf_bar->barno;
+ int flags = epf_bar->flags;
+ u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+ u64 sz;
+
+ /* BAR size is 2^(aperture + 7) */
+ sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);
+
+ /*
+ * roundup_pow_of_two() returns an unsigned long, which is not suited
+ * for 64bit values.
+ */
+ sz = 1ULL << fls64(sz - 1);
+ aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
+
+ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
+ } else {
+ bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+ bool is_64bits = sz > SZ_2G;
+
+ if (is_64bits && (bar & 1))
+ return -EINVAL;
+
+ if (is_64bits && is_prefetch)
+ ctrl =
+ ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+ else if (is_prefetch)
+ ctrl =
+ ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+ else if (is_64bits)
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
+ else
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
+ }
+
+ if (bar < BAR_4) {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+
+ addr0 = lower_32_bits(bar_phys);
+ addr1 = upper_32_bits(bar_phys);
+
+ cfg = rockchip_pcie_read(rockchip, reg);
+ cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+
+ rockchip_pcie_write(rockchip, cfg, reg);
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+
+ return 0;
+}
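Reviewer note, for illustration only (not part of the patch): rockchip_pcie_ep_set_bar() above rounds the requested BAR size up to a power of two and stores it as an aperture exponent, with size = 2^(aperture + 7). The stand-alone sketch below reproduces that arithmetic for a 1 MB BAR (aperture 13); fls64_like() stands in for the kernel's fls64()/ilog2() helpers.

/* Stand-alone sketch of the BAR aperture encoding: round the size up
 * to a power of two, then aperture = ilog2(size) - 7.  A 1 MB BAR is
 * used as an example. */
#include <stdio.h>
#include <stdint.h>

static int fls64_like(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
        uint64_t req = 1024 * 1024;                     /* requested BAR size */
        uint64_t sz = 1ULL << fls64_like(req - 1);      /* round up to pow2 */
        unsigned int aperture = fls64_like(sz) - 1 - 7; /* ilog2(sz) - 7 */

        printf("size %llu -> aperture %u\n",
               (unsigned long long)sz, aperture);       /* 1048576 -> 13 */
        return 0;
}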
+
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 reg, cfg, b, ctrl;
+ enum pci_barno bar = epf_bar->barno;
+
+ if (bar < BAR_4) {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
+ cfg = rockchip_pcie_read(rockchip, reg);
+ cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+
+ rockchip_pcie_write(rockchip, cfg, reg);
+ rockchip_pcie_write(rockchip, 0x0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+ rockchip_pcie_write(rockchip, 0x0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+}
+
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr, u64 pci_addr,
+ size_t size)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *pcie = &ep->rockchip;
+ u32 r;
+
+ r = find_first_zero_bit(&ep->ob_region_map,
+ sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ /*
+ * Region 0 is reserved for configuration space and shouldn't
+ * be used elsewhere per TRM, so leave it out.
+ */
+ if (r >= ep->max_regions - 1) {
+ dev_err(&epc->dev, "no free outbound region\n");
+ return -EINVAL;
+ }
+
+ rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr,
+ pci_addr, size);
+
+ set_bit(r, &ep->ob_region_map);
+ ep->ob_addr[r] = addr;
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 r;
+
+ for (r = 0; r < ep->max_regions - 1; r++)
+ if (ep->ob_addr[r] == addr)
+ break;
+
+ /*
+ * Region 0 is reserved for configuration space and shouldn't
+ * be used elsewhere per TRM, so leave it out.
+ */
+ if (r == ep->max_regions - 1)
+ return;
+
+ rockchip_pcie_clear_ep_ob_atu(rockchip, r);
+
+ ep->ob_addr[r] = 0;
+ clear_bit(r, &ep->ob_region_map);
+}
+
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+ u8 multi_msg_cap)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u16 flags;
+
+ flags = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
+ flags |=
+ ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ PCI_MSI_FLAGS_64BIT;
+ flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
+ rockchip_pcie_write(rockchip, flags,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ return 0;
+}
+
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u16 flags;
+
+ flags = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+ return -EINVAL;
+
+ return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+}
+
+static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx, bool is_asserted)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 r = ep->max_regions - 1;
+ u32 offset;
+ u16 status;
+ u8 msg_code;
+
+ if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
+ ep->irq_pci_fn != fn)) {
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
+ AXI_WRAPPER_NOR_MSG,
+ ep->irq_phys_addr, 0, 0);
+ ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR;
+ ep->irq_pci_fn = fn;
+ }
+
+ intx &= 3;
+ if (is_asserted) {
+ ep->irq_pending |= BIT(intx);
+ msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx;
+ } else {
+ ep->irq_pending &= ~BIT(intx);
+ msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx;
+ }
+
+ status = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+ status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+
+ if ((status != 0) ^ (ep->irq_pending != 0)) {
+ status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+ rockchip_pcie_write(rockchip, status,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+ }
+
+ offset =
+ ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) |
+ ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA;
+ writel(0, ep->irq_cpu_addr + offset);
+}
+
+static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx)
+{
+ u16 cmd;
+
+ cmd = rockchip_pcie_read(&ep->rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ return -EINVAL;
+
+ /*
+ * The TRM vaguely states that some delay, dependent on a number of
+ * AHB bus clock cycles, is required between asserting and
+ * de-asserting INTx, so insert a generous 1 ms delay here.
+ */
+ rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
+ mdelay(1);
+ rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
+ return 0;
+}
+
+static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 interrupt_num)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u16 flags, mme, data, data_mask;
+ u8 msi_count;
+ u64 pci_addr, pci_addr_mask = 0xff;
+
+ /* Check MSI enable bit */
+ flags = rockchip_pcie_read(&ep->rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+ return -EINVAL;
+
+ /* Get the number of enabled MSI vectors from MME */
+ mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Set MSI private data */
+ data_mask = msi_count - 1;
+ data = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_DATA_64);
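+ /* The low MME bits of the MSI data word select the vector number. */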
+ data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
+
+ /* Get MSI PCI address */
+ pci_addr = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_ADDRESS_LO);
+ pci_addr &= GENMASK_ULL(63, 2);
+
+ /* Set the outbound region if needed. */
+ if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
+ ep->irq_pci_fn != fn)) {
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1,
+ AXI_WRAPPER_MEM_WRITE,
+ ep->irq_phys_addr,
+ pci_addr & ~pci_addr_mask,
+ pci_addr_mask + 1);
+ ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
+ ep->irq_pci_fn = fn;
+ }
+
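+ /*
+ * The MSI write goes through a 256-byte outbound window; the low bits
+ * of the PCI address select the offset within it.
+ */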
+ writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+ return 0;
+}
+
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+ enum pci_epc_irq_type type,
+ u8 interrupt_num)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
+ case PCI_EPC_IRQ_MSI:
+ return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rockchip_pcie_ep_start(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct pci_epf *epf;
+ u32 cfg;
+
+ cfg = BIT(0);
+ list_for_each_entry(epf, &epc->pci_epf, list)
+ cfg |= BIT(epf->func_no);
+
+ rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+
+ list_for_each_entry(epf, &epc->pci_epf, list)
+ pci_epf_linkup(epf);
+
+ return 0;
+}
+
+static const struct pci_epc_ops rockchip_pcie_epc_ops = {
+ .write_header = rockchip_pcie_ep_write_header,
+ .set_bar = rockchip_pcie_ep_set_bar,
+ .clear_bar = rockchip_pcie_ep_clear_bar,
+ .map_addr = rockchip_pcie_ep_map_addr,
+ .unmap_addr = rockchip_pcie_ep_unmap_addr,
+ .set_msi = rockchip_pcie_ep_set_msi,
+ .get_msi = rockchip_pcie_ep_get_msi,
+ .raise_irq = rockchip_pcie_ep_raise_irq,
+ .start = rockchip_pcie_ep_start,
+};
+
+static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
+ struct rockchip_pcie_ep *ep)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_get_phys(rockchip);
+ if (err)
+ return err;
+
+ err = of_property_read_u32(dev->of_node,
+ "rockchip,max-outbound-regions",
+ &ep->max_regions);
+ if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
+ ep->max_regions = MAX_REGION_LIMIT;
+
+ err = of_property_read_u8(dev->of_node, "max-functions",
+ &ep->epc->max_functions);
+ if (err < 0)
+ ep->epc->max_functions = 1;
+
+ return 0;
+}
+
+static const struct of_device_id rockchip_pcie_ep_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie-ep"},
+ {},
+};
+
+static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie_ep *ep;
+ struct rockchip_pcie *rockchip;
+ struct pci_epc *epc;
+ size_t max_regions;
+ int err;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ rockchip = &ep->rockchip;
+ rockchip->is_rc = false;
+ rockchip->dev = dev;
+
+ epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ err = rockchip_pcie_parse_ep_dt(rockchip, ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ goto err_disable_clocks;
+
+ /* Establish the link automatically */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ max_regions = ep->max_regions;
+ ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr),
+ GFP_KERNEL);
+
+ if (!ep->ob_addr) {
+ err = -ENOMEM;
+ goto err_uninit_port;
+ }
+
+ /* Only enable function 0 by default */
+ rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+
+ err = pci_epc_mem_init(epc, rockchip->mem_res->start,
+ resource_size(rockchip->mem_res));
+ if (err < 0) {
+ dev_err(dev, "failed to initialize the memory space\n");
+ goto err_uninit_port;
+ }
+
+ ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+ SZ_128K);
+ if (!ep->irq_cpu_addr) {
+ dev_err(dev, "failed to reserve memory space for MSI\n");
+ err = -ENOMEM;
+ goto err_epc_mem_exit;
+ }
+
+ ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+
+ return 0;
+err_epc_mem_exit:
+ pci_epc_mem_exit(epc);
+err_uninit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_disable_clocks:
+ rockchip_pcie_disable_clocks(rockchip);
+ return err;
+}
+
+static struct platform_driver rockchip_pcie_ep_driver = {
+ .driver = {
+ .name = "rockchip-pcie-ep",
+ .of_match_table = rockchip_pcie_ep_of_match,
+ },
+ .probe = rockchip_pcie_ep_probe,
+};
+
+builtin_platform_driver(rockchip_pcie_ep_driver);
diff --git a/drivers/pci/host/pcie-rockchip-host.c b/drivers/pci/host/pcie-rockchip-host.c
new file mode 100644
index 0000000000000..1372d270764f9
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-host.c
@@ -0,0 +1,1142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#include "../pci.h"
+#include "pcie-rockchip.h"
+
+static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+
+ /* Update Tx credit maximum update interval */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
+ val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
+ val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
+ rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
+}
+
+static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, int dev)
+{
+ /* access only one slot on each root port */
+ if (bus->number == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ /*
+ * only access device 0 on the bus directly attached to the RC's
+ * downstream side.
+ */
+ if (bus->primary == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ return 1;
+}
+
+static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+ u8 map;
+
+ if (rockchip->legacy_phy)
+ return GENMASK(MAX_LANE_NUM - 1, 0);
+
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
+ map = val & PCIE_CORE_LANE_MAP_MASK;
+
+ /* The link may be using a reverse-indexed mapping. */
+ if (val & PCIE_CORE_LANE_MAP_REVERSE)
+ map = bitrev8(map) >> 4;
+
+ return map;
+}
+
+static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(addr);
+ } else if (size == 2) {
+ *val = readw(addr);
+ } else if (size == 1) {
+ *val = readb(addr);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 val)
+{
+ u32 mask, tmp, offset;
+ void __iomem *addr;
+
+ offset = where & ~0x3;
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+
+ if (size == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+
+ /*
+ * N.B. This read/modify/write isn't safe in general because it can
+ * corrupt RW1C bits in adjacent registers. But the hardware
+ * doesn't support smaller writes.
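+ * For example, a 2-byte write at (where & 0x3) == 2 yields
+ * mask == 0x0000ffff, so only bits [31:16] of the word are replaced.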
+ */
+ tmp = readl(addr) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ if (!IS_ALIGNED(busdev, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (bus->parent->number == rockchip->root_bus_nr)
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
+ if (size == 4) {
+ *val = readl(rockchip->reg_base + busdev);
+ } else if (size == 2) {
+ *val = readw(rockchip->reg_base + busdev);
+ } else if (size == 1) {
+ *val = readb(rockchip->reg_base + busdev);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+ if (!IS_ALIGNED(busdev, size))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (bus->parent->number == rockchip->root_bus_nr)
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
+ if (size == 4)
+ writel(val, rockchip->reg_base + busdev);
+ else if (size == 2)
+ writew(val, rockchip->reg_base + busdev);
+ else if (size == 1)
+ writeb(val, rockchip->reg_base + busdev);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size,
+ val);
+}
+
+static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size,
+ val);
+}
+
+static struct pci_ops rockchip_pcie_ops = {
+ .read = rockchip_pcie_rd_conf,
+ .write = rockchip_pcie_wr_conf,
+};
+
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+ int curr;
+ u32 status, scale, power;
+
+ if (IS_ERR(rockchip->vpcie3v3))
+ return;
+
+ /*
+ * Set the RC's Captured Slot Power Limit and Scale if vpcie3v3 is
+ * available. Both default to zero, which means software must program
+ * them according to the actual power supply.
+ */
+ curr = regulator_get_current_limit(rockchip->vpcie3v3);
+ if (curr <= 0)
+ return;
+
+ scale = 3; /* 0.001x */
+ curr = curr / 1000; /* convert to mA */
+ power = (curr * 3300) / 1000; /* milliwatt */
+ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ if (!scale) {
+ dev_warn(rockchip->dev, "invalid power supply\n");
+ return;
+ }
+ scale--;
+ power = power / 10;
+ }
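+ /*
+ * e.g. a 3 A limit: power starts at 9900 and is reduced to 99 with
+ * scale = 1 (0.1x), i.e. 9.9 W.
+ */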
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+ status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+ (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+}
+
+/**
+ * rockchip_pcie_host_init_port - Initialize hardware
+ * @rockchip: PCIe port information
+ */
+static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err, i = MAX_LANE_NUM;
+ u32 status;
+
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ return err;
+
+ /* Set the transmitted FTS count required to exit from L0s. */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
+ status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+ (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+
+ rockchip_pcie_set_power_limit(rockchip);
+
+ /* Set RC's clock architecture as common clock */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKSTA_SLC << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Set RC's RCB to 128 */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RCB;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Enable Gen1 training */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+
+ /* 500ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+ status, PCIE_LINK_UP(status), 20,
+ 500 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(dev, "PCIe link training gen1 timeout!\n");
+ goto err_power_off_phy;
+ }
+
+ if (rockchip->link_gen == 2) {
+ /*
+ * Enable retraining for Gen2. This must be configured only after the
+ * Gen1 link has come up.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ status, PCIE_LINK_IS_GEN2(status), 20,
+ 500 * USEC_PER_MSEC);
+ if (err)
+ dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
+ }
+
+ /* Check the final negotiated link width reported by the MGMT block */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+ PCIE_CORE_PL_CONF_LANE_SHIFT);
+ dev_dbg(dev, "current link width is x%d\n", status);
+
+ /* Power off unused lane(s) */
+ rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ if (!(rockchip->lanes_map & BIT(i))) {
+ dev_dbg(dev, "idling lane %d\n", i);
+ phy_power_off(rockchip->phys[i]);
+ }
+ }
+
+ rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ PCIE_CORE_CONFIG_VENDOR);
+ rockchip_pcie_write(rockchip,
+ PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
+ PCIE_RC_CONFIG_RID_CCR);
+
+ /* Clear THP cap's next cap pointer to remove L1 substate cap */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+ status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
+ /* Clear L0s from RC's link cap */
+ if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
+ status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
+ }
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+ status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+ status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
+
+ return 0;
+err_power_off_phy:
+ while (i--)
+ phy_power_off(rockchip->phys[i]);
+ i = MAX_LANE_NUM;
+ while (i--)
+ phy_exit(rockchip->phys[i]);
+ return err;
+}
+
+static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 sub_reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LOCAL) {
+ dev_dbg(dev, "local interrupt received\n");
+ sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
+ if (sub_reg & PCIE_CORE_INT_PRFPE)
+ dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFPE)
+ dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_RRPE)
+ dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_PRFO)
+ dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFO)
+ dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_RT)
+ dev_dbg(dev, "replay timer timed out\n");
+
+ if (sub_reg & PCIE_CORE_INT_RTR)
+ dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
+
+ if (sub_reg & PCIE_CORE_INT_PE)
+ dev_dbg(dev, "phy error detected on receive side\n");
+
+ if (sub_reg & PCIE_CORE_INT_MTR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_UCR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_FCE)
+ dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
+
+ if (sub_reg & PCIE_CORE_INT_CT)
+ dev_dbg(dev, "a request timed out waiting for completion\n");
+
+ if (sub_reg & PCIE_CORE_INT_UTC)
+ dev_dbg(dev, "unmapped TC error\n");
+
+ if (sub_reg & PCIE_CORE_INT_MMVC)
+ dev_dbg(dev, "MSI mask register changes\n");
+
+ rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
+ } else if (reg & PCIE_CLIENT_INT_PHY) {
+ dev_dbg(dev, "phy link changes\n");
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_clr_bw_int(rockchip);
+ }
+
+ rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
+ dev_dbg(dev, "legacy done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_MSG)
+ dev_dbg(dev, "message done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_HOT_RST)
+ dev_dbg(dev, "hot reset interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_DPA)
+ dev_dbg(dev, "dpa interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_FATAL_ERR)
+ dev_dbg(dev, "fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
+ dev_dbg(dev, "no fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_CORR_ERR)
+ dev_dbg(dev, "correctable error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_PHY)
+ dev_dbg(dev, "phy interrupt received\n");
+
+ rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
+ PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
+ PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
+ PCIE_CLIENT_INT_NFATAL_ERR |
+ PCIE_CLIENT_INT_CORR_ERR |
+ PCIE_CLIENT_INT_PHY),
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 hwirq;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
+
+ while (reg) {
+ hwirq = ffs(reg) - 1;
+ reg &= ~BIT(hwirq);
+
+ virq = irq_find_mapping(rockchip->irq_domain, hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+{
+ int irq, err;
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0) {
+ dev_err(dev, "missing sys IRQ resource\n");
+ return irq;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
+ IRQF_SHARED, "pcie-sys", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+ return err;
+ }
+
+ irq = platform_get_irq_byname(pdev, "legacy");
+ if (irq < 0) {
+ dev_err(dev, "missing legacy IRQ resource\n");
+ return irq;
+ }
+
+ irq_set_chained_handler_and_data(irq,
+ rockchip_pcie_legacy_int_handler,
+ rockchip);
+
+ irq = platform_get_irq_byname(pdev, "client");
+ if (irq < 0) {
+ dev_err(dev, "missing client IRQ resource\n");
+ return irq;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
+ IRQF_SHARED, "pcie-client", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe client IRQ\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * rockchip_pcie_parse_host_dt - Parse Device Tree
+ * @rockchip: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_setup_irq(rockchip);
+ if (err)
+ return err;
+
+ rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+ if (IS_ERR(rockchip->vpcie12v)) {
+ if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie12v regulator found\n");
+ }
+
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie3v3 regulator found\n");
+ }
+
+ rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8)) {
+ if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie1v8 regulator found\n");
+ }
+
+ rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9)) {
+ if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie0v9 regulator found\n");
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ if (!IS_ERR(rockchip->vpcie12v)) {
+ err = regulator_enable(rockchip->vpcie12v);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie12v regulator\n");
+ goto err_out;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie3v3)) {
+ err = regulator_enable(rockchip->vpcie3v3);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie3v3 regulator\n");
+ goto err_disable_12v;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie1v8)) {
+ err = regulator_enable(rockchip->vpcie1v8);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie1v8 regulator\n");
+ goto err_disable_3v3;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie0v9)) {
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ goto err_disable_1v8;
+ }
+ }
+
+ return 0;
+
+err_disable_1v8:
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+err_disable_3v3:
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+err_disable_12v:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+err_out:
+ return err;
+}
+
+static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
+ (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
+ rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
+ PCIE_CORE_INT_MASK);
+
+ rockchip_pcie_enable_bw_int(rockchip);
+}
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct device_node *intc = of_get_next_child(dev->of_node, NULL);
+
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get a INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
+ int region_no, int type, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ob_addr_0;
+ u32 ob_addr_1;
+ u32 ob_desc_0;
+ u32 aw_offset;
+
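+ /*
+ * num_pass_bits is the index of the highest untranslated address bit,
+ * so the window spans 2^(num_pass_bits + 1) bytes and must fit within
+ * the AXI region being programmed.
+ */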
+ if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+ if (region_no == 0) {
+ if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+ if (region_no != 0) {
+ if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+
+ aw_offset = (region_no << OB_REG_SIZE_SHIFT);
+
+ ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
+ ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
+ ob_addr_1 = upper_addr;
+ ob_desc_0 = (1 << 23 | type);
+
+ rockchip_pcie_write(rockchip, ob_addr_0,
+ PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_addr_1,
+ PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_desc_0,
+ PCIE_CORE_OB_REGION_DESC0 + aw_offset);
+ rockchip_pcie_write(rockchip, 0,
+ PCIE_CORE_OB_REGION_DESC1 + aw_offset);
+
+ return 0;
+}
+
+static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
+ int region_no, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ib_addr_0;
+ u32 ib_addr_1;
+ u32 aw_offset;
+
+ if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+
+ aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
+
+ ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
+ ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
+ ib_addr_1 = upper_addr;
+
+ rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
+
+ return 0;
+}
+
+static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int offset;
+ int err;
+ int reg_no;
+
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+
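+ /* Map the RC memory aperture as a series of 1 MB outbound windows. */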
+ for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+ AXI_WRAPPER_MEM_WRITE,
+ 20 - 1,
+ rockchip->mem_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC mem outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+ if (err) {
+ dev_err(dev, "program RC mem inbound ATU failed\n");
+ return err;
+ }
+
+ offset = rockchip->mem_size >> 20;
+ for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip,
+ reg_no + 1 + offset,
+ AXI_WRAPPER_IO_WRITE,
+ 20 - 1,
+ rockchip->io_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC io outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ /* assign message regions */
+ rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
+ AXI_WRAPPER_NOR_MSG,
+ 20 - 1, 0, 0);
+
+ rockchip->msg_bus_addr = rockchip->mem_bus_addr +
+ ((reg_no + offset) << 20);
+ return err;
+}
+
+static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
+{
+ u32 value;
+ int err;
+
+ /* send PME_TURN_OFF message */
+ writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+
+ /* read LTSSM and wait for falling into L2 link state */
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
+ value, PCIE_LINK_IS_L2(value), 20,
+ jiffies_to_usecs(5 * HZ));
+ if (err) {
+ dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
+{
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+ int ret;
+
+ /* disable core and client interrupts since there is no need to ack PME_ACK */
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
+ PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
+ rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
+
+ ret = rockchip_pcie_wait_l2(rockchip);
+ if (ret) {
+ rockchip_pcie_enable_interrupts(rockchip);
+ return ret;
+ }
+
+ rockchip_pcie_deinit_phys(rockchip);
+
+ rockchip_pcie_disable_clocks(rockchip);
+
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+
+ return ret;
+}
+
+static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
+{
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+ int err;
+
+ if (!IS_ERR(rockchip->vpcie0v9)) {
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ goto err_disable_0v9;
+
+ err = rockchip_pcie_host_init_port(rockchip);
+ if (err)
+ goto err_pcie_resume;
+
+ err = rockchip_pcie_cfg_atu(rockchip);
+ if (err)
+ goto err_deinit_port;
+
+ /* Need this to enter L1 again */
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ return 0;
+
+err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_pcie_resume:
+ rockchip_pcie_disable_clocks(rockchip);
+err_disable_0v9:
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+ return err;
+}
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct rockchip_pcie *rockchip;
+ struct device *dev = &pdev->dev;
+ struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *win;
+ resource_size_t io_base;
+ struct resource *mem;
+ struct resource *io;
+ int err;
+
+ LIST_HEAD(res);
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
+ if (!bridge)
+ return -ENOMEM;
+
+ rockchip = pci_host_bridge_priv(bridge);
+
+ platform_set_drvdata(pdev, rockchip);
+ rockchip->dev = dev;
+ rockchip->is_rc = true;
+
+ err = rockchip_pcie_parse_host_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_set_vpcie(rockchip);
+ if (err) {
+ dev_err(dev, "failed to set vpcie regulator\n");
+ goto err_set_vpcie;
+ }
+
+ err = rockchip_pcie_host_init_port(rockchip);
+ if (err)
+ goto err_vpcie;
+
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ err = rockchip_pcie_init_irq_domain(rockchip);
+ if (err < 0)
+ goto err_deinit_port;
+
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &res, &io_base);
+ if (err)
+ goto err_remove_irq_domain;
+
+ err = devm_request_pci_bus_resources(dev, &res);
+ if (err)
+ goto err_free_res;
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "I/O";
+ rockchip->io_size = resource_size(io);
+ rockchip->io_bus_addr = io->start - win->offset;
+ err = pci_remap_iospace(io, io_base);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, io);
+ continue;
+ }
+ rockchip->io = io;
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ mem->name = "MEM";
+ rockchip->mem_size = resource_size(mem);
+ rockchip->mem_bus_addr = mem->start - win->offset;
+ break;
+ case IORESOURCE_BUS:
+ rockchip->root_bus_nr = win->res->start;
+ break;
+ default:
+ continue;
+ }
+ }
+
+ err = rockchip_pcie_cfg_atu(rockchip);
+ if (err)
+ goto err_unmap_iospace;
+
+ rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
+ if (!rockchip->msg_region) {
+ err = -ENOMEM;
+ goto err_unmap_iospace;
+ }
+
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = rockchip;
+ bridge->busnr = 0;
+ bridge->ops = &rockchip_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err < 0)
+ goto err_unmap_iospace;
+
+ bus = bridge->bus;
+
+ rockchip->root_bus = bus;
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+ return 0;
+
+err_unmap_iospace:
+ pci_unmap_iospace(rockchip->io);
+err_free_res:
+ pci_free_resource_list(&res);
+err_remove_irq_domain:
+ irq_domain_remove(rockchip->irq_domain);
+err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_vpcie:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+err_set_vpcie:
+ rockchip_pcie_disable_clocks(rockchip);
+ return err;
+}
+
+static int rockchip_pcie_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+
+ pci_stop_root_bus(rockchip->root_bus);
+ pci_remove_root_bus(rockchip->root_bus);
+ pci_unmap_iospace(rockchip->io);
+ irq_domain_remove(rockchip->irq_domain);
+
+ rockchip_pcie_deinit_phys(rockchip);
+
+ rockchip_pcie_disable_clocks(rockchip);
+
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rockchip_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
+ rockchip_pcie_resume_noirq)
+};
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .pm = &rockchip_pcie_pm_ops,
+ },
+ .probe = rockchip_pcie_probe,
+ .remove = rockchip_pcie_remove,
+};
+module_platform_driver(rockchip_pcie_driver);
+
+MODULE_AUTHOR("Rockchip Inc");
+MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index f1e8f97ea1fb1..c53d1322a3d6c 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -11,535 +11,154 @@
* ARM PCI Host generic driver.
*/
-#include <linux/bitrev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/iopoll.h>
-#include <linux/irq.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_pci.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <linux/regmap.h>
-/*
- * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
- * bits. This allows atomic updates of the register without locking.
- */
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
-
-#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
-#define MAX_LANE_NUM 4
-
-#define PCIE_CLIENT_BASE 0x0
-#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
-#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
-#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
-#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
-#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
-#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
-#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
-#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
-#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
-#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
-#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
-#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
-#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
-#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
-#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
-#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
-#define PCIE_CLIENT_INTR_SHIFT 5
-#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
-#define PCIE_CLIENT_INT_MSG BIT(14)
-#define PCIE_CLIENT_INT_HOT_RST BIT(13)
-#define PCIE_CLIENT_INT_DPA BIT(12)
-#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
-#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
-#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
-#define PCIE_CLIENT_INT_INTD BIT(8)
-#define PCIE_CLIENT_INT_INTC BIT(7)
-#define PCIE_CLIENT_INT_INTB BIT(6)
-#define PCIE_CLIENT_INT_INTA BIT(5)
-#define PCIE_CLIENT_INT_LOCAL BIT(4)
-#define PCIE_CLIENT_INT_UDMA BIT(3)
-#define PCIE_CLIENT_INT_PHY BIT(2)
-#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
-#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
-
-#define PCIE_CLIENT_INT_LEGACY \
- (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
- PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
-
-#define PCIE_CLIENT_INT_CLI \
- (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
- PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
- PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
- PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
- PCIE_CLIENT_INT_PHY)
-
-#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
-#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
-#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
-#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
-#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
-#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
-#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
-#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
-#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
-#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
-#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
-#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
-#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
-#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
- (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
-#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
-#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
-#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
-#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
-#define PCIE_CORE_INT_PRFPE BIT(0)
-#define PCIE_CORE_INT_CRFPE BIT(1)
-#define PCIE_CORE_INT_RRPE BIT(2)
-#define PCIE_CORE_INT_PRFO BIT(3)
-#define PCIE_CORE_INT_CRFO BIT(4)
-#define PCIE_CORE_INT_RT BIT(5)
-#define PCIE_CORE_INT_RTR BIT(6)
-#define PCIE_CORE_INT_PE BIT(7)
-#define PCIE_CORE_INT_MTR BIT(8)
-#define PCIE_CORE_INT_UCR BIT(9)
-#define PCIE_CORE_INT_FCE BIT(10)
-#define PCIE_CORE_INT_CT BIT(11)
-#define PCIE_CORE_INT_UTC BIT(18)
-#define PCIE_CORE_INT_MMVC BIT(19)
-#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44)
-#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
-#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
-
-#define PCIE_CORE_INT \
- (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
- PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
- PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
- PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
- PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
- PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
- PCIE_CORE_INT_MMVC)
-
-#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
-#define PCIE_RC_CONFIG_BASE 0xa00000
-#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
-#define PCIE_RC_CONFIG_SCC_SHIFT 16
-#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
-#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
-#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
-#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
-#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
-#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
-#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
-#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
-#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
-#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
-#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
-#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
-#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
-
-#define PCIE_CORE_AXI_CONF_BASE 0xc00000
-#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
-#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
-#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
-#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
-#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
-#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
-
-#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
-#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
-#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
-#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
-#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
-
-/* Size of one AXI Region (not Region 0) */
-#define AXI_REGION_SIZE BIT(20)
-/* Size of Region 0, equal to sum of sizes of other regions */
-#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
-#define OB_REG_SIZE_SHIFT 5
-#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
-#define AXI_WRAPPER_IO_WRITE 0x6
-#define AXI_WRAPPER_MEM_WRITE 0x2
-#define AXI_WRAPPER_TYPE0_CFG 0xa
-#define AXI_WRAPPER_TYPE1_CFG 0xb
-#define AXI_WRAPPER_NOR_MSG 0xc
-
-#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
-#define MIN_AXI_ADDR_BITS_PASSED 8
-#define PCIE_RC_SEND_PME_OFF 0x11960
-#define ROCKCHIP_VENDOR_ID 0x1d87
-#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
-#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
-#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
-#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
-#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
- (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
- PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
-#define PCIE_LINK_IS_L2(x) \
- (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
-#define PCIE_LINK_UP(x) \
- (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
-#define PCIE_LINK_IS_GEN2(x) \
- (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
-
-#define RC_REGION_0_ADDR_TRANS_H 0x00000000
-#define RC_REGION_0_ADDR_TRANS_L 0x00000000
-#define RC_REGION_0_PASS_BITS (25 - 1)
-#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
-#define MAX_AXI_WRAPPER_REGION_NUM 33
-
-struct rockchip_pcie {
- void __iomem *reg_base; /* DT axi-base */
- void __iomem *apb_base; /* DT apb-base */
- bool legacy_phy;
- struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
- struct regulator *vpcie12v; /* 12V power supply */
- struct regulator *vpcie3v3; /* 3.3V power supply */
- struct regulator *vpcie1v8; /* 1.8V power supply */
- struct regulator *vpcie0v9; /* 0.9V power supply */
- struct gpio_desc *ep_gpio;
- u32 lanes;
- u8 lanes_map;
- u8 root_bus_nr;
- int link_gen;
- struct device *dev;
- struct irq_domain *irq_domain;
- int offset;
- struct pci_bus *root_bus;
- struct resource *io;
- phys_addr_t io_bus_addr;
- u32 io_size;
- void __iomem *msg_region;
- u32 mem_size;
- phys_addr_t msg_bus_addr;
- phys_addr_t mem_bus_addr;
-};
-
-static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
-{
- return readl(rockchip->apb_base + reg);
-}
-
-static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
- u32 reg)
-{
- writel(val, rockchip->apb_base + reg);
-}
-
-static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
-{
- u32 status;
-
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-}
-
-static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
-{
- u32 status;
-
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-}
-
-static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
-{
- u32 val;
-
- /* Update Tx credit maximum update interval */
- val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
- val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
- val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
- rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
-}
+#include "../pci.h"
+#include "pcie-rockchip.h"
-static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
- struct pci_bus *bus, int dev)
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
{
- /* access only one slot on each root port */
- if (bus->number == rockchip->root_bus_nr && dev > 0)
- return 0;
-
- /*
- * do not read more than one device on the bus directly attached
- * to RC's downstream side.
- */
- if (bus->primary == rockchip->root_bus_nr && dev > 0)
- return 0;
-
- return 1;
-}
-
-static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
-{
- u32 val;
- u8 map;
-
- if (rockchip->legacy_phy)
- return GENMASK(MAX_LANE_NUM - 1, 0);
-
- val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
- map = val & PCIE_CORE_LANE_MAP_MASK;
-
- /* The link may be using a reverse-indexed mapping. */
- if (val & PCIE_CORE_LANE_MAP_REVERSE)
- map = bitrev8(map) >> 4;
-
- return map;
-}
-
-static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
- int where, int size, u32 *val)
-{
- void __iomem *addr;
-
- addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
-
- if (!IS_ALIGNED((uintptr_t)addr, size)) {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node;
+ struct resource *regs;
+ int err;
- if (size == 4) {
- *val = readl(addr);
- } else if (size == 2) {
- *val = readw(addr);
- } else if (size == 1) {
- *val = readb(addr);
+ if (rockchip->is_rc) {
+ regs = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "axi-base");
+ rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(rockchip->reg_base))
+ return PTR_ERR(rockchip->reg_base);
} else {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ rockchip->mem_res =
+ platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mem-base");
+ if (!rockchip->mem_res)
+ return -EINVAL;
}
- return PCIBIOS_SUCCESSFUL;
-}
-static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
- int where, int size, u32 val)
-{
- u32 mask, tmp, offset;
- void __iomem *addr;
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "apb-base");
+ rockchip->apb_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
- offset = where & ~0x3;
- addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+ err = rockchip_pcie_get_phys(rockchip);
+ if (err)
+ return err;
- if (size == 4) {
- writel(val, addr);
- return PCIBIOS_SUCCESSFUL;
+ rockchip->lanes = 1;
+ err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
+ if (!err && (rockchip->lanes == 0 ||
+ rockchip->lanes == 3 ||
+ rockchip->lanes > 4)) {
+ dev_warn(dev, "invalid num-lanes, default to use one lane\n");
+ rockchip->lanes = 1;
}
- mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
-
- /*
- * N.B. This read/modify/write isn't safe in general because it can
- * corrupt RW1C bits in adjacent registers. But the hardware
- * doesn't support smaller writes.
- */
- tmp = readl(addr) & mask;
- tmp |= val << ((where & 0x3) * 8);
- writel(tmp, addr);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static void rockchip_pcie_cfg_configuration_accesses(
- struct rockchip_pcie *rockchip, u32 type)
-{
- u32 ob_desc_0;
-
- /* Configuration Accesses for region 0 */
- rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
-
- rockchip_pcie_write(rockchip,
- (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
- PCIE_CORE_OB_REGION_ADDR0);
- rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
- PCIE_CORE_OB_REGION_ADDR1);
- ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
- ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
- ob_desc_0 |= (type | (0x1 << 23));
- rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
- rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
-}
-
-static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
- struct pci_bus *bus, u32 devfn,
- int where, int size, u32 *val)
-{
- u32 busdev;
-
- busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
+ rockchip->link_gen = of_pci_get_max_link_speed(node);
+ if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+ rockchip->link_gen = 2;
- if (!IS_ALIGNED(busdev, size)) {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
+ if (IS_ERR(rockchip->core_rst)) {
+ if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing core reset property in node\n");
+ return PTR_ERR(rockchip->core_rst);
}
- if (bus->parent->number == rockchip->root_bus_nr)
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE0_CFG);
- else
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE1_CFG);
-
- if (size == 4) {
- *val = readl(rockchip->reg_base + busdev);
- } else if (size == 2) {
- *val = readw(rockchip->reg_base + busdev);
- } else if (size == 1) {
- *val = readb(rockchip->reg_base + busdev);
- } else {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
+ if (IS_ERR(rockchip->mgmt_rst)) {
+ if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_rst);
}
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
- struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
-{
- u32 busdev;
-
- busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
- if (!IS_ALIGNED(busdev, size))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (bus->parent->number == rockchip->root_bus_nr)
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE0_CFG);
- else
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE1_CFG);
-
- if (size == 4)
- writel(val, rockchip->reg_base + busdev);
- else if (size == 2)
- writew(val, rockchip->reg_base + busdev);
- else if (size == 1)
- writeb(val, rockchip->reg_base + busdev);
- else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- return PCIBIOS_SUCCESSFUL;
-}
-static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
- int size, u32 *val)
-{
- struct rockchip_pcie *rockchip = bus->sysdata;
-
- if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
+ rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
+ "mgmt-sticky");
+ if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+ if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt-sticky reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_sticky_rst);
}
- if (bus->number == rockchip->root_bus_nr)
- return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
-
- return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val);
-}
+ rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(rockchip->pipe_rst)) {
+ if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pipe reset property in node\n");
+ return PTR_ERR(rockchip->pipe_rst);
+ }
-static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
-{
- struct rockchip_pcie *rockchip = bus->sysdata;
+ rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
+ if (IS_ERR(rockchip->pm_rst)) {
+ if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pm reset property in node\n");
+ return PTR_ERR(rockchip->pm_rst);
+ }
- if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
+ rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
+ if (IS_ERR(rockchip->pclk_rst)) {
+ if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pclk reset property in node\n");
+ return PTR_ERR(rockchip->pclk_rst);
+ }
- if (bus->number == rockchip->root_bus_nr)
- return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+ rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_rst)) {
+ if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing aclk reset property in node\n");
+ return PTR_ERR(rockchip->aclk_rst);
+ }
- return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val);
-}
+ if (rockchip->is_rc) {
+ rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->ep_gpio)) {
+ dev_err(dev, "missing ep-gpios property in node\n");
+ return PTR_ERR(rockchip->ep_gpio);
+ }
+ }
-static struct pci_ops rockchip_pcie_ops = {
- .read = rockchip_pcie_rd_conf,
- .write = rockchip_pcie_wr_conf,
-};
+ rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_pcie)) {
+ dev_err(dev, "aclk clock not found\n");
+ return PTR_ERR(rockchip->aclk_pcie);
+ }
-static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
-{
- int curr;
- u32 status, scale, power;
+ rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
+ if (IS_ERR(rockchip->aclk_perf_pcie)) {
+ dev_err(dev, "aclk_perf clock not found\n");
+ return PTR_ERR(rockchip->aclk_perf_pcie);
+ }
- if (IS_ERR(rockchip->vpcie3v3))
- return;
+ rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
+ if (IS_ERR(rockchip->hclk_pcie)) {
+ dev_err(dev, "hclk clock not found\n");
+ return PTR_ERR(rockchip->hclk_pcie);
+ }
- /*
- * Set RC's captured slot power limit and scale if
- * vpcie3v3 available. The default values are both zero
- * which means the software should set these two according
- * to the actual power supply.
- */
- curr = regulator_get_current_limit(rockchip->vpcie3v3);
- if (curr <= 0)
- return;
-
- scale = 3; /* 0.001x */
- curr = curr / 1000; /* convert to mA */
- power = (curr * 3300) / 1000; /* milliwatt */
- while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
- if (!scale) {
- dev_warn(rockchip->dev, "invalid power supply\n");
- return;
- }
- scale--;
- power = power / 10;
+ rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
+ if (IS_ERR(rockchip->clk_pcie_pm)) {
+ dev_err(dev, "pm clock not found\n");
+ return PTR_ERR(rockchip->clk_pcie_pm);
}
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
- status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
- (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
-/**
- * rockchip_pcie_init_port - Initialize hardware
- * @rockchip: PCIe port information
- */
-static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int err, i;
- u32 status;
-
- gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+ u32 regs;
err = reset_control_assert(rockchip->aclk_rst);
if (err) {
@@ -618,13 +237,15 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
PCIE_CLIENT_CONFIG);
- rockchip_pcie_write(rockchip,
- PCIE_CLIENT_CONF_ENABLE |
- PCIE_CLIENT_LINK_TRAIN_ENABLE |
- PCIE_CLIENT_ARI_ENABLE |
- PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
- PCIE_CLIENT_MODE_RC,
- PCIE_CLIENT_CONFIG);
+ regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+ PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
+
+ if (rockchip->is_rc)
+ regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+ else
+ regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
+
+ rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG);
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_power_on(rockchip->phys[i]);
@@ -662,93 +283,6 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
goto err_power_off_phy;
}
- /* Fix the transmitted FTS count desired to exit from L0s. */
- status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
- status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
- (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
-
- rockchip_pcie_set_power_limit(rockchip);
-
- /* Set RC's clock architecture as common clock */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKSTA_SLC << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
- /* Set RC's RCB to 128 */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKCTL_RCB;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
- /* Enable Gen1 training */
- rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
- PCIE_CLIENT_CONFIG);
-
- gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
-
- /* 500ms timeout value should be enough for Gen1/2 training */
- err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
- status, PCIE_LINK_UP(status), 20,
- 500 * USEC_PER_MSEC);
- if (err) {
- dev_err(dev, "PCIe link training gen1 timeout!\n");
- goto err_power_off_phy;
- }
-
- if (rockchip->link_gen == 2) {
- /*
- * Enable retrain for gen2. This should be configured only after
- * gen1 finished.
- */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKCTL_RL;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
- err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
- status, PCIE_LINK_IS_GEN2(status), 20,
- 500 * USEC_PER_MSEC);
- if (err)
- dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
- }
-
- /* Check the final link width from negotiated lane counter from MGMT */
- status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
- status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
- PCIE_CORE_PL_CONF_LANE_SHIFT);
- dev_dbg(dev, "current link width is x%d\n", status);
-
- /* Power off unused lane(s) */
- rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
- for (i = 0; i < MAX_LANE_NUM; i++) {
- if (!(rockchip->lanes_map & BIT(i))) {
- dev_dbg(dev, "idling lane %d\n", i);
- phy_power_off(rockchip->phys[i]);
- }
- }
-
- rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
- PCIE_CORE_CONFIG_VENDOR);
- rockchip_pcie_write(rockchip,
- PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
- PCIE_RC_CONFIG_RID_CCR);
-
- /* Clear THP cap's next cap pointer to remove L1 substate cap */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
- status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
-
- /* Clear L0s from RC's link cap */
- if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
- status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
- }
-
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
- status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
- status |= PCIE_RC_CONFIG_DCSR_MPS_256;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
-
return 0;
err_power_off_phy:
while (i--)
@@ -759,156 +293,9 @@ err_exit_phy:
phy_exit(rockchip->phys[i]);
return err;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_init_port);
-static void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
-{
- int i;
-
- for (i = 0; i < MAX_LANE_NUM; i++) {
- /* inactive lanes are already powered off */
- if (rockchip->lanes_map & BIT(i))
- phy_power_off(rockchip->phys[i]);
- phy_exit(rockchip->phys[i]);
- }
-}
-
-static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
-{
- struct rockchip_pcie *rockchip = arg;
- struct device *dev = rockchip->dev;
- u32 reg;
- u32 sub_reg;
-
- reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
- if (reg & PCIE_CLIENT_INT_LOCAL) {
- dev_dbg(dev, "local interrupt received\n");
- sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
- if (sub_reg & PCIE_CORE_INT_PRFPE)
- dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
-
- if (sub_reg & PCIE_CORE_INT_CRFPE)
- dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
-
- if (sub_reg & PCIE_CORE_INT_RRPE)
- dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
-
- if (sub_reg & PCIE_CORE_INT_PRFO)
- dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
-
- if (sub_reg & PCIE_CORE_INT_CRFO)
- dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
-
- if (sub_reg & PCIE_CORE_INT_RT)
- dev_dbg(dev, "replay timer timed out\n");
-
- if (sub_reg & PCIE_CORE_INT_RTR)
- dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
-
- if (sub_reg & PCIE_CORE_INT_PE)
- dev_dbg(dev, "phy error detected on receive side\n");
-
- if (sub_reg & PCIE_CORE_INT_MTR)
- dev_dbg(dev, "malformed TLP received from the link\n");
-
- if (sub_reg & PCIE_CORE_INT_UCR)
- dev_dbg(dev, "malformed TLP received from the link\n");
-
- if (sub_reg & PCIE_CORE_INT_FCE)
- dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
-
- if (sub_reg & PCIE_CORE_INT_CT)
- dev_dbg(dev, "a request timed out waiting for completion\n");
-
- if (sub_reg & PCIE_CORE_INT_UTC)
- dev_dbg(dev, "unmapped TC error\n");
-
- if (sub_reg & PCIE_CORE_INT_MMVC)
- dev_dbg(dev, "MSI mask register changes\n");
-
- rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
- } else if (reg & PCIE_CLIENT_INT_PHY) {
- dev_dbg(dev, "phy link changes\n");
- rockchip_pcie_update_txcredit_mui(rockchip);
- rockchip_pcie_clr_bw_int(rockchip);
- }
-
- rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
- PCIE_CLIENT_INT_STATUS);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
-{
- struct rockchip_pcie *rockchip = arg;
- struct device *dev = rockchip->dev;
- u32 reg;
-
- reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
- if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
- dev_dbg(dev, "legacy done interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_MSG)
- dev_dbg(dev, "message done interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_HOT_RST)
- dev_dbg(dev, "hot reset interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_DPA)
- dev_dbg(dev, "dpa interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_FATAL_ERR)
- dev_dbg(dev, "fatal error interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
- dev_dbg(dev, "no fatal error interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_CORR_ERR)
- dev_dbg(dev, "correctable error interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_PHY)
- dev_dbg(dev, "phy interrupt received\n");
-
- rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
- PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
- PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
- PCIE_CLIENT_INT_NFATAL_ERR |
- PCIE_CLIENT_INT_CORR_ERR |
- PCIE_CLIENT_INT_PHY),
- PCIE_CLIENT_INT_STATUS);
-
- return IRQ_HANDLED;
-}
-
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
- struct device *dev = rockchip->dev;
- u32 reg;
- u32 hwirq;
- u32 virq;
-
- chained_irq_enter(chip, desc);
-
- reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
- reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
-
- while (reg) {
- hwirq = ffs(reg) - 1;
- reg &= ~BIT(hwirq);
-
- virq = irq_find_mapping(rockchip->irq_domain, hwirq);
- if (virq)
- generic_handle_irq(virq);
- else
- dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
struct phy *phy;
@@ -948,452 +335,22 @@ static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys);
-static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
{
- int irq, err;
- struct device *dev = rockchip->dev;
- struct platform_device *pdev = to_platform_device(dev);
-
- irq = platform_get_irq_byname(pdev, "sys");
- if (irq < 0) {
- dev_err(dev, "missing sys IRQ resource\n");
- return irq;
- }
-
- err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
- IRQF_SHARED, "pcie-sys", rockchip);
- if (err) {
- dev_err(dev, "failed to request PCIe subsystem IRQ\n");
- return err;
- }
-
- irq = platform_get_irq_byname(pdev, "legacy");
- if (irq < 0) {
- dev_err(dev, "missing legacy IRQ resource\n");
- return irq;
- }
-
- irq_set_chained_handler_and_data(irq,
- rockchip_pcie_legacy_int_handler,
- rockchip);
-
- irq = platform_get_irq_byname(pdev, "client");
- if (irq < 0) {
- dev_err(dev, "missing client IRQ resource\n");
- return irq;
- }
-
- err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
- IRQF_SHARED, "pcie-client", rockchip);
- if (err) {
- dev_err(dev, "failed to request PCIe client IRQ\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * rockchip_pcie_parse_dt - Parse Device Tree
- * @rockchip: PCIe port information
- *
- * Return: '0' on success and error value on failure
- */
-static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct device_node *node = dev->of_node;
- struct resource *regs;
- int err;
-
- regs = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- "axi-base");
- rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
- if (IS_ERR(rockchip->reg_base))
- return PTR_ERR(rockchip->reg_base);
-
- regs = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- "apb-base");
- rockchip->apb_base = devm_ioremap_resource(dev, regs);
- if (IS_ERR(rockchip->apb_base))
- return PTR_ERR(rockchip->apb_base);
-
- err = rockchip_pcie_get_phys(rockchip);
- if (err)
- return err;
-
- rockchip->lanes = 1;
- err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
- if (!err && (rockchip->lanes == 0 ||
- rockchip->lanes == 3 ||
- rockchip->lanes > 4)) {
- dev_warn(dev, "invalid num-lanes, default to use one lane\n");
- rockchip->lanes = 1;
- }
-
- rockchip->link_gen = of_pci_get_max_link_speed(node);
- if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
- rockchip->link_gen = 2;
-
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
-
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
-
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
-
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
-
- rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
- if (IS_ERR(rockchip->ep_gpio)) {
- dev_err(dev, "missing ep-gpios property in node\n");
- return PTR_ERR(rockchip->ep_gpio);
- }
-
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
-
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
-
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
-
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
-
- err = rockchip_pcie_setup_irq(rockchip);
- if (err)
- return err;
-
- rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
- if (IS_ERR(rockchip->vpcie12v)) {
- if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie12v regulator found\n");
- }
-
- rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
- if (IS_ERR(rockchip->vpcie3v3)) {
- if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie3v3 regulator found\n");
- }
-
- rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
- if (IS_ERR(rockchip->vpcie1v8)) {
- if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie1v8 regulator found\n");
- }
-
- rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
- if (IS_ERR(rockchip->vpcie0v9)) {
- if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie0v9 regulator found\n");
- }
-
- return 0;
-}
-
-static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- int err;
-
- if (!IS_ERR(rockchip->vpcie12v)) {
- err = regulator_enable(rockchip->vpcie12v);
- if (err) {
- dev_err(dev, "fail to enable vpcie12v regulator\n");
- goto err_out;
- }
- }
-
- if (!IS_ERR(rockchip->vpcie3v3)) {
- err = regulator_enable(rockchip->vpcie3v3);
- if (err) {
- dev_err(dev, "fail to enable vpcie3v3 regulator\n");
- goto err_disable_12v;
- }
- }
-
- if (!IS_ERR(rockchip->vpcie1v8)) {
- err = regulator_enable(rockchip->vpcie1v8);
- if (err) {
- dev_err(dev, "fail to enable vpcie1v8 regulator\n");
- goto err_disable_3v3;
- }
- }
-
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- goto err_disable_1v8;
- }
- }
-
- return 0;
-
-err_disable_1v8:
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
-err_disable_3v3:
- if (!IS_ERR(rockchip->vpcie3v3))
- regulator_disable(rockchip->vpcie3v3);
-err_disable_12v:
- if (!IS_ERR(rockchip->vpcie12v))
- regulator_disable(rockchip->vpcie12v);
-err_out:
- return err;
-}
-
-static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
-{
- rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
- (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
- rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
- PCIE_CORE_INT_MASK);
-
- rockchip_pcie_enable_bw_int(rockchip);
-}
-
-static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops intx_domain_ops = {
- .map = rockchip_pcie_intx_map,
-};
-
-static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- struct device_node *intc = of_get_next_child(dev->of_node, NULL);
-
- if (!intc) {
- dev_err(dev, "missing child interrupt-controller node\n");
- return -EINVAL;
- }
-
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
- if (!rockchip->irq_domain) {
- dev_err(dev, "failed to get a INTx IRQ domain\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
- int region_no, int type, u8 num_pass_bits,
- u32 lower_addr, u32 upper_addr)
-{
- u32 ob_addr_0;
- u32 ob_addr_1;
- u32 ob_desc_0;
- u32 aw_offset;
-
- if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
- return -EINVAL;
- if (num_pass_bits + 1 < 8)
- return -EINVAL;
- if (num_pass_bits > 63)
- return -EINVAL;
- if (region_no == 0) {
- if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
- return -EINVAL;
- }
- if (region_no != 0) {
- if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
- return -EINVAL;
- }
-
- aw_offset = (region_no << OB_REG_SIZE_SHIFT);
-
- ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
- ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
- ob_addr_1 = upper_addr;
- ob_desc_0 = (1 << 23 | type);
-
- rockchip_pcie_write(rockchip, ob_addr_0,
- PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
- rockchip_pcie_write(rockchip, ob_addr_1,
- PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
- rockchip_pcie_write(rockchip, ob_desc_0,
- PCIE_CORE_OB_REGION_DESC0 + aw_offset);
- rockchip_pcie_write(rockchip, 0,
- PCIE_CORE_OB_REGION_DESC1 + aw_offset);
-
- return 0;
-}
-
-static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
- int region_no, u8 num_pass_bits,
- u32 lower_addr, u32 upper_addr)
-{
- u32 ib_addr_0;
- u32 ib_addr_1;
- u32 aw_offset;
-
- if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
- return -EINVAL;
- if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
- return -EINVAL;
- if (num_pass_bits > 63)
- return -EINVAL;
-
- aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
-
- ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
- ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
- ib_addr_1 = upper_addr;
-
- rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
- rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
-
- return 0;
-}
-
-static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- int offset;
- int err;
- int reg_no;
-
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE0_CFG);
-
- for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
- AXI_WRAPPER_MEM_WRITE,
- 20 - 1,
- rockchip->mem_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC mem outbound ATU failed\n");
- return err;
- }
- }
-
- err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
- if (err) {
- dev_err(dev, "program RC mem inbound ATU failed\n");
- return err;
- }
-
- offset = rockchip->mem_size >> 20;
- for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip,
- reg_no + 1 + offset,
- AXI_WRAPPER_IO_WRITE,
- 20 - 1,
- rockchip->io_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC io outbound ATU failed\n");
- return err;
- }
- }
-
- /* assign message regions */
- rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
- AXI_WRAPPER_NOR_MSG,
- 20 - 1, 0, 0);
-
- rockchip->msg_bus_addr = rockchip->mem_bus_addr +
- ((reg_no + offset) << 20);
- return err;
-}
-
-static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
-{
- u32 value;
- int err;
-
- /* send PME_TURN_OFF message */
- writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+ int i;
- /* read LTSSM and wait for falling into L2 link state */
- err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
- value, PCIE_LINK_IS_L2(value), 20,
- jiffies_to_usecs(5 * HZ));
- if (err) {
- dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
- return err;
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ /* inactive lanes are already powered off */
+ if (rockchip->lanes_map & BIT(i))
+ phy_power_off(rockchip->phys[i]);
+ phy_exit(rockchip->phys[i]);
}
-
- return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys);
-static int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int err;
@@ -1432,8 +389,9 @@ err_aclk_perf_pcie:
clk_disable_unprepare(rockchip->aclk_pcie);
return err;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
-static void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(void *data)
{
struct rockchip_pcie *rockchip = data;
@@ -1442,267 +400,25 @@ static void rockchip_pcie_disable_clocks(void *data)
clk_disable_unprepare(rockchip->aclk_perf_pcie);
clk_disable_unprepare(rockchip->aclk_pcie);
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
-static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
-{
- struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
- int ret;
-
- /* disable core and cli int since we don't need to ack PME_ACK */
- rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
- PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
- rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
-
- ret = rockchip_pcie_wait_l2(rockchip);
- if (ret) {
- rockchip_pcie_enable_interrupts(rockchip);
- return ret;
- }
-
- rockchip_pcie_deinit_phys(rockchip);
-
- rockchip_pcie_disable_clocks(rockchip);
-
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
-
- return ret;
-}
-
-static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
-{
- struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
- int err;
-
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- return err;
- }
- }
-
- err = rockchip_pcie_enable_clocks(rockchip);
- if (err)
- goto err_disable_0v9;
-
- err = rockchip_pcie_init_port(rockchip);
- if (err)
- goto err_pcie_resume;
-
- err = rockchip_pcie_cfg_atu(rockchip);
- if (err)
- goto err_err_deinit_port;
-
- /* Need this to enter L1 again */
- rockchip_pcie_update_txcredit_mui(rockchip);
- rockchip_pcie_enable_interrupts(rockchip);
-
- return 0;
-
-err_err_deinit_port:
- rockchip_pcie_deinit_phys(rockchip);
-err_pcie_resume:
- rockchip_pcie_disable_clocks(rockchip);
-err_disable_0v9:
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
- return err;
-}
-
-static int rockchip_pcie_probe(struct platform_device *pdev)
-{
- struct rockchip_pcie *rockchip;
- struct device *dev = &pdev->dev;
- struct pci_bus *bus, *child;
- struct pci_host_bridge *bridge;
- struct resource_entry *win;
- resource_size_t io_base;
- struct resource *mem;
- struct resource *io;
- int err;
-
- LIST_HEAD(res);
-
- if (!dev->of_node)
- return -ENODEV;
-
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
- if (!bridge)
- return -ENOMEM;
-
- rockchip = pci_host_bridge_priv(bridge);
-
- platform_set_drvdata(pdev, rockchip);
- rockchip->dev = dev;
-
- err = rockchip_pcie_parse_dt(rockchip);
- if (err)
- return err;
-
- err = rockchip_pcie_enable_clocks(rockchip);
- if (err)
- return err;
-
- err = rockchip_pcie_set_vpcie(rockchip);
- if (err) {
- dev_err(dev, "failed to set vpcie regulator\n");
- goto err_set_vpcie;
- }
-
- err = rockchip_pcie_init_port(rockchip);
- if (err)
- goto err_vpcie;
-
- rockchip_pcie_enable_interrupts(rockchip);
-
- err = rockchip_pcie_init_irq_domain(rockchip);
- if (err < 0)
- goto err_deinit_port;
-
- err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
- &res, &io_base);
- if (err)
- goto err_remove_irq_domain;
-
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto err_free_res;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- io = win->res;
- io->name = "I/O";
- rockchip->io_size = resource_size(io);
- rockchip->io_bus_addr = io->start - win->offset;
- err = pci_remap_iospace(io, io_base);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, io);
- continue;
- }
- rockchip->io = io;
- break;
- case IORESOURCE_MEM:
- mem = win->res;
- mem->name = "MEM";
- rockchip->mem_size = resource_size(mem);
- rockchip->mem_bus_addr = mem->start - win->offset;
- break;
- case IORESOURCE_BUS:
- rockchip->root_bus_nr = win->res->start;
- break;
- default:
- continue;
- }
- }
-
- err = rockchip_pcie_cfg_atu(rockchip);
- if (err)
- goto err_unmap_iospace;
-
- rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
- if (!rockchip->msg_region) {
- err = -ENOMEM;
- goto err_unmap_iospace;
- }
-
- list_splice_init(&res, &bridge->windows);
- bridge->dev.parent = dev;
- bridge->sysdata = rockchip;
- bridge->busnr = 0;
- bridge->ops = &rockchip_pcie_ops;
- bridge->map_irq = of_irq_parse_and_map_pci;
- bridge->swizzle_irq = pci_common_swizzle;
-
- err = pci_scan_root_bus_bridge(bridge);
- if (err < 0)
- goto err_unmap_iospace;
-
- bus = bridge->bus;
-
- rockchip->root_bus = bus;
-
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(bus);
- return 0;
-
-err_unmap_iospace:
- pci_unmap_iospace(rockchip->io);
-err_free_res:
- pci_free_resource_list(&res);
-err_remove_irq_domain:
- irq_domain_remove(rockchip->irq_domain);
-err_deinit_port:
- rockchip_pcie_deinit_phys(rockchip);
-err_vpcie:
- if (!IS_ERR(rockchip->vpcie12v))
- regulator_disable(rockchip->vpcie12v);
- if (!IS_ERR(rockchip->vpcie3v3))
- regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
-err_set_vpcie:
- rockchip_pcie_disable_clocks(rockchip);
- return err;
-}
-
-static int rockchip_pcie_remove(struct platform_device *pdev)
+void rockchip_pcie_cfg_configuration_accesses(
+ struct rockchip_pcie *rockchip, u32 type)
{
- struct device *dev = &pdev->dev;
- struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
-
- pci_stop_root_bus(rockchip->root_bus);
- pci_remove_root_bus(rockchip->root_bus);
- pci_unmap_iospace(rockchip->io);
- irq_domain_remove(rockchip->irq_domain);
-
- rockchip_pcie_deinit_phys(rockchip);
-
- rockchip_pcie_disable_clocks(rockchip);
+ u32 ob_desc_0;
- if (!IS_ERR(rockchip->vpcie12v))
- regulator_disable(rockchip->vpcie12v);
- if (!IS_ERR(rockchip->vpcie3v3))
- regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ /* Configuration Accesses for region 0 */
+ rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
- return 0;
+ rockchip_pcie_write(rockchip,
+ (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+ PCIE_CORE_OB_REGION_ADDR0);
+ rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+ PCIE_CORE_OB_REGION_ADDR1);
+ ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
+ ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
+ ob_desc_0 |= (type | (0x1 << 23));
+ rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
+ rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
}
-
-static const struct dev_pm_ops rockchip_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
- rockchip_pcie_resume_noirq)
-};
-
-static const struct of_device_id rockchip_pcie_of_match[] = {
- { .compatible = "rockchip,rk3399-pcie", },
- {}
-};
-MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
-
-static struct platform_driver rockchip_pcie_driver = {
- .driver = {
- .name = "rockchip-pcie",
- .of_match_table = rockchip_pcie_of_match,
- .pm = &rockchip_pcie_pm_ops,
- },
- .probe = rockchip_pcie_probe,
- .remove = rockchip_pcie_remove,
-};
-module_platform_driver(rockchip_pcie_driver);
-
-MODULE_AUTHOR("Rockchip Inc");
-MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
-MODULE_LICENSE("GPL v2");
+EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses);
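For context, a hypothetical caller sketch (not part of this patch; the helper name example_select_cfg_type is invented, and it assumes #include "pcie-rockchip.h" from the new header below) of how a config-space accessor is expected to drive rockchip_pcie_cfg_configuration_accesses(): type 0 config TLPs for devices sitting directly on the root port's secondary bus, type 1 for anything deeper in the hierarchy.

	/*
	 * Hypothetical helper (illustrative only): pick the TLP type for the
	 * shared region-0 window before issuing a config access.
	 */
	static void example_select_cfg_type(struct rockchip_pcie *rockchip,
					    struct pci_bus *bus)
	{
		if (bus->parent && bus->parent->number == rockchip->root_bus_nr)
			rockchip_pcie_cfg_configuration_accesses(rockchip,
								 AXI_WRAPPER_TYPE0_CFG);
		else
			rockchip_pcie_cfg_configuration_accesses(rockchip,
								 AXI_WRAPPER_TYPE1_CFG);
	}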
diff --git a/drivers/pci/host/pcie-rockchip.h b/drivers/pci/host/pcie-rockchip.h
new file mode 100644
index 0000000000000..8e87a059ce73d
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip.h
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *
+ */
+
+#ifndef _PCIE_ROCKCHIP_H
+#define _PCIE_ROCKCHIP_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
+ * bits. This allows atomic updates of the register without locking.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+
+#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+#define MAX_LANE_NUM 4
+#define MAX_REGION_LIMIT 32
+#define MIN_EP_APERTURE 28
+
+#define PCIE_CLIENT_BASE 0x0
+#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
+#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
+#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
+#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
+#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
+#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
+#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
+#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
+#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
+#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
+#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
+#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
+#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
+#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
+#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
+#define PCIE_CLIENT_INTR_SHIFT 5
+#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
+#define PCIE_CLIENT_INT_MSG BIT(14)
+#define PCIE_CLIENT_INT_HOT_RST BIT(13)
+#define PCIE_CLIENT_INT_DPA BIT(12)
+#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
+#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
+#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
+#define PCIE_CLIENT_INT_INTD BIT(8)
+#define PCIE_CLIENT_INT_INTC BIT(7)
+#define PCIE_CLIENT_INT_INTB BIT(6)
+#define PCIE_CLIENT_INT_INTA BIT(5)
+#define PCIE_CLIENT_INT_LOCAL BIT(4)
+#define PCIE_CLIENT_INT_UDMA BIT(3)
+#define PCIE_CLIENT_INT_PHY BIT(2)
+#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
+#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
+
+#define PCIE_CLIENT_INT_LEGACY \
+ (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
+ PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
+
+#define PCIE_CLIENT_INT_CLI \
+ (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
+ PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
+ PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
+ PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
+ PCIE_CLIENT_INT_PHY)
+
+#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
+#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
+#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
+#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
+#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
+#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
+#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
+#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
+#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
+#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
+ (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
+#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
+#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
+#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
+#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
+#define PCIE_CORE_INT_PRFPE BIT(0)
+#define PCIE_CORE_INT_CRFPE BIT(1)
+#define PCIE_CORE_INT_RRPE BIT(2)
+#define PCIE_CORE_INT_PRFO BIT(3)
+#define PCIE_CORE_INT_CRFO BIT(4)
+#define PCIE_CORE_INT_RT BIT(5)
+#define PCIE_CORE_INT_RTR BIT(6)
+#define PCIE_CORE_INT_PE BIT(7)
+#define PCIE_CORE_INT_MTR BIT(8)
+#define PCIE_CORE_INT_UCR BIT(9)
+#define PCIE_CORE_INT_FCE BIT(10)
+#define PCIE_CORE_INT_CT BIT(11)
+#define PCIE_CORE_INT_UTC BIT(18)
+#define PCIE_CORE_INT_MMVC BIT(19)
+#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44)
+#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
+#define PCIE_CORE_PHY_FUNC_CFG (PCIE_CORE_CTRL_MGMT_BASE + 0x2c0)
+#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED 0x0
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS 0x1
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS 0x4
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS 0x6
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
+
+#define PCIE_CORE_INT \
+ (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
+ PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
+ PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
+ PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
+ PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
+ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
+ PCIE_CORE_INT_MMVC)
+
+#define PCIE_RC_RP_ATS_BASE 0x400000
+#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
+#define PCIE_RC_CONFIG_BASE 0xa00000
+#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
+#define PCIE_RC_CONFIG_SCC_SHIFT 16
+#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
+#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
+#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
+#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
+#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
+#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
+#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
+#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
+#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
+#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
+#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
+
+#define PCIE_CORE_AXI_CONF_BASE 0xc00000
+#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
+#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
+#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
+#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
+
+#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
+#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
+#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
+
+/* Size of one AXI Region (not Region 0) */
+#define AXI_REGION_SIZE BIT(20)
+/* Size of Region 0, equal to sum of sizes of other regions */
+#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
+#define OB_REG_SIZE_SHIFT 5
+#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
+#define AXI_WRAPPER_IO_WRITE 0x6
+#define AXI_WRAPPER_MEM_WRITE 0x2
+#define AXI_WRAPPER_TYPE0_CFG 0xa
+#define AXI_WRAPPER_TYPE1_CFG 0xb
+#define AXI_WRAPPER_NOR_MSG 0xc
+
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
+#define MIN_AXI_ADDR_BITS_PASSED 8
+#define PCIE_RC_SEND_PME_OFF 0x11960
+#define ROCKCHIP_VENDOR_ID 0x1d87
+#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
+#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
+#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
+#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
+#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
+ (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
+ PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
+#define PCIE_LINK_IS_L2(x) \
+ (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_UP(x) \
+ (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
+#define PCIE_LINK_IS_GEN2(x) \
+ (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
+
+#define RC_REGION_0_ADDR_TRANS_H 0x00000000
+#define RC_REGION_0_ADDR_TRANS_L 0x00000000
+#define RC_REGION_0_PASS_BITS (25 - 1)
+#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
+#define MAX_AXI_WRAPPER_REGION_NUM 33
+
+#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2
+#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3
+#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4
+#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27
+#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5)
+#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
+ (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
+#define ROCKCHIP_PCIE_MSG_CODE_MASK GENMASK(15, 8)
+#define ROCKCHIP_PCIE_MSG_CODE(code) \
+ (((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK)
+#define ROCKCHIP_PCIE_MSG_NO_DATA BIT(16)
+
+#define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4
+#define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24)
+#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
+#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
+#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+ (PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ (((devfn) << 12) & \
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ (((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+ (((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
+
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \
+ (PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \
+ (PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+ (GENMASK(4, 0) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << ((b) * 8)) & \
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+ (GENMASK(7, 5) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 8 + 5)) & \
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+struct rockchip_pcie {
+ void __iomem *reg_base; /* DT axi-base */
+ void __iomem *apb_base; /* DT apb-base */
+ bool legacy_phy;
+ struct phy *phys[MAX_LANE_NUM];
+ struct reset_control *core_rst;
+ struct reset_control *mgmt_rst;
+ struct reset_control *mgmt_sticky_rst;
+ struct reset_control *pipe_rst;
+ struct reset_control *pm_rst;
+ struct reset_control *aclk_rst;
+ struct reset_control *pclk_rst;
+ struct clk *aclk_pcie;
+ struct clk *aclk_perf_pcie;
+ struct clk *hclk_pcie;
+ struct clk *clk_pcie_pm;
+ struct regulator *vpcie12v; /* 12V power supply */
+ struct regulator *vpcie3v3; /* 3.3V power supply */
+ struct regulator *vpcie1v8; /* 1.8V power supply */
+ struct regulator *vpcie0v9; /* 0.9V power supply */
+ struct gpio_desc *ep_gpio;
+ u32 lanes;
+ u8 lanes_map;
+ u8 root_bus_nr;
+ int link_gen;
+ struct device *dev;
+ struct irq_domain *irq_domain;
+ int offset;
+ struct pci_bus *root_bus;
+ struct resource *io;
+ phys_addr_t io_bus_addr;
+ u32 io_size;
+ void __iomem *msg_region;
+ u32 mem_size;
+ phys_addr_t msg_bus_addr;
+ phys_addr_t mem_bus_addr;
+ bool is_rc;
+ struct resource *mem_res;
+};
+
+static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
+{
+ return readl(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
+{
+ writel(val, rockchip->apb_base + reg);
+}
+
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip);
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
+void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_cfg_configuration_accesses(
+ struct rockchip_pcie *rockchip, u32 type);
+
+#endif /* _PCIE_ROCKCHIP_H */
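As a quick sanity check of the register conventions declared in this header, the following stand-alone user-space snippet (illustrative only; the macros are copied verbatim from pcie-rockchip.h above, everything else is made up) shows the values produced by the HIWORD_UPDATE write-mask scheme used with rockchip_pcie_write() and by the ECAM-style config address encoding.

	#include <stdio.h>

	/* Copied from pcie-rockchip.h: upper 16 bits are a write mask */
	#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
	#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)

	/* Copied from pcie-rockchip.h: ECAM-style config address encoding */
	#define PCIE_ECAM_BUS(x)	(((x) & 0xff) << 20)
	#define PCIE_ECAM_DEV(x)	(((x) & 0x1f) << 15)
	#define PCIE_ECAM_FUNC(x)	(((x) & 0x7) << 12)
	#define PCIE_ECAM_REG(x)	(((x) & 0xfff) << 0)
	#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
		(PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
		 PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))

	int main(void)
	{
		/* Setting bit 6 (MODE_RC) touches only that bit: mask 0x0040, value 0x0040 */
		unsigned int mode_rc = HIWORD_UPDATE_BIT(0x0040);
		/* Clearing the same bit: mask 0x0040, value 0 */
		unsigned int mode_ep = HIWORD_UPDATE(0x0040, 0);
		/* Config offset for bus 1, device 0, function 0, register 0x10 */
		unsigned int ecam = PCIE_ECAM_ADDR(1, 0, 0, 0x10);

		printf("MODE_RC write: 0x%08x\n", mode_rc);	/* 0x00400040 */
		printf("MODE_EP write: 0x%08x\n", mode_ep);	/* 0x00400000 */
		printf("ECAM address:  0x%08x\n", ecam);	/* 0x00100010 */
		return 0;
	}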
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4839ae578711b..6a4bbb5b3de00 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
+#include "../pci.h"
+
/* Bridge core config registers */
#define BRCFG_PCIE_RX0 0x00000000
#define BRCFG_INTERRUPT 0x00000010
@@ -825,7 +827,6 @@ static const struct of_device_id nwl_pcie_of_match[] = {
static int nwl_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
struct nwl_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
@@ -855,7 +856,8 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
- err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 0ad188effc091..b110a3a814e35 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -23,6 +23,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
/* Register definitions */
#define XILINX_PCIE_REG_BIR 0x00000130
#define XILINX_PCIE_REG_IDR 0x00000138
@@ -643,8 +645,8 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return err;
}
- err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, &res,
- &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 930a8fa08bd66..942b64fc7f1f0 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -24,6 +24,28 @@
#define VMD_MEMBAR1 2
#define VMD_MEMBAR2 4
+#define PCI_REG_VMCAP 0x40
+#define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1)
+#define PCI_REG_VMCONFIG 0x44
+#define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3)
+#define PCI_REG_VMLOCK 0x70
+#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
+
+enum vmd_features {
+ /*
+ * Device may contain registers which hint the physical location of the
+ * membars, in order to allow proper address translation during
+ * resource assignment to enable guest virtualization
+ */
+ VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0),
+
+ /*
+ * Device may provide root port configuration information which limits
+ * bus numbering
+ */
+ VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1),
+};
+
/*
* Lock for manipulating VMD IRQ lists.
*/
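The vmd_features bits defined in the hunk above travel in pci_device_id.driver_data and are tested in the domain-enable path; a minimal sketch of that pattern (illustrative only, the function name example_probe is invented and the real wiring is in the hunks that follow):

	static int example_probe(struct pci_dev *dev, const struct pci_device_id *id)
	{
		unsigned long features = (unsigned long)id->driver_data;

		if (features & VMD_FEAT_HAS_MEMBAR_SHADOW)
			; /* translate membar addresses via the shadow registers */
		if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS)
			; /* bus numbers may be restricted to 128-255 */
		return 0;
	}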
@@ -546,7 +568,7 @@ static int vmd_find_free_domain(void)
return domain + 1;
}
-static int vmd_enable_domain(struct vmd_dev *vmd)
+static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
struct pci_sysdata *sd = &vmd->sysdata;
struct fwnode_handle *fn;
@@ -554,12 +576,57 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
u32 upper_bits;
unsigned long flags;
LIST_HEAD(resources);
+ resource_size_t offset[2] = {0};
+ resource_size_t membar2_offset = 0x2000, busn_start = 0;
+
+ /*
+ * Shadow registers may exist in certain VMD device ids which allow
+ * guests to correctly assign host physical addresses to the root ports
+ * and child devices. These registers will either return the host value
+ * or 0, depending on an enable bit in the VMD device.
+ */
+ if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
+ u32 vmlock;
+ int ret;
+
+ membar2_offset = 0x2018;
+ ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
+ if (ret || vmlock == ~0)
+ return -ENODEV;
+
+ if (MB2_SHADOW_EN(vmlock)) {
+ void __iomem *membar2;
+
+ membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
+ if (!membar2)
+ return -ENOMEM;
+ offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
+ readq(membar2 + 0x2008);
+ offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
+ readq(membar2 + 0x2010);
+ pci_iounmap(vmd->dev, membar2);
+ }
+ }
+
+ /*
+ * Certain VMD devices may have a root port configuration option which
+ * limits the bus range to between 0-127 or 128-255
+ */
+ if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
+ u32 vmcap, vmconfig;
+
+ pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
+ pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
+ if (BUS_RESTRICT_CAP(vmcap) &&
+ (BUS_RESTRICT_CFG(vmconfig) == 0x1))
+ busn_start = 128;
+ }
res = &vmd->dev->resource[VMD_CFGBAR];
vmd->resources[0] = (struct resource) {
.name = "VMD CFGBAR",
- .start = 0,
- .end = (resource_size(res) >> 20) - 1,
+ .start = busn_start,
+ .end = busn_start + (resource_size(res) >> 20) - 1,
.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
};
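To make the shadow-register arithmetic in the hunk above concrete, here is a small user-space sketch with invented addresses (assuming a virtualized setup where the guest-visible MEMBAR differs from the host-programmed one). The offset computed plays the role of the value later passed to pci_add_resource_offset(), i.e. CPU address = bus address + offset.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Invented example values */
		uint64_t guest_membar1 = 0xd0000000;	/* resource[VMD_MEMBAR1].start as the guest sees it */
		uint64_t host_membar1  = 0xc0000000;	/* readq(membar2 + 0x2008): address the host programmed */
		int64_t  offset        = guest_membar1 - host_membar1;

		/* A child endpoint BAR placed at a host (bus) address ... */
		uint64_t child_bar_bus = 0xc0100000;

		/* ... is reported to the guest's PCI core at bus address + offset */
		printf("bus 0x%llx -> cpu 0x%llx\n",
		       (unsigned long long)child_bar_bus,
		       (unsigned long long)(child_bar_bus + offset));	/* 0xd0100000 */
		return 0;
	}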
@@ -600,7 +667,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
flags &= ~IORESOURCE_MEM_64;
vmd->resources[2] = (struct resource) {
.name = "VMD MEMBAR2",
- .start = res->start + 0x2000,
+ .start = res->start + membar2_offset,
.end = res->end,
.flags = flags,
.parent = res,
@@ -624,10 +691,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
return -ENODEV;
pci_add_resource(&resources, &vmd->resources[0]);
- pci_add_resource(&resources, &vmd->resources[1]);
- pci_add_resource(&resources, &vmd->resources[2]);
- vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
- &resources);
+ pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
+ pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
+
+ vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
+ sd, &resources);
if (!vmd->bus) {
pci_free_resource_list(&resources);
irq_domain_remove(vmd->irq_domain);
@@ -713,7 +781,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
- err = vmd_enable_domain(vmd);
+ err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
if (err)
return err;
@@ -778,7 +846,10 @@ static int vmd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
static const struct pci_device_id vmd_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index a8f21d051e0c6..e9f78eb390d2e 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -104,14 +104,11 @@ config HOTPLUG_PCI_CPCI_GENERIC
When in doubt, say N.
config HOTPLUG_PCI_SHPC
- tristate "SHPC PCI Hotplug driver"
+ bool "SHPC PCI Hotplug driver"
help
Say Y here if you have a motherboard with a SHPC PCI Hotplug
controller.
- To compile this driver as a module, choose M here: the
- module will be called shpchp.
-
When in doubt, say N.
config HOTPLUG_PCI_POWERNV
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index c9816166978ee..3979f89b250ad 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -63,22 +63,17 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
/**
* acpi_get_hp_hw_control_from_firmware
* @dev: the pci_dev of the bridge that has a hotplug controller
- * @flags: requested control bits for _OSC
*
* Attempt to take hotplug control from firmware.
*/
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
{
+ const struct pci_host_bridge *host;
+ const struct acpi_pci_root *root;
acpi_status status;
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
- if (!flags) {
- err("Invalid flags %u specified!\n", flags);
- return -EINVAL;
- }
-
/*
* Per PCI firmware specification, we should run the ACPI _OSC
* method to get control of hotplug hardware before using it. If
@@ -88,25 +83,20 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
* OSHP within the scope of the hotplug controller and its parents,
* up to the host bridge under which this controller exists.
*/
- handle = acpi_find_root_bridge_handle(pdev);
- if (handle) {
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
- dbg("Trying to get hotplug control for %s\n",
- (char *)string.pointer);
- status = acpi_pci_osc_control_set(handle, &flags, flags);
- if (ACPI_SUCCESS(status))
- goto got_one;
- if (status == AE_SUPPORT)
- goto no_control;
- kfree(string.pointer);
- string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
- }
+ if (shpchp_is_native(pdev))
+ return 0;
+
+ /* If _OSC exists, we should not evaluate OSHP */
+ host = pci_find_host_bridge(pdev->bus);
+ root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
+ if (root->osc_support_set)
+ goto no_control;
handle = ACPI_HANDLE(&pdev->dev);
if (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
- * space at all. Try to get acpi handle of parent pci bus.
+ * space at all. Try to get ACPI handle of parent PCI bus.
*/
struct pci_bus *pbus;
for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
@@ -118,8 +108,8 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
while (handle) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
- dbg("Trying to get hotplug control for %s\n",
- (char *)string.pointer);
+ pci_info(pdev, "Requesting control of SHPC hotplug via OSHP (%s)\n",
+ (char *)string.pointer);
status = acpi_run_oshp(handle);
if (ACPI_SUCCESS(status))
goto got_one;
@@ -131,13 +121,12 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
break;
}
no_control:
- dbg("Cannot get control of hotplug hardware for pci %s\n",
- pci_name(pdev));
+ pci_info(pdev, "Cannot get control of SHPC hotplug\n");
kfree(string.pointer);
return -ENODEV;
got_one:
- dbg("Gained control for hotplug HW for pci %s (%s)\n",
- pci_name(pdev), (char *)string.pointer);
+ pci_info(pdev, "Gained control of SHPC hotplug (%s)\n",
+ (char *)string.pointer);
kfree(string.pointer);
return 0;
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index b45b375c0e6c0..3a17b290df5dd 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -287,11 +287,12 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
/*
* Expose slots to user space for functions that have _EJ0 or _RMV or
* are located in dock stations. Do not expose them for devices handled
- * by the native PCIe hotplug (PCIeHP), becuase that code is supposed to
- * expose slots to user space in those cases.
+ * by the native PCIe hotplug (PCIeHP) or standard PCI hotplug
+ * (SHPCHP), because that code is supposed to expose slots to user
+ * space in those cases.
*/
if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
- && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) {
+ && !(pdev && hotplug_is_native(pdev))) {
unsigned long long sun;
int retval;
@@ -430,6 +431,29 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
}
+static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
+{
+ struct pci_bus *bus = bridge->subordinate;
+ struct pci_dev *dev;
+ int max;
+
+ if (!bus)
+ return;
+
+ max = bus->busn_res.start;
+ /* Scan already configured non-hotplug bridges */
+ for_each_pci_bridge(dev, bus) {
+ if (!hotplug_is_native(dev))
+ max = pci_scan_bridge(bus, dev, max, 0);
+ }
+
+ /* Scan non-hotplug bridges that need to be reconfigured */
+ for_each_pci_bridge(dev, bus) {
+ if (!hotplug_is_native(dev))
+ max = pci_scan_bridge(bus, dev, max, 1);
+ }
+}
+
/**
* enable_slot - enable, configure a slot
* @slot: slot to be enabled
@@ -442,25 +466,42 @@ static void enable_slot(struct acpiphp_slot *slot)
struct pci_dev *dev;
struct pci_bus *bus = slot->bus;
struct acpiphp_func *func;
- int max, pass;
- LIST_HEAD(add_list);
- acpiphp_rescan_slot(slot);
- max = acpiphp_max_busnr(bus);
- for (pass = 0; pass < 2; pass++) {
+ if (bus->self && hotplug_is_native(bus->self)) {
+ /*
+ * If native hotplug is used, it will take care of hotplug
+ * slot management and resource allocation for hotplug
+ * bridges. However, ACPI hotplug may still be used for
+ * non-hotplug bridges to bring in additional devices such
+ * as a Thunderbolt host controller.
+ */
for_each_pci_bridge(dev, bus) {
- if (PCI_SLOT(dev->devfn) != slot->device)
- continue;
-
- max = pci_scan_bridge(bus, dev, max, pass);
- if (pass && dev->subordinate) {
- check_hotplug_bridge(slot, dev);
- pcibios_resource_survey_bus(dev->subordinate);
- __pci_bus_size_bridges(dev->subordinate, &add_list);
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ acpiphp_native_scan_bridge(dev);
+ }
+ pci_assign_unassigned_bridge_resources(bus->self);
+ } else {
+ LIST_HEAD(add_list);
+ int max, pass;
+
+ acpiphp_rescan_slot(slot);
+ max = acpiphp_max_busnr(bus);
+ for (pass = 0; pass < 2; pass++) {
+ for_each_pci_bridge(dev, bus) {
+ if (PCI_SLOT(dev->devfn) != slot->device)
+ continue;
+
+ max = pci_scan_bridge(bus, dev, max, pass);
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
+ }
}
}
+ __pci_bus_assign_resources(bus, &add_list, NULL);
}
- __pci_bus_assign_resources(bus, &add_list, NULL);
acpiphp_sanitize_bus(bus);
pcie_bus_configure_settings(bus);
@@ -481,7 +522,7 @@ static void enable_slot(struct acpiphp_slot *slot)
if (!dev) {
/* Do not set SLOT_ENABLED flag if some funcs
are not added. */
- slot->flags &= (~SLOT_ENABLED);
+ slot->flags &= ~SLOT_ENABLED;
continue;
}
}
@@ -510,7 +551,7 @@ static void disable_slot(struct acpiphp_slot *slot)
list_for_each_entry(func, &slot->funcs, sibling)
acpi_bus_trim(func_to_acpi_device(func));
- slot->flags &= (~SLOT_ENABLED);
+ slot->flags &= ~SLOT_ENABLED;
}
static bool slot_no_hotplug(struct acpiphp_slot *slot)
@@ -608,6 +649,11 @@ static void trim_stale_devices(struct pci_dev *dev)
alive = pci_device_is_present(dev);
if (!alive) {
+ pci_dev_set_disconnected(dev, NULL);
+ if (pci_has_subordinate(dev))
+ pci_walk_bus(dev->subordinate, pci_dev_set_disconnected,
+ NULL);
+
pci_stop_and_remove_bus_device(dev);
if (adev)
acpi_bus_trim(adev);
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index b81ca3fa0e84e..1869b0411ce08 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -379,7 +379,7 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_max_bus_speed(struct slot *slot)
{
- int rc;
+ int rc = 0;
u8 mode = 0;
enum pci_bus_speed speed;
struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 88e917c9120fd..5f892065585e8 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -121,7 +121,7 @@ struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
-void pcie_enable_notification(struct controller *ctrl);
+void pcie_reenable_notification(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
void pciehp_power_off_slot(struct slot *slot);
void pciehp_get_power_status(struct slot *slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 332b723ff9e63..44a6a63802d56 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -283,7 +283,7 @@ static int pciehp_resume(struct pcie_device *dev)
ctrl = get_service_data(dev);
/* reinitialize the chipset's event detection logic */
- pcie_enable_notification(ctrl);
+ pcie_reenable_notification(ctrl);
slot = ctrl->slot;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 18a42f8f5dc56..718b6073afad1 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -10,7 +10,6 @@
* All rights reserved.
*
* Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
- *
*/
#include <linux/kernel.h>
@@ -147,25 +146,22 @@ static void pcie_wait_cmd(struct controller *ctrl)
else
rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
- /*
- * Controllers with errata like Intel CF118 don't generate
- * completion notifications unless the power/indicator/interlock
- * control bits are changed. On such controllers, we'll emit this
- * timeout message when we wait for completion of commands that
- * don't change those bits, e.g., commands that merely enable
- * interrupts.
- */
if (!rc)
ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
ctrl->slot_ctrl,
jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
+#define CC_ERRATUM_MASK (PCI_EXP_SLTCTL_PCC | \
+ PCI_EXP_SLTCTL_PIC | \
+ PCI_EXP_SLTCTL_AIC | \
+ PCI_EXP_SLTCTL_EIC)
+
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
u16 mask, bool wait)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
- u16 slot_ctrl;
+ u16 slot_ctrl_orig, slot_ctrl;
mutex_lock(&ctrl->ctrl_lock);
@@ -180,6 +176,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
goto out;
}
+ slot_ctrl_orig = slot_ctrl;
slot_ctrl &= ~mask;
slot_ctrl |= (cmd & mask);
ctrl->cmd_busy = 1;
@@ -189,6 +186,17 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
ctrl->slot_ctrl = slot_ctrl;
/*
+ * Controllers with the Intel CF118 and similar errata advertise
+ * Command Completed support, but they only set Command Completed
+ * if we change the "Control" bits for power, power indicator,
+ * attention indicator, or interlock. If we only change the
+ * "Enable" bits, they never set the Command Completed bit.
+ */
+ if (pdev->broken_cmd_compl &&
+ (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
+ ctrl->cmd_busy = 0;
+
+ /*
* Optionally wait for the hardware to be ready for a new command,
* indicating completion of the above issued command.
*/
@@ -231,25 +239,11 @@ bool pciehp_check_link_active(struct controller *ctrl)
return ret;
}
-static void __pcie_wait_link_active(struct controller *ctrl, bool active)
-{
- int timeout = 1000;
-
- if (pciehp_check_link_active(ctrl) == active)
- return;
- while (timeout > 0) {
- msleep(10);
- timeout -= 10;
- if (pciehp_check_link_active(ctrl) == active)
- return;
- }
- ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
- active ? "set" : "cleared");
-}
-
static void pcie_wait_link_active(struct controller *ctrl)
{
- __pcie_wait_link_active(ctrl, true);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+
+ pcie_wait_for_link(pdev, true);
}
static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
@@ -659,7 +653,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return handled;
}
-void pcie_enable_notification(struct controller *ctrl)
+static void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -697,6 +691,17 @@ void pcie_enable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
+void pcie_reenable_notification(struct controller *ctrl)
+{
+ /*
+ * Clear both Presence and Data Link Layer Changed to make sure
+ * those events still fire after we have re-enabled them.
+ */
+ pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_enable_notification(ctrl);
+}
+
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
@@ -861,7 +866,7 @@ struct controller *pcie_init(struct pcie_device *dev)
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
PCI_EXP_SLTSTA_DLLSC);
- ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n",
+ ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
@@ -872,7 +877,8 @@ struct controller *pcie_init(struct pcie_device *dev)
FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
- FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
+ FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
+ pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
if (pcie_init_slot(ctrl))
goto abort_ctrl;
@@ -891,3 +897,21 @@ void pciehp_release_ctrl(struct controller *ctrl)
pcie_cleanup_slot(ctrl);
kfree(ctrl);
}
+
+static void quirk_cmd_compl(struct pci_dev *pdev)
+{
+ u32 slot_cap;
+
+ if (pci_is_pcie(pdev)) {
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
+ if (slot_cap & PCI_EXP_SLTCAP_HPC &&
+ !(slot_cap & PCI_EXP_SLTCAP_NCCS))
+ pdev->broken_cmd_compl = 1;
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
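The erratum handling above boils down to one rule: on an affected port, a Slot Control write only generates Command Completed when it changes one of the power/indicator/interlock "Control" bits. A minimal sketch of that rule, reusing the CC_ERRATUM_MASK defined earlier (the helper name is invented for illustration, not part of the patch):

/*
 * Illustrative only: predict whether a Slot Control write will raise
 * Command Completed on a port flagged with broken_cmd_compl. Only
 * changes to the bits covered by CC_ERRATUM_MASK do.
 */
static bool cc_expected(u16 old_ctrl, u16 new_ctrl, bool broken_cmd_compl)
{
	if (!broken_cmd_compl)
		return true;
	return (old_ctrl & CC_ERRATUM_MASK) != (new_ctrl & CC_ERRATUM_MASK);
}

pcie_do_write_cmd() applies exactly this test to clear cmd_busy up front, so pcie_wait_cmd() no longer times out on commands that merely flip "Enable" bits.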
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index d44100687dfea..6c2e8d7307c6b 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -220,12 +220,16 @@ static int pnv_php_populate_changeset(struct of_changeset *ocs,
for_each_child_of_node(dn, child) {
ret = of_changeset_attach_node(ocs, child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
ret = pnv_php_populate_changeset(ocs, child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
return ret;
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index c55730b61c9a4..516e4835019cc 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -105,7 +105,6 @@ struct controller {
};
/* Define AMD SHPC ID */
-#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
/* AMD PCI-X bridge registers */
@@ -173,17 +172,6 @@ static inline const char *slot_name(struct slot *slot)
return hotplug_slot_name(slot->hotplug_slot);
}
-#ifdef CONFIG_ACPI
-#include <linux/pci-acpi.h>
-static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
- u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
- return acpi_get_hp_hw_control_from_firmware(dev, flags);
-}
-#else
-#define get_hp_hw_control_from_firmware(dev) (0)
-#endif
-
struct ctrl_reg {
volatile u32 base_offset;
volatile u32 slot_avail1;
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 1f0f96908b5ad..e91be287f292b 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -270,24 +270,12 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static int is_shpc_capable(struct pci_dev *dev)
-{
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
- return 1;
- if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
- return 0;
- if (get_hp_hw_control_from_firmware(dev))
- return 0;
- return 1;
-}
-
static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
struct controller *ctrl;
- if (!is_shpc_capable(pdev))
+ if (acpi_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index bedda5bda9107..1047b56e57304 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -585,13 +585,13 @@ static int shpchp_enable_slot (struct slot *p_slot)
ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
- if (((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
- (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458))
+ if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
+ p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
&& p_slot->ctrl->num_slots == 1) {
- /* handle amd pogo errata; this must be done before enable */
+ /* handle AMD POGO errata; this must be done before enable */
amd_pogo_errata_save_misc_reg(p_slot);
retval = board_added(p_slot);
- /* handle amd pogo errata; this must be done after enable */
+ /* handle AMD POGO errata; this must be done after enable */
amd_pogo_errata_restore_misc_reg(p_slot);
} else
retval = board_added(p_slot);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 8adf4a64f2919..d0d73dbbd5ca4 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -469,6 +469,7 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
+ iov->driver_max_VFs = total;
pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
iov->pgsz = pgsz;
iov->self = dev;
@@ -827,9 +828,42 @@ int pci_sriov_get_totalvfs(struct pci_dev *dev)
if (!dev->is_physfn)
return 0;
- if (dev->sriov->driver_max_VFs)
- return dev->sriov->driver_max_VFs;
-
- return dev->sriov->total_VFs;
+ return dev->sriov->driver_max_VFs;
}
EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
+
+/**
+ * pci_sriov_configure_simple - helper to configure SR-IOV
+ * @dev: the PCI device
+ * @nr_virtfn: number of virtual functions to enable, 0 to disable
+ *
+ * Enable or disable SR-IOV for devices that don't require any PF setup
+ * before enabling SR-IOV. Return value is negative on error, or number of
+ * VFs allocated on success.
+ */
+int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
+{
+ int rc;
+
+ might_sleep();
+
+ if (!dev->is_physfn)
+ return -ENODEV;
+
+ if (pci_vfs_assigned(dev)) {
+ pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ if (nr_virtfn == 0) {
+ sriov_disable(dev);
+ return 0;
+ }
+
+ rc = sriov_enable(dev, nr_virtfn);
+ if (rc < 0)
+ return rc;
+
+ return nr_virtfn;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
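Because pci_sriov_configure_simple() needs no device-specific state, a PF driver can wire it straight into its sriov_configure hook. A hypothetical skeleton (vendor/device ID and names are placeholders, not from this patch):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id foo_pf_ids[] = {
	{ PCI_DEVICE(0x1234, 0xabcd) },	/* placeholder ID */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, foo_pf_ids);

static int foo_pf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	return 0;	/* nothing to set up before VFs are enabled */
}

static struct pci_driver foo_pf_driver = {
	.name			= "foo-pf",
	.id_table		= foo_pf_ids,
	.probe			= foo_pf_probe,
	.sriov_configure	= pci_sriov_configure_simple,
};
module_pci_driver(foo_pf_driver);
MODULE_LICENSE("GPL");

Writing a VF count to the device's sriov_numvfs sysfs attribute then lands in pci_sriov_configure_simple(), which enables or disables the VFs; the pci-pf-stub driver added later in this series uses the same pattern for real hardware.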
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index a28355c273ae8..d088c9147f105 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -244,8 +244,9 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
#if defined(CONFIG_OF_ADDRESS)
/**
- * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
- * @dev: device node of the host bridge having the range property
+ * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
+ * host bridge resources from DT
+ * @dev: host bridge device
* @busno: bus number associated with the bridge root bus
* @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
@@ -253,8 +254,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* address for the start of the I/O range. Can be NULL if the caller doesn't
* expect I/O ranges to be present in the device tree.
*
- * It is the caller's job to free the @resources list.
- *
* This function will parse the "ranges" property of a PCI host bridge device
* node and setup the resource mapping based on its content. It is expected
* that the property conforms with the Power ePAPR document.
@@ -262,11 +261,11 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* It returns zero if the range parsing has been successful or a standard error
* value if it failed.
*/
-int of_pci_get_host_bridge_resources(struct device_node *dev,
+int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources, resource_size_t *io_base)
{
- struct resource_entry *window;
+ struct device_node *dev_node = dev->of_node;
struct resource *res;
struct resource *bus_range;
struct of_pci_range range;
@@ -277,19 +276,19 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
if (io_base)
*io_base = (resource_size_t)OF_BAD_ADDR;
- bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
+ bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
if (!bus_range)
return -ENOMEM;
- pr_info("host bridge %pOF ranges:\n", dev);
+ dev_info(dev, "host bridge %pOF ranges:\n", dev_node);
- err = of_pci_parse_bus_range(dev, bus_range);
+ err = of_pci_parse_bus_range(dev_node, bus_range);
if (err) {
bus_range->start = busno;
bus_range->end = bus_max;
bus_range->flags = IORESOURCE_BUS;
- pr_info(" No bus range found for %pOF, using %pR\n",
- dev, bus_range);
+ dev_info(dev, " No bus range found for %pOF, using %pR\n",
+ dev_node, bus_range);
} else {
if (bus_range->end > bus_range->start + bus_max)
bus_range->end = bus_range->start + bus_max;
@@ -297,11 +296,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
pci_add_resource(resources, bus_range);
/* Check for ranges property */
- err = of_pci_range_parser_init(&parser, dev);
+ err = of_pci_range_parser_init(&parser, dev_node);
if (err)
- goto parse_failed;
+ goto failed;
- pr_debug("Parsing ranges property...\n");
+ dev_dbg(dev, "Parsing ranges property...\n");
for_each_of_pci_range(&parser, &range) {
/* Read next ranges element */
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
@@ -310,9 +309,9 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
snprintf(range_type, 4, "MEM");
else
snprintf(range_type, 4, "err");
- pr_info(" %s %#010llx..%#010llx -> %#010llx\n", range_type,
- range.cpu_addr, range.cpu_addr + range.size - 1,
- range.pci_addr);
+ dev_info(dev, " %s %#010llx..%#010llx -> %#010llx\n",
+ range_type, range.cpu_addr,
+ range.cpu_addr + range.size - 1, range.pci_addr);
/*
* If we failed translation or got a zero-sized region
@@ -321,28 +320,28 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
- res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
- goto parse_failed;
+ goto failed;
}
- err = of_pci_range_to_resource(&range, dev, res);
+ err = of_pci_range_to_resource(&range, dev_node, res);
if (err) {
- kfree(res);
+ devm_kfree(dev, res);
continue;
}
if (resource_type(res) == IORESOURCE_IO) {
if (!io_base) {
- pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
- dev);
+ dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
+ dev_node);
err = -EINVAL;
- goto conversion_failed;
+ goto failed;
}
if (*io_base != (resource_size_t)OF_BAD_ADDR)
- pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
- dev);
+ dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
+ dev_node);
*io_base = range.cpu_addr;
}
@@ -351,15 +350,11 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
return 0;
-conversion_failed:
- kfree(res);
-parse_failed:
- resource_list_for_each_entry(window, resources)
- kfree(window->res);
+failed:
pci_free_resource_list(resources);
return err;
}
-EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
+EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
/**
@@ -599,12 +594,12 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
struct resource **bus_range)
{
int err, res_valid = 0;
- struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win, *tmp;
INIT_LIST_HEAD(resources);
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
+ &iobase);
if (err)
return err;
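With the devm_ variant the caller no longer needs to free the resource list by hand on the error path. A simplified sketch of a host bridge probe using it (driver structure and names are assumed for illustration):

static int foo_pcie_parse_dt(struct device *dev, struct list_head *resources)
{
	resource_size_t iobase;
	int err;

	INIT_LIST_HEAD(resources);
	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
						    resources, &iobase);
	if (err)
		return err;	/* helper already dropped the partial list */

	/* claim the parsed windows against the parent resources */
	return devm_request_pci_bus_resources(dev, resources);
}

Most callers can simply use pci_parse_request_of_pci_ranges(), updated above, which wraps this sequence.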
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1abdbf267c196..65113b6eed147 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -370,26 +370,57 @@ EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
* pciehp_is_native - Check whether a hotplug port is handled by the OS
- * @pdev: Hotplug port to check
+ * @bridge: Hotplug port to check
*
- * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field
- * and return the value of the "PCI Express Native Hot Plug control" bit.
- * On failure to obtain the _OSC Control Field return %false.
+ * Returns true if the given @bridge is handled by the native PCIe hotplug
+ * driver.
*/
-bool pciehp_is_native(struct pci_dev *pdev)
+bool pciehp_is_native(struct pci_dev *bridge)
{
- struct acpi_pci_root *root;
- acpi_handle handle;
+ const struct pci_host_bridge *host;
+ u32 slot_cap;
- handle = acpi_find_root_bridge_handle(pdev);
- if (!handle)
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
- root = acpi_pci_find_root(handle);
- if (!root)
+ pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
+ if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
+ return false;
+
+ if (pcie_ports_native)
+ return true;
+
+ host = pci_find_host_bridge(bridge->bus);
+ return host->native_pcie_hotplug;
+}
+
+/**
+ * shpchp_is_native - Check whether a hotplug port is handled by the OS
+ * @bridge: Hotplug port to check
+ *
+ * Returns true if the given @bridge is handled by the native SHPC hotplug
+ * driver.
+ */
+bool shpchp_is_native(struct pci_dev *bridge)
+{
+ const struct pci_host_bridge *host;
+
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
+ return false;
+
+ /*
+ * It is assumed that AMD GOLAM chips support SHPC but they do not
+ * have SHPC capability.
+ */
+ if (bridge->vendor == PCI_VENDOR_ID_AMD &&
+ bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
+ return true;
+
+ if (!pci_find_capability(bridge, PCI_CAP_ID_SHPC))
return false;
- return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+ host = pci_find_host_bridge(bridge->bus);
+ return host->native_shpc_hotplug;
}
/**
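Taken together, the two predicates describe when the OS, rather than ACPI hotplug, owns a bridge. A tiny illustrative helper (this mirrors the hotplug_is_native() check used by acpiphp earlier in this series; the name here exists only for the sketch):

/* A port is natively managed if either pciehp or shpchp drives it. */
static bool bridge_hotplug_is_native(struct pci_dev *bridge)
{
	return pciehp_is_native(bridge) || shpchp_is_native(bridge);
}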
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 18ba62c764805..c125d53033c69 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1539,7 +1539,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
/**
* pci_uevent_ers - emit a uevent during recovery path of PCI device
* @pdev: PCI device undergoing error recovery
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
new file mode 100644
index 0000000000000..9795649fc6f93
--- /dev/null
+++ b/drivers/pci/pci-pf-stub.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* pci-pf-stub - simple stub driver for PCI SR-IOV PF device
+ *
+ * This driver is meant to act as a "whitelist" for devices that provide
+ * SR-IOV functionality while at the same time not actually needing a
+ * driver of their own.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+/**
+ * pci_pf_stub_whitelist - White list of devices to bind pci-pf-stub onto
+ *
+ * This table provides the list of IDs this driver is supposed to bind
+ * onto. You can think of this as a list of "quirked" devices where we
+ * add SR-IOV support here because no other driver would otherwise
+ * bind to them.
+ */
+static const struct pci_device_id pci_pf_stub_whitelist[] = {
+ { PCI_VDEVICE(AMAZON, 0x0053) },
+ /* required last entry */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, pci_pf_stub_whitelist);
+
+static int pci_pf_stub_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ pci_info(dev, "claimed by pci-pf-stub\n");
+ return 0;
+}
+
+static struct pci_driver pf_stub_driver = {
+ .name = "pci-pf-stub",
+ .id_table = pci_pf_stub_whitelist,
+ .probe = pci_pf_stub_probe,
+ .sriov_configure = pci_sriov_configure_simple,
+};
+
+static int __init pci_pf_stub_init(void)
+{
+ return pci_register_driver(&pf_stub_driver);
+}
+
+static void __exit pci_pf_stub_exit(void)
+{
+ pci_unregister_driver(&pf_stub_driver);
+}
+
+module_init(pci_pf_stub_init);
+module_exit(pci_pf_stub_exit);
+
+MODULE_LICENSE("GPL");
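Extending the stub to another PF that needs no driver of its own is a one-line change to the whitelist; the second entry below is purely hypothetical:

static const struct pci_device_id pci_pf_stub_whitelist[] = {
	{ PCI_VDEVICE(AMAZON, 0x0053) },
	{ PCI_DEVICE(0x1234, 0xabcd) },	/* hypothetical additional PF */
	/* required last entry */
	{ 0 }
};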
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 366d93af051d4..788a200fb2dc4 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -288,13 +288,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!val) {
- if (pci_is_enabled(pdev))
- pci_disable_device(pdev);
- else
- result = -EIO;
- } else
+ device_lock(dev);
+ if (dev->driver)
+ result = -EBUSY;
+ else if (val)
result = pci_enable_device(pdev);
+ else if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+ else
+ result = -EIO;
+ device_unlock(dev);
return result < 0 ? result : count;
}
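The enable_store() change above follows a general pattern: take the device lock before looking at dev->driver so the attribute cannot race with a concurrent bind or unbind. A generic sketch of that pattern (names invented for illustration):

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	ssize_t result = count;

	device_lock(dev);
	if (dev->driver) {
		result = -EBUSY;	/* a bound driver owns the device */
	} else {
		/* safe to change device state here */
	}
	device_unlock(dev);

	return result;
}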
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e90cf5c32e143..97acba712e4e7 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -112,6 +112,14 @@ unsigned int pcibios_max_latency = 255;
/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;
+/* If set, the PCIe ATS capability will not be used. */
+static bool pcie_ats_disabled;
+
+bool pci_ats_disabled(void)
+{
+ return pcie_ats_disabled;
+}
+
/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
@@ -4153,6 +4161,35 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait until the link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ int timeout = 1000;
+ bool ret;
+ u16 lnk_status;
+
+ for (;;) {
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+ if (ret == active)
+ return true;
+ if (timeout <= 0)
+ break;
+ msleep(10);
+ timeout -= 10;
+ }
+
+ pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+
+ return false;
+}
void pci_reset_secondary_bus(struct pci_dev *dev)
{
@@ -5085,49 +5122,6 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
EXPORT_SYMBOL(pcie_set_mps);
/**
- * pcie_get_minimum_link - determine minimum link settings of a PCI device
- * @dev: PCI device to query
- * @speed: storage for minimum speed
- * @width: storage for minimum width
- *
- * This function will walk up the PCI device chain and determine the minimum
- * link width and speed of the device.
- */
-int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width)
-{
- int ret;
-
- *speed = PCI_SPEED_UNKNOWN;
- *width = PCIE_LNK_WIDTH_UNKNOWN;
-
- while (dev) {
- u16 lnksta;
- enum pci_bus_speed next_speed;
- enum pcie_link_width next_width;
-
- ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
- if (ret)
- return ret;
-
- next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
- next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
-
- if (next_speed < *speed)
- *speed = next_speed;
-
- if (next_width < *width)
- *width = next_width;
-
- dev = dev->bus->self;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(pcie_get_minimum_link);
-
-/**
* pcie_bandwidth_available - determine minimum link settings of a PCIe
* device and its bandwidth limitation
* @dev: PCI device to query
@@ -5717,15 +5711,14 @@ static void pci_no_domains(void)
#endif
}
-#ifdef CONFIG_PCI_DOMAINS
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);
-int pci_get_new_domain_nr(void)
+static int pci_get_new_domain_nr(void)
{
return atomic_inc_return(&__domain_nr);
}
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
static int of_pci_bus_find_domain_nr(struct device *parent)
{
static int use_dt_domains = -1;
@@ -5780,7 +5773,6 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
acpi_pci_bus_find_domain_nr(bus);
}
#endif
-#endif
/**
* pci_ext_cfg_avail - can we access extended PCI config space?
@@ -5808,6 +5800,9 @@ static int __init pci_setup(char *str)
if (*str && (str = pcibios_setup(str)) && *str) {
if (!strcmp(str, "nomsi")) {
pci_no_msi();
+ } else if (!strncmp(str, "noats", 5)) {
+ pr_info("PCIe: ATS is disabled\n");
+ pcie_ats_disabled = true;
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
} else if (!strncmp(str, "realloc=", 8)) {
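pcie_wait_for_link() gives bridge drivers a common way to poll Data Link Layer Link Active; pciehp and DPC are converted to it elsewhere in this series. A hedged sketch of a typical caller (function name and error handling invented):

static int foo_bring_up_slot(struct pci_dev *bridge)
{
	/* after powering the slot, wait up to 1 s for the link */
	if (!pcie_wait_for_link(bridge, true))
		return -ETIMEDOUT;

	/* link is active; safe to enumerate below the bridge */
	return 0;
}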
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 023f7cf25bff4..c358e7a07f3fa 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -353,6 +353,11 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
void pci_enable_acs(struct pci_dev *dev);
+/* PCI error reporting and recovery */
+void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
+void pcie_do_nonfatal_recovery(struct pci_dev *dev);
+
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
@@ -407,4 +412,44 @@ static inline u64 pci_rebar_size_to_bytes(int size)
return 1ULL << (size + 20);
}
+struct device_node;
+
+#ifdef CONFIG_OF
+int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+int of_get_pci_domain_nr(struct device_node *node);
+int of_pci_get_max_link_speed(struct device_node *node);
+
+#else
+static inline int
+of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+{
+ return -EINVAL;
+}
+
+static inline int
+of_get_pci_domain_nr(struct device_node *node)
+{
+ return -1;
+}
+
+static inline int
+of_pci_get_max_link_speed(struct device_node *node)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+#if defined(CONFIG_OF_ADDRESS)
+int devm_of_pci_get_host_bridge_resources(struct device *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base);
+#else
+static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base)
+{
+ return -EINVAL;
+}
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 800e1d404a455..03f4e0b3a1400 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for PCI Express features and port driver
-pcieportdrv-y := portdrv_core.o portdrv_pci.o
+pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 779b3879b1b5e..9735c19bf39cf 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -94,7 +94,7 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
*/
static void aer_enable_rootport(struct aer_rpc *rpc)
{
- struct pci_dev *pdev = rpc->rpd->port;
+ struct pci_dev *pdev = rpc->rpd;
int aer_pos;
u16 reg16;
u32 reg32;
@@ -136,7 +136,7 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
*/
static void aer_disable_rootport(struct aer_rpc *rpc)
{
- struct pci_dev *pdev = rpc->rpd->port;
+ struct pci_dev *pdev = rpc->rpd;
u32 reg32;
int pos;
@@ -232,7 +232,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
/* Initialize Root lock access, e_lock, to Root Error Status Reg */
spin_lock_init(&rpc->e_lock);
- rpc->rpd = dev;
+ rpc->rpd = dev->port;
INIT_WORK(&rpc->dpc_handler, aer_isr);
mutex_init(&rpc->rpc_mutex);
@@ -353,10 +353,7 @@ static void aer_error_resume(struct pci_dev *dev)
pos = dev->aer_cap;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
- if (dev->error_state == pci_channel_io_normal)
- status &= ~mask; /* Clear corresponding nonfatal bits */
- else
- status &= mask; /* Clear corresponding fatal bits */
+ status &= ~mask; /* Clear corresponding nonfatal bits */
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 08b4584f62fe8..6e0ad9a68fd9c 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -58,7 +58,7 @@ struct aer_err_source {
};
struct aer_rpc {
- struct pcie_device *rpd; /* Root Port device */
+ struct pci_dev *rpd; /* Root Port device */
struct work_struct dpc_handler;
struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
struct aer_err_info e_info;
@@ -76,36 +76,6 @@ struct aer_rpc {
*/
};
-struct aer_broadcast_data {
- enum pci_channel_state state;
- enum pci_ers_result result;
-};
-
-static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
- enum pci_ers_result new)
-{
- if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
- return PCI_ERS_RESULT_NO_AER_DRIVER;
-
- if (new == PCI_ERS_RESULT_NONE)
- return orig;
-
- switch (orig) {
- case PCI_ERS_RESULT_CAN_RECOVER:
- case PCI_ERS_RESULT_RECOVERED:
- orig = new;
- break;
- case PCI_ERS_RESULT_DISCONNECT:
- if (new == PCI_ERS_RESULT_NEED_RESET)
- orig = PCI_ERS_RESULT_NEED_RESET;
- break;
- default:
- break;
- }
-
- return orig;
-}
-
extern struct bus_type pcie_port_bus_type;
void aer_isr(struct work_struct *work);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 0ea5acc403233..42d4f3f32282b 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/kfifo.h>
#include "aerdrv.h"
+#include "../../pci.h"
#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
@@ -227,329 +228,14 @@ static bool find_source_device(struct pci_dev *parent,
return true;
}
-static int report_error_detected(struct pci_dev *dev, void *data)
-{
- pci_ers_result_t vote;
- const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
- result_data = (struct aer_broadcast_data *) data;
-
- device_lock(&dev->dev);
- dev->error_state = result_data->state;
-
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->error_detected) {
- if (result_data->state == pci_channel_io_frozen &&
- dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
- /*
- * In case of fatal recovery, if one of down-
- * stream device has no driver. We might be
- * unable to recover because a later insmod
- * of a driver for this device is unaware of
- * its hw state.
- */
- pci_printk(KERN_DEBUG, dev, "device has %s\n",
- dev->driver ?
- "no AER-aware driver" : "no driver");
- }
-
- /*
- * If there's any device in the subtree that does not
- * have an error_detected callback, returning
- * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
- * the subsequent mmio_enabled/slot_reset/resume
- * callbacks of "any" device in the subtree. All the
- * devices in the subtree are left in the error state
- * without recovery.
- */
-
- if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
- vote = PCI_ERS_RESULT_NO_AER_DRIVER;
- else
- vote = PCI_ERS_RESULT_NONE;
- } else {
- err_handler = dev->driver->err_handler;
- vote = err_handler->error_detected(dev, result_data->state);
- pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
- }
-
- result_data->result = merge_result(result_data->result, vote);
- device_unlock(&dev->dev);
- return 0;
-}
-
-static int report_mmio_enabled(struct pci_dev *dev, void *data)
-{
- pci_ers_result_t vote;
- const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
- result_data = (struct aer_broadcast_data *) data;
-
- device_lock(&dev->dev);
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->mmio_enabled)
- goto out;
-
- err_handler = dev->driver->err_handler;
- vote = err_handler->mmio_enabled(dev);
- result_data->result = merge_result(result_data->result, vote);
-out:
- device_unlock(&dev->dev);
- return 0;
-}
-
-static int report_slot_reset(struct pci_dev *dev, void *data)
-{
- pci_ers_result_t vote;
- const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
- result_data = (struct aer_broadcast_data *) data;
-
- device_lock(&dev->dev);
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->slot_reset)
- goto out;
-
- err_handler = dev->driver->err_handler;
- vote = err_handler->slot_reset(dev);
- result_data->result = merge_result(result_data->result, vote);
-out:
- device_unlock(&dev->dev);
- return 0;
-}
-
-static int report_resume(struct pci_dev *dev, void *data)
-{
- const struct pci_error_handlers *err_handler;
-
- device_lock(&dev->dev);
- dev->error_state = pci_channel_io_normal;
-
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->resume)
- goto out;
-
- err_handler = dev->driver->err_handler;
- err_handler->resume(dev);
- pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
-out:
- device_unlock(&dev->dev);
- return 0;
-}
-
-/**
- * broadcast_error_message - handle message broadcast to downstream drivers
- * @dev: pointer to from where in a hierarchy message is broadcasted down
- * @state: error state
- * @error_mesg: message to print
- * @cb: callback to be broadcasted
- *
- * Invoked during error recovery process. Once being invoked, the content
- * of error severity will be broadcasted to all downstream drivers in a
- * hierarchy in question.
- */
-static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
- enum pci_channel_state state,
- char *error_mesg,
- int (*cb)(struct pci_dev *, void *))
-{
- struct aer_broadcast_data result_data;
-
- pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
- result_data.state = state;
- if (cb == report_error_detected)
- result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
- else
- result_data.result = PCI_ERS_RESULT_RECOVERED;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /*
- * If the error is reported by a bridge, we think this error
- * is related to the downstream link of the bridge, so we
- * do error recovery on all subordinates of the bridge instead
- * of the bridge and clear the error status of the bridge.
- */
- if (cb == report_error_detected)
- dev->error_state = state;
- pci_walk_bus(dev->subordinate, cb, &result_data);
- if (cb == report_resume) {
- pci_cleanup_aer_uncorrect_error_status(dev);
- dev->error_state = pci_channel_io_normal;
- }
- } else {
- /*
- * If the error is reported by an end point, we think this
- * error is related to the upstream link of the end point.
- */
- if (state == pci_channel_io_normal)
- /*
- * the error is non fatal so the bus is ok, just invoke
- * the callback for the function that logged the error.
- */
- cb(dev, &result_data);
- else
- pci_walk_bus(dev->bus, cb, &result_data);
- }
-
- return result_data.result;
-}
-
-/**
- * default_reset_link - default reset function
- * @dev: pointer to pci_dev data structure
- *
- * Invoked when performing link reset on a Downstream Port or a
- * Root Port with no aer driver.
- */
-static pci_ers_result_t default_reset_link(struct pci_dev *dev)
-{
- pci_reset_bridge_secondary_bus(dev);
- pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static int find_aer_service_iter(struct device *device, void *data)
-{
- struct pcie_port_service_driver *service_driver, **drv;
-
- drv = (struct pcie_port_service_driver **) data;
-
- if (device->bus == &pcie_port_bus_type && device->driver) {
- service_driver = to_service_driver(device->driver);
- if (service_driver->service == PCIE_PORT_SERVICE_AER) {
- *drv = service_driver;
- return 1;
- }
- }
-
- return 0;
-}
-
-static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
-{
- struct pcie_port_service_driver *drv = NULL;
-
- device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
-
- return drv;
-}
-
-static pci_ers_result_t reset_link(struct pci_dev *dev)
-{
- struct pci_dev *udev;
- pci_ers_result_t status;
- struct pcie_port_service_driver *driver;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /* Reset this port for all subordinates */
- udev = dev;
- } else {
- /* Reset the upstream component (likely downstream port) */
- udev = dev->bus->self;
- }
-
- /* Use the aer driver of the component firstly */
- driver = find_aer_service(udev);
-
- if (driver && driver->reset_link) {
- status = driver->reset_link(udev);
- } else if (udev->has_secondary_link) {
- status = default_reset_link(udev);
- } else {
- pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
- pci_name(udev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- if (status != PCI_ERS_RESULT_RECOVERED) {
- pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
- pci_name(udev));
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return status;
-}
-
-/**
- * do_recovery - handle nonfatal/fatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- * @severity: error severity type
- *
- * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
- * error detected message to all downstream drivers within a hierarchy in
- * question and return the returned code.
- */
-static void do_recovery(struct pci_dev *dev, int severity)
-{
- pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
- enum pci_channel_state state;
-
- if (severity == AER_FATAL)
- state = pci_channel_io_frozen;
- else
- state = pci_channel_io_normal;
-
- status = broadcast_error_message(dev,
- state,
- "error_detected",
- report_error_detected);
-
- if (severity == AER_FATAL) {
- result = reset_link(dev);
- if (result != PCI_ERS_RESULT_RECOVERED)
- goto failed;
- }
-
- if (status == PCI_ERS_RESULT_CAN_RECOVER)
- status = broadcast_error_message(dev,
- state,
- "mmio_enabled",
- report_mmio_enabled);
-
- if (status == PCI_ERS_RESULT_NEED_RESET) {
- /*
- * TODO: Should call platform-specific
- * functions to reset slot before calling
- * drivers' slot_reset callbacks?
- */
- status = broadcast_error_message(dev,
- state,
- "slot_reset",
- report_slot_reset);
- }
-
- if (status != PCI_ERS_RESULT_RECOVERED)
- goto failed;
-
- broadcast_error_message(dev,
- state,
- "resume",
- report_resume);
-
- pci_info(dev, "AER: Device recovery successful\n");
- return;
-
-failed:
- pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
- /* TODO: Should kernel panic here? */
- pci_info(dev, "AER: Device recovery failed\n");
-}
-
/**
* handle_error_source - handle logging error into an event log
- * @aerdev: pointer to pcie_device data structure of the root port
* @dev: pointer to pci_dev data structure of error source device
* @info: comprehensive error information
*
* Invoked when an error being detected by Root Port.
*/
-static void handle_error_source(struct pcie_device *aerdev,
- struct pci_dev *dev,
- struct aer_err_info *info)
+static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
{
int pos;
@@ -562,12 +248,13 @@ static void handle_error_source(struct pcie_device *aerdev,
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
- } else
- do_recovery(dev, info->severity);
+ } else if (info->severity == AER_NONFATAL)
+ pcie_do_nonfatal_recovery(dev);
+ else if (info->severity == AER_FATAL)
+ pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
-static void aer_recover_work_func(struct work_struct *work);
#define AER_RECOVER_RING_ORDER 4
#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
@@ -582,6 +269,30 @@ struct aer_recover_entry {
static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
AER_RECOVER_RING_SIZE);
+
+static void aer_recover_work_func(struct work_struct *work)
+{
+ struct aer_recover_entry entry;
+ struct pci_dev *pdev;
+
+ while (kfifo_get(&aer_recover_ring, &entry)) {
+ pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
+ entry.devfn);
+ if (!pdev) {
+ pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ continue;
+ }
+ cper_print_aer(pdev, entry.severity, entry.regs);
+ if (entry.severity == AER_NONFATAL)
+ pcie_do_nonfatal_recovery(pdev);
+ else if (entry.severity == AER_FATAL)
+ pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
+ pci_dev_put(pdev);
+ }
+}
+
/*
* Mutual exclusion for writers of aer_recover_ring, reader side don't
* need lock, because there is only one reader and lock is not needed
@@ -611,27 +322,6 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
-
-static void aer_recover_work_func(struct work_struct *work)
-{
- struct aer_recover_entry entry;
- struct pci_dev *pdev;
-
- while (kfifo_get(&aer_recover_ring, &entry)) {
- pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
- entry.devfn);
- if (!pdev) {
- pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
- entry.domain, entry.bus,
- PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
- continue;
- }
- cper_print_aer(pdev, entry.severity, entry.regs);
- if (entry.severity != AER_CORRECTABLE)
- do_recovery(pdev, entry.severity);
- pci_dev_put(pdev);
- }
-}
#endif
/**
@@ -695,8 +385,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return 1;
}
-static inline void aer_process_err_devices(struct pcie_device *p_device,
- struct aer_err_info *e_info)
+static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
int i;
@@ -707,19 +396,19 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (get_device_error_info(e_info->dev[i], e_info))
- handle_error_source(p_device, e_info->dev[i], e_info);
+ handle_error_source(e_info->dev[i], e_info);
}
}
/**
* aer_isr_one_error - consume an error detected by root port
- * @p_device: pointer to error root port service device
+ * @rpc: pointer to the root port which holds an error
* @e_src: pointer to an error source
*/
-static void aer_isr_one_error(struct pcie_device *p_device,
+static void aer_isr_one_error(struct aer_rpc *rpc,
struct aer_err_source *e_src)
{
- struct aer_rpc *rpc = get_service_data(p_device);
+ struct pci_dev *pdev = rpc->rpd;
struct aer_err_info *e_info = &rpc->e_info;
/*
@@ -734,11 +423,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
e_info->multi_error_valid = 1;
else
e_info->multi_error_valid = 0;
+ aer_print_port_info(pdev, e_info);
- aer_print_port_info(p_device->port, e_info);
-
- if (find_source_device(p_device->port, e_info))
- aer_process_err_devices(p_device, e_info);
+ if (find_source_device(pdev, e_info))
+ aer_process_err_devices(e_info);
}
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
@@ -754,10 +442,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
else
e_info->multi_error_valid = 0;
- aer_print_port_info(p_device->port, e_info);
+ aer_print_port_info(pdev, e_info);
- if (find_source_device(p_device->port, e_info))
- aer_process_err_devices(p_device, e_info);
+ if (find_source_device(pdev, e_info))
+ aer_process_err_devices(e_info);
}
}
@@ -799,11 +487,10 @@ static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
void aer_isr(struct work_struct *work)
{
struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
- struct pcie_device *p_device = rpc->rpd;
struct aer_err_source uninitialized_var(e_src);
mutex_lock(&rpc->rpc_mutex);
while (get_e_source(rpc, &e_src))
- aer_isr_one_error(p_device, &e_src);
+ aer_isr_one_error(rpc, &e_src);
mutex_unlock(&rpc->rpc_mutex);
}
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index cfc89dd578316..4985bdf64c2e7 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -163,17 +163,17 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
int id = ((dev->bus->number << 8) | dev->devfn);
if (!info->status) {
- pci_err(dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n",
- aer_error_severity_string[info->severity], id);
+ pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
+ aer_error_severity_string[info->severity]);
goto out;
}
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
- aer_error_layer[layer], id, aer_agent_string[agent]);
+ aer_error_layer[layer], aer_agent_string[agent]);
pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
dev->vendor, dev->device,
@@ -186,17 +186,21 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
- pci_err(dev, " Error of this Agent(%04x) is reported first\n", id);
+ pci_err(dev, " Error of this Agent is reported first\n");
trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
- info->severity);
+ info->severity, info->tlp_header_valid, &info->tlp);
}
void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
{
- pci_info(dev, "AER: %s%s error received: id=%04x\n",
+ u8 bus = info->id >> 8;
+ u8 devfn = info->id & 0xff;
+
+ pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity], info->id);
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -216,28 +220,30 @@ EXPORT_SYMBOL_GPL(cper_severity_to_aer);
void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer)
{
- int layer, agent, status_strs_size, tlp_header_valid = 0;
+ int layer, agent, tlp_header_valid = 0;
u32 status, mask;
- const char **status_strs;
+ struct aer_err_info info;
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
- status_strs = aer_correctable_error_string;
- status_strs_size = ARRAY_SIZE(aer_correctable_error_string);
} else {
status = aer->uncor_status;
mask = aer->uncor_mask;
- status_strs = aer_uncorrectable_error_string;
- status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
tlp_header_valid = status & AER_LOG_TLP_MASKS;
}
layer = AER_GET_LAYER_ERROR(aer_severity, status);
agent = AER_GET_AGENT(aer_severity, status);
+ memset(&info, 0, sizeof(info));
+ info.severity = aer_severity;
+ info.status = status;
+ info.mask = mask;
+ info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
+
pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
- cper_print_bits("", status, status_strs, status_strs_size);
+ __aer_print_error(dev, &info);
pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
aer_error_layer[layer], aer_agent_string[agent]);
@@ -249,6 +255,6 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
__print_tlp_header(dev, &aer->header_log);
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
- aer_severity);
+ aer_severity, tlp_header_valid, &aer->header_log);
}
#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index f76eb7704f646..c687c817b47d0 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -400,6 +400,15 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
info->l1ss_cap = 0;
return;
}
+
+ /*
+ * If we don't have LTR for the entire path from the Root Complex
+ * to this device, we can't use ASPM L1.2 because it relies on the
+ * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
+ */
+ if (!pdev->ltr_path)
+ info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
&info->l1ss_ctl1);
pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 8c57d607e603e..d6436681c5354 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -68,44 +68,35 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
static void dpc_wait_link_inactive(struct dpc_dev *dpc)
{
- unsigned long timeout = jiffies + HZ;
struct pci_dev *pdev = dpc->dev->port;
- struct device *dev = &dpc->dev->device;
- u16 lnk_status;
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- while (lnk_status & PCI_EXP_LNKSTA_DLLLA &&
- !time_after(jiffies, timeout)) {
- msleep(10);
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- }
- if (lnk_status & PCI_EXP_LNKSTA_DLLLA)
- dev_warn(dev, "Link state not disabled for DPC event\n");
+ pcie_wait_for_link(pdev, false);
}
-static void dpc_work(struct work_struct *work)
+static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
- struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
- struct pci_dev *dev, *temp, *pdev = dpc->dev->port;
- struct pci_bus *parent = pdev->subordinate;
- u16 cap = dpc->cap_pos, ctl;
-
- pci_lock_rescan_remove();
- list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
- bus_list) {
- pci_dev_get(dev);
- pci_dev_set_disconnected(dev, NULL);
- if (pci_has_subordinate(dev))
- pci_walk_bus(dev->subordinate,
- pci_dev_set_disconnected, NULL);
- pci_stop_and_remove_bus_device(dev);
- pci_dev_put(dev);
- }
- pci_unlock_rescan_remove();
-
+ struct dpc_dev *dpc;
+ struct pcie_device *pciedev;
+ struct device *devdpc;
+ u16 cap, ctl;
+
+ /*
+ * DPC disables the Link automatically in hardware, so it has
+ * already been reset by the time we get here.
+ */
+ devdpc = pcie_port_find_device(pdev, PCIE_PORT_SERVICE_DPC);
+ pciedev = to_pcie_device(devdpc);
+ dpc = get_service_data(pciedev);
+ cap = dpc->cap_pos;
+
+ /*
+ * Wait until the Link is inactive, then clear DPC Trigger Status
+ * to allow the Port to leave DPC.
+ */
dpc_wait_link_inactive(dpc);
+
if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
- return;
+ return PCI_ERS_RESULT_DISCONNECT;
if (dpc->rp_extensions && dpc->rp_pio_status) {
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS,
dpc->rp_pio_status);
@@ -113,11 +104,22 @@ static void dpc_work(struct work_struct *work)
}
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
- PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT);
+ PCI_EXP_DPC_STATUS_TRIGGER);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
ctl | PCI_EXP_DPC_CTL_INT_EN);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void dpc_work(struct work_struct *work)
+{
+ struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
+ struct pci_dev *pdev = dpc->dev->port;
+
+ /* We configure DPC so it only triggers on ERR_FATAL */
+ pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
}
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
@@ -223,6 +225,9 @@ static irqreturn_t dpc_irq(int irq, void *context)
if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
dpc_process_rp_pio_error(dpc);
+ pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_INTERRUPT);
+
schedule_work(&dpc->work);
return IRQ_HANDLED;
@@ -270,7 +275,7 @@ static int dpc_probe(struct pcie_device *dev)
}
}
- ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
+ ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
@@ -288,7 +293,7 @@ static void dpc_remove(struct pcie_device *dev)
u16 ctl;
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
- ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN);
+ ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
}
@@ -298,6 +303,7 @@ static struct pcie_port_service_driver dpcdriver = {
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
.remove = dpc_remove,
+ .reset_link = dpc_reset_link,
};
static int __init dpc_service_init(void)
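dpc_reset_link() can report PCI_ERS_RESULT_RECOVERED without resetting anything itself because DPC hardware has already taken the link down. A port service that did need to reset the link would implement the same reset_link contract along these lines (hypothetical sketch, not from the patch):

static pci_ers_result_t foo_reset_link(struct pci_dev *port)
{
	/* reset the downstream link, then wait for it to retrain */
	pci_reset_bridge_secondary_bus(port);

	return pcie_wait_for_link(port, true) ?
		PCI_ERS_RESULT_RECOVERED : PCI_ERS_RESULT_DISCONNECT;
}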
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
new file mode 100644
index 0000000000000..f7ce0cb0b0b70
--- /dev/null
+++ b/drivers/pci/pcie/err.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file implements the error recovery as a core part of PCIe error
+ * reporting. When a PCIe error is delivered, an error message will be
+ * collected and printed to console, then, an error recovery procedure
+ * will be executed by following the PCI error recovery rules.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/aer.h>
+#include "portdrv.h"
+#include "../pci.h"
+
+struct aer_broadcast_data {
+ enum pci_channel_state state;
+ enum pci_ers_result result;
+};
+
+static pci_ers_result_t merge_result(enum pci_ers_result orig,
+ enum pci_ers_result new)
+{
+ if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
+ return PCI_ERS_RESULT_NO_AER_DRIVER;
+
+ if (new == PCI_ERS_RESULT_NONE)
+ return orig;
+
+ switch (orig) {
+ case PCI_ERS_RESULT_CAN_RECOVER:
+ case PCI_ERS_RESULT_RECOVERED:
+ orig = new;
+ break;
+ case PCI_ERS_RESULT_DISCONNECT:
+ if (new == PCI_ERS_RESULT_NEED_RESET)
+ orig = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ default:
+ break;
+ }
+
+ return orig;
+}
+
+static int report_error_detected(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ dev->error_state = result_data->state;
+
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->error_detected) {
+ if (result_data->state == pci_channel_io_frozen &&
+ dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * In case of fatal recovery, if one of the downstream
+ * devices has no driver, we might be unable to recover
+ * because a later insmod of a driver for this device
+ * would be unaware of its hardware state.
+ */
+ pci_printk(KERN_DEBUG, dev, "device has %s\n",
+ dev->driver ?
+ "no AER-aware driver" : "no driver");
+ }
+
+ /*
+ * If there's any device in the subtree that does not
+ * have an error_detected callback, returning
+ * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
+ * the subsequent mmio_enabled/slot_reset/resume
+ * callbacks of "any" device in the subtree. All the
+ * devices in the subtree are left in the error state
+ * without recovery.
+ */
+
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+ vote = PCI_ERS_RESULT_NO_AER_DRIVER;
+ else
+ vote = PCI_ERS_RESULT_NONE;
+ } else {
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->error_detected(dev, result_data->state);
+ pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
+ }
+
+ result_data->result = merge_result(result_data->result, vote);
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->mmio_enabled)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->mmio_enabled(dev);
+ result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_slot_reset(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->slot_reset)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->slot_reset(dev);
+ result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_resume(struct pci_dev *dev, void *data)
+{
+ const struct pci_error_handlers *err_handler;
+
+ device_lock(&dev->dev);
+ dev->error_state = pci_channel_io_normal;
+
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->resume)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ err_handler->resume(dev);
+ pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+/**
+ * default_reset_link - default reset function
+ * @dev: pointer to pci_dev data structure
+ *
+ * Invoked when performing link reset on a Downstream Port or a
+ * Root Port with no aer driver.
+ */
+static pci_ers_result_t default_reset_link(struct pci_dev *dev)
+{
+ pci_reset_bridge_secondary_bus(dev);
+ pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
+{
+ struct pci_dev *udev;
+ pci_ers_result_t status;
+ struct pcie_port_service_driver *driver = NULL;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ /* Reset this port for all subordinates */
+ udev = dev;
+ } else {
+ /* Reset the upstream component (likely downstream port) */
+ udev = dev->bus->self;
+ }
+
+ /* Use the service driver of the component first */
+ driver = pcie_port_find_service(udev, service);
+
+ if (driver && driver->reset_link) {
+ status = driver->reset_link(udev);
+ } else if (udev->has_secondary_link) {
+ status = default_reset_link(udev);
+ } else {
+ pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (status != PCI_ERS_RESULT_RECOVERED) {
+ pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return status;
+}
+
+/**
+ * broadcast_error_message - handle message broadcast to downstream drivers
+ * @dev: pointer to the device from which the message is broadcast down the hierarchy
+ * @state: error state
+ * @error_mesg: message to print
+ * @cb: callback to be broadcast
+ *
+ * Invoked during the error recovery process. Once invoked, the error
+ * severity will be broadcast to all downstream drivers in the hierarchy
+ * in question.
+ */
+static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
+ enum pci_channel_state state,
+ char *error_mesg,
+ int (*cb)(struct pci_dev *, void *))
+{
+ struct aer_broadcast_data result_data;
+
+ pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
+ result_data.state = state;
+ if (cb == report_error_detected)
+ result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
+ else
+ result_data.result = PCI_ERS_RESULT_RECOVERED;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * If the error is reported by a bridge, we think this error
+ * is related to the downstream link of the bridge, so we
+ * do error recovery on all subordinates of the bridge instead
+ * of the bridge and clear the error status of the bridge.
+ */
+ if (cb == report_error_detected)
+ dev->error_state = state;
+ pci_walk_bus(dev->subordinate, cb, &result_data);
+ if (cb == report_resume) {
+ pci_cleanup_aer_uncorrect_error_status(dev);
+ dev->error_state = pci_channel_io_normal;
+ }
+ } else {
+ /*
+ * If the error is reported by an end point, we think this
+ * error is related to the upstream link of the end point.
+ */
+ if (state == pci_channel_io_normal)
+ /*
+ * The error is nonfatal so the bus is OK; just invoke
+ * the callback for the function that logged the error.
+ */
+ cb(dev, &result_data);
+ else
+ pci_walk_bus(dev->bus, cb, &result_data);
+ }
+
+ return result_data.result;
+}
+
+/**
+ * pcie_do_fatal_recovery - handle fatal error recovery process
+ * @dev: pointer to a pci_dev data structure of agent detecting an error
+ *
+ * Invoked when an error is fatal. Once invoked, removes the devices
+ * beneath this AER agent, then resets the link (e.g. by a secondary bus
+ * reset) and re-enumerates the devices.
+ */
+void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
+{
+ struct pci_dev *udev;
+ struct pci_bus *parent;
+ struct pci_dev *pdev, *temp;
+ pci_ers_result_t result;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ udev = dev;
+ else
+ udev = dev->bus->self;
+
+ parent = udev->subordinate;
+ pci_lock_rescan_remove();
+ list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
+ bus_list) {
+ pci_dev_get(pdev);
+ pci_dev_set_disconnected(pdev, NULL);
+ if (pci_has_subordinate(pdev))
+ pci_walk_bus(pdev->subordinate,
+ pci_dev_set_disconnected, NULL);
+ pci_stop_and_remove_bus_device(pdev);
+ pci_dev_put(pdev);
+ }
+
+ result = reset_link(udev, service);
+
+ if ((service == PCIE_PORT_SERVICE_AER) &&
+ (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
+ /*
+ * If the error is reported by a bridge, we think this error
+ * is related to the downstream link of the bridge, so we
+ * do error recovery on all subordinates of the bridge instead
+ * of the bridge and clear the error status of the bridge.
+ */
+ pci_cleanup_aer_uncorrect_error_status(dev);
+ }
+
+ if (result == PCI_ERS_RESULT_RECOVERED) {
+ if (pcie_wait_for_link(udev, true))
+ pci_rescan_bus(udev->bus);
+ pci_info(dev, "Device recovery from fatal error successful\n");
+ } else {
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+ pci_info(dev, "Device recovery from fatal error failed\n");
+ }
+
+ pci_unlock_rescan_remove();
+}
+
+/**
+ * pcie_do_nonfatal_recovery - handle nonfatal error recovery process
+ * @dev: pointer to a pci_dev data structure of agent detecting an error
+ *
+ * Invoked when an error is nonfatal. Once invoked, broadcasts the
+ * error-detected message to all downstream drivers within the hierarchy
+ * in question and performs the remaining recovery steps based on their results.
+ */
+void pcie_do_nonfatal_recovery(struct pci_dev *dev)
+{
+ pci_ers_result_t status;
+ enum pci_channel_state state;
+
+ state = pci_channel_io_normal;
+
+ status = broadcast_error_message(dev,
+ state,
+ "error_detected",
+ report_error_detected);
+
+ if (status == PCI_ERS_RESULT_CAN_RECOVER)
+ status = broadcast_error_message(dev,
+ state,
+ "mmio_enabled",
+ report_mmio_enabled);
+
+ if (status == PCI_ERS_RESULT_NEED_RESET) {
+ /*
+ * TODO: Should call platform-specific
+ * functions to reset slot before calling
+ * drivers' slot_reset callbacks?
+ */
+ status = broadcast_error_message(dev,
+ state,
+ "slot_reset",
+ report_slot_reset);
+ }
+
+ if (status != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
+
+ broadcast_error_message(dev,
+ state,
+ "resume",
+ report_resume);
+
+ pci_info(dev, "AER: Device recovery successful\n");
+ return;
+
+failed:
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+
+ /* TODO: Should kernel panic here? */
+ pci_info(dev, "AER: Device recovery failed\n");
+}
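For reference, the report_error_detected()/report_mmio_enabled()/report_slot_reset()/report_resume() helpers above call into the standard struct pci_error_handlers callbacks that an endpoint driver may supply through its struct pci_driver .err_handler pointer. A minimal driver-side sketch (illustrative "foo" names only, not part of this patch) could look like:

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   enum pci_channel_state state)
{
	/* Stop I/O to the device; a permanent failure cannot be recovered */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	/* Restore config space after the link/slot has been reset */
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	/* Restart normal I/O once recovery has completed */
}

static const struct pci_error_handlers foo_err_handlers = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_resume,
};

Each callback's vote is folded together by merge_result() above; for example, if one device returns PCI_ERS_RESULT_RECOVERED and another returns PCI_ERS_RESULT_NEED_RESET, the merged result is PCI_ERS_RESULT_NEED_RESET, so slot_reset is broadcast before resume.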
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d0c6783dbfe33..2bb5db7b53e6c 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,8 +11,6 @@
#include <linux/compiler.h>
-extern bool pcie_ports_native;
-
/* Service Type */
#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
@@ -112,4 +110,7 @@ static inline bool pcie_pme_no_msi(void) { return false; }
static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
+struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
+ u32 service);
+struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
deleted file mode 100644
index 8ab5d434b9c60..0000000000000
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PCIe Port Native Services Support, ACPI-Related Part
- *
- * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-
-#include "aer/aerdrv.h"
-#include "../pci.h"
-#include "portdrv.h"
-
-/**
- * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
- * @port: PCIe Port service for a root port or event collector.
- * @srv_mask: Bit mask of services that can be enabled for @port.
- *
- * Invoked when @port is identified as a PCIe port device. To avoid conflicts
- * with the BIOS PCIe port native services support requires the BIOS to yield
- * control of these services to the kernel. The mask of services that the BIOS
- * allows to be enabled for @port is written to @srv_mask.
- *
- * NOTE: It turns out that we cannot do that for individual port services
- * separately, because that would make some systems work incorrectly.
- */
-void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
-{
- struct acpi_pci_root *root;
- acpi_handle handle;
- u32 flags;
-
- if (acpi_pci_disabled)
- return;
-
- handle = acpi_find_root_bridge_handle(port);
- if (!handle)
- return;
-
- root = acpi_pci_find_root(handle);
- if (!root)
- return;
-
- flags = root->osc_control_set;
-
- *srv_mask = 0;
- if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_HP;
- if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_PME;
- if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
- *srv_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
-}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index c9c0663db282d..e0261ad4bcdd5 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -19,6 +19,12 @@
#include "../pci.h"
#include "portdrv.h"
+struct portdrv_service_data {
+ struct pcie_port_service_driver *drv;
+ struct device *dev;
+ u32 service;
+};
+
/**
* release_pcie_device - free PCI Express port service device structure
* @dev: Port service device to release
@@ -199,7 +205,7 @@ static int get_port_device_capability(struct pci_dev *dev)
int services = 0;
if (dev->is_hotplug_bridge &&
- (pcie_ports_native || host->native_hotplug)) {
+ (pcie_ports_native || host->native_pcie_hotplug)) {
services |= PCIE_PORT_SERVICE_HP;
/*
@@ -398,6 +404,69 @@ static int remove_iter(struct device *dev, void *data)
return 0;
}
+static int find_service_iter(struct device *device, void *data)
+{
+ struct pcie_port_service_driver *service_driver;
+ struct portdrv_service_data *pdrvs;
+ u32 service;
+
+ pdrvs = (struct portdrv_service_data *) data;
+ service = pdrvs->service;
+
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ service_driver = to_service_driver(device->driver);
+ if (service_driver->service == service) {
+ pdrvs->drv = service_driver;
+ pdrvs->dev = device;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pcie_port_find_service - find the service driver
+ * @dev: PCI Express port the service is associated with
+ * @service: Service to find
+ *
+ * Find PCI Express port service driver associated with given service
+ */
+struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
+ u32 service)
+{
+ struct pcie_port_service_driver *drv;
+ struct portdrv_service_data pdrvs;
+
+ pdrvs.drv = NULL;
+ pdrvs.service = service;
+ device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
+
+ drv = pdrvs.drv;
+ return drv;
+}
+
+/**
+ * pcie_port_find_device - find the struct device
+ * @dev: PCI Express port the service is associated with
+ * @service: Service to find
+ *
+ * Find the struct device associated with the given service on a pci_dev
+ */
+struct device *pcie_port_find_device(struct pci_dev *dev,
+ u32 service)
+{
+ struct device *device;
+ struct portdrv_service_data pdrvs;
+
+ pdrvs.dev = NULL;
+ pdrvs.service = service;
+ device_for_each_child(&dev->dev, &pdrvs, find_service_iter);
+
+ device = pdrvs.dev;
+ return device;
+}
+
/**
* pcie_port_device_remove - unregister PCI Express port service devices
* @dev: PCI Express port the service devices to unregister are associated with
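The pcie_port_find_service() and pcie_port_find_device() helpers added above are what err.c and the DPC driver use to reach a port's child service devices. A short usage sketch (hypothetical foo_get_dpc_data() wrapper, assumed to be built inside the port driver so the portdrv declarations are in scope) mirroring the DPC usage earlier in this patch:

/*
 * Illustrative only: look up the private data a port service (here DPC)
 * stored with set_service_data() in its probe routine.
 */
static void *foo_get_dpc_data(struct pci_dev *port)
{
	struct device *device;
	struct pcie_device *pciedev;

	device = pcie_port_find_device(port, PCIE_PORT_SERVICE_DPC);
	if (!device)
		return NULL;

	pciedev = to_pcie_device(device);
	return get_service_data(pciedev);	/* struct dpc_dev * for DPC */
}

dpc_reset_link() at the top of this series does exactly this to recover its struct dpc_dev when pcie_do_fatal_recovery() calls back into the DPC service.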
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ac91b6fd0bcd5..ac876e32de4b0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -526,12 +526,14 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
if (bridge->release_fn)
bridge->release_fn(bridge);
+
+ pci_free_resource_list(&bridge->windows);
}
static void pci_release_host_bridge_dev(struct device *dev)
{
devm_pci_release_host_bridge_dev(dev);
- pci_free_host_bridge(to_pci_host_bridge(dev));
+ kfree(to_pci_host_bridge(dev));
}
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -552,8 +554,10 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
* OS from interfering.
*/
bridge->native_aer = 1;
- bridge->native_hotplug = 1;
+ bridge->native_pcie_hotplug = 1;
+ bridge->native_shpc_hotplug = 1;
bridge->native_pme = 1;
+ bridge->native_ltr = 1;
return bridge;
}
@@ -882,6 +886,45 @@ free:
return err;
}
+static bool pci_bridge_child_ext_cfg_accessible(struct pci_dev *bridge)
+{
+ int pos;
+ u32 status;
+
+ /*
+ * If extended config space isn't accessible on a bridge's primary
+ * bus, we certainly can't access it on the secondary bus.
+ */
+ if (bridge->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
+ return false;
+
+ /*
+ * PCIe Root Ports and switch ports are PCIe on both sides, so if
+ * extended config space is accessible on the primary, it's also
+ * accessible on the secondary.
+ */
+ if (pci_is_pcie(bridge) &&
+ (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM ||
+ pci_pcie_type(bridge) == PCI_EXP_TYPE_DOWNSTREAM))
+ return true;
+
+ /*
+ * For the other bridge types:
+ * - PCI-to-PCI bridges
+ * - PCIe-to-PCI/PCI-X forward bridges
+ * - PCI/PCI-X-to-PCIe reverse bridges
+ * extended config space on the secondary side is only accessible
+ * if the bridge supports PCI-X Mode 2.
+ */
+ pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
+ if (!pos)
+ return false;
+
+ pci_read_config_dword(bridge, pos + PCI_X_STATUS, &status);
+ return status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ);
+}
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -923,6 +966,16 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
pci_set_bus_of_node(child);
pci_set_bus_speed(child);
+ /*
+ * Check whether extended config space is accessible on the child
+ * bus. Note that we currently assume it is always accessible on
+ * the root bus.
+ */
+ if (!pci_bridge_child_ext_cfg_accessible(bridge)) {
+ child->bus_flags |= PCI_BUS_FLAGS_NO_EXTCFG;
+ pci_info(child, "extended config space not accessible\n");
+ }
+
/* Set up default resource pointers and names */
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
@@ -998,6 +1051,8 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* already configured by the BIOS and after we are done with all of
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
+ *
+ * Return: New subordinate number covering all buses behind this bridge.
*/
static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
int max, unsigned int available_buses,
@@ -1188,20 +1243,15 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
pci_domain_nr(bus), child->number);
- /* Has only triggered on CardBus, fixup is in yenta_socket */
+ /* Check that all devices are accessible */
while (bus->parent) {
if ((child->busn_res.end > bus->busn_res.end) ||
(child->number > bus->busn_res.end) ||
(child->number < bus->number) ||
(child->busn_res.end < bus->number)) {
- dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
- &child->busn_res,
- (bus->number > child->busn_res.end &&
- bus->busn_res.end < child->number) ?
- "wholly" : "partially",
- bus->self->transparent ? " transparent" : "",
- dev_name(&bus->dev),
- &bus->busn_res);
+ dev_info(&dev->dev, "devices behind bridge are unusable because %pR cannot be assigned for them\n",
+ &child->busn_res);
+ break;
}
bus = bus->parent;
}
@@ -1230,6 +1280,8 @@ out:
* already configured by the BIOS and after we are done with all of
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
+ *
+ * Return: New subordinate number covering all buses behind this bridge.
*/
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
@@ -1393,6 +1445,9 @@ int pci_cfg_space_size(struct pci_dev *dev)
u32 status;
u16 class;
+ if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
+ return PCI_CFG_SPACE_SIZE;
+
class = dev->class >> 8;
if (class == PCI_CLASS_BRIDGE_HOST)
return pci_cfg_space_size_ext(dev);
@@ -1954,9 +2009,13 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
static void pci_configure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
u32 cap;
struct pci_dev *bridge;
+ if (!host->native_ltr)
+ return;
+
if (!pci_is_pcie(dev))
return;
@@ -2638,7 +2697,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
for_each_pci_bridge(dev, bus) {
cmax = max;
max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
- used_buses += cmax - max;
+
+ /*
+ * Reserve one bus for each bridge now to avoid extending
+ * hotplug bridges too much during the second scan below.
+ */
+ used_buses++;
+ if (cmax - max > 1)
+ used_buses += cmax - max - 1;
}
/* Scan bridges that need to be reconfigured */
@@ -2661,12 +2727,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* bridges if any.
*/
buses = available_buses / hotplug_bridges;
- buses = min(buses, available_buses - used_buses);
+ buses = min(buses, available_buses - used_buses + 1);
}
cmax = max;
max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
- used_buses += max - cmax;
+ /* One bus is already accounted so don't add it again */
+ if (max - cmax > 1)
+ used_buses += max - cmax - 1;
}
/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2990ad1e7c995..f439de848658e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -30,6 +30,162 @@
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
+static ktime_t fixup_debug_start(struct pci_dev *dev,
+ void (*fn)(struct pci_dev *dev))
+{
+ if (initcall_debug)
+ pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current));
+
+ return ktime_get();
+}
+
+static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
+ void (*fn)(struct pci_dev *dev))
+{
+ ktime_t delta, rettime;
+ unsigned long long duration;
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ if (initcall_debug || duration > 10000)
+ pci_info(dev, "%pF took %lld usecs\n", fn, duration);
+}
+
+static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ struct pci_fixup *end)
+{
+ ktime_t calltime;
+
+ for (; f < end; f++)
+ if ((f->class == (u32) (dev->class >> f->class_shift) ||
+ f->class == (u32) PCI_ANY_ID) &&
+ (f->vendor == dev->vendor ||
+ f->vendor == (u16) PCI_ANY_ID) &&
+ (f->device == dev->device ||
+ f->device == (u16) PCI_ANY_ID)) {
+ calltime = fixup_debug_start(dev, f->hook);
+ f->hook(dev);
+ fixup_debug_report(dev, calltime, f->hook);
+ }
+}
+
+extern struct pci_fixup __start_pci_fixups_early[];
+extern struct pci_fixup __end_pci_fixups_early[];
+extern struct pci_fixup __start_pci_fixups_header[];
+extern struct pci_fixup __end_pci_fixups_header[];
+extern struct pci_fixup __start_pci_fixups_final[];
+extern struct pci_fixup __end_pci_fixups_final[];
+extern struct pci_fixup __start_pci_fixups_enable[];
+extern struct pci_fixup __end_pci_fixups_enable[];
+extern struct pci_fixup __start_pci_fixups_resume[];
+extern struct pci_fixup __end_pci_fixups_resume[];
+extern struct pci_fixup __start_pci_fixups_resume_early[];
+extern struct pci_fixup __end_pci_fixups_resume_early[];
+extern struct pci_fixup __start_pci_fixups_suspend[];
+extern struct pci_fixup __end_pci_fixups_suspend[];
+extern struct pci_fixup __start_pci_fixups_suspend_late[];
+extern struct pci_fixup __end_pci_fixups_suspend_late[];
+
+static bool pci_apply_fixup_final_quirks;
+
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+{
+ struct pci_fixup *start, *end;
+
+ switch (pass) {
+ case pci_fixup_early:
+ start = __start_pci_fixups_early;
+ end = __end_pci_fixups_early;
+ break;
+
+ case pci_fixup_header:
+ start = __start_pci_fixups_header;
+ end = __end_pci_fixups_header;
+ break;
+
+ case pci_fixup_final:
+ if (!pci_apply_fixup_final_quirks)
+ return;
+ start = __start_pci_fixups_final;
+ end = __end_pci_fixups_final;
+ break;
+
+ case pci_fixup_enable:
+ start = __start_pci_fixups_enable;
+ end = __end_pci_fixups_enable;
+ break;
+
+ case pci_fixup_resume:
+ start = __start_pci_fixups_resume;
+ end = __end_pci_fixups_resume;
+ break;
+
+ case pci_fixup_resume_early:
+ start = __start_pci_fixups_resume_early;
+ end = __end_pci_fixups_resume_early;
+ break;
+
+ case pci_fixup_suspend:
+ start = __start_pci_fixups_suspend;
+ end = __end_pci_fixups_suspend;
+ break;
+
+ case pci_fixup_suspend_late:
+ start = __start_pci_fixups_suspend_late;
+ end = __end_pci_fixups_suspend_late;
+ break;
+
+ default:
+ /* stupid compiler warning, you would think with an enum... */
+ return;
+ }
+ pci_do_fixups(dev, start, end);
+}
+EXPORT_SYMBOL(pci_fixup_device);
+
+static int __init pci_apply_final_quirks(void)
+{
+ struct pci_dev *dev = NULL;
+ u8 cls = 0;
+ u8 tmp;
+
+ if (pci_cache_line_size)
+ printk(KERN_DEBUG "PCI: CLS %u bytes\n",
+ pci_cache_line_size << 2);
+
+ pci_apply_fixup_final_quirks = true;
+ for_each_pci_dev(dev) {
+ pci_fixup_device(pci_fixup_final, dev);
+ /*
+ * If arch hasn't set it explicitly yet, use the CLS
+ * value shared by all PCI devices. If there's a
+ * mismatch, fall back to the default value.
+ */
+ if (!pci_cache_line_size) {
+ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
+ if (!cls)
+ cls = tmp;
+ if (!tmp || cls == tmp)
+ continue;
+
+ printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
+ cls << 2, tmp << 2,
+ pci_dfl_cache_line_size << 2);
+ pci_cache_line_size = pci_dfl_cache_line_size;
+ }
+ }
+
+ if (!pci_cache_line_size) {
+ printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
+ cls << 2, pci_dfl_cache_line_size << 2);
+ pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
+ }
+
+ return 0;
+}
+fs_initcall_sync(pci_apply_final_quirks);
+
/*
* Decoding should be disabled for a PCI device during BAR sizing to avoid
* conflict. But doing so may cause problems on host bridge and perhaps other
@@ -43,9 +199,10 @@ static void quirk_mmio_always_on(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
-/* The Mellanox Tavor device gives false positive parity errors
- * Mark this device with a broken_parity_status, to allow
- * PCI scanning code to "skip" this now blacklisted device.
+/*
+ * The Mellanox Tavor device gives false positive parity errors. Mark this
+ * device with a broken_parity_status to allow PCI scanning code to "skip"
+ * this now blacklisted device.
*/
static void quirk_mellanox_tavor(struct pci_dev *dev)
{
@@ -54,15 +211,19 @@ static void quirk_mellanox_tavor(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
-/* Deal with broken BIOSes that neglect to enable passive release,
- which can cause problems in combination with the 82441FX/PPro MTRRs */
+/*
+ * Deal with broken BIOSes that neglect to enable passive release,
+ * which can cause problems in combination with the 82441FX/PPro MTRRs
+ */
static void quirk_passive_release(struct pci_dev *dev)
{
struct pci_dev *d = NULL;
unsigned char dlc;
- /* We have to make sure a particular bit is set in the PIIX3
- ISA bridge, so we have to go out and find it. */
+ /*
+ * We have to make sure a particular bit is set in the PIIX3
+ * ISA bridge, so we have to go out and find it.
+ */
while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
pci_read_config_byte(d, 0x82, &dlc);
if (!(dlc & 1<<1)) {
@@ -75,13 +236,14 @@ static void quirk_passive_release(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
-/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
- but VIA don't answer queries. If you happen to have good contacts at VIA
- ask them for me please -- Alan
-
- This appears to be BIOS not version dependent. So presumably there is a
- chipset level fix */
-
+/*
+ * The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a
+ * workaround but VIA don't answer queries. If you happen to have good
+ * contacts at VIA ask them for me please -- Alan
+ *
+ * This appears to be BIOS not version dependent. So presumably there is a
+ * chipset level fix.
+ */
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
if (!isa_dma_bridge_buggy) {
@@ -89,10 +251,10 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev)
pci_info(dev, "Activating ISA DMA hang workarounds\n");
}
}
- /*
- * Its not totally clear which chipsets are the problematic ones
- * We know 82C586 and 82C596 variants are affected.
- */
+/*
+ * It's not totally clear which chipsets are the problematic ones. We know
+ * 82C586 and 82C596 variants are affected.
+ */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
@@ -121,9 +283,7 @@ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
-/*
- * Chipsets where PCI->PCI transfers vanish or hang
- */
+/* Chipsets where PCI->PCI transfers vanish or hang */
static void quirk_nopcipci(struct pci_dev *dev)
{
if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
@@ -146,9 +306,7 @@ static void quirk_nopciamd(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
-/*
- * Triton requires workarounds to be used by the drivers
- */
+/* Triton requires workarounds to be used by the drivers */
static void quirk_triton(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
@@ -162,53 +320,62 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_tr
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
/*
- * VIA Apollo KT133 needs PCI latency patch
- * Made according to a windows driver based patch by George E. Breese
- * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
- * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
- * the info on which Mr Breese based his work.
+ * VIA Apollo KT133 needs PCI latency patch
+ * Made according to a Windows driver-based patch by George E. Breese;
+ * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
+ * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for the info on
+ * which Mr Breese based his work.
*
- * Updated based on further information from the site and also on
- * information provided by VIA
+ * Updated based on further information from the site and also on
+ * information provided by VIA
*/
static void quirk_vialatency(struct pci_dev *dev)
{
struct pci_dev *p;
u8 busarb;
- /* Ok we have a potential problem chipset here. Now see if we have
- a buggy southbridge */
+ /*
+ * Ok, we have a potential problem chipset here. Now see if we have
+ * a buggy southbridge.
+ */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
if (p != NULL) {
- /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
- /* Check for buggy part revisions */
+
+ /*
+ * 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A;
+ * thanks Dan Hollis.
+ * Check for buggy part revisions
+ */
if (p->revision < 0x40 || p->revision > 0x42)
goto exit;
} else {
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
if (p == NULL) /* No problem parts */
goto exit;
+
/* Check for buggy part revisions */
if (p->revision < 0x10 || p->revision > 0x12)
goto exit;
}
/*
- * Ok we have the problem. Now set the PCI master grant to
- * occur every master grant. The apparent bug is that under high
- * PCI load (quite common in Linux of course) you can get data
- * loss when the CPU is held off the bus for 3 bus master requests
- * This happens to include the IDE controllers....
+ * Ok we have the problem. Now set the PCI master grant to occur
+ * every master grant. The apparent bug is that under high PCI load
+ * (quite common in Linux of course) you can get data loss when the
+ * CPU is held off the bus for 3 bus master requests. This happens
+ * to include the IDE controllers....
*
- * VIA only apply this fix when an SB Live! is present but under
- * both Linux and Windows this isn't enough, and we have seen
- * corruption without SB Live! but with things like 3 UDMA IDE
- * controllers. So we ignore that bit of the VIA recommendation..
+ * VIA only apply this fix when an SB Live! is present but under
+ * both Linux and Windows this isn't enough, and we have seen
+ * corruption without SB Live! but with things like 3 UDMA IDE
+ * controllers. So we ignore that bit of the VIA recommendation..
*/
-
pci_read_config_byte(dev, 0x76, &busarb);
- /* Set bit 4 and bi 5 of byte 76 to 0x01
- "Master priority rotation on every PCI master grant */
+
+ /*
+ * Set bit 4 and bit 5 of byte 76 to 0x01
+ * "Master priority rotation on every PCI master grant"
+ */
busarb &= ~(1<<5);
busarb |= (1<<4);
pci_write_config_byte(dev, 0x76, busarb);
@@ -224,9 +391,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vial
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
-/*
- * VIA Apollo VP3 needs ETBF on BT848/878
- */
+/* VIA Apollo VP3 needs ETBF on BT848/878 */
static void quirk_viaetbf(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
@@ -246,10 +411,9 @@ static void quirk_vsfx(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
/*
- * Ali Magik requires workarounds to be used by the drivers
- * that DMA to AGP space. Latency must be set to 0xA and triton
- * workaround applied too
- * [Info kindly provided by ALi]
+ * ALi Magik requires workarounds to be used by the drivers that DMA to AGP
+ * space. Latency must be set to 0xA and Triton workaround applied too.
+ * [Info kindly provided by ALi]
*/
static void quirk_alimagik(struct pci_dev *dev)
{
@@ -261,10 +425,7 @@ static void quirk_alimagik(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
-/*
- * Natoma has some interesting boundary conditions with Zoran stuff
- * at least
- */
+/* Natoma has some interesting boundary conditions with Zoran stuff at least */
static void quirk_natoma(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
@@ -280,8 +441,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quir
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
/*
- * This chip can cause PCI parity errors if config register 0xA0 is read
- * while DMAs are occurring.
+ * This chip can cause PCI parity errors if config register 0xA0 is read
+ * while DMAs are occurring.
*/
static void quirk_citrine(struct pci_dev *dev)
{
@@ -321,8 +482,8 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
/*
- * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
- * If it's needed, re-allocate the region.
+ * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+ * If it's needed, re-allocate the region.
*/
static void quirk_s3_64M(struct pci_dev *dev)
{
@@ -413,8 +574,8 @@ static void quirk_io_region(struct pci_dev *dev, int port,
}
/*
- * ATI Northbridge setups MCE the processor if you even
- * read somewhere between 0x3b0->0x3bb or read 0x3d3
+ * The ATI Northbridge will MCE the processor if you even read somewhere
+ * between 0x3b0->0x3bb or read 0x3d3
*/
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
@@ -429,6 +590,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
* In the AMD NL platform, this device ([1022:7912]) has a class code of
* PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
* claim it.
+ *
* But the dwc3 driver is a more specific driver for this device, and we'd
* prefer to use it instead of xhci. To prevent xhci from claiming the
* device, change the class code to 0x0c03fe, which the PCI r3.0 spec
@@ -448,11 +610,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
quirk_amd_nl_class);
/*
- * Let's make the southbridge information explicit instead
- * of having to worry about people probing the ACPI areas,
- * for example.. (Yes, it happens, and if you read the wrong
- * ACPI register it will put the machine to sleep with no
- * way of waking it up again. Bummer).
+ * Let's make the southbridge information explicit instead of having to
+ * worry about people probing the ACPI areas, for example.. (Yes, it
+ * happens, and if you read the wrong ACPI register it will put the machine
+ * to sleep with no way of waking it up again. Bummer).
*
* ALI M7101: Two IO regions pointed to by words at
* 0xE0 (64 bytes of ACPI registers)
@@ -508,6 +669,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
break;
size = bit;
}
+
/*
* For now we only print it out. Eventually we'll want to
* reserve it, but let's get enough confirmation reports first.
@@ -579,8 +741,7 @@ static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
* priority and can't tell whether the legacy device or the one created
* here is really at that address. This happens on boards with broken
* BIOSes.
- */
-
+ */
pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
if (enable & ICH4_ACPI_EN)
quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
@@ -617,7 +778,8 @@ static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
"ICH6 GPIO");
}
-static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
+static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
+ const char *name, int dynsize)
{
u32 val;
u32 size, base;
@@ -641,7 +803,10 @@ static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
}
base &= ~(size-1);
- /* Just print it out for now. We should reserve it after more debugging */
+ /*
+ * Just print it out for now. We should reserve it after more
+ * debugging.
+ */
pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
@@ -657,7 +822,8 @@ static void quirk_ich6_lpc(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
-static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
+static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
+ const char *name)
{
u32 val;
u32 mask, base;
@@ -668,15 +834,15 @@ static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const cha
if (!(val & 1))
return;
- /*
- * IO base in bits 15:2, mask in bits 23:18, both
- * are dword-based
- */
+ /* IO base in bits 15:2, mask in bits 23:18, both are dword-based */
base = val & 0xfffc;
mask = (val >> 16) & 0xfc;
mask |= 3;
- /* Just print it out for now. We should reserve it after more debugging */
+ /*
+ * Just print it out for now. We should reserve it after more
+ * debugging.
+ */
pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
@@ -748,8 +914,8 @@ static void quirk_vt8235_acpi(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
/*
- * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
- * Disable fast back-to-back on the secondary bus segment
+ * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast
+ * back-to-back: Disable fast back-to-back on the secondary bus segment
*/
static void quirk_xio2000a(struct pci_dev *dev)
{
@@ -774,8 +940,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
* VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
* devices to the external APIC.
*
- * TODO: When we have device-specific interrupt routers,
- * this code will go away from quirks.
+ * TODO: When we have device-specific interrupt routers, this code will go
+ * away from quirks.
*/
static void quirk_via_ioapic(struct pci_dev *dev)
{
@@ -816,13 +982,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
/*
- * The AMD io apic can hang the box when an apic irq is masked.
+ * The AMD IO-APIC can hang the box when an APIC IRQ is masked.
* We check all revs >= B0 (yet not in the pre production!) as the bug
* is currently marked NoFix
*
* We have multiple reports of hangs with this chipset that went away with
* noapic specified. For the moment we assume it's the erratum. We may be wrong
- * of course. However the advice is demonstrably good even if so..
+ * of course. However the advice is demonstrably good even if so.
*/
static void quirk_amd_ioapic(struct pci_dev *dev)
{
@@ -838,7 +1004,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_a
static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
{
- /* Fix for improper SRIOV configuration on Cavium cn88xx RNM device */
+ /* Fix for improper SR-IOV configuration on Cavium cn88xx RNM device */
if (dev->subsystem_device == 0xa118)
dev->sriov->link = dev->devfn;
}
@@ -860,19 +1026,17 @@ static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
/*
- * FIXME: it is questionable that quirk_via_acpi
- * is needed. It shows up as an ISA bridge, and does not
- * support the PCI_INTERRUPT_LINE register at all. Therefore
- * it seems like setting the pci_dev's 'irq' to the
- * value of the ACPI SCI interrupt is only done for convenience.
+ * FIXME: it is questionable that quirk_via_acpi() is needed. It shows up
+ * as an ISA bridge, and does not support the PCI_INTERRUPT_LINE register
+ * at all. Therefore it seems like setting the pci_dev's IRQ to the value
+ * of the ACPI SCI interrupt is only done for convenience.
* -jgarzik
*/
static void quirk_via_acpi(struct pci_dev *d)
{
- /*
- * VIA ACPI device: SCI IRQ line in PCI config byte 0x42
- */
u8 irq;
+
+ /* VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */
pci_read_config_byte(d, 0x42, &irq);
irq &= 0xf;
if (irq && (irq != 2))
@@ -881,11 +1045,7 @@ static void quirk_via_acpi(struct pci_dev *d)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
-
-/*
- * VIA bridges which have VLink
- */
-
+/* VIA bridges which have VLink */
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
static void quirk_via_bridge(struct pci_dev *dev)
@@ -893,9 +1053,11 @@ static void quirk_via_bridge(struct pci_dev *dev)
/* See what bridge we have and find the device ranges */
switch (dev->device) {
case PCI_DEVICE_ID_VIA_82C686:
- /* The VT82C686 is special, it attaches to PCI and can have
- any device number. All its subdevices are functions of
- that single device. */
+ /*
+ * The VT82C686 is special; it attaches to PCI and can have
+ * any device number. All its subdevices are functions of
+ * that single device.
+ */
via_vlink_dev_lo = PCI_SLOT(dev->devfn);
via_vlink_dev_hi = PCI_SLOT(dev->devfn);
break;
@@ -923,19 +1085,17 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_b
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
-/**
- * quirk_via_vlink - VIA VLink IRQ number update
- * @dev: PCI device
+/*
+ * quirk_via_vlink - VIA VLink IRQ number update
+ * @dev: PCI device
*
- * If the device we are dealing with is on a PIC IRQ we need to
- * ensure that the IRQ line register which usually is not relevant
- * for PCI cards, is actually written so that interrupts get sent
- * to the right place.
- * We only do this on systems where a VIA south bridge was detected,
- * and only for VIA devices on the motherboard (see quirk_via_bridge
- * above).
+ * If the device we are dealing with is on a PIC IRQ we need to ensure that
+ * the IRQ line register which usually is not relevant for PCI cards, is
+ * actually written so that interrupts get sent to the right place.
+ *
+ * We only do this on systems where a VIA south bridge was detected, and
+ * only for VIA devices on the motherboard (see quirk_via_bridge above).
*/
-
static void quirk_via_vlink(struct pci_dev *dev)
{
u8 irq, new_irq;
@@ -955,9 +1115,10 @@ static void quirk_via_vlink(struct pci_dev *dev)
PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
return;
- /* This is an internal VLink device on a PIC interrupt. The BIOS
- ought to have set this but may not have, so we redo it */
-
+ /*
+ * This is an internal VLink device on a PIC interrupt. The BIOS
+ * ought to have set this but may not have, so we redo it.
+ */
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
if (new_irq != irq) {
pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
@@ -969,10 +1130,9 @@ static void quirk_via_vlink(struct pci_dev *dev)
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
/*
- * VIA VT82C598 has its device ID settable and many BIOSes
- * set it to the ID of VT82C597 for backward compatibility.
- * We need to switch it off to be able to recognize the real
- * type of the chip.
+ * VIA VT82C598 has its device ID settable and many BIOSes set it to the ID
+ * of VT82C597 for backward compatibility. We need to switch it off to be
+ * able to recognize the real type of the chip.
*/
static void quirk_vt82c598_id(struct pci_dev *dev)
{
@@ -982,10 +1142,10 @@ static void quirk_vt82c598_id(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
/*
- * CardBus controllers have a legacy base address that enables them
- * to respond as i82365 pcmcia controllers. We don't want them to
- * do this even if the Linux CardBus driver is not loaded, because
- * the Linux i82365 driver does not (and should not) handle CardBus.
+ * CardBus controllers have a legacy base address that enables them to
+ * respond as i82365 pcmcia controllers. We don't want them to do this
+ * even if the Linux CardBus driver is not loaded, because the Linux i82365
+ * driver does not (and should not) handle CardBus.
*/
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
@@ -997,11 +1157,11 @@ DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
/*
- * Following the PCI ordering rules is optional on the AMD762. I'm not
- * sure what the designers were smoking but let's not inhale...
+ * Following the PCI ordering rules is optional on the AMD762. I'm not sure
+ * what the designers were smoking but let's not inhale...
*
- * To be fair to AMD, it follows the spec by default, its BIOS people
- * who turn it off!
+ * To be fair to AMD, it follows the spec by default, it's BIOS people who
+ * turn it off!
*/
static void quirk_amd_ordering(struct pci_dev *dev)
{
@@ -1020,11 +1180,11 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
/*
- * DreamWorks provided workaround for Dunord I-3000 problem
+ * DreamWorks-provided workaround for Dunord I-3000 problem
*
- * This card decodes and responds to addresses not apparently
- * assigned to it. We force a larger allocation to ensure that
- * nothing gets put too close to it.
+ * This card decodes and responds to addresses not apparently assigned to
+ * it. We force a larger allocation to ensure that nothing gets put too
+ * close to it.
*/
static void quirk_dunord(struct pci_dev *dev)
{
@@ -1037,10 +1197,9 @@ static void quirk_dunord(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
/*
- * i82380FB mobile docking controller: its PCI-to-PCI bridge
- * is subtractive decoding (transparent), and does indicate this
- * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
- * instead of 0x01.
+ * i82380FB mobile docking controller: its PCI-to-PCI bridge is subtractive
+ * decoding (transparent), and does indicate this in the ProgIf.
+ * Unfortunately, the ProgIf value is wrong - 0x80 instead of 0x01.
*/
static void quirk_transparent_bridge(struct pci_dev *dev)
{
@@ -1050,10 +1209,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
/*
- * Common misconfiguration of the MediaGX/Geode PCI master that will
- * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1
- * datasheets found at http://www.national.com/analog for info on what
- * these bits do. <christer@weinigel.se>
+ * Common misconfiguration of the MediaGX/Geode PCI master that will reduce
+ * PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 datasheets
+ * found at http://www.national.com/analog for info on what these bits do.
+ * <christer@weinigel.se>
*/
static void quirk_mediagx_master(struct pci_dev *dev)
{
@@ -1071,9 +1230,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qui
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
/*
- * Ensure C0 rev restreaming is off. This is normally done by
- * the BIOS but in the odd case it is not the results are corruption
- * hence the presence of a Linux check
+ * Ensure C0 rev restreaming is off. This is normally done by the BIOS but,
+ * in the odd case it is not, the result is corruption, hence the presence
+ * of a Linux check.
*/
static void quirk_disable_pxb(struct pci_dev *pdev)
{
@@ -1117,9 +1276,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
-/*
- * Serverworks CSB5 IDE does not fully support native mode
- */
+/* Serverworks CSB5 IDE does not fully support native mode */
static void quirk_svwks_csb5ide(struct pci_dev *pdev)
{
u8 prog;
@@ -1133,9 +1290,7 @@ static void quirk_svwks_csb5ide(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
-/*
- * Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
- */
+/* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same */
static void quirk_ide_samemode(struct pci_dev *pdev)
{
u8 prog;
@@ -1151,10 +1306,7 @@ static void quirk_ide_samemode(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
-/*
- * Some ATA devices break if put into D3
- */
-
+/* Some ATA devices break if put into D3 */
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
@@ -1172,7 +1324,8 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
-/* This was originally an Alpha specific thing, but it really fits here.
+/*
+ * This was originally an Alpha-specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
*/
static void quirk_eisa_bridge(struct pci_dev *dev)
@@ -1181,7 +1334,6 @@ static void quirk_eisa_bridge(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
-
/*
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
@@ -1398,15 +1550,19 @@ static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
+
/* read the Function Disable register, dword mode only */
val = readl(asus_rcba_base + 0x3418);
- writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
+
+ /* enable the SMBus device */
+ writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
}
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
+
iounmap(asus_rcba_base);
asus_rcba_base = NULL;
pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
@@ -1423,9 +1579,7 @@ DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
-/*
- * SiS 96x south bridge: BIOS typically hides SMBus device...
- */
+/* SiS 96x south bridge: BIOS typically hides SMBus device... */
static void quirk_sis_96x_smbus(struct pci_dev *dev)
{
u8 val = 0;
@@ -1448,7 +1602,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_si
* ... This is further complicated by the fact that some SiS96x south
* bridges pretend to be 85C503/5513 instead. In that case see if we
* spotted a compatible north bridge to make sure.
- * (pci_find_device doesn't work yet)
+ * (pci_find_device() doesn't work yet)
*
* We can also enable the sis96x bit in the discovery register..
*/
@@ -1468,9 +1622,9 @@ static void quirk_sis_503(struct pci_dev *dev)
}
/*
- * Ok, it now shows up as a 96x.. run the 96x quirk by
- * hand in case it has already been processed.
- * (depends on link order, which is apparently not guaranteed)
+ * Ok, it now shows up as a 96x. Run the 96x quirk by hand in case
+ * it has already been processed. (Depends on link order, which is
+ * apparently not guaranteed)
*/
dev->device = devid;
quirk_sis_96x_smbus(dev);
@@ -1478,7 +1632,6 @@ static void quirk_sis_503(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
-
/*
* On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
* and MC97 modem controller are disabled when a second PCI soundcard is
@@ -1515,9 +1668,8 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_h
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
/*
- * If we are using libata we can drive this chip properly but must
- * do this early on to make the additional device appear during
- * the PCI scanning.
+ * If we are using libata we can drive this chip properly but must do this
+ * early on to make the additional device appear during the PCI scanning.
*/
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
@@ -1613,14 +1765,18 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
if ((pdev->class >> 8) != 0xff00)
return;
- /* the first BAR is the location of the IO APIC...we must
+ /*
+ * The first BAR is the location of the IO-APIC... we must
* not touch this (and it's already covered by the fixmap), so
- * forcibly insert it into the resource tree */
+ * forcibly insert it into the resource tree.
+ */
if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
insert_resource(&iomem_resource, &pdev->resource[0]);
- /* The next five BARs all seem to be rubbish, so just clean
- * them out */
+ /*
+ * The next five BARs all seem to be rubbish, so just clean
+ * them out.
+ */
for (i = 1; i < 6; i++)
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
@@ -1638,8 +1794,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
/*
- * It's possible for the MSI to get corrupted if shpc and acpi
- * are used together on certain PXH-based systems.
+ * It's possible for the MSI to get corrupted if SHPC and ACPI are used
+ * together on certain PXH-based systems.
*/
static void quirk_pcie_pxh(struct pci_dev *dev)
{
@@ -1653,15 +1809,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pc
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
/*
- * Some Intel PCI Express chipsets have trouble with downstream
- * device power management.
+ * Some Intel PCI Express chipsets have trouble with downstream device
+ * power management.
*/
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
pci_pm_d3_delay = 120;
dev->no_d1d2 = 1;
}
-
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
@@ -1723,7 +1878,7 @@ static const struct dmi_system_id boot_interrupt_dmi_table[] = {
/*
* Boot interrupts on some chipsets cannot be turned off. For these chipsets,
- * remap the original interrupt in the linux kernel to the boot interrupt, so
+ * remap the original interrupt in the Linux kernel to the boot interrupt, so
* that a PCI device's interrupt handler is installed on the boot interrupt
* line instead.
*/
@@ -1760,7 +1915,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk
*/
/*
- * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no
+ * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no
* 300641-004US, section 5.7.3.
*/
#define INTEL_6300_IOAPIC_ABAR 0x40
@@ -1783,9 +1938,7 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
-/*
- * disable boot interrupts on HT-1000
- */
+/* Disable boot interrupts on HT-1000 */
#define BC_HT1000_FEATURE_REG 0x64
#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
#define BC_HT1000_MAP_IDX 0xC00
@@ -1816,9 +1969,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
-/*
- * disable boot interrupts on AMD and ATI chipsets
- */
+/* Disable boot interrupts on AMD and ATI chipsets */
+
/*
* NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
* rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
@@ -1894,7 +2046,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
quirk_tc86c001_ide);
/*
- * PLX PCI 9050 PCI Target bridge controller has an errata that prevents the
+ * PLX PCI 9050 PCI Target bridge controller has an erratum that prevents the
* local configuration registers accessible via BAR0 (memory) or BAR1 (i/o)
* being read correctly if bit 7 of the base address is set.
* The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
@@ -2087,15 +2239,17 @@ static void quirk_p64h2_1k_io(struct pci_dev *dev)
dev->io_window_1k = 1;
}
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
-/* Under some circumstances, AER is not linked with extended capabilities.
+/*
+ * Under some circumstances, AER is not linked with extended capabilities.
* Force it to be linked by setting the corresponding control bit in the
* config space.
*/
static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
uint8_t b;
+
if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
if (!(b & 0x20)) {
pci_write_config_byte(dev, 0xf41, b | 0x20);
@@ -2125,8 +2279,10 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
uint8_t b;
- /* p should contain the first (internal) VT6212L -- see if we have
- an external one by searching again */
+ /*
+ * p should contain the first (internal) VT6212L -- see if we have
+ * an external one by searching again.
+ */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
if (!p)
return;
@@ -2171,7 +2327,6 @@ static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
pcie_set_readrq(dev, 2048);
}
}
-
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5719,
quirk_brcm_5719_limit_mrrs);
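The declarations above show the pattern used throughout this file: a small fixup function taking a struct pci_dev, registered with a DECLARE_PCI_FIXUP_<phase>() macro naming the vendor/device it applies to. A minimal sketch of that pattern, using a made-up vendor/device ID purely for illustration:

static void quirk_example_disable_msi(struct pci_dev *dev)
{
	/* Illustrative only: mark the device as unable to use MSI */
	dev->no_msi = 1;
	pci_info(dev, "example quirk applied\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example_disable_msi);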
@@ -2179,14 +2334,16 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
#ifdef CONFIG_PCIE_IPROC_PLATFORM
static void quirk_paxc_bridge(struct pci_dev *pdev)
{
- /* The PCI config space is shared with the PAXC root port and the first
+ /*
+ * The PCI config space is shared with the PAXC root port and the first
* Ethernet device. So, we need to work around this by telling the PCI
* code that the bridge is not an Ethernet device.
*/
if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
- /* MPSS is not being set properly (as it is currently 0). This is
+ /*
+ * MPSS is not being set properly (as it is currently 0). This is
* because that area of the PCI config space is hard coded to zero, and
* is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
* so that the MPS can be set to the real max value.
@@ -2197,10 +2354,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
#endif
-/* Originally in EDAC sources for i82875P:
- * Intel tells BIOS developers to hide device 6 which
- * configures the overflow device access containing
- * the DRBs - this is where we expose device 6.
+/*
+ * Originally in EDAC sources for i82875P: Intel tells BIOS developers to
+ * hide device 6 which configures the overflow device access containing the
+ * DRBs - this is where we expose device 6.
* http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
*/
static void quirk_unhide_mch_dev6(struct pci_dev *dev)
@@ -2212,18 +2369,18 @@ static void quirk_unhide_mch_dev6(struct pci_dev *dev)
pci_write_config_byte(dev, 0xF4, reg | 0x02);
}
}
-
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
quirk_unhide_mch_dev6);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
quirk_unhide_mch_dev6);
#ifdef CONFIG_PCI_MSI
-/* Some chipsets do not support MSI. We cannot easily rely on setting
- * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
- * some other buses controlled by the chipset even if Linux is not
- * aware of it. Instead of setting the flag on all buses in the
- * machine, simply disable MSI globally.
+/*
+ * Some chipsets do not support MSI. We cannot easily rely on setting
+ * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually some
+ * other buses controlled by the chipset even if Linux is not aware of it.
+ * Instead of setting the flag on all buses in the machine, simply disable
+ * MSI globally.
*/
static void quirk_disable_all_msi(struct pci_dev *dev)
{
@@ -2271,8 +2428,10 @@ static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
-/* Go through the list of Hypertransport capabilities and
- * return 1 if a HT MSI capability is found and enabled */
+/*
+ * Go through the list of HyperTransport capabilities and return 1 if a HT
+ * MSI capability is found and enabled.
+ */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
int pos, ttl = PCI_FIND_CAP_TTL;
@@ -2295,7 +2454,7 @@ static int msi_ht_cap_enabled(struct pci_dev *dev)
return 0;
}
-/* Check the hypertransport MSI mapping to know whether MSI is enabled or not */
+/* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */
static void quirk_msi_ht_cap(struct pci_dev *dev)
{
if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
@@ -2306,8 +2465,9 @@ static void quirk_msi_ht_cap(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
quirk_msi_ht_cap);
-/* The nVidia CK804 chipset may have 2 HT MSI mappings.
- * MSI are supported if the MSI capability set in any of these mappings.
+/*
+ * The nVidia CK804 chipset may have 2 HT MSI mappings. MSI is supported
+ * if the MSI capability is set in any of these mappings.
*/
static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
@@ -2316,8 +2476,9 @@ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
if (!dev->subordinate)
return;
- /* check HT MSI cap on this chipset and the root one.
- * a single one having MSI is enough to be sure that MSI are supported.
+ /*
+ * Check HT MSI cap on this chipset and the root one. A single one
+ * having MSI is enough to be sure that MSI is supported.
*/
pdev = pci_get_slot(dev->bus, 0);
if (!pdev)
@@ -2354,13 +2515,13 @@ static void ht_enable_msi_mapping(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
ht_enable_msi_mapping);
-
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
ht_enable_msi_mapping);
-/* The P5N32-SLI motherboards from Asus have a problem with msi
- * for the MCP55 NIC. It is not yet determined whether the msi problem
- * also affects other devices. As for now, turn off msi for this device.
+/*
+ * The P5N32-SLI motherboards from Asus have a problem with MSI
+ * for the MCP55 NIC. It is not yet determined whether the MSI problem
+ * also affects other devices. As for now, turn off MSI for this device.
*/
static void nvenet_msi_disable(struct pci_dev *dev)
{
@@ -2397,16 +2558,14 @@ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
pci_read_config_dword(dev, 0x74, &cfg);
if (cfg & ((1 << 2) | (1 << 15))) {
- printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
+ printk(KERN_INFO "Rewriting IRQ routing register on MCP55\n");
cfg &= ~((1 << 2) | (1 << 15));
pci_write_config_dword(dev, 0x74, cfg);
}
}
-
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
nvbridge_check_legacy_irq_routing);
-
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
nvbridge_check_legacy_irq_routing);
@@ -2416,7 +2575,7 @@ static int ht_check_msi_mapping(struct pci_dev *dev)
int pos, ttl = PCI_FIND_CAP_TTL;
int found = 0;
- /* check if there is HT MSI cap or enabled on this device */
+ /* Check if there is HT MSI cap or enabled on this device */
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
@@ -2452,7 +2611,7 @@ static int host_bridge_with_leaf(struct pci_dev *host_bridge)
if (!dev)
continue;
- /* found next host bridge ?*/
+ /* found next host bridge? */
pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
if (pos != 0) {
pci_dev_put(dev);
@@ -2611,27 +2770,27 @@ static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 1);
}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 0);
}
-
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
-
static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
+
static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
{
struct pci_dev *p;
- /* SB700 MSI issue will be fixed at HW level from revision A21,
+ /*
+ * SB700 MSI issue will be fixed at HW level from revision A21;
* we need to check the PCI REVISION ID of the SMBus controller to get the SB700
* revision.
*/
@@ -2644,6 +2803,7 @@ static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
pci_dev_put(p);
}
+
static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
{
/* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
@@ -2713,55 +2873,56 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
quirk_msi_intx_disable_qca_bug);
#endif /* CONFIG_PCI_MSI */
-/* Allow manual resource allocation for PCI hotplug bridges
- * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
- * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
- * kernel fails to allocate resources when hotplug device is
- * inserted and PCI bus is rescanned.
+/*
+ * Allow manual resource allocation for PCI hotplug bridges via
+ * pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For some PCI-PCI
+ * hotplug bridges, like the PLX 6254 (formerly HINT HB6), the kernel fails
+ * to allocate resources when a hotplug device is inserted and the PCI bus
+ * is rescanned.
*/
static void quirk_hotplug_bridge(struct pci_dev *dev)
{
dev->is_hotplug_bridge = 1;
}
-
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
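As an example of the manual allocation this enables (the values are illustrative, not taken from this patch), booting with "pci=hpiosize=4K,hpmemsize=64M" asks the kernel to reserve 4 KB of I/O space and 64 MB of memory space behind each hotplug bridge.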
/*
- * This is a quirk for the Ricoh MMC controller found as a part of
- * some mulifunction chips.
-
+ * This is a quirk for the Ricoh MMC controller found as a part of some
+ * multifunction chips.
+ *
* This is very similar and based on the ricoh_mmc driver written by
* Philip Langdale. Thank you for these magic sequences.
*
- * These chips implement the four main memory card controllers (SD, MMC, MS, xD)
- * and one or both of cardbus or firewire.
+ * These chips implement the four main memory card controllers (SD, MMC,
+ * MS, xD) and one or both of CardBus or FireWire.
*
- * It happens that they implement SD and MMC
- * support as separate controllers (and PCI functions). The linux SDHCI
- * driver supports MMC cards but the chip detects MMC cards in hardware
- * and directs them to the MMC controller - so the SDHCI driver never sees
- * them.
+ * It happens that they implement SD and MMC support as separate
+ * controllers (and PCI functions). The Linux SDHCI driver supports MMC
+ * cards but the chip detects MMC cards in hardware and directs them to the
+ * MMC controller - so the SDHCI driver never sees them.
*
- * To get around this, we must disable the useless MMC controller.
- * At that point, the SDHCI controller will start seeing them
- * It seems to be the case that the relevant PCI registers to deactivate the
- * MMC controller live on PCI function 0, which might be the cardbus controller
- * or the firewire controller, depending on the particular chip in question
+ * To get around this, we must disable the useless MMC controller. At that
+ * point, the SDHCI controller will start seeing them. It seems to be the
+ * case that the relevant PCI registers to deactivate the MMC controller
+ * live on PCI function 0, which might be the CardBus controller or the
+ * FireWire controller, depending on the particular chip in question.
*
* This has to be done early, because as soon as we disable the MMC controller
- * other pci functions shift up one level, e.g. function #2 becomes function
- * #1, and this will confuse the pci core.
+ * other PCI functions shift up one level, e.g. function #2 becomes function
+ * #1, and this will confuse the PCI core.
*/
-
#ifdef CONFIG_MMC_RICOH_MMC
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
- /* disable via cardbus interface */
u8 write_enable;
u8 write_target;
u8 disable;
- /* disable must be done via function #0 */
+ /*
+ * Disable via CardBus interface
+ *
+ * This must be done via function #0
+ */
if (PCI_FUNC(dev->devfn))
return;
@@ -2777,7 +2938,7 @@ static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
pci_write_config_byte(dev, 0x8E, write_enable);
pci_write_config_byte(dev, 0x8D, write_target);
- pci_notice(dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
+ pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
@@ -2785,17 +2946,20 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
- /* disable via firewire interface */
u8 write_enable;
u8 disable;
- /* disable must be done via function #0 */
+ /*
+ * Disable via FireWire interface
+ *
+ * This must be done via function #0
+ */
if (PCI_FUNC(dev->devfn))
return;
/*
* RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
- * certain types of SD/MMC cards. Lowering the SD base
- * clock frequency from 200Mhz to 50Mhz fixes this issue.
+ * certain types of SD/MMC cards. Lowering the SD base clock
+ * frequency from 200 MHz to 50 MHz fixes this issue.
*
* 0x150 - SD2.0 mode enable for changing base clock
* frequency to 50 MHz
@@ -2826,7 +2990,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
pci_write_config_byte(dev, 0xCB, disable | 0x02);
pci_write_config_byte(dev, 0xCA, write_enable);
- pci_notice(dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
+ pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
}
@@ -2842,13 +3006,13 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823,
#define VTUNCERRMSK_REG 0x1ac
#define VTD_MSK_SPEC_ERRORS (1 << 31)
/*
- * This is a quirk for masking vt-d spec defined errors to platform error
- * handling logic. With out this, platforms using Intel 7500, 5500 chipsets
+ * This is a quirk for masking VT-d spec-defined errors to platform error
+ * handling logic. Without this, platforms using Intel 7500, 5500 chipsets
* (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
- * on the RAS config settings of the platform) when a vt-d fault happens.
+ * on the RAS config settings of the platform) when a VT-d fault happens.
* The resulting SMI caused the system to hang.
*
- * VT-d spec related errors are already handled by the VT-d OS code, so no
+ * VT-d spec-related errors are already handled by the VT-d OS code, so no
* need to report the same error through other channels.
*/
static void vtd_mask_spec_errors(struct pci_dev *dev)
@@ -2874,7 +3038,8 @@ static void fixup_ti816x_class(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
-/* Some PCIe devices do not work reliably with the claimed maximum
+/*
+ * Some PCIe devices do not work reliably with the claimed maximum
* payload size supported.
*/
static void fixup_mpss_256(struct pci_dev *dev)
@@ -2888,9 +3053,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
-/* Intel 5000 and 5100 Memory controllers have an errata with read completion
+/*
+ * Intel 5000 and 5100 Memory controllers have an erratum with read completion
* coalescing (which is enabled by default on some BIOSes) and MPS of 256B.
- * Since there is no way of knowing what the PCIE MPS on each fabric will be
+ * Since there is no way of knowing what the PCIe MPS on each fabric will be
* until all of the devices are discovered and buses walked, read completion
* coalescing must be disabled. Unfortunately, it cannot be re-enabled because
* it is possible to hotplug a device with MPS of 256B.
@@ -2904,9 +3070,10 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
pcie_bus_config == PCIE_BUS_DEFAULT)
return;
- /* Intel errata specifies bits to change but does not say what they are.
- * Keeping them magical until such time as the registers and values can
- * be explained.
+ /*
+ * The Intel erratum specifies bits to change but does not say what
+ * they are. Keeping them magical until such time as the registers
+ * and values can be explained.
*/
err = pci_read_config_word(dev, 0x48, &rcc);
if (err) {
@@ -2925,7 +3092,7 @@ static void quirk_intel_mc_errata(struct pci_dev *dev)
return;
}
- pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n");
+ pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
}
/* Intel 5000 series memory controllers and ports 2-7 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
@@ -2955,11 +3122,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
-
/*
- * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. To
- * work around this, query the size it should be configured to by the device and
- * modify the resource end to correspond to this new size.
+ * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.
+ * To work around this, query the size it should be configured to by the
+ * device and modify the resource end to correspond to this new size.
*/
static void quirk_intel_ntb(struct pci_dev *dev)
{
@@ -2981,39 +3147,17 @@ static void quirk_intel_ntb(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
-static ktime_t fixup_debug_start(struct pci_dev *dev,
- void (*fn)(struct pci_dev *dev))
-{
- if (initcall_debug)
- pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current));
-
- return ktime_get();
-}
-
-static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
- void (*fn)(struct pci_dev *dev))
-{
- ktime_t delta, rettime;
- unsigned long long duration;
-
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- if (initcall_debug || duration > 10000)
- pci_info(dev, "%pF took %lld usecs\n", fn, duration);
-}
-
/*
- * Some BIOS implementations leave the Intel GPU interrupts enabled,
- * even though no one is handling them (f.e. i915 driver is never loaded).
- * Additionally the interrupt destination is not set up properly
+ * Some BIOS implementations leave the Intel GPU interrupts enabled, even
+ * though no one is handling them (e.g., if the i915 driver is never
+ * loaded). Additionally the interrupt destination is not set up properly
* and the interrupt ends up -somewhere-.
*
- * These spurious interrupts are "sticky" and the kernel disables
- * the (shared) interrupt line after 100.000+ generated interrupts.
+ * These spurious interrupts are "sticky" and the kernel disables the
+ * (shared) interrupt line after 100,000+ generated interrupts.
*
- * Fix it by disabling the still enabled interrupts.
- * This resolves crashes often seen on monitor unplug.
+ * Fix it by disabling the still enabled interrupts. This resolves crashes
+ * often seen on monitor unplug.
*/
#define I915_DEIER_REG 0x4400c
static void disable_igfx_irq(struct pci_dev *dev)
@@ -3101,38 +3245,22 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
* Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
* DisINTx can be set but the interrupt status bit is non-functional.
*/
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
static u16 mellanox_broken_intx_devs[] = {
PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
@@ -3177,7 +3305,8 @@ static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
}
}
- /* Getting here means Connect-IB cards and up. Connect-IB has no INTx
+ /*
+ * Getting here means Connect-IB cards and up. Connect-IB has no INTx
* support so shouldn't be checked further
*/
if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
@@ -3297,8 +3426,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
* shutdown before suspend. Otherwise the native host interface (NHI) will not
* be present after resume if a device was plugged in before suspend.
*
- * The thunderbolt controller consists of a pcie switch with downstream
- * bridges leading to the NHI and to the tunnel pci bridges.
+ * The Thunderbolt controller consists of a PCIe switch with downstream
+ * bridges leading to the NHI and to the tunnel PCI bridges.
*
* This quirk cuts power to the whole chip. Therefore we have to apply it
* during suspend_noirq of the upstream bridge.
@@ -3316,17 +3445,19 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
bridge = ACPI_HANDLE(&dev->dev);
if (!bridge)
return;
+
/*
* SXIO and SXLV are present only on machines requiring this quirk.
- * TB bridges in external devices might have the same device id as those
- * on the host, but they will not have the associated ACPI methods. This
- * implicitly checks that we are at the right bridge.
+ * Thunderbolt bridges in external devices might have the same
+ * device ID as those on the host, but they will not have the
+ * associated ACPI methods. This implicitly checks that we are at
+ * the right bridge.
*/
if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
|| ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
|| ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
return;
- pci_info(dev, "quirk: cutting power to thunderbolt controller...\n");
+ pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");
/* magic sequence */
acpi_execute_simple_method(SXIO, NULL, 1);
@@ -3341,9 +3472,9 @@ DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
quirk_apple_poweroff_thunderbolt);
/*
- * Apple: Wait for the thunderbolt controller to reestablish pci tunnels.
+ * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels
*
- * During suspend the thunderbolt controller is reset and all pci
+ * During suspend the Thunderbolt controller is reset and all PCI
* tunnels are lost. The NHI driver will try to reestablish all tunnels
* during resume. We have to manually wait for the NHI since there is
* no parent child relationship between the NHI and the tunneled
@@ -3358,9 +3489,10 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
return;
if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
return;
+
/*
- * Find the NHI and confirm that we are a bridge on the tb host
- * controller and not on a tb endpoint.
+ * Find the NHI and confirm that we are a bridge on the Thunderbolt
+ * host controller and not on a Thunderbolt endpoint.
*/
sibling = pci_get_slot(dev->bus, 0x0);
if (sibling == dev)
@@ -3377,7 +3509,7 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
|| nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
goto out;
- pci_info(dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
+ pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
device_pm_wait_for_dev(&dev->dev, &nhi->dev);
out:
pci_dev_put(nhi);
@@ -3397,142 +3529,6 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
quirk_apple_wait_for_thunderbolt);
#endif
-static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
- struct pci_fixup *end)
-{
- ktime_t calltime;
-
- for (; f < end; f++)
- if ((f->class == (u32) (dev->class >> f->class_shift) ||
- f->class == (u32) PCI_ANY_ID) &&
- (f->vendor == dev->vendor ||
- f->vendor == (u16) PCI_ANY_ID) &&
- (f->device == dev->device ||
- f->device == (u16) PCI_ANY_ID)) {
- calltime = fixup_debug_start(dev, f->hook);
- f->hook(dev);
- fixup_debug_report(dev, calltime, f->hook);
- }
-}
-
-extern struct pci_fixup __start_pci_fixups_early[];
-extern struct pci_fixup __end_pci_fixups_early[];
-extern struct pci_fixup __start_pci_fixups_header[];
-extern struct pci_fixup __end_pci_fixups_header[];
-extern struct pci_fixup __start_pci_fixups_final[];
-extern struct pci_fixup __end_pci_fixups_final[];
-extern struct pci_fixup __start_pci_fixups_enable[];
-extern struct pci_fixup __end_pci_fixups_enable[];
-extern struct pci_fixup __start_pci_fixups_resume[];
-extern struct pci_fixup __end_pci_fixups_resume[];
-extern struct pci_fixup __start_pci_fixups_resume_early[];
-extern struct pci_fixup __end_pci_fixups_resume_early[];
-extern struct pci_fixup __start_pci_fixups_suspend[];
-extern struct pci_fixup __end_pci_fixups_suspend[];
-extern struct pci_fixup __start_pci_fixups_suspend_late[];
-extern struct pci_fixup __end_pci_fixups_suspend_late[];
-
-static bool pci_apply_fixup_final_quirks;
-
-void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
-{
- struct pci_fixup *start, *end;
-
- switch (pass) {
- case pci_fixup_early:
- start = __start_pci_fixups_early;
- end = __end_pci_fixups_early;
- break;
-
- case pci_fixup_header:
- start = __start_pci_fixups_header;
- end = __end_pci_fixups_header;
- break;
-
- case pci_fixup_final:
- if (!pci_apply_fixup_final_quirks)
- return;
- start = __start_pci_fixups_final;
- end = __end_pci_fixups_final;
- break;
-
- case pci_fixup_enable:
- start = __start_pci_fixups_enable;
- end = __end_pci_fixups_enable;
- break;
-
- case pci_fixup_resume:
- start = __start_pci_fixups_resume;
- end = __end_pci_fixups_resume;
- break;
-
- case pci_fixup_resume_early:
- start = __start_pci_fixups_resume_early;
- end = __end_pci_fixups_resume_early;
- break;
-
- case pci_fixup_suspend:
- start = __start_pci_fixups_suspend;
- end = __end_pci_fixups_suspend;
- break;
-
- case pci_fixup_suspend_late:
- start = __start_pci_fixups_suspend_late;
- end = __end_pci_fixups_suspend_late;
- break;
-
- default:
- /* stupid compiler warning, you would think with an enum... */
- return;
- }
- pci_do_fixups(dev, start, end);
-}
-EXPORT_SYMBOL(pci_fixup_device);
-
-
-static int __init pci_apply_final_quirks(void)
-{
- struct pci_dev *dev = NULL;
- u8 cls = 0;
- u8 tmp;
-
- if (pci_cache_line_size)
- printk(KERN_DEBUG "PCI: CLS %u bytes\n",
- pci_cache_line_size << 2);
-
- pci_apply_fixup_final_quirks = true;
- for_each_pci_dev(dev) {
- pci_fixup_device(pci_fixup_final, dev);
- /*
- * If arch hasn't set it explicitly yet, use the CLS
- * value shared by all PCI devices. If there's a
- * mismatch, fall back to the default value.
- */
- if (!pci_cache_line_size) {
- pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
- if (!cls)
- cls = tmp;
- if (!tmp || cls == tmp)
- continue;
-
- printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
- cls << 2, tmp << 2,
- pci_dfl_cache_line_size << 2);
- pci_cache_line_size = pci_dfl_cache_line_size;
- }
- }
-
- if (!pci_cache_line_size) {
- printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
- cls << 2, pci_dfl_cache_line_size << 2);
- pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
- }
-
- return 0;
-}
-
-fs_initcall_sync(pci_apply_final_quirks);
-
/*
* Following are device-specific reset methods which can be used to
* reset a single function if other methods (e.g. FLR, PM D0->D3) are
@@ -3602,9 +3598,7 @@ reset_complete:
return 0;
}
-/*
- * Device-specific reset method for Chelsio T4-based adapters.
- */
+/* Device-specific reset method for Chelsio T4-based adapters */
static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
{
u16 old_command;
@@ -3887,7 +3881,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
/*
* Some devices have problems with Transaction Layer Packets with the Relaxed
* Ordering Attribute set. Such devices should mark themselves and other
- * Device Drivers should check before sending TLPs with RO set.
+ * device drivers should check before sending TLPs with RO set.
*/
static void quirk_relaxedordering_disable(struct pci_dev *dev)
{
@@ -3897,7 +3891,7 @@ static void quirk_relaxedordering_disable(struct pci_dev *dev)
/*
* Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
- * Complex has a Flow Control Credit issue which can cause performance
+ * Complex have a Flow Control Credit issue which can cause performance
* problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
*/
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
@@ -3958,7 +3952,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED
quirk_relaxedordering_disable);
/*
- * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
+ * The AMD ARM A1100 (aka "SEATTLE") SoC has a bug in its PCIe Root Complex
* where Upstream Transaction Layer Packets with the Relaxed Ordering
* Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
* set. This is a violation of the PCIe 3.0 Transaction Ordering Rules
@@ -4022,7 +4016,7 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
* This mask/compare operation selects for Physical Function 4 on a
* T5. We only need to fix up the Root Port once for any of the
* PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
- * 0x54xx so we use that one,
+ * 0x54xx so we use that one.
*/
if ((pdev->device & 0xff00) == 0x5400)
quirk_disable_root_port_attributes(pdev);
@@ -4113,7 +4107,7 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
- * X-Gene root matching this quirk do not allow peer-to-peer
+ * X-Gene Root Ports matching this quirk do not allow peer-to-peer
* transactions with others, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
@@ -4230,11 +4224,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
* 0xa290-0xa29f PCI Express Root port #{0-16}
* 0xa2e7-0xa2ee PCI Express Root port #{17-24}
*
+ * Mobile chipsets are also affected; the 7th & 8th Generation Specification
+ * Update confirms ACS erratum 22, status: no fix (7th Generation
+ * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel
+ * Processor Family I/O for U Quad Core Platforms Specification Update,
+ * August 2017, Revision 002, Document#: 334660-002)[6]
+ * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O
+ * for U/Y Platforms and 8th Generation Intel Processor Family I/O for U
+ * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
+ *
+ * 0x9d10-0x9d1b PCI Express Root port #{1-12}
+ *
+ * The 300 series chipset suffers from the same bug so include those root
+ * ports here as well.
+ *
+ * 0xa32c-0xa343 PCI Express Root port #{0-24}
+ *
* [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
* [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
* [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
* [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
* [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
+ * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
+ * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
*/
static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
{
@@ -4244,6 +4256,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
switch (dev->device) {
case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
+ case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
+ case 0xa32c ... 0xa343: /* 300 series */
return true;
}
@@ -4361,8 +4375,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
/* QCOM QDF2xxx root ports */
- { 0x17cb, 0x400, pci_quirk_qcom_rp_acs },
- { 0x17cb, 0x401, pci_quirk_qcom_rp_acs },
+ { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
+ { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
@@ -4436,7 +4450,7 @@ static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
/*
* Read the RCBA register from the LPC (D31:F0). PCH root ports
* are D28:F* and therefore get probed before LPC, thus we can't
- * use pci_get_slot/pci_read_config_dword here.
+ * use pci_get_slot()/pci_read_config_dword() here.
*/
pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
INTEL_LPC_RCBA_REG, &rcba);
@@ -4569,7 +4583,7 @@ int pci_dev_specific_enable_acs(struct pci_dev *dev)
}
/*
- * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with
+ * The PCI capabilities list for Intel DH895xCC VFs (device ID 0x0443) with
* QuickAssist Technology (QAT) is prematurely terminated in hardware. The
* Next Capability pointer in the MSI Capability Structure should point to
* the PCIe Capability Structure but is incorrectly hardwired as 0 terminating
@@ -4630,9 +4644,7 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
return;
- /*
- * Save PCIE cap
- */
+ /* Save PCIe cap */
state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
if (!state)
return;
@@ -4653,7 +4665,7 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
-/* FLR may cause some 82579 devices to hang. */
+/* FLR may cause some 82579 devices to hang */
static void quirk_intel_no_flr(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 072784f55ea5b..79b1824e83b47 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1943,56 +1943,56 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
}
/*
+ * There is only one bridge on the bus so it gets all available
+ * resources which it can then distribute to the possible
+ * hotplug bridges below.
+ */
+ if (hotplug_bridges + normal_bridges == 1) {
+ dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
+ if (dev->subordinate) {
+ pci_bus_distribute_available_resources(dev->subordinate,
+ add_list, available_io, available_mmio,
+ available_mmio_pref);
+ }
+ return;
+ }
+
+ /*
* Go over devices on this bus and distribute the remaining
* resource space between hotplug bridges.
*/
for_each_pci_bridge(dev, bus) {
+ resource_size_t align, io, mmio, mmio_pref;
struct pci_bus *b;
b = dev->subordinate;
- if (!b)
+ if (!b || !dev->is_hotplug_bridge)
continue;
- if (!hotplug_bridges && normal_bridges == 1) {
- /*
- * There is only one bridge on the bus (upstream
- * port) so it gets all available resources
- * which it can then distribute to the possible
- * hotplug bridges below.
- */
- pci_bus_distribute_available_resources(b, add_list,
- available_io, available_mmio,
- available_mmio_pref);
- } else if (dev->is_hotplug_bridge) {
- resource_size_t align, io, mmio, mmio_pref;
-
- /*
- * Distribute available extra resources equally
- * between hotplug-capable downstream ports
- * taking alignment into account.
- *
- * Here hotplug_bridges is always != 0.
- */
- align = pci_resource_alignment(bridge, io_res);
- io = div64_ul(available_io, hotplug_bridges);
- io = min(ALIGN(io, align), remaining_io);
- remaining_io -= io;
-
- align = pci_resource_alignment(bridge, mmio_res);
- mmio = div64_ul(available_mmio, hotplug_bridges);
- mmio = min(ALIGN(mmio, align), remaining_mmio);
- remaining_mmio -= mmio;
-
- align = pci_resource_alignment(bridge, mmio_pref_res);
- mmio_pref = div64_ul(available_mmio_pref,
- hotplug_bridges);
- mmio_pref = min(ALIGN(mmio_pref, align),
- remaining_mmio_pref);
- remaining_mmio_pref -= mmio_pref;
-
- pci_bus_distribute_available_resources(b, add_list, io,
- mmio, mmio_pref);
- }
+ /*
+ * Distribute available extra resources equally between
+ * hotplug-capable downstream ports taking alignment into
+ * account.
+ *
+ * Here hotplug_bridges is always != 0.
+ */
+ align = pci_resource_alignment(bridge, io_res);
+ io = div64_ul(available_io, hotplug_bridges);
+ io = min(ALIGN(io, align), remaining_io);
+ remaining_io -= io;
+
+ align = pci_resource_alignment(bridge, mmio_res);
+ mmio = div64_ul(available_mmio, hotplug_bridges);
+ mmio = min(ALIGN(mmio, align), remaining_mmio);
+ remaining_mmio -= mmio;
+
+ align = pci_resource_alignment(bridge, mmio_pref_res);
+ mmio_pref = div64_ul(available_mmio_pref, hotplug_bridges);
+ mmio_pref = min(ALIGN(mmio_pref, align), remaining_mmio_pref);
+ remaining_mmio_pref -= mmio_pref;
+
+ pci_bus_distribute_available_resources(b, add_list, io, mmio,
+ mmio_pref);
}
}
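To make the distribution concrete, assume three hotplug bridges below the bus, 96 MB of available_mmio and a 1 MB bridge window alignment: each bridge is offered div64_ul(96 MB, 3) = 32 MB, aligned and clamped to remaining_mmio, which shrinks as each bridge is handled; the I/O and prefetchable MMIO windows are split in the same way.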
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 01fe8e0455a04..dd50371225bcc 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -337,6 +337,7 @@ config PINCTRL_OCELOT
select GENERIC_PINMUX_FUNCTIONS
select REGMAP_MMIO
+source "drivers/pinctrl/actions/Kconfig"
source "drivers/pinctrl/aspeed/Kconfig"
source "drivers/pinctrl/bcm/Kconfig"
source "drivers/pinctrl/berlin/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 657332b121fb7..de40863e72978 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
+obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
obj-y += bcm/
obj-$(CONFIG_PINCTRL_BERLIN) += berlin/
diff --git a/drivers/pinctrl/actions/Kconfig b/drivers/pinctrl/actions/Kconfig
new file mode 100644
index 0000000000000..490927b4ea76d
--- /dev/null
+++ b/drivers/pinctrl/actions/Kconfig
@@ -0,0 +1,15 @@
+config PINCTRL_OWL
+ bool "Actions Semi OWL pinctrl driver"
+ depends on (ARCH_ACTIONS || COMPILE_TEST) && OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ help
+ Say Y here to enable Actions Semi OWL pinctrl driver
+
+config PINCTRL_S900
+ bool "Actions Semi S900 pinctrl driver"
+ depends on PINCTRL_OWL
+ help
+ Say Y here to enable Actions Semi S900 pinctrl driver
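For example, a config fragment enabling this driver on an Actions Semi platform would set both CONFIG_PINCTRL_OWL=y and CONFIG_PINCTRL_S900=y, since PINCTRL_S900 depends on the OWL core option above.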
diff --git a/drivers/pinctrl/actions/Makefile b/drivers/pinctrl/actions/Makefile
new file mode 100644
index 0000000000000..bd232d28400fc
--- /dev/null
+++ b/drivers/pinctrl/actions/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PINCTRL_OWL) += pinctrl-owl.o
+obj-$(CONFIG_PINCTRL_S900) += pinctrl-s900.o
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
new file mode 100644
index 0000000000000..76243caa08c63
--- /dev/null
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * OWL SoC's Pinctrl driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-owl.h"
+
+/**
+ * struct owl_pinctrl - pinctrl state of the device
+ * @dev: device handle
+ * @pctrldev: pinctrl handle
+ * @chip: gpio chip
+ * @lock: spinlock to protect registers
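+ * @clk: clock feeding the pin controller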
+ * @soc: reference to soc_data
+ * @base: pinctrl register base address
+ */
+struct owl_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctrldev;
+ struct gpio_chip chip;
+ raw_spinlock_t lock;
+ struct clk *clk;
+ const struct owl_pinctrl_soc_data *soc;
+ void __iomem *base;
+};
+
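+/* Read-modify-write helper: update only the register bits selected by mask */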
+static void owl_update_bits(void __iomem *base, u32 mask, u32 val)
+{
+ u32 reg_val;
+
+ reg_val = readl_relaxed(base);
+
+ reg_val = (reg_val & ~mask) | (val & mask);
+
+ writel_relaxed(reg_val, base);
+}
+
+static u32 owl_read_field(struct owl_pinctrl *pctrl, u32 reg,
+ u32 bit, u32 width)
+{
+ u32 tmp, mask;
+
+ tmp = readl_relaxed(pctrl->base + reg);
+ mask = (1 << width) - 1;
+
+ return (tmp >> bit) & mask;
+}
+
+static void owl_write_field(struct owl_pinctrl *pctrl, u32 reg, u32 arg,
+ u32 bit, u32 width)
+{
+ u32 mask;
+
+ mask = (1 << width) - 1;
+ mask = mask << bit;
+
+ owl_update_bits(pctrl->base + reg, mask, (arg << bit));
+}
+
+static int owl_get_groups_count(struct pinctrl_dev *pctrldev)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ return pctrl->soc->ngroups;
+}
+
+static const char *owl_get_group_name(struct pinctrl_dev *pctrldev,
+ unsigned int group)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ return pctrl->soc->groups[group].name;
+}
+
+static int owl_get_group_pins(struct pinctrl_dev *pctrldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ *pins = pctrl->soc->groups[group].pads;
+ *num_pins = pctrl->soc->groups[group].npads;
+
+ return 0;
+}
+
+static void owl_pin_dbg_show(struct pinctrl_dev *pctrldev,
+ struct seq_file *s,
+ unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ seq_printf(s, "%s", dev_name(pctrl->dev));
+}
+
+static struct pinctrl_ops owl_pinctrl_ops = {
+ .get_groups_count = owl_get_groups_count,
+ .get_group_name = owl_get_group_name,
+ .get_group_pins = owl_get_group_pins,
+ .pin_dbg_show = owl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int owl_get_funcs_count(struct pinctrl_dev *pctrldev)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ return pctrl->soc->nfunctions;
+}
+
+static const char *owl_get_func_name(struct pinctrl_dev *pctrldev,
+ unsigned int function)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ return pctrl->soc->functions[function].name;
+}
+
+static int owl_get_func_groups(struct pinctrl_dev *pctrldev,
+ unsigned int function,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+
+ *groups = pctrl->soc->functions[function].groups;
+ *num_groups = pctrl->soc->functions[function].ngroups;
+
+ return 0;
+}
+
+static inline int get_group_mfp_mask_val(const struct owl_pingroup *g,
+ int function,
+ u32 *mask,
+ u32 *val)
+{
+ int id;
+ u32 option_num;
+ u32 option_mask;
+
+ for (id = 0; id < g->nfuncs; id++) {
+ if (g->funcs[id] == function)
+ break;
+ }
+ if (WARN_ON(id == g->nfuncs))
+ return -EINVAL;
+
+ option_num = (1 << g->mfpctl_width);
+ if (id > option_num)
+ id -= option_num;
+
+ option_mask = option_num - 1;
+ *mask = (option_mask << g->mfpctl_shift);
+ *val = (id << g->mfpctl_shift);
+
+ return 0;
+}
+
+static int owl_set_mux(struct pinctrl_dev *pctrldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ const struct owl_pingroup *g;
+ unsigned long flags;
+ u32 val, mask;
+
+ g = &pctrl->soc->groups[group];
+
+ if (get_group_mfp_mask_val(g, function, &mask, &val))
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_update_bits(pctrl->base + g->mfpctl_reg, mask, val);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static struct pinmux_ops owl_pinmux_ops = {
+ .get_functions_count = owl_get_funcs_count,
+ .get_function_name = owl_get_func_name,
+ .get_function_groups = owl_get_func_groups,
+ .set_mux = owl_set_mux,
+};
+
+static int owl_pad_pinconf_reg(const struct owl_padinfo *info,
+ unsigned int param,
+ u32 *reg,
+ u32 *bit,
+ u32 *width)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (!info->pullctl)
+ return -EINVAL;
+ *reg = info->pullctl->reg;
+ *bit = info->pullctl->shift;
+ *width = info->pullctl->width;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (!info->st)
+ return -EINVAL;
+ *reg = info->st->reg;
+ *bit = info->st->shift;
+ *width = info->st->width;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_pad_pinconf_arg2val(const struct owl_padinfo *info,
+ unsigned int param,
+ u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ *arg = OWL_PINCONF_PULL_HOLD;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ *arg = OWL_PINCONF_PULL_HIZ;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *arg = OWL_PINCONF_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ *arg = OWL_PINCONF_PULL_UP;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ *arg = (*arg >= 1 ? 1 : 0);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_pad_pinconf_val2arg(const struct owl_padinfo *padinfo,
+ unsigned int param,
+ u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ *arg = *arg == OWL_PINCONF_PULL_HOLD;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ *arg = *arg == OWL_PINCONF_PULL_HIZ;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *arg = *arg == OWL_PINCONF_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ *arg = *arg == OWL_PINCONF_PULL_UP;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ *arg = *arg == 1;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_pin_config_get(struct pinctrl_dev *pctrldev,
+ unsigned int pin,
+ unsigned long *config)
+{
+ int ret = 0;
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ const struct owl_padinfo *info;
+ unsigned int param = pinconf_to_config_param(*config);
+ u32 reg, bit, width, arg;
+
+ info = &pctrl->soc->padinfo[pin];
+
+ ret = owl_pad_pinconf_reg(info, param, &reg, &bit, &width);
+ if (ret)
+ return ret;
+
+ arg = owl_read_field(pctrl, reg, bit, width);
+
+ ret = owl_pad_pinconf_val2arg(info, param, &arg);
+ if (ret)
+ return ret;
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return ret;
+}
+
+static int owl_pin_config_set(struct pinctrl_dev *pctrldev,
+ unsigned int pin,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ const struct owl_padinfo *info;
+ unsigned long flags;
+ unsigned int param;
+ u32 reg, bit, width, arg;
+ int ret, i;
+
+ info = &pctrl->soc->padinfo[pin];
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ ret = owl_pad_pinconf_reg(info, param, &reg, &bit, &width);
+ if (ret)
+ return ret;
+
+ ret = owl_pad_pinconf_arg2val(info, param, &arg);
+ if (ret)
+ return ret;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_write_field(pctrl, reg, arg, bit, width);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ return ret;
+}
+
+static int owl_group_pinconf_reg(const struct owl_pingroup *g,
+ unsigned int param,
+ u32 *reg,
+ u32 *bit,
+ u32 *width)
+{
+ switch (param) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ if (g->drv_reg < 0)
+ return -EINVAL;
+ *reg = g->drv_reg;
+ *bit = g->drv_shift;
+ *width = g->drv_width;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (g->sr_reg < 0)
+ return -EINVAL;
+ *reg = g->sr_reg;
+ *bit = g->sr_shift;
+ *width = g->sr_width;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_group_pinconf_arg2val(const struct owl_pingroup *g,
+ unsigned int param,
+ u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ switch (*arg) {
+ case 2:
+ *arg = OWL_PINCONF_DRV_2MA;
+ break;
+ case 4:
+ *arg = OWL_PINCONF_DRV_4MA;
+ break;
+ case 8:
+ *arg = OWL_PINCONF_DRV_8MA;
+ break;
+ case 12:
+ *arg = OWL_PINCONF_DRV_12MA;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (*arg)
+ *arg = OWL_PINCONF_SLEW_FAST;
+ else
+ *arg = OWL_PINCONF_SLEW_SLOW;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_group_pinconf_val2arg(const struct owl_pingroup *g,
+ unsigned int param,
+ u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ switch (*arg) {
+ case OWL_PINCONF_DRV_2MA:
+ *arg = 2;
+ break;
+ case OWL_PINCONF_DRV_4MA:
+ *arg = 4;
+ break;
+ case OWL_PINCONF_DRV_8MA:
+ *arg = 8;
+ break;
+ case OWL_PINCONF_DRV_12MA:
+ *arg = 12;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (*arg)
+ *arg = 1;
+ else
+ *arg = 0;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int owl_group_config_get(struct pinctrl_dev *pctrldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const struct owl_pingroup *g;
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ unsigned int param = pinconf_to_config_param(*config);
+ u32 reg, bit, width, arg;
+ int ret;
+
+ g = &pctrl->soc->groups[group];
+
+ ret = owl_group_pinconf_reg(g, param, &reg, &bit, &width);
+ if (ret)
+ return ret;
+
+ arg = owl_read_field(pctrl, reg, bit, width);
+
+ ret = owl_group_pinconf_val2arg(g, param, &arg);
+ if (ret)
+ return ret;
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return ret;
+
+}
+
+static int owl_group_config_set(struct pinctrl_dev *pctrldev,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ const struct owl_pingroup *g;
+ struct owl_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrldev);
+ unsigned long flags;
+ unsigned int param;
+ u32 reg, bit, width, arg;
+ int ret, i;
+
+ g = &pctrl->soc->groups[group];
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ ret = owl_group_pinconf_reg(g, param, &reg, &bit, &width);
+ if (ret)
+ return ret;
+
+ ret = owl_group_pinconf_arg2val(g, param, &arg);
+ if (ret)
+ return ret;
+
+ /* Update register */
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ owl_write_field(pctrl, reg, arg, bit, width);
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops owl_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = owl_pin_config_get,
+ .pin_config_set = owl_pin_config_set,
+ .pin_config_group_get = owl_group_config_get,
+ .pin_config_group_set = owl_group_config_set,
+};
+
+static struct pinctrl_desc owl_pinctrl_desc = {
+ .pctlops = &owl_pinctrl_ops,
+ .pmxops = &owl_pinmux_ops,
+ .confops = &owl_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
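+/*
+ * Translate a global GPIO number into its port and a port-relative offset.
+ * The pin number passed by the caller is rewritten in place to the offset
+ * within the returned port, so callers can index the port registers directly.
+ */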
+static const struct owl_gpio_port *
+owl_gpio_get_port(struct owl_pinctrl *pctrl, unsigned int *pin)
+{
+ unsigned int start = 0, i;
+
+ for (i = 0; i < pctrl->soc->nports; i++) {
+ const struct owl_gpio_port *port = &pctrl->soc->ports[i];
+
+ if (*pin >= start && *pin < start + port->pins) {
+ *pin -= start;
+ return port;
+ }
+
+ start += port->pins;
+ }
+
+ return NULL;
+}
+
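+/* Read-modify-write a single bit of a GPIO register; callers hold pctrl->lock. */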
+static void owl_gpio_update_reg(void __iomem *base, unsigned int pin, int flag)
+{
+ u32 val;
+
+ val = readl_relaxed(base);
+
+ if (flag)
+ val |= BIT(pin);
+ else
+ val &= ~BIT(pin);
+
+ writel_relaxed(val, base);
+}
+
+static int owl_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return -ENODEV;
+
+ gpio_base = pctrl->base + port->offset;
+
+	/*
+	 * GPIO has priority over the other pad functions, so enabling
+	 * either the output or the input path is sufficient to claim the
+	 * pad as a GPIO.
+	 */
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ owl_gpio_update_reg(gpio_base + port->outen, offset, true);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static void owl_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ /* disable gpio output */
+ owl_gpio_update_reg(gpio_base + port->outen, offset, false);
+
+ /* disable gpio input */
+ owl_gpio_update_reg(gpio_base + port->inen, offset, false);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int owl_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+ u32 val;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return -ENODEV;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ val = readl_relaxed(gpio_base + port->dat);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return !!(val & BIT(offset));
+}
+
+static void owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ owl_gpio_update_reg(gpio_base + port->dat, offset, value);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int owl_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return -ENODEV;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ owl_gpio_update_reg(gpio_base + port->outen, offset, false);
+ owl_gpio_update_reg(gpio_base + port->inen, offset, true);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int owl_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct owl_gpio_port *port;
+ void __iomem *gpio_base;
+ unsigned long flags;
+
+ port = owl_gpio_get_port(pctrl, &offset);
+ if (WARN_ON(port == NULL))
+ return -ENODEV;
+
+ gpio_base = pctrl->base + port->offset;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+ owl_gpio_update_reg(gpio_base + port->inen, offset, false);
+ owl_gpio_update_reg(gpio_base + port->outen, offset, true);
+ owl_gpio_update_reg(gpio_base + port->dat, offset, value);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
+static int owl_gpio_init(struct owl_pinctrl *pctrl)
+{
+ struct gpio_chip *chip;
+ int ret;
+
+ chip = &pctrl->chip;
+ chip->base = -1;
+ chip->ngpio = pctrl->soc->ngpios;
+ chip->label = dev_name(pctrl->dev);
+ chip->parent = pctrl->dev;
+ chip->owner = THIS_MODULE;
+ chip->of_node = pctrl->dev->of_node;
+
+ ret = gpiochip_add_data(&pctrl->chip, pctrl);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to register gpiochip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
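+/*
+ * Common probe helper shared by the SoC-specific drivers: map the MMIO
+ * region, enable the GPIO/MFP clock, then register the pinctrl device and
+ * the GPIO chip described by @soc_data.
+ */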
+int owl_pinctrl_probe(struct platform_device *pdev,
+ struct owl_pinctrl_soc_data *soc_data)
+{
+ struct resource *res;
+ struct owl_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->base))
+ return PTR_ERR(pctrl->base);
+
+ /* enable GPIO/MFP clock */
+ pctrl->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pctrl->clk)) {
+ dev_err(&pdev->dev, "no clock defined\n");
+ return PTR_ERR(pctrl->clk);
+ }
+
+ ret = clk_prepare_enable(pctrl->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk enable failed\n");
+ return ret;
+ }
+
+ raw_spin_lock_init(&pctrl->lock);
+
+ owl_pinctrl_desc.name = dev_name(&pdev->dev);
+ owl_pinctrl_desc.pins = soc_data->pins;
+ owl_pinctrl_desc.npins = soc_data->npins;
+
+ pctrl->chip.direction_input = owl_gpio_direction_input;
+ pctrl->chip.direction_output = owl_gpio_direction_output;
+ pctrl->chip.get = owl_gpio_get;
+ pctrl->chip.set = owl_gpio_set;
+ pctrl->chip.request = owl_gpio_request;
+ pctrl->chip.free = owl_gpio_free;
+
+ pctrl->soc = soc_data;
+ pctrl->dev = &pdev->dev;
+
+ pctrl->pctrldev = devm_pinctrl_register(&pdev->dev,
+ &owl_pinctrl_desc, pctrl);
+ if (IS_ERR(pctrl->pctrldev)) {
+ dev_err(&pdev->dev, "could not register Actions OWL pinmux driver\n");
+ return PTR_ERR(pctrl->pctrldev);
+ }
+
+ ret = owl_gpio_init(pctrl);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pctrl);
+
+ return 0;
+}
diff --git a/drivers/pinctrl/actions/pinctrl-owl.h b/drivers/pinctrl/actions/pinctrl-owl.h
new file mode 100644
index 0000000000000..74342378937c4
--- /dev/null
+++ b/drivers/pinctrl/actions/pinctrl-owl.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * OWL SoC's Pinctrl definitions
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef __PINCTRL_OWL_H__
+#define __PINCTRL_OWL_H__
+
+#define OWL_PINCONF_SLEW_SLOW 0
+#define OWL_PINCONF_SLEW_FAST 1
+
+enum owl_pinconf_pull {
+ OWL_PINCONF_PULL_HIZ,
+ OWL_PINCONF_PULL_DOWN,
+ OWL_PINCONF_PULL_UP,
+ OWL_PINCONF_PULL_HOLD,
+};
+
+enum owl_pinconf_drv {
+ OWL_PINCONF_DRV_2MA,
+ OWL_PINCONF_DRV_4MA,
+ OWL_PINCONF_DRV_8MA,
+ OWL_PINCONF_DRV_12MA,
+};
+
+/**
+ * struct owl_pullctl - Actions pad pull control register
+ * @reg: offset of the pull control register
+ * @shift: bit shift of the pad's field within the register
+ * @width: bit width of the pad's field within the register
+ */
+struct owl_pullctl {
+ int reg;
+ unsigned int shift;
+ unsigned int width;
+};
+
+/**
+ * struct owl_st - Actions pad Schmitt trigger enable register
+ * @reg: offset of the Schmitt trigger enable register
+ * @shift: bit shift of the pad's field within the register
+ * @width: bit width of the pad's field within the register
+ */
+struct owl_st {
+ int reg;
+ unsigned int shift;
+ unsigned int width;
+};
+
+/**
+ * struct owl_pingroup - Actions pingroup definition
+ * @name: name of the pin group
+ * @pads: list of pins assigned to this pingroup
+ * @npads: size of @pads array
+ * @funcs: list of pinmux functions for this pingroup
+ * @nfuncs: size of @funcs array
+ * @mfpctl_reg: multiplexing control register offset
+ * @mfpctl_shift: multiplexing control bit field shift
+ * @mfpctl_width: multiplexing control bit field width
+ * @drv_reg: drive strength control register offset
+ * @drv_shift: drive strength control bit field shift
+ * @drv_width: drive strength control bit field width
+ * @sr_reg: slew rate control register offset
+ * @sr_shift: slew rate control bit field shift
+ * @sr_width: slew rate control bit field width
+ */
+struct owl_pingroup {
+ const char *name;
+ unsigned int *pads;
+ unsigned int npads;
+ unsigned int *funcs;
+ unsigned int nfuncs;
+
+ int mfpctl_reg;
+ unsigned int mfpctl_shift;
+ unsigned int mfpctl_width;
+
+ int drv_reg;
+ unsigned int drv_shift;
+ unsigned int drv_width;
+
+ int sr_reg;
+ unsigned int sr_shift;
+ unsigned int sr_width;
+};
+
+/**
+ * struct owl_padinfo - Actions pinctrl pad info
+ * @pad: pad number of the SoC
+ * @pullctl: pull control register info
+ * @st: schmitt trigger register info
+ */
+struct owl_padinfo {
+ int pad;
+ struct owl_pullctl *pullctl;
+ struct owl_st *st;
+};
+
+/**
+ * struct owl_pinmux_func - Actions pinctrl mux functions
+ * @name: name of the pinmux function.
+ * @groups: array of pin groups that may select this function.
+ * @ngroups: number of entries in @groups.
+ */
+struct owl_pinmux_func {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct owl_gpio_port - Actions GPIO port info
+ * @offset: register offset of the GPIO port.
+ * @pins: number of pins belonging to the GPIO port.
+ * @outen: offset of the output enable register.
+ * @inen: offset of the input enable register.
+ * @dat: offset of the data register.
+ */
+struct owl_gpio_port {
+ unsigned int offset;
+ unsigned int pins;
+ unsigned int outen;
+ unsigned int inen;
+ unsigned int dat;
+};
+
+/**
+ * struct owl_pinctrl_soc_data - Actions pin controller driver configuration
+ * @pins: array describing all pins of the pin controller.
+ * @npins: number of entries in @pins.
+ * @functions: array describing all mux functions of this SoC.
+ * @nfunctions: number of entries in @functions.
+ * @groups: array describing all pin groups of this SoC.
+ * @ngroups: number of entries in @groups.
+ * @padinfo: array describing the pad info of this SoC.
+ * @ngpios: number of pins the driver should expose as GPIOs.
+ * @ports: array describing all GPIO ports of this SoC.
+ * @nports: number of GPIO ports in this SoC.
+ */
+struct owl_pinctrl_soc_data {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ const struct owl_pinmux_func *functions;
+ unsigned int nfunctions;
+ const struct owl_pingroup *groups;
+ unsigned int ngroups;
+ const struct owl_padinfo *padinfo;
+ unsigned int ngpios;
+ const struct owl_gpio_port *ports;
+ unsigned int nports;
+};
+
+int owl_pinctrl_probe(struct platform_device *pdev,
+ struct owl_pinctrl_soc_data *soc_data);
+
+#endif /* __PINCTRL_OWL_H__ */
diff --git a/drivers/pinctrl/actions/pinctrl-s900.c b/drivers/pinctrl/actions/pinctrl-s900.c
new file mode 100644
index 0000000000000..5503c79457647
--- /dev/null
+++ b/drivers/pinctrl/actions/pinctrl-s900.c
@@ -0,0 +1,1888 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * OWL S900 Pinctrl driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-owl.h"
+
+/* Pinctrl register offsets */
+#define MFCTL0 (0x0040)
+#define MFCTL1 (0x0044)
+#define MFCTL2 (0x0048)
+#define MFCTL3 (0x004C)
+#define PAD_PULLCTL0 (0x0060)
+#define PAD_PULLCTL1 (0x0064)
+#define PAD_PULLCTL2 (0x0068)
+#define PAD_ST0 (0x006C)
+#define PAD_ST1 (0x0070)
+#define PAD_CTL (0x0074)
+#define PAD_DRV0 (0x0080)
+#define PAD_DRV1 (0x0084)
+#define PAD_DRV2 (0x0088)
+#define PAD_SR0 (0x0270)
+#define PAD_SR1 (0x0274)
+#define PAD_SR2 (0x0278)
+
+#define OWL_GPIO_PORT_A 0
+#define OWL_GPIO_PORT_B 1
+#define OWL_GPIO_PORT_C 2
+#define OWL_GPIO_PORT_D 3
+#define OWL_GPIO_PORT_E 4
+#define OWL_GPIO_PORT_F 5
+
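+/*
+ * Per-port bases for the global GPIO numbering below. The ports are not all
+ * 32 lines wide: A, B and E have 32, C has 12, D has 30 and F has 8, giving
+ * NUM_GPIOS = 146.
+ */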
+#define _GPIOA(offset) (offset)
+#define _GPIOB(offset) (32 + (offset))
+#define _GPIOC(offset) (64 + (offset))
+#define _GPIOD(offset) (76 + (offset))
+#define _GPIOE(offset) (106 + (offset))
+#define _GPIOF(offset) (138 + (offset))
+
+#define NUM_GPIOS (_GPIOF(7) + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
+
+#define ETH_TXD0 _GPIOA(0)
+#define ETH_TXD1 _GPIOA(1)
+#define ETH_TXEN _GPIOA(2)
+#define ETH_RXER _GPIOA(3)
+#define ETH_CRS_DV _GPIOA(4)
+#define ETH_RXD1 _GPIOA(5)
+#define ETH_RXD0 _GPIOA(6)
+#define ETH_REF_CLK _GPIOA(7)
+#define ETH_MDC _GPIOA(8)
+#define ETH_MDIO _GPIOA(9)
+#define SIRQ0 _GPIOA(10)
+#define SIRQ1 _GPIOA(11)
+#define SIRQ2 _GPIOA(12)
+#define I2S_D0 _GPIOA(13)
+#define I2S_BCLK0 _GPIOA(14)
+#define I2S_LRCLK0 _GPIOA(15)
+#define I2S_MCLK0 _GPIOA(16)
+#define I2S_D1 _GPIOA(17)
+#define I2S_BCLK1 _GPIOA(18)
+#define I2S_LRCLK1 _GPIOA(19)
+#define I2S_MCLK1 _GPIOA(20)
+#define ERAM_A5 _GPIOA(21)
+#define ERAM_A6 _GPIOA(22)
+#define ERAM_A7 _GPIOA(23)
+#define ERAM_A8 _GPIOA(24)
+#define ERAM_A9 _GPIOA(25)
+#define ERAM_A10 _GPIOA(26)
+#define ERAM_A11 _GPIOA(27)
+#define SD0_D0 _GPIOA(28)
+#define SD0_D1 _GPIOA(29)
+#define SD0_D2 _GPIOA(30)
+#define SD0_D3 _GPIOA(31)
+
+#define SD1_D0 _GPIOB(0)
+#define SD1_D1 _GPIOB(1)
+#define SD1_D2 _GPIOB(2)
+#define SD1_D3 _GPIOB(3)
+#define SD0_CMD _GPIOB(4)
+#define SD0_CLK _GPIOB(5)
+#define SD1_CMD _GPIOB(6)
+#define SD1_CLK _GPIOB(7)
+#define SPI0_SCLK _GPIOB(8)
+#define SPI0_SS _GPIOB(9)
+#define SPI0_MISO _GPIOB(10)
+#define SPI0_MOSI _GPIOB(11)
+#define UART0_RX _GPIOB(12)
+#define UART0_TX _GPIOB(13)
+#define UART2_RX _GPIOB(14)
+#define UART2_TX _GPIOB(15)
+#define UART2_RTSB _GPIOB(16)
+#define UART2_CTSB _GPIOB(17)
+#define UART4_RX _GPIOB(18)
+#define UART4_TX _GPIOB(19)
+#define I2C0_SCLK _GPIOB(20)
+#define I2C0_SDATA _GPIOB(21)
+#define I2C1_SCLK _GPIOB(22)
+#define I2C1_SDATA _GPIOB(23)
+#define I2C2_SCLK _GPIOB(24)
+#define I2C2_SDATA _GPIOB(25)
+#define CSI0_DN0 _GPIOB(26)
+#define CSI0_DP0 _GPIOB(27)
+#define CSI0_DN1 _GPIOB(28)
+#define CSI0_DP1 _GPIOB(29)
+#define CSI0_CN _GPIOB(30)
+#define CSI0_CP _GPIOB(31)
+
+#define CSI0_DN2 _GPIOC(0)
+#define CSI0_DP2 _GPIOC(1)
+#define CSI0_DN3 _GPIOC(2)
+#define CSI0_DP3 _GPIOC(3)
+#define SENSOR0_PCLK _GPIOC(4)
+#define CSI1_DN0 _GPIOC(5)
+#define CSI1_DP0 _GPIOC(6)
+#define CSI1_DN1 _GPIOC(7)
+#define CSI1_DP1 _GPIOC(8)
+#define CSI1_CN _GPIOC(9)
+#define CSI1_CP _GPIOC(10)
+#define SENSOR0_CKOUT _GPIOC(11)
+
+#define LVDS_OEP _GPIOD(0)
+#define LVDS_OEN _GPIOD(1)
+#define LVDS_ODP _GPIOD(2)
+#define LVDS_ODN _GPIOD(3)
+#define LVDS_OCP _GPIOD(4)
+#define LVDS_OCN _GPIOD(5)
+#define LVDS_OBP _GPIOD(6)
+#define LVDS_OBN _GPIOD(7)
+#define LVDS_OAP _GPIOD(8)
+#define LVDS_OAN _GPIOD(9)
+#define LVDS_EEP _GPIOD(10)
+#define LVDS_EEN _GPIOD(11)
+#define LVDS_EDP _GPIOD(12)
+#define LVDS_EDN _GPIOD(13)
+#define LVDS_ECP _GPIOD(14)
+#define LVDS_ECN _GPIOD(15)
+#define LVDS_EBP _GPIOD(16)
+#define LVDS_EBN _GPIOD(17)
+#define LVDS_EAP _GPIOD(18)
+#define LVDS_EAN _GPIOD(19)
+#define DSI_DP3 _GPIOD(20)
+#define DSI_DN3 _GPIOD(21)
+#define DSI_DP1 _GPIOD(22)
+#define DSI_DN1 _GPIOD(23)
+#define DSI_CP _GPIOD(24)
+#define DSI_CN _GPIOD(25)
+#define DSI_DP0 _GPIOD(26)
+#define DSI_DN0 _GPIOD(27)
+#define DSI_DP2 _GPIOD(28)
+#define DSI_DN2 _GPIOD(29)
+
+#define NAND0_D0 _GPIOE(0)
+#define NAND0_D1 _GPIOE(1)
+#define NAND0_D2 _GPIOE(2)
+#define NAND0_D3 _GPIOE(3)
+#define NAND0_D4 _GPIOE(4)
+#define NAND0_D5 _GPIOE(5)
+#define NAND0_D6 _GPIOE(6)
+#define NAND0_D7 _GPIOE(7)
+#define NAND0_DQS _GPIOE(8)
+#define NAND0_DQSN _GPIOE(9)
+#define NAND0_ALE _GPIOE(10)
+#define NAND0_CLE _GPIOE(11)
+#define NAND0_CEB0 _GPIOE(12)
+#define NAND0_CEB1 _GPIOE(13)
+#define NAND0_CEB2 _GPIOE(14)
+#define NAND0_CEB3 _GPIOE(15)
+#define NAND1_D0 _GPIOE(16)
+#define NAND1_D1 _GPIOE(17)
+#define NAND1_D2 _GPIOE(18)
+#define NAND1_D3 _GPIOE(19)
+#define NAND1_D4 _GPIOE(20)
+#define NAND1_D5 _GPIOE(21)
+#define NAND1_D6 _GPIOE(22)
+#define NAND1_D7 _GPIOE(23)
+#define NAND1_DQS _GPIOE(24)
+#define NAND1_DQSN _GPIOE(25)
+#define NAND1_ALE _GPIOE(26)
+#define NAND1_CLE _GPIOE(27)
+#define NAND1_CEB0 _GPIOE(28)
+#define NAND1_CEB1 _GPIOE(29)
+#define NAND1_CEB2 _GPIOE(30)
+#define NAND1_CEB3 _GPIOE(31)
+
+#define PCM1_IN _GPIOF(0)
+#define PCM1_CLK _GPIOF(1)
+#define PCM1_SYNC _GPIOF(2)
+#define PCM1_OUT _GPIOF(3)
+#define UART3_RX _GPIOF(4)
+#define UART3_TX _GPIOF(5)
+#define UART3_RTSB _GPIOF(6)
+#define UART3_CTSB _GPIOF(7)
+
+/* System */
+#define SGPIO0 _PIN(0)
+#define SGPIO1 _PIN(1)
+#define SGPIO2 _PIN(2)
+#define SGPIO3 _PIN(3)
+
+#define NUM_PADS (_PIN(3) + 1)
+
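+/*
+ * The SGPIO pads are placed after the GPIO-capable pads by _PIN() above;
+ * within this driver they only appear in drive-strength and slew-rate
+ * groups, not in any MFP mux group.
+ */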
+/* Pad names as specified in datasheet */
+static const struct pinctrl_pin_desc s900_pads[] = {
+ PINCTRL_PIN(ETH_TXD0, "eth_txd0"),
+ PINCTRL_PIN(ETH_TXD1, "eth_txd1"),
+ PINCTRL_PIN(ETH_TXEN, "eth_txen"),
+ PINCTRL_PIN(ETH_RXER, "eth_rxer"),
+ PINCTRL_PIN(ETH_CRS_DV, "eth_crs_dv"),
+ PINCTRL_PIN(ETH_RXD1, "eth_rxd1"),
+ PINCTRL_PIN(ETH_RXD0, "eth_rxd0"),
+ PINCTRL_PIN(ETH_REF_CLK, "eth_ref_clk"),
+ PINCTRL_PIN(ETH_MDC, "eth_mdc"),
+ PINCTRL_PIN(ETH_MDIO, "eth_mdio"),
+ PINCTRL_PIN(SIRQ0, "sirq0"),
+ PINCTRL_PIN(SIRQ1, "sirq1"),
+ PINCTRL_PIN(SIRQ2, "sirq2"),
+ PINCTRL_PIN(I2S_D0, "i2s_d0"),
+ PINCTRL_PIN(I2S_BCLK0, "i2s_bclk0"),
+ PINCTRL_PIN(I2S_LRCLK0, "i2s_lrclk0"),
+ PINCTRL_PIN(I2S_MCLK0, "i2s_mclk0"),
+ PINCTRL_PIN(I2S_D1, "i2s_d1"),
+ PINCTRL_PIN(I2S_BCLK1, "i2s_bclk1"),
+ PINCTRL_PIN(I2S_LRCLK1, "i2s_lrclk1"),
+ PINCTRL_PIN(I2S_MCLK1, "i2s_mclk1"),
+ PINCTRL_PIN(PCM1_IN, "pcm1_in"),
+ PINCTRL_PIN(PCM1_CLK, "pcm1_clk"),
+ PINCTRL_PIN(PCM1_SYNC, "pcm1_sync"),
+ PINCTRL_PIN(PCM1_OUT, "pcm1_out"),
+ PINCTRL_PIN(ERAM_A5, "eram_a5"),
+ PINCTRL_PIN(ERAM_A6, "eram_a6"),
+ PINCTRL_PIN(ERAM_A7, "eram_a7"),
+ PINCTRL_PIN(ERAM_A8, "eram_a8"),
+ PINCTRL_PIN(ERAM_A9, "eram_a9"),
+ PINCTRL_PIN(ERAM_A10, "eram_a10"),
+ PINCTRL_PIN(ERAM_A11, "eram_a11"),
+ PINCTRL_PIN(LVDS_OEP, "lvds_oep"),
+ PINCTRL_PIN(LVDS_OEN, "lvds_oen"),
+ PINCTRL_PIN(LVDS_ODP, "lvds_odp"),
+ PINCTRL_PIN(LVDS_ODN, "lvds_odn"),
+ PINCTRL_PIN(LVDS_OCP, "lvds_ocp"),
+ PINCTRL_PIN(LVDS_OCN, "lvds_ocn"),
+ PINCTRL_PIN(LVDS_OBP, "lvds_obp"),
+ PINCTRL_PIN(LVDS_OBN, "lvds_obn"),
+ PINCTRL_PIN(LVDS_OAP, "lvds_oap"),
+ PINCTRL_PIN(LVDS_OAN, "lvds_oan"),
+ PINCTRL_PIN(LVDS_EEP, "lvds_eep"),
+ PINCTRL_PIN(LVDS_EEN, "lvds_een"),
+ PINCTRL_PIN(LVDS_EDP, "lvds_edp"),
+ PINCTRL_PIN(LVDS_EDN, "lvds_edn"),
+ PINCTRL_PIN(LVDS_ECP, "lvds_ecp"),
+ PINCTRL_PIN(LVDS_ECN, "lvds_ecn"),
+ PINCTRL_PIN(LVDS_EBP, "lvds_ebp"),
+ PINCTRL_PIN(LVDS_EBN, "lvds_ebn"),
+ PINCTRL_PIN(LVDS_EAP, "lvds_eap"),
+ PINCTRL_PIN(LVDS_EAN, "lvds_ean"),
+ PINCTRL_PIN(SD0_D0, "sd0_d0"),
+ PINCTRL_PIN(SD0_D1, "sd0_d1"),
+ PINCTRL_PIN(SD0_D2, "sd0_d2"),
+ PINCTRL_PIN(SD0_D3, "sd0_d3"),
+ PINCTRL_PIN(SD1_D0, "sd1_d0"),
+ PINCTRL_PIN(SD1_D1, "sd1_d1"),
+ PINCTRL_PIN(SD1_D2, "sd1_d2"),
+ PINCTRL_PIN(SD1_D3, "sd1_d3"),
+ PINCTRL_PIN(SD0_CMD, "sd0_cmd"),
+ PINCTRL_PIN(SD0_CLK, "sd0_clk"),
+ PINCTRL_PIN(SD1_CMD, "sd1_cmd"),
+ PINCTRL_PIN(SD1_CLK, "sd1_clk"),
+ PINCTRL_PIN(SPI0_SCLK, "spi0_sclk"),
+ PINCTRL_PIN(SPI0_SS, "spi0_ss"),
+ PINCTRL_PIN(SPI0_MISO, "spi0_miso"),
+ PINCTRL_PIN(SPI0_MOSI, "spi0_mosi"),
+ PINCTRL_PIN(UART0_RX, "uart0_rx"),
+ PINCTRL_PIN(UART0_TX, "uart0_tx"),
+ PINCTRL_PIN(UART2_RX, "uart2_rx"),
+ PINCTRL_PIN(UART2_TX, "uart2_tx"),
+ PINCTRL_PIN(UART2_RTSB, "uart2_rtsb"),
+ PINCTRL_PIN(UART2_CTSB, "uart2_ctsb"),
+ PINCTRL_PIN(UART3_RX, "uart3_rx"),
+ PINCTRL_PIN(UART3_TX, "uart3_tx"),
+ PINCTRL_PIN(UART3_RTSB, "uart3_rtsb"),
+ PINCTRL_PIN(UART3_CTSB, "uart3_ctsb"),
+ PINCTRL_PIN(UART4_RX, "uart4_rx"),
+ PINCTRL_PIN(UART4_TX, "uart4_tx"),
+ PINCTRL_PIN(I2C0_SCLK, "i2c0_sclk"),
+ PINCTRL_PIN(I2C0_SDATA, "i2c0_sdata"),
+ PINCTRL_PIN(I2C1_SCLK, "i2c1_sclk"),
+ PINCTRL_PIN(I2C1_SDATA, "i2c1_sdata"),
+ PINCTRL_PIN(I2C2_SCLK, "i2c2_sclk"),
+ PINCTRL_PIN(I2C2_SDATA, "i2c2_sdata"),
+ PINCTRL_PIN(CSI0_DN0, "csi0_dn0"),
+ PINCTRL_PIN(CSI0_DP0, "csi0_dp0"),
+ PINCTRL_PIN(CSI0_DN1, "csi0_dn1"),
+ PINCTRL_PIN(CSI0_DP1, "csi0_dp1"),
+ PINCTRL_PIN(CSI0_CN, "csi0_cn"),
+ PINCTRL_PIN(CSI0_CP, "csi0_cp"),
+ PINCTRL_PIN(CSI0_DN2, "csi0_dn2"),
+ PINCTRL_PIN(CSI0_DP2, "csi0_dp2"),
+ PINCTRL_PIN(CSI0_DN3, "csi0_dn3"),
+ PINCTRL_PIN(CSI0_DP3, "csi0_dp3"),
+ PINCTRL_PIN(DSI_DP3, "dsi_dp3"),
+ PINCTRL_PIN(DSI_DN3, "dsi_dn3"),
+ PINCTRL_PIN(DSI_DP1, "dsi_dp1"),
+ PINCTRL_PIN(DSI_DN1, "dsi_dn1"),
+ PINCTRL_PIN(DSI_CP, "dsi_cp"),
+ PINCTRL_PIN(DSI_CN, "dsi_cn"),
+ PINCTRL_PIN(DSI_DP0, "dsi_dp0"),
+ PINCTRL_PIN(DSI_DN0, "dsi_dn0"),
+ PINCTRL_PIN(DSI_DP2, "dsi_dp2"),
+ PINCTRL_PIN(DSI_DN2, "dsi_dn2"),
+ PINCTRL_PIN(SENSOR0_PCLK, "sensor0_pclk"),
+ PINCTRL_PIN(CSI1_DN0, "csi1_dn0"),
+ PINCTRL_PIN(CSI1_DP0, "csi1_dp0"),
+ PINCTRL_PIN(CSI1_DN1, "csi1_dn1"),
+ PINCTRL_PIN(CSI1_DP1, "csi1_dp1"),
+ PINCTRL_PIN(CSI1_CN, "csi1_cn"),
+ PINCTRL_PIN(CSI1_CP, "csi1_cp"),
+ PINCTRL_PIN(SENSOR0_CKOUT, "sensor0_ckout"),
+ PINCTRL_PIN(NAND0_D0, "nand0_d0"),
+ PINCTRL_PIN(NAND0_D1, "nand0_d1"),
+ PINCTRL_PIN(NAND0_D2, "nand0_d2"),
+ PINCTRL_PIN(NAND0_D3, "nand0_d3"),
+ PINCTRL_PIN(NAND0_D4, "nand0_d4"),
+ PINCTRL_PIN(NAND0_D5, "nand0_d5"),
+ PINCTRL_PIN(NAND0_D6, "nand0_d6"),
+ PINCTRL_PIN(NAND0_D7, "nand0_d7"),
+ PINCTRL_PIN(NAND0_DQS, "nand0_dqs"),
+ PINCTRL_PIN(NAND0_DQSN, "nand0_dqsn"),
+ PINCTRL_PIN(NAND0_ALE, "nand0_ale"),
+ PINCTRL_PIN(NAND0_CLE, "nand0_cle"),
+ PINCTRL_PIN(NAND0_CEB0, "nand0_ceb0"),
+ PINCTRL_PIN(NAND0_CEB1, "nand0_ceb1"),
+ PINCTRL_PIN(NAND0_CEB2, "nand0_ceb2"),
+ PINCTRL_PIN(NAND0_CEB3, "nand0_ceb3"),
+ PINCTRL_PIN(NAND1_D0, "nand1_d0"),
+ PINCTRL_PIN(NAND1_D1, "nand1_d1"),
+ PINCTRL_PIN(NAND1_D2, "nand1_d2"),
+ PINCTRL_PIN(NAND1_D3, "nand1_d3"),
+ PINCTRL_PIN(NAND1_D4, "nand1_d4"),
+ PINCTRL_PIN(NAND1_D5, "nand1_d5"),
+ PINCTRL_PIN(NAND1_D6, "nand1_d6"),
+ PINCTRL_PIN(NAND1_D7, "nand1_d7"),
+ PINCTRL_PIN(NAND1_DQS, "nand1_dqs"),
+ PINCTRL_PIN(NAND1_DQSN, "nand1_dqsn"),
+ PINCTRL_PIN(NAND1_ALE, "nand1_ale"),
+ PINCTRL_PIN(NAND1_CLE, "nand1_cle"),
+ PINCTRL_PIN(NAND1_CEB0, "nand1_ceb0"),
+ PINCTRL_PIN(NAND1_CEB1, "nand1_ceb1"),
+ PINCTRL_PIN(NAND1_CEB2, "nand1_ceb2"),
+ PINCTRL_PIN(NAND1_CEB3, "nand1_ceb3"),
+ PINCTRL_PIN(SGPIO0, "sgpio0"),
+ PINCTRL_PIN(SGPIO1, "sgpio1"),
+ PINCTRL_PIN(SGPIO2, "sgpio2"),
+ PINCTRL_PIN(SGPIO3, "sgpio3")
+};
+
+enum s900_pinmux_functions {
+ S900_MUX_ERAM,
+ S900_MUX_ETH_RMII,
+ S900_MUX_ETH_SMII,
+ S900_MUX_SPI0,
+ S900_MUX_SPI1,
+ S900_MUX_SPI2,
+ S900_MUX_SPI3,
+ S900_MUX_SENS0,
+ S900_MUX_UART0,
+ S900_MUX_UART1,
+ S900_MUX_UART2,
+ S900_MUX_UART3,
+ S900_MUX_UART4,
+ S900_MUX_UART5,
+ S900_MUX_UART6,
+ S900_MUX_I2S0,
+ S900_MUX_I2S1,
+ S900_MUX_PCM0,
+ S900_MUX_PCM1,
+ S900_MUX_JTAG,
+ S900_MUX_PWM0,
+ S900_MUX_PWM1,
+ S900_MUX_PWM2,
+ S900_MUX_PWM3,
+ S900_MUX_PWM4,
+ S900_MUX_PWM5,
+ S900_MUX_SD0,
+ S900_MUX_SD1,
+ S900_MUX_SD2,
+ S900_MUX_SD3,
+ S900_MUX_I2C0,
+ S900_MUX_I2C1,
+ S900_MUX_I2C2,
+ S900_MUX_I2C3,
+ S900_MUX_I2C4,
+ S900_MUX_I2C5,
+ S900_MUX_LVDS,
+ S900_MUX_USB20,
+ S900_MUX_USB30,
+ S900_MUX_GPU,
+ S900_MUX_MIPI_CSI0,
+ S900_MUX_MIPI_CSI1,
+ S900_MUX_MIPI_DSI,
+ S900_MUX_NAND0,
+ S900_MUX_NAND1,
+ S900_MUX_SPDIF,
+ S900_MUX_SIRQ0,
+ S900_MUX_SIRQ1,
+ S900_MUX_SIRQ2,
+ S900_MUX_AUX_START,
+ S900_MUX_MAX,
+ S900_MUX_RESERVED
+};
+
+/* mfp0_22 */
+static unsigned int lvds_oxx_uart4_mfp_pads[] = { LVDS_OAP, LVDS_OAN };
+static unsigned int lvds_oxx_uart4_mfp_funcs[] = { S900_MUX_ERAM,
+ S900_MUX_UART4 };
+/* mfp0_21_20 */
+static unsigned int rmii_mdc_mfp_pads[] = { ETH_MDC };
+static unsigned int rmii_mdc_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_PWM2,
+ S900_MUX_UART2,
+ S900_MUX_RESERVED };
+static unsigned int rmii_mdio_mfp_pads[] = { ETH_MDIO };
+static unsigned int rmii_mdio_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_PWM3,
+ S900_MUX_UART2,
+ S900_MUX_RESERVED };
+/* mfp0_19 */
+static unsigned int sirq0_mfp_pads[] = { SIRQ0 };
+static unsigned int sirq0_mfp_funcs[] = { S900_MUX_SIRQ0,
+ S900_MUX_PWM0 };
+static unsigned int sirq1_mfp_pads[] = { SIRQ1 };
+static unsigned int sirq1_mfp_funcs[] = { S900_MUX_SIRQ1,
+ S900_MUX_PWM1 };
+/* mfp0_18_16 */
+static unsigned int rmii_txd0_mfp_pads[] = { ETH_TXD0 };
+static unsigned int rmii_txd0_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_ETH_SMII,
+ S900_MUX_SPI2,
+ S900_MUX_UART6,
+ S900_MUX_SENS0,
+ S900_MUX_PWM0 };
+static unsigned int rmii_txd1_mfp_pads[] = { ETH_TXD1 };
+static unsigned int rmii_txd1_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_ETH_SMII,
+ S900_MUX_SPI2,
+ S900_MUX_UART6,
+ S900_MUX_SENS0,
+ S900_MUX_PWM1 };
+/* mfp0_15_13 */
+static unsigned int rmii_txen_mfp_pads[] = { ETH_TXEN };
+static unsigned int rmii_txen_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART2,
+ S900_MUX_SPI3,
+ S900_MUX_RESERVED,
+ S900_MUX_RESERVED,
+ S900_MUX_PWM2,
+ S900_MUX_SENS0 };
+
+static unsigned int rmii_rxer_mfp_pads[] = { ETH_RXER };
+static unsigned int rmii_rxer_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART2,
+ S900_MUX_SPI3,
+ S900_MUX_RESERVED,
+ S900_MUX_RESERVED,
+ S900_MUX_PWM3,
+ S900_MUX_SENS0 };
+/* mfp0_12_11 */
+static unsigned int rmii_crs_dv_mfp_pads[] = { ETH_CRS_DV };
+static unsigned int rmii_crs_dv_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_ETH_SMII,
+ S900_MUX_SPI2,
+ S900_MUX_UART4 };
+/* mfp0_10_8 */
+static unsigned int rmii_rxd1_mfp_pads[] = { ETH_RXD1 };
+static unsigned int rmii_rxd1_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART2,
+ S900_MUX_SPI3,
+ S900_MUX_RESERVED,
+ S900_MUX_UART5,
+ S900_MUX_PWM0,
+ S900_MUX_SENS0 };
+static unsigned int rmii_rxd0_mfp_pads[] = { ETH_RXD0 };
+static unsigned int rmii_rxd0_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART2,
+ S900_MUX_SPI3,
+ S900_MUX_RESERVED,
+ S900_MUX_UART5,
+ S900_MUX_PWM1,
+ S900_MUX_SENS0 };
+/* mfp0_7_6 */
+static unsigned int rmii_ref_clk_mfp_pads[] = { ETH_REF_CLK };
+static unsigned int rmii_ref_clk_mfp_funcs[] = { S900_MUX_ETH_RMII,
+ S900_MUX_UART4,
+ S900_MUX_SPI2,
+ S900_MUX_RESERVED };
+/* mfp0_5 */
+static unsigned int i2s_d0_mfp_pads[] = { I2S_D0 };
+static unsigned int i2s_d0_mfp_funcs[] = { S900_MUX_I2S0,
+ S900_MUX_PCM0 };
+static unsigned int i2s_d1_mfp_pads[] = { I2S_D1 };
+static unsigned int i2s_d1_mfp_funcs[] = { S900_MUX_I2S1,
+ S900_MUX_PCM0 };
+
+/* mfp0_4_3 */
+static unsigned int i2s_lr_m_clk0_mfp_pads[] = { I2S_LRCLK0,
+ I2S_MCLK0 };
+static unsigned int i2s_lr_m_clk0_mfp_funcs[] = { S900_MUX_I2S0,
+ S900_MUX_PCM0,
+ S900_MUX_PCM1,
+ S900_MUX_RESERVED };
+/* mfp0_2 */
+static unsigned int i2s_bclk0_mfp_pads[] = { I2S_BCLK0 };
+static unsigned int i2s_bclk0_mfp_funcs[] = { S900_MUX_I2S0,
+ S900_MUX_PCM0 };
+static unsigned int i2s_bclk1_mclk1_mfp_pads[] = { I2S_BCLK1,
+ I2S_LRCLK1,
+ I2S_MCLK1 };
+static unsigned int i2s_bclk1_mclk1_mfp_funcs[] = { S900_MUX_I2S1,
+ S900_MUX_PCM0 };
+/* mfp0_1_0 */
+static unsigned int pcm1_in_out_mfp_pads[] = { PCM1_IN,
+ PCM1_OUT };
+static unsigned int pcm1_in_out_mfp_funcs[] = { S900_MUX_PCM1,
+ S900_MUX_SPI1,
+ S900_MUX_I2C3,
+ S900_MUX_UART4 };
+static unsigned int pcm1_clk_mfp_pads[] = { PCM1_CLK };
+static unsigned int pcm1_clk_mfp_funcs[] = { S900_MUX_PCM1,
+ S900_MUX_SPI1,
+ S900_MUX_PWM4,
+ S900_MUX_UART4 };
+static unsigned int pcm1_sync_mfp_pads[] = { PCM1_SYNC };
+static unsigned int pcm1_sync_mfp_funcs[] = { S900_MUX_PCM1,
+ S900_MUX_SPI1,
+ S900_MUX_PWM5,
+ S900_MUX_UART4 };
+/* mfp1_31_29 */
+static unsigned int eram_a5_mfp_pads[] = { ERAM_A5 };
+static unsigned int eram_a5_mfp_funcs[] = { S900_MUX_UART4,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_PWM0,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0 };
+static unsigned int eram_a6_mfp_pads[] = { ERAM_A6 };
+static unsigned int eram_a6_mfp_funcs[] = { S900_MUX_UART4,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_PWM1,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0,
+};
+static unsigned int eram_a7_mfp_pads[] = { ERAM_A7 };
+static unsigned int eram_a7_mfp_funcs[] = { S900_MUX_RESERVED,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_RESERVED,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0 };
+/* mfp1_28_26 */
+static unsigned int eram_a8_mfp_pads[] = { ERAM_A8 };
+static unsigned int eram_a8_mfp_funcs[] = { S900_MUX_RESERVED,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_PWM1,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0 };
+static unsigned int eram_a9_mfp_pads[] = { ERAM_A9 };
+static unsigned int eram_a9_mfp_funcs[] = { S900_MUX_USB20,
+ S900_MUX_UART5,
+ S900_MUX_ERAM,
+ S900_MUX_PWM2,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0 };
+static unsigned int eram_a10_mfp_pads[] = { ERAM_A10 };
+static unsigned int eram_a10_mfp_funcs[] = { S900_MUX_USB30,
+ S900_MUX_JTAG,
+ S900_MUX_ERAM,
+ S900_MUX_PWM3,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0,
+ S900_MUX_RESERVED,
+ S900_MUX_RESERVED };
+/* mfp1_25_23 */
+static unsigned int eram_a11_mfp_pads[] = { ERAM_A11 };
+static unsigned int eram_a11_mfp_funcs[] = { S900_MUX_RESERVED,
+ S900_MUX_RESERVED,
+ S900_MUX_ERAM,
+ S900_MUX_PWM2,
+ S900_MUX_UART5,
+ S900_MUX_RESERVED,
+ S900_MUX_SENS0,
+ S900_MUX_RESERVED };
+/* mfp1_22 */
+static unsigned int lvds_oep_odn_mfp_pads[] = { LVDS_OEP,
+ LVDS_OEN,
+ LVDS_ODP,
+ LVDS_ODN };
+static unsigned int lvds_oep_odn_mfp_funcs[] = { S900_MUX_LVDS,
+ S900_MUX_UART2 };
+static unsigned int lvds_ocp_obn_mfp_pads[] = { LVDS_OCP,
+ LVDS_OCN,
+ LVDS_OBP,
+ LVDS_OBN };
+static unsigned int lvds_ocp_obn_mfp_funcs[] = { S900_MUX_LVDS,
+ S900_MUX_PCM1 };
+static unsigned int lvds_oap_oan_mfp_pads[] = { LVDS_OAP,
+ LVDS_OAN };
+static unsigned int lvds_oap_oan_mfp_funcs[] = { S900_MUX_LVDS,
+ S900_MUX_ERAM };
+/* mfp1_21 */
+static unsigned int lvds_e_mfp_pads[] = { LVDS_EEP,
+ LVDS_EEN,
+ LVDS_EDP,
+ LVDS_EDN,
+ LVDS_ECP,
+ LVDS_ECN,
+ LVDS_EBP,
+ LVDS_EBN,
+ LVDS_EAP,
+ LVDS_EAN };
+static unsigned int lvds_e_mfp_funcs[] = { S900_MUX_LVDS,
+ S900_MUX_ERAM };
+/* mfp1_5_4 */
+static unsigned int spi0_sclk_mosi_mfp_pads[] = { SPI0_SCLK,
+ SPI0_MOSI };
+static unsigned int spi0_sclk_mosi_mfp_funcs[] = { S900_MUX_SPI0,
+ S900_MUX_ERAM,
+ S900_MUX_I2C3,
+ S900_MUX_PCM0 };
+/* mfp1_3_1 */
+static unsigned int spi0_ss_mfp_pads[] = { SPI0_SS };
+static unsigned int spi0_ss_mfp_funcs[] = { S900_MUX_SPI0,
+ S900_MUX_ERAM,
+ S900_MUX_I2S1,
+ S900_MUX_PCM1,
+ S900_MUX_PCM0,
+ S900_MUX_PWM4 };
+static unsigned int spi0_miso_mfp_pads[] = { SPI0_MISO };
+static unsigned int spi0_miso_mfp_funcs[] = { S900_MUX_SPI0,
+ S900_MUX_ERAM,
+ S900_MUX_I2S1,
+ S900_MUX_PCM1,
+ S900_MUX_PCM0,
+ S900_MUX_PWM5 };
+/* mfp2_23 */
+static unsigned int uart2_rtsb_mfp_pads[] = { UART2_RTSB };
+static unsigned int uart2_rtsb_mfp_funcs[] = { S900_MUX_UART2,
+ S900_MUX_UART0 };
+/* mfp2_22 */
+static unsigned int uart2_ctsb_mfp_pads[] = { UART2_CTSB };
+static unsigned int uart2_ctsb_mfp_funcs[] = { S900_MUX_UART2,
+ S900_MUX_UART0 };
+/* mfp2_21 */
+static unsigned int uart3_rtsb_mfp_pads[] = { UART3_RTSB };
+static unsigned int uart3_rtsb_mfp_funcs[] = { S900_MUX_UART3,
+ S900_MUX_UART5 };
+/* mfp2_20 */
+static unsigned int uart3_ctsb_mfp_pads[] = { UART3_CTSB };
+static unsigned int uart3_ctsb_mfp_funcs[] = { S900_MUX_UART3,
+ S900_MUX_UART5 };
+/* mfp2_19_17 */
+static unsigned int sd0_d0_mfp_pads[] = { SD0_D0 };
+static unsigned int sd0_d0_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_RESERVED,
+ S900_MUX_JTAG,
+ S900_MUX_UART2,
+ S900_MUX_UART5,
+ S900_MUX_GPU };
+/* mfp2_16_14 */
+static unsigned int sd0_d1_mfp_pads[] = { SD0_D1 };
+static unsigned int sd0_d1_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_GPU,
+ S900_MUX_RESERVED,
+ S900_MUX_UART2,
+ S900_MUX_UART5 };
+/* mfp_13_11 */
+static unsigned int sd0_d2_d3_mfp_pads[] = { SD0_D2,
+ SD0_D3 };
+static unsigned int sd0_d2_d3_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_RESERVED,
+ S900_MUX_JTAG,
+ S900_MUX_UART2,
+ S900_MUX_UART1,
+ S900_MUX_GPU };
+/* mfp2_10_9 */
+static unsigned int sd1_d0_d3_mfp_pads[] = { SD1_D0, SD1_D1,
+ SD1_D2, SD1_D3 };
+static unsigned int sd1_d0_d3_mfp_funcs[] = { S900_MUX_SD1,
+ S900_MUX_ERAM };
+/* mfp2_8_7 */
+static unsigned int sd0_cmd_mfp_pads[] = { SD0_CMD };
+static unsigned int sd0_cmd_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_GPU,
+ S900_MUX_JTAG };
+/* mfp2_6_5 */
+static unsigned int sd0_clk_mfp_pads[] = { SD0_CLK };
+static unsigned int sd0_clk_mfp_funcs[] = { S900_MUX_SD0,
+ S900_MUX_ERAM,
+ S900_MUX_JTAG,
+ S900_MUX_GPU };
+/* mfp2_4_3 */
+static unsigned int sd1_cmd_clk_mfp_pads[] = { SD1_CMD, SD1_CLK };
+static unsigned int sd1_cmd_clk_mfp_funcs[] = { S900_MUX_SD1,
+ S900_MUX_ERAM };
+/* mfp2_2_0 */
+static unsigned int uart0_rx_mfp_pads[] = { UART0_RX };
+static unsigned int uart0_rx_mfp_funcs[] = { S900_MUX_UART0,
+ S900_MUX_UART2,
+ S900_MUX_SPI1,
+ S900_MUX_I2C5,
+ S900_MUX_PCM1,
+ S900_MUX_I2S1 };
+/* mfp3_27 */
+static unsigned int nand0_d0_ceb3_mfp_pads[] = { NAND0_D0, NAND0_D1,
+ NAND0_D2, NAND0_D3,
+ NAND0_D4, NAND0_D5,
+ NAND0_D6, NAND0_D7,
+ NAND0_DQSN, NAND0_CEB3 };
+static unsigned int nand0_d0_ceb3_mfp_funcs[] = { S900_MUX_NAND0,
+ S900_MUX_SD2 };
+/* mfp3_21_19 */
+static unsigned int uart0_tx_mfp_pads[] = { UART0_TX };
+static unsigned int uart0_tx_mfp_funcs[] = { S900_MUX_UART0,
+ S900_MUX_UART2,
+ S900_MUX_SPI1,
+ S900_MUX_I2C5,
+ S900_MUX_SPDIF,
+ S900_MUX_PCM1,
+ S900_MUX_I2S1 };
+/* mfp3_18_16 */
+static unsigned int i2c0_mfp_pads[] = { I2C0_SCLK, I2C0_SDATA };
+static unsigned int i2c0_mfp_funcs[] = { S900_MUX_I2C0,
+ S900_MUX_UART2,
+ S900_MUX_I2C1,
+ S900_MUX_UART1,
+ S900_MUX_SPI1 };
+/* mfp3_15 */
+static unsigned int csi0_cn_cp_mfp_pads[] = { CSI0_CN, CSI0_CP };
+static unsigned int csi0_cn_cp_mfp_funcs[] = { S900_MUX_SENS0,
+ S900_MUX_SENS0 };
+/* mfp3_14 */
+static unsigned int csi0_dn0_dp3_mfp_pads[] = { CSI0_DN0, CSI0_DP0,
+ CSI0_DN1, CSI0_DP1,
+ CSI0_CN, CSI0_CP,
+ CSI0_DP2, CSI0_DN2,
+ CSI0_DN3, CSI0_DP3 };
+static unsigned int csi0_dn0_dp3_mfp_funcs[] = { S900_MUX_MIPI_CSI0,
+ S900_MUX_SENS0 };
+/* mfp3_13 */
+static unsigned int csi1_dn0_cp_mfp_pads[] = { CSI1_DN0, CSI1_DP0,
+ CSI1_DN1, CSI1_DP1,
+ CSI1_CN, CSI1_CP };
+static unsigned int csi1_dn0_cp_mfp_funcs[] = { S900_MUX_MIPI_CSI1,
+ S900_MUX_SENS0 };
+/* mfp3_12_dsi */
+static unsigned int dsi_dp3_dn1_mfp_pads[] = { DSI_DP3, DSI_DN2,
+ DSI_DP1, DSI_DN1 };
+static unsigned int dsi_dp3_dn1_mfp_funcs[] = { S900_MUX_MIPI_DSI,
+ S900_MUX_UART2 };
+static unsigned int dsi_cp_dn0_mfp_pads[] = { DSI_CP, DSI_CN,
+ DSI_DP0, DSI_DN0 };
+static unsigned int dsi_cp_dn0_mfp_funcs[] = { S900_MUX_MIPI_DSI,
+ S900_MUX_PCM1 };
+static unsigned int dsi_dp2_dn2_mfp_pads[] = { DSI_DP2, DSI_DN2 };
+static unsigned int dsi_dp2_dn2_mfp_funcs[] = { S900_MUX_MIPI_DSI,
+ S900_MUX_UART4 };
+/* mfp3_11 */
+static unsigned int nand1_d0_ceb1_mfp_pads[] = { NAND1_D0, NAND1_D1,
+ NAND1_D2, NAND1_D3,
+ NAND1_D4, NAND1_D5,
+ NAND1_D6, NAND1_D7,
+ NAND1_DQSN, NAND1_CEB1 };
+static unsigned int nand1_d0_ceb1_mfp_funcs[] = { S900_MUX_NAND1,
+ S900_MUX_SD3 };
+/* mfp3_10 */
+static unsigned int nand1_ceb3_mfp_pads[] = { NAND1_CEB3 };
+static unsigned int nand1_ceb3_mfp_funcs[] = { S900_MUX_NAND1,
+ S900_MUX_PWM0 };
+static unsigned int nand1_ceb0_mfp_pads[] = { NAND1_CEB0 };
+static unsigned int nand1_ceb0_mfp_funcs[] = { S900_MUX_NAND1,
+ S900_MUX_PWM1 };
+/* mfp3_9 */
+static unsigned int csi1_dn0_dp0_mfp_pads[] = { CSI1_DN0, CSI1_DP0 };
+static unsigned int csi1_dn0_dp0_mfp_funcs[] = { S900_MUX_SENS0,
+ S900_MUX_SENS0 };
+/* mfp3_8 */
+static unsigned int uart4_rx_tx_mfp_pads[] = { UART4_RX, UART4_TX };
+static unsigned int uart4_rx_tx_mfp_funcs[] = { S900_MUX_UART4,
+ S900_MUX_I2C4 };
+/* PADDRV group data */
+/* drv0 */
+static unsigned int sgpio3_drv_pads[] = { SGPIO3 };
+static unsigned int sgpio2_drv_pads[] = { SGPIO2 };
+static unsigned int sgpio1_drv_pads[] = { SGPIO1 };
+static unsigned int sgpio0_drv_pads[] = { SGPIO0 };
+static unsigned int rmii_tx_d0_d1_drv_pads[] = { ETH_TXD0, ETH_TXD1 };
+static unsigned int rmii_txen_rxer_drv_pads[] = { ETH_TXEN, ETH_RXER };
+static unsigned int rmii_crs_dv_drv_pads[] = { ETH_CRS_DV };
+static unsigned int rmii_rx_d1_d0_drv_pads[] = { ETH_RXD1, ETH_RXD0 };
+static unsigned int rmii_ref_clk_drv_pads[] = { ETH_REF_CLK };
+static unsigned int rmii_mdc_mdio_drv_pads[] = { ETH_MDC, ETH_MDIO };
+static unsigned int sirq_0_1_drv_pads[] = { SIRQ0, SIRQ1 };
+static unsigned int sirq2_drv_pads[] = { SIRQ2 };
+static unsigned int i2s_d0_d1_drv_pads[] = { I2S_D0, I2S_D1 };
+static unsigned int i2s_lr_m_clk0_drv_pads[] = { I2S_LRCLK0, I2S_MCLK0 };
+static unsigned int i2s_blk1_mclk1_drv_pads[] = { I2S_BCLK0, I2S_BCLK1,
+ I2S_LRCLK1, I2S_MCLK1 };
+static unsigned int pcm1_in_out_drv_pads[] = { PCM1_IN, PCM1_CLK,
+ PCM1_SYNC, PCM1_OUT };
+/* drv1 */
+static unsigned int lvds_oap_oan_drv_pads[] = { LVDS_OAP, LVDS_OAN };
+static unsigned int lvds_oep_odn_drv_pads[] = { LVDS_OEP, LVDS_OEN,
+ LVDS_ODP, LVDS_ODN };
+static unsigned int lvds_ocp_obn_drv_pads[] = { LVDS_OCP, LVDS_OCN,
+ LVDS_OBP, LVDS_OBN };
+static unsigned int lvds_e_drv_pads[] = { LVDS_EEP, LVDS_EEN,
+ LVDS_EDP, LVDS_EDN,
+ LVDS_ECP, LVDS_ECN,
+ LVDS_EBP, LVDS_EBN };
+static unsigned int sd0_d3_d0_drv_pads[] = { SD0_D3, SD0_D2,
+ SD0_D1, SD0_D0 };
+static unsigned int sd1_d3_d0_drv_pads[] = { SD1_D3, SD1_D2,
+ SD1_D1, SD1_D0 };
+static unsigned int sd0_sd1_cmd_clk_drv_pads[] = { SD0_CLK, SD0_CMD,
+ SD1_CLK, SD1_CMD };
+static unsigned int spi0_sclk_mosi_drv_pads[] = { SPI0_SCLK, SPI0_MOSI };
+static unsigned int spi0_ss_miso_drv_pads[] = { SPI0_SS, SPI0_MISO };
+static unsigned int uart0_rx_tx_drv_pads[] = { UART0_RX, UART0_TX };
+static unsigned int uart4_rx_tx_drv_pads[] = { UART4_RX, UART4_TX };
+static unsigned int uart2_drv_pads[] = { UART2_RX, UART2_TX,
+ UART2_RTSB, UART2_CTSB };
+static unsigned int uart3_drv_pads[] = { UART3_RX, UART3_TX,
+ UART3_RTSB, UART3_CTSB };
+/* drv2 */
+static unsigned int i2c0_drv_pads[] = { I2C0_SCLK, I2C0_SDATA };
+static unsigned int i2c1_drv_pads[] = { I2C1_SCLK, I2C1_SDATA };
+static unsigned int i2c2_drv_pads[] = { I2C2_SCLK, I2C2_SDATA };
+static unsigned int sensor0_drv_pads[] = { SENSOR0_PCLK,
+ SENSOR0_CKOUT };
+/* SR group data */
+/* sr0 */
+static unsigned int sgpio3_sr_pads[] = { SGPIO3 };
+static unsigned int sgpio2_sr_pads[] = { SGPIO2 };
+static unsigned int sgpio1_sr_pads[] = { SGPIO1 };
+static unsigned int sgpio0_sr_pads[] = { SGPIO0 };
+static unsigned int rmii_tx_d0_d1_sr_pads[] = { ETH_TXD0, ETH_TXD1 };
+static unsigned int rmii_txen_rxer_sr_pads[] = { ETH_TXEN, ETH_RXER };
+static unsigned int rmii_crs_dv_sr_pads[] = { ETH_CRS_DV };
+static unsigned int rmii_rx_d1_d0_sr_pads[] = { ETH_RXD1, ETH_RXD0 };
+static unsigned int rmii_ref_clk_sr_pads[] = { ETH_REF_CLK };
+static unsigned int rmii_mdc_mdio_sr_pads[] = { ETH_MDC, ETH_MDIO };
+static unsigned int sirq_0_1_sr_pads[] = { SIRQ0, SIRQ1 };
+static unsigned int sirq2_sr_pads[] = { SIRQ2 };
+static unsigned int i2s_do_d1_sr_pads[] = { I2S_D0, I2S_D1 };
+static unsigned int i2s_lr_m_clk0_sr_pads[] = { I2S_LRCLK0, I2S_MCLK0 };
+static unsigned int i2s_bclk0_mclk1_sr_pads[] = { I2S_BCLK0, I2S_BCLK1,
+ I2S_LRCLK1, I2S_MCLK1 };
+static unsigned int pcm1_in_out_sr_pads[] = { PCM1_IN, PCM1_CLK,
+ PCM1_SYNC, PCM1_OUT };
+/* sr1 */
+static unsigned int sd1_d3_d0_sr_pads[] = { SD1_D3, SD1_D2,
+ SD1_D1, SD1_D0 };
+static unsigned int sd0_sd1_clk_cmd_sr_pads[] = { SD0_CLK, SD0_CMD,
+ SD1_CLK, SD1_CMD };
+static unsigned int spi0_sclk_mosi_sr_pads[] = { SPI0_SCLK, SPI0_MOSI };
+static unsigned int spi0_ss_miso_sr_pads[] = { SPI0_SS, SPI0_MISO };
+static unsigned int uart0_rx_tx_sr_pads[] = { UART0_RX, UART0_TX };
+static unsigned int uart4_rx_tx_sr_pads[] = { UART4_RX, UART4_TX };
+static unsigned int uart2_sr_pads[] = { UART2_RX, UART2_TX,
+ UART2_RTSB, UART2_CTSB };
+static unsigned int uart3_sr_pads[] = { UART3_RX, UART3_TX,
+ UART3_RTSB, UART3_CTSB };
+/* sr2 */
+static unsigned int i2c0_sr_pads[] = { I2C0_SCLK, I2C0_SDATA };
+static unsigned int i2c1_sr_pads[] = { I2C1_SCLK, I2C1_SDATA };
+static unsigned int i2c2_sr_pads[] = { I2C2_SCLK, I2C2_SDATA };
+static unsigned int sensor0_sr_pads[] = { SENSOR0_PCLK,
+ SENSOR0_CKOUT };
+
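+/*
+ * Group helpers: each group describes exactly one bit field. MUX_PG() fills
+ * in the MFP mux field, DRV_PG() the drive-strength field and SR_PG() the
+ * slew-rate field; the unused register offsets are set to -1 so that
+ * owl_group_pinconf_reg() can reject configurations a group does not support.
+ */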
+#define MUX_PG(group_name, reg, shift, width) \
+ { \
+ .name = #group_name, \
+ .pads = group_name##_pads, \
+ .npads = ARRAY_SIZE(group_name##_pads), \
+ .funcs = group_name##_funcs, \
+ .nfuncs = ARRAY_SIZE(group_name##_funcs), \
+ .mfpctl_reg = MFCTL##reg, \
+ .mfpctl_shift = shift, \
+ .mfpctl_width = width, \
+ .drv_reg = -1, \
+ .drv_shift = -1, \
+ .drv_width = -1, \
+ .sr_reg = -1, \
+ .sr_shift = -1, \
+ .sr_width = -1, \
+ }
+
+#define DRV_PG(group_name, reg, shift, width) \
+ { \
+ .name = #group_name, \
+ .pads = group_name##_pads, \
+ .npads = ARRAY_SIZE(group_name##_pads), \
+ .mfpctl_reg = -1, \
+ .mfpctl_shift = -1, \
+ .mfpctl_width = -1, \
+ .drv_reg = PAD_DRV##reg, \
+ .drv_shift = shift, \
+ .drv_width = width, \
+ .sr_reg = -1, \
+ .sr_shift = -1, \
+ .sr_width = -1, \
+ }
+
+#define SR_PG(group_name, reg, shift, width) \
+ { \
+ .name = #group_name, \
+ .pads = group_name##_pads, \
+ .npads = ARRAY_SIZE(group_name##_pads), \
+ .mfpctl_reg = -1, \
+ .mfpctl_shift = -1, \
+ .mfpctl_width = -1, \
+ .drv_reg = -1, \
+ .drv_shift = -1, \
+ .drv_width = -1, \
+ .sr_reg = PAD_SR##reg, \
+ .sr_shift = shift, \
+ .sr_width = width, \
+ }
+
+/* Pinctrl groups */
+static const struct owl_pingroup s900_groups[] = {
+ MUX_PG(lvds_oxx_uart4_mfp, 0, 22, 1),
+ MUX_PG(rmii_mdc_mfp, 0, 20, 2),
+ MUX_PG(rmii_mdio_mfp, 0, 20, 2),
+ MUX_PG(sirq0_mfp, 0, 19, 1),
+ MUX_PG(sirq1_mfp, 0, 19, 1),
+ MUX_PG(rmii_txd0_mfp, 0, 16, 3),
+ MUX_PG(rmii_txd1_mfp, 0, 16, 3),
+ MUX_PG(rmii_txen_mfp, 0, 13, 3),
+ MUX_PG(rmii_rxer_mfp, 0, 13, 3),
+ MUX_PG(rmii_crs_dv_mfp, 0, 11, 2),
+ MUX_PG(rmii_rxd1_mfp, 0, 8, 3),
+ MUX_PG(rmii_rxd0_mfp, 0, 8, 3),
+ MUX_PG(rmii_ref_clk_mfp, 0, 6, 2),
+ MUX_PG(i2s_d0_mfp, 0, 5, 1),
+ MUX_PG(i2s_d1_mfp, 0, 5, 1),
+ MUX_PG(i2s_lr_m_clk0_mfp, 0, 3, 2),
+ MUX_PG(i2s_bclk0_mfp, 0, 2, 1),
+ MUX_PG(i2s_bclk1_mclk1_mfp, 0, 2, 1),
+ MUX_PG(pcm1_in_out_mfp, 0, 0, 2),
+ MUX_PG(pcm1_clk_mfp, 0, 0, 2),
+ MUX_PG(pcm1_sync_mfp, 0, 0, 2),
+ MUX_PG(eram_a5_mfp, 1, 29, 3),
+ MUX_PG(eram_a6_mfp, 1, 29, 3),
+ MUX_PG(eram_a7_mfp, 1, 29, 3),
+ MUX_PG(eram_a8_mfp, 1, 26, 3),
+ MUX_PG(eram_a9_mfp, 1, 26, 3),
+ MUX_PG(eram_a10_mfp, 1, 26, 3),
+ MUX_PG(eram_a11_mfp, 1, 23, 3),
+ MUX_PG(lvds_oep_odn_mfp, 1, 22, 1),
+ MUX_PG(lvds_ocp_obn_mfp, 1, 22, 1),
+ MUX_PG(lvds_oap_oan_mfp, 1, 22, 1),
+ MUX_PG(lvds_e_mfp, 1, 21, 1),
+ MUX_PG(spi0_sclk_mosi_mfp, 1, 4, 2),
+ MUX_PG(spi0_ss_mfp, 1, 1, 3),
+ MUX_PG(spi0_miso_mfp, 1, 1, 3),
+ MUX_PG(uart2_rtsb_mfp, 2, 23, 1),
+ MUX_PG(uart2_ctsb_mfp, 2, 22, 1),
+ MUX_PG(uart3_rtsb_mfp, 2, 21, 1),
+ MUX_PG(uart3_ctsb_mfp, 2, 20, 1),
+ MUX_PG(sd0_d0_mfp, 2, 17, 3),
+ MUX_PG(sd0_d1_mfp, 2, 14, 3),
+ MUX_PG(sd0_d2_d3_mfp, 2, 11, 3),
+ MUX_PG(sd1_d0_d3_mfp, 2, 9, 2),
+ MUX_PG(sd0_cmd_mfp, 2, 7, 2),
+ MUX_PG(sd0_clk_mfp, 2, 5, 2),
+ MUX_PG(sd1_cmd_clk_mfp, 2, 3, 2),
+ MUX_PG(uart0_rx_mfp, 2, 0, 3),
+ MUX_PG(nand0_d0_ceb3_mfp, 3, 27, 1),
+ MUX_PG(uart0_tx_mfp, 3, 19, 3),
+ MUX_PG(i2c0_mfp, 3, 16, 3),
+ MUX_PG(csi0_cn_cp_mfp, 3, 15, 1),
+ MUX_PG(csi0_dn0_dp3_mfp, 3, 14, 1),
+ MUX_PG(csi1_dn0_cp_mfp, 3, 13, 1),
+ MUX_PG(dsi_dp3_dn1_mfp, 3, 12, 1),
+ MUX_PG(dsi_cp_dn0_mfp, 3, 12, 1),
+ MUX_PG(dsi_dp2_dn2_mfp, 3, 12, 1),
+ MUX_PG(nand1_d0_ceb1_mfp, 3, 11, 1),
+ MUX_PG(nand1_ceb3_mfp, 3, 10, 1),
+ MUX_PG(nand1_ceb0_mfp, 3, 10, 1),
+ MUX_PG(csi1_dn0_dp0_mfp, 3, 9, 1),
+ MUX_PG(uart4_rx_tx_mfp, 3, 8, 1),
+
+ DRV_PG(sgpio3_drv, 0, 30, 2),
+ DRV_PG(sgpio2_drv, 0, 28, 2),
+ DRV_PG(sgpio1_drv, 0, 26, 2),
+ DRV_PG(sgpio0_drv, 0, 24, 2),
+ DRV_PG(rmii_tx_d0_d1_drv, 0, 22, 2),
+ DRV_PG(rmii_txen_rxer_drv, 0, 20, 2),
+ DRV_PG(rmii_crs_dv_drv, 0, 18, 2),
+ DRV_PG(rmii_rx_d1_d0_drv, 0, 16, 2),
+ DRV_PG(rmii_ref_clk_drv, 0, 14, 2),
+ DRV_PG(rmii_mdc_mdio_drv, 0, 12, 2),
+ DRV_PG(sirq_0_1_drv, 0, 10, 2),
+ DRV_PG(sirq2_drv, 0, 8, 2),
+ DRV_PG(i2s_d0_d1_drv, 0, 6, 2),
+ DRV_PG(i2s_lr_m_clk0_drv, 0, 4, 2),
+ DRV_PG(i2s_blk1_mclk1_drv, 0, 2, 2),
+ DRV_PG(pcm1_in_out_drv, 0, 0, 2),
+ DRV_PG(lvds_oap_oan_drv, 1, 28, 2),
+ DRV_PG(lvds_oep_odn_drv, 1, 26, 2),
+ DRV_PG(lvds_ocp_obn_drv, 1, 24, 2),
+ DRV_PG(lvds_e_drv, 1, 22, 2),
+ DRV_PG(sd0_d3_d0_drv, 1, 20, 2),
+ DRV_PG(sd1_d3_d0_drv, 1, 18, 2),
+ DRV_PG(sd0_sd1_cmd_clk_drv, 1, 16, 2),
+ DRV_PG(spi0_sclk_mosi_drv, 1, 14, 2),
+ DRV_PG(spi0_ss_miso_drv, 1, 12, 2),
+ DRV_PG(uart0_rx_tx_drv, 1, 10, 2),
+ DRV_PG(uart4_rx_tx_drv, 1, 8, 2),
+ DRV_PG(uart2_drv, 1, 6, 2),
+ DRV_PG(uart3_drv, 1, 4, 2),
+ DRV_PG(i2c0_drv, 2, 30, 2),
+ DRV_PG(i2c1_drv, 2, 28, 2),
+ DRV_PG(i2c2_drv, 2, 26, 2),
+ DRV_PG(sensor0_drv, 2, 20, 2),
+
+ SR_PG(sgpio3_sr, 0, 15, 1),
+ SR_PG(sgpio2_sr, 0, 14, 1),
+ SR_PG(sgpio1_sr, 0, 13, 1),
+ SR_PG(sgpio0_sr, 0, 12, 1),
+ SR_PG(rmii_tx_d0_d1_sr, 0, 11, 1),
+ SR_PG(rmii_txen_rxer_sr, 0, 10, 1),
+ SR_PG(rmii_crs_dv_sr, 0, 9, 1),
+ SR_PG(rmii_rx_d1_d0_sr, 0, 8, 1),
+ SR_PG(rmii_ref_clk_sr, 0, 7, 1),
+ SR_PG(rmii_mdc_mdio_sr, 0, 6, 1),
+ SR_PG(sirq_0_1_sr, 0, 5, 1),
+ SR_PG(sirq2_sr, 0, 4, 1),
+ SR_PG(i2s_do_d1_sr, 0, 3, 1),
+ SR_PG(i2s_lr_m_clk0_sr, 0, 2, 1),
+ SR_PG(i2s_bclk0_mclk1_sr, 0, 1, 1),
+ SR_PG(pcm1_in_out_sr, 0, 0, 1),
+ SR_PG(sd1_d3_d0_sr, 1, 25, 1),
+ SR_PG(sd0_sd1_clk_cmd_sr, 1, 24, 1),
+ SR_PG(spi0_sclk_mosi_sr, 1, 23, 1),
+ SR_PG(spi0_ss_miso_sr, 1, 22, 1),
+ SR_PG(uart0_rx_tx_sr, 1, 21, 1),
+ SR_PG(uart4_rx_tx_sr, 1, 20, 1),
+ SR_PG(uart2_sr, 1, 19, 1),
+ SR_PG(uart3_sr, 1, 18, 1),
+ SR_PG(i2c0_sr, 2, 31, 1),
+ SR_PG(i2c1_sr, 2, 30, 1),
+ SR_PG(i2c2_sr, 2, 29, 1),
+ SR_PG(sensor0_sr, 2, 25, 1)
+};
+
+static const char * const eram_groups[] = {
+ "lvds_oxx_uart4_mfp",
+ "eram_a5_mfp",
+ "eram_a6_mfp",
+ "eram_a7_mfp",
+ "eram_a8_mfp",
+ "eram_a9_mfp",
+ "eram_a10_mfp",
+ "eram_a11_mfp",
+ "lvds_oap_oan_mfp",
+ "lvds_e_mfp",
+ "spi0_sclk_mosi_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd1_d0_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+ "sd1_cmd_clk_mfp",
+};
+
+static const char * const eth_rmii_groups[] = {
+ "rmii_mdc_mfp",
+ "rmii_mdio_mfp",
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_txen_mfp",
+ "rmii_rxer_mfp",
+ "rmii_crs_dv_mfp",
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "rmii_ref_clk_mfp",
+ "eth_smi_dummy",
+};
+
+static const char * const eth_smii_groups[] = {
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_crs_dv_mfp",
+ "eth_smi_dummy",
+};
+
+static const char * const spi0_groups[] = {
+ "spi0_sclk_mosi_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+};
+
+static const char * const spi1_groups[] = {
+ "pcm1_in_out_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "i2c0_mfp",
+};
+
+static const char * const spi2_groups[] = {
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_crs_dv_mfp",
+ "rmii_ref_clk_mfp",
+};
+
+static const char * const spi3_groups[] = {
+ "rmii_txen_mfp",
+ "rmii_rxer_mfp",
+};
+
+static const char * const sens0_groups[] = {
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_txen_mfp",
+ "rmii_rxer_mfp",
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "eram_a5_mfp",
+ "eram_a6_mfp",
+ "eram_a7_mfp",
+ "eram_a8_mfp",
+ "eram_a9_mfp",
+ "csi0_cn_cp_mfp",
+ "csi0_dn0_dp3_mfp",
+ "csi1_dn0_cp_mfp",
+ "csi1_dn0_dp0_mfp",
+};
+
+static const char * const uart0_groups[] = {
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+};
+
+static const char * const uart1_groups[] = {
+ "sd0_d2_d3_mfp",
+ "i2c0_mfp",
+};
+
+static const char * const uart2_groups[] = {
+ "rmii_mdc_mfp",
+ "rmii_mdio_mfp",
+ "rmii_txen_mfp",
+ "rmii_rxer_mfp",
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "lvds_oep_odn_mfp",
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "uart0_rx_mfp",
+	"uart0_tx_mfp",
+	"i2c0_mfp",
+ "dsi_dp3_dn1_mfp",
+ "uart2_dummy"
+};
+
+static const char * const uart3_groups[] = {
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
+ "uart3_dummy"
+};
+
+static const char * const uart4_groups[] = {
+ "lvds_oxx_uart4_mfp",
+ "rmii_crs_dv_mfp",
+ "rmii_ref_clk_mfp",
+ "pcm1_in_out_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "eram_a5_mfp",
+ "eram_a6_mfp",
+ "dsi_dp2_dn2_mfp",
+	"uart4_rx_tx_mfp",
+ "uart4_dummy"
+};
+
+static const char * const uart5_groups[] = {
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "eram_a9_mfp",
+ "eram_a11_mfp",
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+};
+
+static const char * const uart6_groups[] = {
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+};
+
+static const char * const i2s0_groups[] = {
+ "i2s_d0_mfp",
+ "i2s_lr_m_clk0_mfp",
+ "i2s_bclk0_mfp",
+ "i2s0_dummy",
+};
+
+static const char * const i2s1_groups[] = {
+ "i2s_d1_mfp",
+ "i2s_bclk1_mclk1_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "i2s1_dummy",
+};
+
+static const char * const pcm0_groups[] = {
+ "i2s_d0_mfp",
+ "i2s_d1_mfp",
+ "i2s_lr_m_clk0_mfp",
+ "i2s_bclk0_mfp",
+ "i2s_bclk1_mclk1_mfp",
+ "spi0_sclk_mosi_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+};
+
+static const char * const pcm1_groups[] = {
+ "i2s_lr_m_clk0_mfp",
+ "pcm1_in_out_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "lvds_oep_odn_mfp",
+ "spi0_ss_mfp",
+ "spi0_miso_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "dsi_cp_dn0_mfp",
+ "pcm1_dummy",
+};
+
+static const char * const jtag_groups[] = {
+ "eram_a5_mfp",
+ "eram_a6_mfp",
+ "eram_a7_mfp",
+ "eram_a8_mfp",
+ "eram_a10_mfp",
+ "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+};
+
+static const char * const pwm0_groups[] = {
+ "sirq0_mfp",
+ "rmii_txd0_mfp",
+ "rmii_rxd1_mfp",
+ "eram_a5_mfp",
+ "nand1_ceb3_mfp",
+};
+
+static const char * const pwm1_groups[] = {
+ "sirq1_mfp",
+ "rmii_txd1_mfp",
+ "rmii_rxd0_mfp",
+ "eram_a6_mfp",
+ "eram_a8_mfp",
+ "nand1_ceb0_mfp",
+};
+
+static const char * const pwm2_groups[] = {
+ "rmii_mdc_mfp",
+ "rmii_txen_mfp",
+ "eram_a9_mfp",
+ "eram_a11_mfp",
+};
+
+static const char * const pwm3_groups[] = {
+ "rmii_mdio_mfp",
+ "rmii_rxer_mfp",
+ "eram_a10_mfp",
+};
+
+static const char * const pwm4_groups[] = {
+ "pcm1_clk_mfp",
+ "spi0_ss_mfp",
+};
+
+static const char * const pwm5_groups[] = {
+ "pcm1_sync_mfp",
+ "spi0_miso_mfp",
+};
+
+static const char * const sd0_groups[] = {
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+};
+
+static const char * const sd1_groups[] = {
+ "sd1_d0_d3_mfp",
+ "sd1_cmd_clk_mfp",
+ "sd1_dummy",
+};
+
+static const char * const sd2_groups[] = {
+ "nand0_d0_ceb3_mfp",
+};
+
+static const char * const sd3_groups[] = {
+ "nand1_d0_ceb1_mfp",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0_mfp",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c0_mfp",
+ "i2c1_dummy"
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_dummy"
+};
+
+static const char * const i2c3_groups[] = {
+ "pcm1_in_out_mfp",
+ "spi0_sclk_mosi_mfp",
+};
+
+static const char * const i2c4_groups[] = {
+ "uart4_rx_tx_mfp",
+};
+
+static const char * const i2c5_groups[] = {
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+};
+
+static const char * const lvds_groups[] = {
+ "lvds_oep_odn_mfp",
+ "lvds_ocp_obn_mfp",
+ "lvds_oap_oan_mfp",
+ "lvds_e_mfp",
+};
+
+static const char * const usb20_groups[] = {
+ "eram_a9_mfp",
+};
+
+static const char * const usb30_groups[] = {
+ "eram_a10_mfp",
+};
+
+static const char * const gpu_groups[] = {
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+};
+
+static const char * const mipi_csi0_groups[] = {
+ "csi0_dn0_dp3_mfp",
+};
+
+static const char * const mipi_csi1_groups[] = {
+ "csi1_dn0_cp_mfp",
+};
+
+static const char * const mipi_dsi_groups[] = {
+ "dsi_dp3_dn1_mfp",
+ "dsi_cp_dn0_mfp",
+ "dsi_dp2_dn2_mfp",
+ "mipi_dsi_dummy",
+};
+
+static const char * const nand0_groups[] = {
+ "nand0_d0_ceb3_mfp",
+ "nand0_dummy",
+};
+
+static const char * const nand1_groups[] = {
+ "nand1_d0_ceb1_mfp",
+ "nand1_ceb3_mfp",
+ "nand1_ceb0_mfp",
+ "nand1_dummy",
+};
+
+static const char * const spdif_groups[] = {
+ "uart0_tx_mfp",
+};
+
+static const char * const sirq0_groups[] = {
+ "sirq0_mfp",
+ "sirq0_dummy",
+};
+
+static const char * const sirq1_groups[] = {
+ "sirq1_mfp",
+ "sirq1_dummy",
+};
+
+static const char * const sirq2_groups[] = {
+ "sirq2_dummy",
+};
+
+#define FUNCTION(fname) \
+ { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+static const struct owl_pinmux_func s900_functions[] = {
+ [S900_MUX_ERAM] = FUNCTION(eram),
+ [S900_MUX_ETH_RMII] = FUNCTION(eth_rmii),
+ [S900_MUX_ETH_SMII] = FUNCTION(eth_smii),
+ [S900_MUX_SPI0] = FUNCTION(spi0),
+ [S900_MUX_SPI1] = FUNCTION(spi1),
+ [S900_MUX_SPI2] = FUNCTION(spi2),
+ [S900_MUX_SPI3] = FUNCTION(spi3),
+ [S900_MUX_SENS0] = FUNCTION(sens0),
+ [S900_MUX_UART0] = FUNCTION(uart0),
+ [S900_MUX_UART1] = FUNCTION(uart1),
+ [S900_MUX_UART2] = FUNCTION(uart2),
+ [S900_MUX_UART3] = FUNCTION(uart3),
+ [S900_MUX_UART4] = FUNCTION(uart4),
+ [S900_MUX_UART5] = FUNCTION(uart5),
+ [S900_MUX_UART6] = FUNCTION(uart6),
+ [S900_MUX_I2S0] = FUNCTION(i2s0),
+ [S900_MUX_I2S1] = FUNCTION(i2s1),
+ [S900_MUX_PCM0] = FUNCTION(pcm0),
+ [S900_MUX_PCM1] = FUNCTION(pcm1),
+ [S900_MUX_JTAG] = FUNCTION(jtag),
+ [S900_MUX_PWM0] = FUNCTION(pwm0),
+ [S900_MUX_PWM1] = FUNCTION(pwm1),
+ [S900_MUX_PWM2] = FUNCTION(pwm2),
+ [S900_MUX_PWM3] = FUNCTION(pwm3),
+ [S900_MUX_PWM4] = FUNCTION(pwm4),
+ [S900_MUX_PWM5] = FUNCTION(pwm5),
+ [S900_MUX_SD0] = FUNCTION(sd0),
+ [S900_MUX_SD1] = FUNCTION(sd1),
+ [S900_MUX_SD2] = FUNCTION(sd2),
+ [S900_MUX_SD3] = FUNCTION(sd3),
+ [S900_MUX_I2C0] = FUNCTION(i2c0),
+ [S900_MUX_I2C1] = FUNCTION(i2c1),
+ [S900_MUX_I2C2] = FUNCTION(i2c2),
+ [S900_MUX_I2C3] = FUNCTION(i2c3),
+ [S900_MUX_I2C4] = FUNCTION(i2c4),
+ [S900_MUX_I2C5] = FUNCTION(i2c5),
+ [S900_MUX_LVDS] = FUNCTION(lvds),
+ [S900_MUX_USB30] = FUNCTION(usb30),
+ [S900_MUX_USB20] = FUNCTION(usb20),
+ [S900_MUX_GPU] = FUNCTION(gpu),
+ [S900_MUX_MIPI_CSI0] = FUNCTION(mipi_csi0),
+ [S900_MUX_MIPI_CSI1] = FUNCTION(mipi_csi1),
+ [S900_MUX_MIPI_DSI] = FUNCTION(mipi_dsi),
+ [S900_MUX_NAND0] = FUNCTION(nand0),
+ [S900_MUX_NAND1] = FUNCTION(nand1),
+ [S900_MUX_SPDIF] = FUNCTION(spdif),
+ [S900_MUX_SIRQ0] = FUNCTION(sirq0),
+ [S900_MUX_SIRQ1] = FUNCTION(sirq1),
+ [S900_MUX_SIRQ2] = FUNCTION(sirq2)
+};
+/* Pad pull-up/down configuration */
+#define PULLCTL_CONF(pull_reg, pull_sft, pull_wdt) \
+ { \
+ .reg = PAD_PULLCTL##pull_reg, \
+ .shift = pull_sft, \
+ .width = pull_wdt, \
+ }
+
+#define PAD_PULLCTL_CONF(pad_name, pull_reg, pull_sft, pull_wdt) \
+ struct owl_pullctl pad_name##_pullctl_conf \
+ = PULLCTL_CONF(pull_reg, pull_sft, pull_wdt)
+
+#define ST_CONF(st_reg, st_sft, st_wdt) \
+ { \
+ .reg = PAD_ST##st_reg, \
+ .shift = st_sft, \
+ .width = st_wdt, \
+ }
+
+#define PAD_ST_CONF(pad_name, st_reg, st_sft, st_wdt) \
+ struct owl_st pad_name##_st_conf \
+ = ST_CONF(st_reg, st_sft, st_wdt)
+
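+/*
+ * PAD_PULLCTL_CONF() and PAD_ST_CONF() expand to per-pad <pad>_pullctl_conf
+ * and <pad>_st_conf objects describing where that pad's pull-control and
+ * Schmitt-trigger enable bits live.
+ */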
+/* PAD_PULLCTL0 */
+static PAD_PULLCTL_CONF(ETH_RXER, 0, 18, 2);
+static PAD_PULLCTL_CONF(SIRQ0, 0, 16, 2);
+static PAD_PULLCTL_CONF(SIRQ1, 0, 14, 2);
+static PAD_PULLCTL_CONF(SIRQ2, 0, 12, 2);
+static PAD_PULLCTL_CONF(I2C0_SDATA, 0, 10, 2);
+static PAD_PULLCTL_CONF(I2C0_SCLK, 0, 8, 2);
+static PAD_PULLCTL_CONF(ERAM_A5, 0, 6, 2);
+static PAD_PULLCTL_CONF(ERAM_A6, 0, 4, 2);
+static PAD_PULLCTL_CONF(ERAM_A7, 0, 2, 2);
+static PAD_PULLCTL_CONF(ERAM_A10, 0, 0, 2);
+
+/* PAD_PULLCTL1 */
+static PAD_PULLCTL_CONF(PCM1_IN, 1, 30, 2);
+static PAD_PULLCTL_CONF(PCM1_OUT, 1, 28, 2);
+static PAD_PULLCTL_CONF(SD0_D0, 1, 26, 2);
+static PAD_PULLCTL_CONF(SD0_D1, 1, 24, 2);
+static PAD_PULLCTL_CONF(SD0_D2, 1, 22, 2);
+static PAD_PULLCTL_CONF(SD0_D3, 1, 20, 2);
+static PAD_PULLCTL_CONF(SD0_CMD, 1, 18, 2);
+static PAD_PULLCTL_CONF(SD0_CLK, 1, 16, 2);
+static PAD_PULLCTL_CONF(SD1_CMD, 1, 14, 2);
+static PAD_PULLCTL_CONF(SD1_D0, 1, 12, 2);
+static PAD_PULLCTL_CONF(SD1_D1, 1, 10, 2);
+static PAD_PULLCTL_CONF(SD1_D2, 1, 8, 2);
+static PAD_PULLCTL_CONF(SD1_D3, 1, 6, 2);
+static PAD_PULLCTL_CONF(UART0_RX, 1, 4, 2);
+static PAD_PULLCTL_CONF(UART0_TX, 1, 2, 2);
+
+/* PAD_PULLCTL2 */
+static PAD_PULLCTL_CONF(I2C2_SDATA, 2, 26, 2);
+static PAD_PULLCTL_CONF(I2C2_SCLK, 2, 24, 2);
+static PAD_PULLCTL_CONF(SPI0_SCLK, 2, 22, 2);
+static PAD_PULLCTL_CONF(SPI0_MOSI, 2, 20, 2);
+static PAD_PULLCTL_CONF(I2C1_SDATA, 2, 18, 2);
+static PAD_PULLCTL_CONF(I2C1_SCLK, 2, 16, 2);
+static PAD_PULLCTL_CONF(NAND0_D0, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D1, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D2, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D3, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D4, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D5, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D6, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_D7, 2, 15, 1);
+static PAD_PULLCTL_CONF(NAND0_DQSN, 2, 14, 1);
+static PAD_PULLCTL_CONF(NAND0_DQS, 2, 13, 1);
+static PAD_PULLCTL_CONF(NAND1_D0, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D1, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D2, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D3, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D4, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D5, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D6, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_D7, 2, 12, 1);
+static PAD_PULLCTL_CONF(NAND1_DQSN, 2, 11, 1);
+static PAD_PULLCTL_CONF(NAND1_DQS, 2, 10, 1);
+static PAD_PULLCTL_CONF(SGPIO2, 2, 8, 2);
+static PAD_PULLCTL_CONF(SGPIO3, 2, 6, 2);
+static PAD_PULLCTL_CONF(UART4_RX, 2, 4, 2);
+static PAD_PULLCTL_CONF(UART4_TX, 2, 2, 2);
+
+/* PAD_ST0 */
+static PAD_ST_CONF(I2C0_SDATA, 0, 30, 1);
+static PAD_ST_CONF(UART0_RX, 0, 29, 1);
+static PAD_ST_CONF(ETH_MDC, 0, 28, 1);
+static PAD_ST_CONF(I2S_MCLK1, 0, 23, 1);
+static PAD_ST_CONF(ETH_REF_CLK, 0, 22, 1);
+static PAD_ST_CONF(ETH_TXEN, 0, 21, 1);
+static PAD_ST_CONF(ETH_TXD0, 0, 20, 1);
+static PAD_ST_CONF(I2S_LRCLK1, 0, 19, 1);
+static PAD_ST_CONF(SGPIO2, 0, 18, 1);
+static PAD_ST_CONF(SGPIO3, 0, 17, 1);
+static PAD_ST_CONF(UART4_TX, 0, 16, 1);
+static PAD_ST_CONF(I2S_D1, 0, 15, 1);
+static PAD_ST_CONF(UART0_TX, 0, 14, 1);
+static PAD_ST_CONF(SPI0_SCLK, 0, 13, 1);
+static PAD_ST_CONF(SD0_CLK, 0, 12, 1);
+static PAD_ST_CONF(ERAM_A5, 0, 11, 1);
+static PAD_ST_CONF(I2C0_SCLK, 0, 7, 1);
+static PAD_ST_CONF(ERAM_A9, 0, 6, 1);
+static PAD_ST_CONF(LVDS_OEP, 0, 5, 1);
+static PAD_ST_CONF(LVDS_ODN, 0, 4, 1);
+static PAD_ST_CONF(LVDS_OAP, 0, 3, 1);
+static PAD_ST_CONF(I2S_BCLK1, 0, 2, 1);
+
+/* PAD_ST1 */
+static PAD_ST_CONF(I2S_LRCLK0, 1, 29, 1);
+static PAD_ST_CONF(UART4_RX, 1, 28, 1);
+static PAD_ST_CONF(UART3_CTSB, 1, 27, 1);
+static PAD_ST_CONF(UART3_RTSB, 1, 26, 1);
+static PAD_ST_CONF(UART3_RX, 1, 25, 1);
+static PAD_ST_CONF(UART2_RTSB, 1, 24, 1);
+static PAD_ST_CONF(UART2_CTSB, 1, 23, 1);
+static PAD_ST_CONF(UART2_RX, 1, 22, 1);
+static PAD_ST_CONF(ETH_RXD0, 1, 21, 1);
+static PAD_ST_CONF(ETH_RXD1, 1, 20, 1);
+static PAD_ST_CONF(ETH_CRS_DV, 1, 19, 1);
+static PAD_ST_CONF(ETH_RXER, 1, 18, 1);
+static PAD_ST_CONF(ETH_TXD1, 1, 17, 1);
+static PAD_ST_CONF(LVDS_OCP, 1, 16, 1);
+static PAD_ST_CONF(LVDS_OBP, 1, 15, 1);
+static PAD_ST_CONF(LVDS_OBN, 1, 14, 1);
+static PAD_ST_CONF(PCM1_OUT, 1, 12, 1);
+static PAD_ST_CONF(PCM1_CLK, 1, 11, 1);
+static PAD_ST_CONF(PCM1_IN, 1, 10, 1);
+static PAD_ST_CONF(PCM1_SYNC, 1, 9, 1);
+static PAD_ST_CONF(I2C1_SCLK, 1, 8, 1);
+static PAD_ST_CONF(I2C1_SDATA, 1, 7, 1);
+static PAD_ST_CONF(I2C2_SCLK, 1, 6, 1);
+static PAD_ST_CONF(I2C2_SDATA, 1, 5, 1);
+static PAD_ST_CONF(SPI0_MOSI, 1, 4, 1);
+static PAD_ST_CONF(SPI0_MISO, 1, 3, 1);
+static PAD_ST_CONF(SPI0_SS, 1, 2, 1);
+static PAD_ST_CONF(I2S_BCLK0, 1, 1, 1);
+static PAD_ST_CONF(I2S_MCLK0, 1, 0, 1);
+
+#define PAD_INFO(name) \
+ { \
+ .pad = name, \
+ .pullctl = NULL, \
+ .st = NULL, \
+ }
+
+#define PAD_INFO_ST(name) \
+ { \
+ .pad = name, \
+ .pullctl = NULL, \
+ .st = &name##_st_conf, \
+ }
+
+#define PAD_INFO_PULLCTL(name) \
+ { \
+ .pad = name, \
+ .pullctl = &name##_pullctl_conf, \
+ .st = NULL, \
+ }
+
+#define PAD_INFO_PULLCTL_ST(name) \
+ { \
+ .pad = name, \
+ .pullctl = &name##_pullctl_conf, \
+ .st = &name##_st_conf, \
+ }
+
+/* Pad info table */
+static struct owl_padinfo s900_padinfo[NUM_PADS] = {
+ [ETH_TXD0] = PAD_INFO_ST(ETH_TXD0),
+ [ETH_TXD1] = PAD_INFO_ST(ETH_TXD1),
+ [ETH_TXEN] = PAD_INFO_ST(ETH_TXEN),
+ [ETH_RXER] = PAD_INFO_PULLCTL_ST(ETH_RXER),
+ [ETH_CRS_DV] = PAD_INFO_ST(ETH_CRS_DV),
+ [ETH_RXD1] = PAD_INFO_ST(ETH_RXD1),
+ [ETH_RXD0] = PAD_INFO_ST(ETH_RXD0),
+ [ETH_REF_CLK] = PAD_INFO_ST(ETH_REF_CLK),
+ [ETH_MDC] = PAD_INFO_ST(ETH_MDC),
+ [ETH_MDIO] = PAD_INFO(ETH_MDIO),
+ [SIRQ0] = PAD_INFO_PULLCTL(SIRQ0),
+ [SIRQ1] = PAD_INFO_PULLCTL(SIRQ1),
+ [SIRQ2] = PAD_INFO_PULLCTL(SIRQ2),
+ [I2S_D0] = PAD_INFO(I2S_D0),
+ [I2S_BCLK0] = PAD_INFO_ST(I2S_BCLK0),
+ [I2S_LRCLK0] = PAD_INFO_ST(I2S_LRCLK0),
+ [I2S_MCLK0] = PAD_INFO_ST(I2S_MCLK0),
+ [I2S_D1] = PAD_INFO_ST(I2S_D1),
+ [I2S_BCLK1] = PAD_INFO_ST(I2S_BCLK1),
+ [I2S_LRCLK1] = PAD_INFO_ST(I2S_LRCLK1),
+ [I2S_MCLK1] = PAD_INFO_ST(I2S_MCLK1),
+ [PCM1_IN] = PAD_INFO_PULLCTL_ST(PCM1_IN),
+ [PCM1_CLK] = PAD_INFO_ST(PCM1_CLK),
+ [PCM1_SYNC] = PAD_INFO_ST(PCM1_SYNC),
+ [PCM1_OUT] = PAD_INFO_PULLCTL_ST(PCM1_OUT),
+ [ERAM_A5] = PAD_INFO_PULLCTL_ST(ERAM_A5),
+ [ERAM_A6] = PAD_INFO_PULLCTL(ERAM_A6),
+ [ERAM_A7] = PAD_INFO_PULLCTL(ERAM_A7),
+ [ERAM_A8] = PAD_INFO(ERAM_A8),
+ [ERAM_A9] = PAD_INFO_ST(ERAM_A9),
+ [ERAM_A10] = PAD_INFO_PULLCTL(ERAM_A10),
+ [ERAM_A11] = PAD_INFO(ERAM_A11),
+ [LVDS_OEP] = PAD_INFO_ST(LVDS_OEP),
+ [LVDS_OEN] = PAD_INFO(LVDS_OEN),
+ [LVDS_ODP] = PAD_INFO(LVDS_ODP),
+ [LVDS_ODN] = PAD_INFO_ST(LVDS_ODN),
+ [LVDS_OCP] = PAD_INFO_ST(LVDS_OCP),
+ [LVDS_OCN] = PAD_INFO(LVDS_OCN),
+ [LVDS_OBP] = PAD_INFO_ST(LVDS_OBP),
+ [LVDS_OBN] = PAD_INFO_ST(LVDS_OBN),
+ [LVDS_OAP] = PAD_INFO_ST(LVDS_OAP),
+ [LVDS_OAN] = PAD_INFO(LVDS_OAN),
+ [LVDS_EEP] = PAD_INFO(LVDS_EEP),
+ [LVDS_EEN] = PAD_INFO(LVDS_EEN),
+ [LVDS_EDP] = PAD_INFO(LVDS_EDP),
+ [LVDS_EDN] = PAD_INFO(LVDS_EDN),
+ [LVDS_ECP] = PAD_INFO(LVDS_ECP),
+ [LVDS_ECN] = PAD_INFO(LVDS_ECN),
+ [LVDS_EBP] = PAD_INFO(LVDS_EBP),
+ [LVDS_EBN] = PAD_INFO(LVDS_EBN),
+ [LVDS_EAP] = PAD_INFO(LVDS_EAP),
+ [LVDS_EAN] = PAD_INFO(LVDS_EAN),
+ [SD0_D0] = PAD_INFO_PULLCTL(SD0_D0),
+ [SD0_D1] = PAD_INFO_PULLCTL(SD0_D1),
+ [SD0_D2] = PAD_INFO_PULLCTL(SD0_D2),
+ [SD0_D3] = PAD_INFO_PULLCTL(SD0_D3),
+ [SD1_D0] = PAD_INFO_PULLCTL(SD1_D0),
+ [SD1_D1] = PAD_INFO_PULLCTL(SD1_D1),
+ [SD1_D2] = PAD_INFO_PULLCTL(SD1_D2),
+ [SD1_D3] = PAD_INFO_PULLCTL(SD1_D3),
+ [SD0_CMD] = PAD_INFO_PULLCTL(SD0_CMD),
+ [SD0_CLK] = PAD_INFO_PULLCTL_ST(SD0_CLK),
+ [SD1_CMD] = PAD_INFO_PULLCTL(SD1_CMD),
+ [SD1_CLK] = PAD_INFO(SD1_CLK),
+ [SPI0_SCLK] = PAD_INFO_PULLCTL_ST(SPI0_SCLK),
+ [SPI0_SS] = PAD_INFO_ST(SPI0_SS),
+ [SPI0_MISO] = PAD_INFO_ST(SPI0_MISO),
+ [SPI0_MOSI] = PAD_INFO_PULLCTL_ST(SPI0_MOSI),
+ [UART0_RX] = PAD_INFO_PULLCTL_ST(UART0_RX),
+ [UART0_TX] = PAD_INFO_PULLCTL_ST(UART0_TX),
+ [UART2_RX] = PAD_INFO_ST(UART2_RX),
+ [UART2_TX] = PAD_INFO(UART2_TX),
+ [UART2_RTSB] = PAD_INFO_ST(UART2_RTSB),
+ [UART2_CTSB] = PAD_INFO_ST(UART2_CTSB),
+ [UART3_RX] = PAD_INFO_ST(UART3_RX),
+ [UART3_TX] = PAD_INFO(UART3_TX),
+ [UART3_RTSB] = PAD_INFO_ST(UART3_RTSB),
+ [UART3_CTSB] = PAD_INFO_ST(UART3_CTSB),
+ [UART4_RX] = PAD_INFO_PULLCTL_ST(UART4_RX),
+ [UART4_TX] = PAD_INFO_PULLCTL_ST(UART4_TX),
+ [I2C0_SCLK] = PAD_INFO_PULLCTL_ST(I2C0_SCLK),
+ [I2C0_SDATA] = PAD_INFO_PULLCTL_ST(I2C0_SDATA),
+ [I2C1_SCLK] = PAD_INFO_PULLCTL_ST(I2C1_SCLK),
+ [I2C1_SDATA] = PAD_INFO_PULLCTL_ST(I2C1_SDATA),
+ [I2C2_SCLK] = PAD_INFO_PULLCTL_ST(I2C2_SCLK),
+ [I2C2_SDATA] = PAD_INFO_PULLCTL_ST(I2C2_SDATA),
+ [CSI0_DN0] = PAD_INFO(CSI0_DN0),
+ [CSI0_DP0] = PAD_INFO(CSI0_DP0),
+ [CSI0_DN1] = PAD_INFO(CSI0_DN1),
+ [CSI0_DP1] = PAD_INFO(CSI0_DP1),
+ [CSI0_CN] = PAD_INFO(CSI0_CN),
+ [CSI0_CP] = PAD_INFO(CSI0_CP),
+ [CSI0_DN2] = PAD_INFO(CSI0_DN2),
+ [CSI0_DP2] = PAD_INFO(CSI0_DP2),
+ [CSI0_DN3] = PAD_INFO(CSI0_DN3),
+ [CSI0_DP3] = PAD_INFO(CSI0_DP3),
+ [DSI_DP3] = PAD_INFO(DSI_DP3),
+ [DSI_DN3] = PAD_INFO(DSI_DN3),
+ [DSI_DP1] = PAD_INFO(DSI_DP1),
+ [DSI_DN1] = PAD_INFO(DSI_DN1),
+ [DSI_CP] = PAD_INFO(DSI_CP),
+ [DSI_CN] = PAD_INFO(DSI_CN),
+ [DSI_DP0] = PAD_INFO(DSI_DP0),
+ [DSI_DN0] = PAD_INFO(DSI_DN0),
+ [DSI_DP2] = PAD_INFO(DSI_DP2),
+ [DSI_DN2] = PAD_INFO(DSI_DN2),
+ [SENSOR0_PCLK] = PAD_INFO(SENSOR0_PCLK),
+ [CSI1_DN0] = PAD_INFO(CSI1_DN0),
+ [CSI1_DP0] = PAD_INFO(CSI1_DP0),
+ [CSI1_DN1] = PAD_INFO(CSI1_DN1),
+ [CSI1_DP1] = PAD_INFO(CSI1_DP1),
+ [CSI1_CN] = PAD_INFO(CSI1_CN),
+ [CSI1_CP] = PAD_INFO(CSI1_CP),
+ [SENSOR0_CKOUT] = PAD_INFO(SENSOR0_CKOUT),
+ [NAND0_D0] = PAD_INFO_PULLCTL(NAND0_D0),
+ [NAND0_D1] = PAD_INFO_PULLCTL(NAND0_D1),
+ [NAND0_D2] = PAD_INFO_PULLCTL(NAND0_D2),
+ [NAND0_D3] = PAD_INFO_PULLCTL(NAND0_D3),
+ [NAND0_D4] = PAD_INFO_PULLCTL(NAND0_D4),
+ [NAND0_D5] = PAD_INFO_PULLCTL(NAND0_D5),
+ [NAND0_D6] = PAD_INFO_PULLCTL(NAND0_D6),
+ [NAND0_D7] = PAD_INFO_PULLCTL(NAND0_D7),
+ [NAND0_DQS] = PAD_INFO_PULLCTL(NAND0_DQS),
+ [NAND0_DQSN] = PAD_INFO_PULLCTL(NAND0_DQSN),
+ [NAND0_ALE] = PAD_INFO(NAND0_ALE),
+ [NAND0_CLE] = PAD_INFO(NAND0_CLE),
+ [NAND0_CEB0] = PAD_INFO(NAND0_CEB0),
+ [NAND0_CEB1] = PAD_INFO(NAND0_CEB1),
+ [NAND0_CEB2] = PAD_INFO(NAND0_CEB2),
+ [NAND0_CEB3] = PAD_INFO(NAND0_CEB3),
+ [NAND1_D0] = PAD_INFO_PULLCTL(NAND1_D0),
+ [NAND1_D1] = PAD_INFO_PULLCTL(NAND1_D1),
+ [NAND1_D2] = PAD_INFO_PULLCTL(NAND1_D2),
+ [NAND1_D3] = PAD_INFO_PULLCTL(NAND1_D3),
+ [NAND1_D4] = PAD_INFO_PULLCTL(NAND1_D4),
+ [NAND1_D5] = PAD_INFO_PULLCTL(NAND1_D5),
+ [NAND1_D6] = PAD_INFO_PULLCTL(NAND1_D6),
+ [NAND1_D7] = PAD_INFO_PULLCTL(NAND1_D7),
+ [NAND1_DQS] = PAD_INFO_PULLCTL(NAND1_DQS),
+ [NAND1_DQSN] = PAD_INFO_PULLCTL(NAND1_DQSN),
+ [NAND1_ALE] = PAD_INFO(NAND1_ALE),
+ [NAND1_CLE] = PAD_INFO(NAND1_CLE),
+ [NAND1_CEB0] = PAD_INFO(NAND1_CEB0),
+ [NAND1_CEB1] = PAD_INFO(NAND1_CEB1),
+ [NAND1_CEB2] = PAD_INFO(NAND1_CEB2),
+ [NAND1_CEB3] = PAD_INFO(NAND1_CEB3),
+ [SGPIO0] = PAD_INFO(SGPIO0),
+ [SGPIO1] = PAD_INFO(SGPIO1),
+ [SGPIO2] = PAD_INFO_PULLCTL_ST(SGPIO2),
+ [SGPIO3] = PAD_INFO_PULLCTL_ST(SGPIO3)
+};
+
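+/* GPIO port descriptor: register block offset plus the outen/inen/dat register offsets within it */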
+#define OWL_GPIO_PORT(port, base, count, _outen, _inen, _dat) \
+ [OWL_GPIO_PORT_##port] = { \
+ .offset = base, \
+ .pins = count, \
+ .outen = _outen, \
+ .inen = _inen, \
+ .dat = _dat, \
+ }
+
+static const struct owl_gpio_port s900_gpio_ports[] = {
+ OWL_GPIO_PORT(A, 0x0000, 32, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(B, 0x000C, 32, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(C, 0x0018, 12, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(D, 0x0024, 30, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(E, 0x0030, 32, 0x0, 0x4, 0x8),
+ OWL_GPIO_PORT(F, 0x00F0, 8, 0x0, 0x4, 0x8)
+};
+
+static struct owl_pinctrl_soc_data s900_pinctrl_data = {
+ .padinfo = s900_padinfo,
+ .pins = (const struct pinctrl_pin_desc *)s900_pads,
+ .npins = ARRAY_SIZE(s900_pads),
+ .functions = s900_functions,
+ .nfunctions = ARRAY_SIZE(s900_functions),
+ .groups = s900_groups,
+ .ngroups = ARRAY_SIZE(s900_groups),
+ .ngpios = NUM_GPIOS,
+ .ports = s900_gpio_ports,
+ .nports = ARRAY_SIZE(s900_gpio_ports)
+};
+
+static int s900_pinctrl_probe(struct platform_device *pdev)
+{
+ return owl_pinctrl_probe(pdev, &s900_pinctrl_data);
+}
+
+static const struct of_device_id s900_pinctrl_of_match[] = {
+ { .compatible = "actions,s900-pinctrl", },
+ { }
+};
+
+static struct platform_driver s900_pinctrl_driver = {
+ .driver = {
+ .name = "pinctrl-s900",
+ .of_match_table = of_match_ptr(s900_pinctrl_of_match),
+ },
+ .probe = s900_pinctrl_probe,
+};
+
+static int __init s900_pinctrl_init(void)
+{
+ return platform_driver_register(&s900_pinctrl_driver);
+}
+arch_initcall(s900_pinctrl_init);
+
+static void __exit s900_pinctrl_exit(void)
+{
+ platform_driver_unregister(&s900_pinctrl_driver);
+}
+module_exit(s900_pinctrl_exit);
+
+MODULE_AUTHOR("Actions Semi Inc.");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Actions Semi S900 SoC Pinctrl Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index e8c4e4f934a6d..0f38d51f47c64 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -20,6 +20,7 @@ config PINCTRL_BCM2835
bool
select PINMUX
select PINCONF
+ select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
config PINCTRL_IPROC_GPIO
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 785c366fd6d60..136ccaf53df8d 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -36,11 +36,13 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <dt-bindings/pinctrl/bcm2835.h>
#define MODULE_NAME "pinctrl-bcm2835"
#define BCM2835_NUM_GPIOS 54
@@ -72,13 +74,9 @@
enum bcm2835_pinconf_param {
/* argument: bcm2835_pinconf_pull */
- BCM2835_PINCONF_PARAM_PULL,
+ BCM2835_PINCONF_PARAM_PULL = (PIN_CONFIG_END + 1),
};
-#define BCM2835_PINCONF_PACK(_param_, _arg_) ((_param_) << 16 | (_arg_))
-#define BCM2835_PINCONF_UNPACK_PARAM(_conf_) ((_conf_) >> 16)
-#define BCM2835_PINCONF_UNPACK_ARG(_conf_) ((_conf_) & 0xffff)
-
struct bcm2835_pinctrl {
struct device *dev;
void __iomem *base;
@@ -213,14 +211,6 @@ static const char * const bcm2835_gpio_groups[] = {
};
enum bcm2835_fsel {
- BCM2835_FSEL_GPIO_IN = 0,
- BCM2835_FSEL_GPIO_OUT = 1,
- BCM2835_FSEL_ALT0 = 4,
- BCM2835_FSEL_ALT1 = 5,
- BCM2835_FSEL_ALT2 = 6,
- BCM2835_FSEL_ALT3 = 7,
- BCM2835_FSEL_ALT4 = 3,
- BCM2835_FSEL_ALT5 = 2,
BCM2835_FSEL_COUNT = 8,
BCM2835_FSEL_MASK = 0x7,
};
@@ -714,7 +704,7 @@ static int bcm2835_pctl_dt_node_to_map_pull(struct bcm2835_pinctrl *pc,
configs = kzalloc(sizeof(*configs), GFP_KERNEL);
if (!configs)
return -ENOMEM;
- configs[0] = BCM2835_PINCONF_PACK(BCM2835_PINCONF_PARAM_PULL, pull);
+ configs[0] = pinconf_to_config_packed(BCM2835_PINCONF_PARAM_PULL, pull);
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
map->data.configs.group_or_pin = bcm2835_gpio_pins[pin].name;
@@ -727,7 +717,7 @@ static int bcm2835_pctl_dt_node_to_map_pull(struct bcm2835_pinctrl *pc,
static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np,
- struct pinctrl_map **map, unsigned *num_maps)
+ struct pinctrl_map **map, unsigned int *num_maps)
{
struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
struct property *pins, *funcs, *pulls;
@@ -736,6 +726,12 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
int i, err;
u32 pin, func, pull;
+ /* Check for generic binding in this node */
+ err = pinconf_generic_dt_node_to_map_all(pctldev, np, map, num_maps);
+ if (err || *num_maps)
+ return err;
+
+	/* The generic binding did not find anything; continue with the legacy parse */
pins = of_find_property(np, "brcm,pins", NULL);
if (!pins) {
dev_err(pc->dev, "%pOF: missing brcm,pins property\n", np);
@@ -917,37 +913,67 @@ static int bcm2835_pinconf_get(struct pinctrl_dev *pctldev,
return -ENOTSUPP;
}
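+/* Program the pull state of one pin using the GPPUD / GPPUDCLK0 sequence */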
+static void bcm2835_pull_config_set(struct bcm2835_pinctrl *pc,
+ unsigned int pin, unsigned int arg)
+{
+ u32 off, bit;
+
+ off = GPIO_REG_OFFSET(pin);
+ bit = GPIO_REG_SHIFT(pin);
+
+ bcm2835_gpio_wr(pc, GPPUD, arg & 3);
+	/*
+	 * The BCM2835 datasheet says to wait 150 cycles, but does not say of
+	 * what. The VideoCore firmware uses a delay of roughly the same number
+	 * of VPU cycles for this operation, and that clock runs at 250 MHz.
+	 */
+ udelay(1);
+ bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), BIT(bit));
+ udelay(1);
+ bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), 0);
+}
+
static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned pin, unsigned long *configs,
- unsigned num_configs)
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
{
struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- enum bcm2835_pinconf_param param;
- u16 arg;
- u32 off, bit;
+ u32 param, arg;
int i;
for (i = 0; i < num_configs; i++) {
- param = BCM2835_PINCONF_UNPACK_PARAM(configs[i]);
- arg = BCM2835_PINCONF_UNPACK_ARG(configs[i]);
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
- if (param != BCM2835_PINCONF_PARAM_PULL)
- return -EINVAL;
+ switch (param) {
+ /* Set legacy brcm,pull */
+ case BCM2835_PINCONF_PARAM_PULL:
+ bcm2835_pull_config_set(pc, pin, arg);
+ break;
- off = GPIO_REG_OFFSET(pin);
- bit = GPIO_REG_SHIFT(pin);
+		/* Set pull via the generic bindings */
+ case PIN_CONFIG_BIAS_DISABLE:
+ bcm2835_pull_config_set(pc, pin, BCM2835_PUD_OFF);
+ break;
- bcm2835_gpio_wr(pc, GPPUD, arg & 3);
- /*
- * BCM2835 datasheet say to wait 150 cycles, but not of what.
- * But the VideoCore firmware delay for this operation
- * based nearly on the same amount of VPU cycles and this clock
- * runs at 250 MHz.
- */
- udelay(1);
- bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), BIT(bit));
- udelay(1);
- bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), 0);
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ bcm2835_pull_config_set(pc, pin, BCM2835_PUD_DOWN);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ bcm2835_pull_config_set(pc, pin, BCM2835_PUD_UP);
+ break;
+
+ /* Set output-high or output-low */
+ case PIN_CONFIG_OUTPUT:
+ bcm2835_gpio_set_bit(pc, arg ? GPSET0 : GPCLR0, pin);
+ break;
+
+ default:
+ return -EINVAL;
+
+ } /* switch param type */
} /* for each config */
return 0;
diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c
index bf2e17d0d6e41..acbd413340e8b 100644
--- a/drivers/pinctrl/berlin/berlin-bg2.c
+++ b/drivers/pinctrl/berlin/berlin-bg2.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Berlin BG2 pinctrl driver.
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c
index 9bee7bd1650fa..c0f5d86d5d01d 100644
--- a/drivers/pinctrl/berlin/berlin-bg2cd.c
+++ b/drivers/pinctrl/berlin/berlin-bg2cd.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Berlin BG2CD pinctrl driver.
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c
index eee6763f114c0..20a3216ede07a 100644
--- a/drivers/pinctrl/berlin/berlin-bg2q.c
+++ b/drivers/pinctrl/berlin/berlin-bg2q.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Berlin BG2Q pinctrl driver
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index e6740656ee7c4..6a7fe929a68bd 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell berlin4ct pinctrl driver
*
* Copyright (C) 2015 Marvell Technology Group Ltd.
*
* Author: Jisheng Zhang <jszhang@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index cc3bd2efafe3f..a620a8e8fa787 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Berlin SoC pinctrl core driver
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/io.h>
diff --git a/drivers/pinctrl/berlin/berlin.h b/drivers/pinctrl/berlin/berlin.h
index e9b30f95b03e3..d7787754d1edf 100644
--- a/drivers/pinctrl/berlin/berlin.h
+++ b/drivers/pinctrl/berlin/berlin.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Marvell Berlin SoC pinctrl driver.
*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Ténart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __PINCTRL_BERLIN_H
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 24aaddd760a08..e582a21cfe549 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -1,16 +1,11 @@
-/*
- * Core driver for the imx pin controller
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro Ltd.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Core driver for the imx pin controller
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro Ltd.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
@@ -371,7 +366,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
unsigned long config;
if (!pin_reg || pin_reg->conf_reg == -1) {
- seq_printf(s, "N/A");
+ seq_puts(s, "N/A");
return;
}
@@ -390,7 +385,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
if (group > pctldev->num_groups)
return;
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
grp = pinctrl_generic_get_group(pctldev, group);
if (!grp)
return;
@@ -414,11 +409,18 @@ static const struct pinconf_ops imx_pinconf_ops = {
};
/*
- * Each pin represented in fsl,pins consists of 5 u32 PIN_FUNC_ID and
- * 1 u32 CONFIG, so 24 types in total for each pin.
+ * Each pin represented in fsl,pins consists of a number of u32 PIN_FUNC_ID
+ * entries and 1 u32 CONFIG, so the total size per pin is PIN_FUNC_ID + CONFIG.
+ * In the generic_pinconf case there is no extra u32 CONFIG.
+ *
+ * PIN_FUNC_ID format:
+ * Default:
+ * <mux_reg conf_reg input_reg mux_mode input_val>
+ * SHARE_MUX_CONF_REG:
+ * <mux_conf_reg input_reg mux_mode input_val>
*/
#define FSL_PIN_SIZE 24
-#define SHARE_FSL_PIN_SIZE 20
+#define FSL_PIN_SHARE_SIZE 20
static int imx_pinctrl_parse_groups(struct device_node *np,
struct group_desc *grp,
@@ -434,7 +436,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
dev_dbg(ipctl->dev, "group(%d): %s\n", index, np->name);
if (info->flags & SHARE_MUX_CONF_REG)
- pin_size = SHARE_FSL_PIN_SIZE;
+ pin_size = FSL_PIN_SHARE_SIZE;
else
pin_size = FSL_PIN_SIZE;
@@ -617,7 +619,7 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
nfuncs = 1;
} else {
nfuncs = of_get_child_count(np);
- if (nfuncs <= 0) {
+ if (nfuncs == 0) {
dev_err(&pdev->dev, "no functions defined\n");
return -EINVAL;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 038e8c0e5b969..4b8225ccb03a4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* IMX pinmux core definitions
*
@@ -5,11 +6,6 @@
* Copyright (C) 2012 Linaro Ltd.
*
* Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DRIVERS_PINCTRL_IMX_H
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index a4e9f430d4526..5af89de0ff02c 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -1,19 +1,14 @@
-/*
- * Core driver for the imx pin controller in imx1/21/27
- *
- * Copyright (C) 2013 Pengutronix
- * Author: Markus Pargmann <mpa@pengutronix.de>
- *
- * Based on pinctrl-imx.c:
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Core driver for the imx pin controller in imx1/21/27
+//
+// Copyright (C) 2013 Pengutronix
+// Author: Markus Pargmann <mpa@pengutronix.de>
+//
+// Based on pinctrl-imx.c:
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro Ltd.
#include <linux/bitops.h>
#include <linux/err.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c
index fc8efc7487342..faf770f13bc73 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.c
@@ -1,13 +1,8 @@
-/*
- * i.MX1 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// i.MX1 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
#include <linux/init.h>
#include <linux/of.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.h b/drivers/pinctrl/freescale/pinctrl-imx1.h
index 174074308d6cf..f1b9dabf76014 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* IMX pinmux core definitions
*
@@ -5,11 +6,6 @@
* Copyright (C) 2012 Linaro Ltd.
*
* Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DRIVERS_PINCTRL_IMX1_H
diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c
index 73e26bc12f097..8a102275a0537 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx21.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx21.c
@@ -1,13 +1,8 @@
-/*
- * i.MX21 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// i.MX21 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
#include <linux/init.h>
#include <linux/of.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx23.c b/drivers/pinctrl/freescale/pinctrl-imx23.c
index c9405685971b0..144020764a4b0 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx23.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx23.c
@@ -1,16 +1,9 @@
-/*
- * Freescale i.MX23 pinctrl driver
- *
- * Author: Shawn Guo <shawn.guo@linaro.org>
- * Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Freescale i.MX23 pinctrl driver
+//
+// Author: Shawn Guo <shawn.guo@linaro.org>
+// Copyright 2012 Freescale Semiconductor, Inc.
#include <linux/init.h>
#include <linux/of_device.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index db6d9d1382f97..a899a398b6bb5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -1,19 +1,15 @@
-/*
- * imx25 pinctrl driver.
- *
- * Copyright 2013 Eukréa Electromatique <denis@eukrea.com>
- *
- * This driver was mostly copied from the imx51 pinctrl driver which has:
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Denis Carikli <denis@eukrea.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// imx25 pinctrl driver.
+//
+// Copyright 2013 Eukréa Electromatique <denis@eukrea.com>
+//
+// This driver was mostly copied from the imx51 pinctrl driver which has:
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Denis Carikli <denis@eukrea.com>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c
index e5992036fc6c0..b4dfc1676cbc6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx27.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx27.c
@@ -1,15 +1,10 @@
-/*
- * imx27 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2013 Pengutronix
- *
- * Author: Markus Pargmann <mpa@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx27 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2013 Pengutronix
+//
+// Author: Markus Pargmann <mpa@pengutronix.de>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx28.c b/drivers/pinctrl/freescale/pinctrl-imx28.c
index 87deb9ec938a7..13730dd193f16 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx28.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx28.c
@@ -1,16 +1,9 @@
-/*
- * Freescale i.MX28 pinctrl driver
- *
- * Author: Shawn Guo <shawn.guo@linaro.org>
- * Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Freescale i.MX28 pinctrl driver
+//
+// Author: Shawn Guo <shawn.guo@linaro.org>
+// Copyright 2012 Freescale Semiconductor, Inc.
#include <linux/init.h>
#include <linux/of_device.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c
index 6927946ae4b59..871bb419e2f0c 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx35.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx35.c
@@ -1,17 +1,13 @@
-/*
- * imx35 pinctrl driver.
- *
- * This driver was mostly copied from the imx51 pinctrl driver which has:
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// imx35 pinctrl driver.
+//
+// This driver was mostly copied from the imx51 pinctrl driver which has:
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index eb349b97290f9..cf182c040e0b2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -1,15 +1,10 @@
-/*
- * imx50 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2013 Greg Ungerer <gerg@uclinux.org>
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx50 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2013 Greg Ungerer <gerg@uclinux.org>
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c
index 49acd991b5fb3..e5c261e2bf1eb 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx51.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx51.c
@@ -1,16 +1,11 @@
-/*
- * imx51 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx51 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index 6dd0c60eaea4d..64c97aaf20c74 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -1,16 +1,11 @@
-/*
- * imx53 pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx53 pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index 91b85fc01de8b..0858b4d79ed2c 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx6dl pinctrl driver
- *
- * Author: Shawn Guo <shawn.guo@linaro.org>
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx6dl pinctrl driver
+//
+// Author: Shawn Guo <shawn.guo@linaro.org>
+// Copyright (C) 2013 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 5f653d69d0f51..078ed6a331fd8 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -1,16 +1,11 @@
-/*
- * imx6q pinctrl driver based on imx pinmux core
- *
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- * Copyright (C) 2012 Linaro, Inc.
- *
- * Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// imx6q pinctrl driver based on imx pinmux core
+//
+// Copyright (C) 2012 Freescale Semiconductor, Inc.
+// Copyright (C) 2012 Linaro, Inc.
+//
+// Author: Dong Aisheng <dong.aisheng@linaro.org>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index 1167dc273c045..9d2e6f987aa7f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx6sl pinctrl driver
- *
- * Author: Shawn Guo <shawn.guo@linaro.org>
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx6sl pinctrl driver
+//
+// Author: Shawn Guo <shawn.guo@linaro.org>
+// Copyright (C) 2013 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sll.c b/drivers/pinctrl/freescale/pinctrl-imx6sll.c
index 0fbea9cf536da..0618f4d887fd9 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sll.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sll.c
@@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright 2017-2018 NXP.
- *
- */
+//
+// Copyright (C) 2016 Freescale Semiconductor, Inc.
+// Copyright 2017-2018 NXP.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index 15ea56c75f687..c7e2b1f94f01d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx6sx pinctrl driver
- *
- * Author: Anson Huang <Anson.Huang@freescale.com>
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx6sx pinctrl driver
+//
+// Author: Anson Huang <Anson.Huang@freescale.com>
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
index 4580717ade19f..7e37627c63f5f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx6ul pinctrl driver
- *
- * Author: Anson Huang <Anson.Huang@freescale.com>
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx6ul pinctrl driver
+//
+// Author: Anson Huang <Anson.Huang@freescale.com>
+// Copyright (C) 2015 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index 0b0a2f33b06a9..369d3e59fdd69 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -1,13 +1,9 @@
-/*
- * Freescale imx7d pinctrl driver
- *
- * Author: Anson Huang <Anson.Huang@freescale.com>
- * Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Freescale imx7d pinctrl driver
+//
+// Author: Anson Huang <Anson.Huang@freescale.com>
+// Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7ulp.c b/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
index f363e45fd2465..f521bdb53f62d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7ulp.c
@@ -1,14 +1,9 @@
-/*
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright (C) 2017 NXP
- *
- * Author: Dong Aisheng <aisheng.dong@nxp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2016 Freescale Semiconductor, Inc.
+// Copyright (C) 2017 NXP
+//
+// Author: Dong Aisheng <aisheng.dong@nxp.com>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 6852010a6d708..594f3e5ce9a9e 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -1,13 +1,6 @@
-/*
- * Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2012 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.h b/drivers/pinctrl/freescale/pinctrl-mxs.h
index 34dbf75208dc7..ab9f834b03e6b 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.h
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.h
@@ -1,12 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef __PINCTRL_MXS_H
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index c078f859ae15e..37602b053ed28 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -1,13 +1,8 @@
-/*
- * VF610 pinctrl driver based on imx pinmux and pinconf core
- *
- * Copyright 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// VF610 pinctrl driver based on imx pinmux and pinconf core
+//
+// Copyright 2013 Freescale Semiconductor, Inc.
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index fee9225ca559e..0f1019ae39930 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1527,6 +1527,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
},
{
@@ -1534,6 +1535,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
},
{
@@ -1541,6 +1543,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
},
{
@@ -1548,6 +1551,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
},
},
{}
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 862c5dbc69774..9905dc672f6be 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -1,12 +1,18 @@
menu "MediaTek pinctrl drivers"
depends on ARCH_MEDIATEK || COMPILE_TEST
+config EINT_MTK
+ bool "MediaTek External Interrupt Support"
+ depends on PINCTRL_MTK || PINCTRL_MT7622 || COMPILE_TEST
+ select IRQ_DOMAIN
+
config PINCTRL_MTK
bool
depends on OF
select PINMUX
select GENERIC_PINCONF
select GPIOLIB
+ select EINT_MTK
select OF_GPIO
# For ARMv7 SoCs
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 7959e773533f4..3de7156df3454 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Core
+obj-$(CONFIG_EINT_MTK) += mtk-eint.o
obj-$(CONFIG_PINCTRL_MTK) += pinctrl-mtk-common.o
# SoC Drivers
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
new file mode 100644
index 0000000000000..30f3316747e2b
--- /dev/null
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2018 MediaTek Inc.
+
+/*
+ * Library for MediaTek External Interrupt Support
+ *
+ * Author: Maoguang Meng <maoguang.meng@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtk-eint.h"
+
+#define MTK_EINT_EDGE_SENSITIVE 0
+#define MTK_EINT_LEVEL_SENSITIVE 1
+#define MTK_EINT_DBNC_SET_DBNC_BITS 4
+#define MTK_EINT_DBNC_RST_BIT (0x1 << 1)
+#define MTK_EINT_DBNC_SET_EN (0x1 << 0)
+
+static const struct mtk_eint_regs mtk_generic_eint_regs = {
+ .stat = 0x000,
+ .ack = 0x040,
+ .mask = 0x080,
+ .mask_set = 0x0c0,
+ .mask_clr = 0x100,
+ .sens = 0x140,
+ .sens_set = 0x180,
+ .sens_clr = 0x1c0,
+ .soft = 0x200,
+ .soft_set = 0x240,
+ .soft_clr = 0x280,
+ .pol = 0x300,
+ .pol_set = 0x340,
+ .pol_clr = 0x380,
+ .dom_en = 0x400,
+ .dbnc_ctrl = 0x500,
+ .dbnc_set = 0x600,
+ .dbnc_clr = 0x700,
+};
+
+static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
+ unsigned int eint_num,
+ unsigned int offset)
+{
+ unsigned int eint_base = 0;
+ void __iomem *reg;
+
+ if (eint_num >= eint->hw->ap_num)
+ eint_base = eint->hw->ap_num;
+
+ reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4;
+
+ return reg;
+}
+
+static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
+ unsigned int eint_num)
+{
+ unsigned int sens;
+ unsigned int bit = BIT(eint_num % 32);
+ void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
+ eint->regs->sens);
+
+ if (readl(reg) & bit)
+ sens = MTK_EINT_LEVEL_SENSITIVE;
+ else
+ sens = MTK_EINT_EDGE_SENSITIVE;
+
+ if (eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE)
+ return 1;
+ else
+ return 0;
+}
+
+static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
+{
+ int start_level, curr_level;
+ unsigned int reg_offset;
+ u32 mask = BIT(hwirq & 0x1f);
+ u32 port = (hwirq >> 5) & eint->hw->port_mask;
+ void __iomem *reg = eint->base + (port << 2);
+
+ curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);
+
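+	/*
+	 * The hardware latches only one polarity at a time, so dual-edge
+	 * interrupts are emulated by always arming the edge opposite to the
+	 * current GPIO level; loop until the level stops changing under us.
+	 */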
+ do {
+ start_level = curr_level;
+ if (start_level)
+ reg_offset = eint->regs->pol_clr;
+ else
+ reg_offset = eint->regs->pol_set;
+ writel(mask, reg + reg_offset);
+
+ curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
+ hwirq);
+ } while (start_level != curr_level);
+
+ return start_level;
+}
+
+static void mtk_eint_mask(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
+ eint->regs->mask_set);
+
+ writel(mask, reg);
+}
+
+static void mtk_eint_unmask(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
+ eint->regs->mask_clr);
+
+ writel(mask, reg);
+
+ if (eint->dual_edge[d->hwirq])
+ mtk_eint_flip_edge(eint, d->hwirq);
+}
+
+static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
+ unsigned int eint_num)
+{
+ unsigned int bit = BIT(eint_num % 32);
+ void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
+ eint->regs->mask);
+
+ return !!(readl(reg) & bit);
+}
+
+static void mtk_eint_ack(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
+ eint->regs->ack);
+
+ writel(mask, reg);
+}
+
+static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq & 0x1f);
+ void __iomem *reg;
+
+ if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
+ ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
+ dev_err(eint->dev,
+ "Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
+ d->irq, d->hwirq, type);
+ return -EINVAL;
+ }
+
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ eint->dual_edge[d->hwirq] = 1;
+ else
+ eint->dual_edge[d->hwirq] = 0;
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
+ reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
+ writel(mask, reg);
+ } else {
+ reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
+ writel(mask, reg);
+ }
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
+ reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
+ writel(mask, reg);
+ } else {
+ reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
+ writel(mask, reg);
+ }
+
+ if (eint->dual_edge[d->hwirq])
+ mtk_eint_flip_edge(eint, d->hwirq);
+
+ return 0;
+}
+
+static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ int shift = d->hwirq & 0x1f;
+ int reg = d->hwirq >> 5;
+
+ if (on)
+ eint->wake_mask[reg] |= BIT(shift);
+ else
+ eint->wake_mask[reg] &= ~BIT(shift);
+
+ return 0;
+}
+
+static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
+ void __iomem *base, u32 *buf)
+{
+ int port;
+ void __iomem *reg;
+
+ for (port = 0; port < eint->hw->ports; port++) {
+ reg = base + (port << 2);
+ writel_relaxed(~buf[port], reg + eint->regs->mask_set);
+ writel_relaxed(buf[port], reg + eint->regs->mask_clr);
+ }
+}
+
+static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
+ void __iomem *base, u32 *buf)
+{
+ int port;
+ void __iomem *reg;
+
+ for (port = 0; port < eint->hw->ports; port++) {
+ reg = base + eint->regs->mask + (port << 2);
+ buf[port] = ~readl_relaxed(reg);
+ /* Mask is 0 when irq is enabled, and 1 when disabled. */
+ }
+}
+
+static int mtk_eint_irq_request_resources(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gpio_c;
+ unsigned int gpio_n;
+ int err;
+
+ err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
+ &gpio_n, &gpio_c);
+ if (err < 0) {
+ dev_err(eint->dev, "Can not find pin\n");
+ return err;
+ }
+
+ err = gpiochip_lock_as_irq(gpio_c, gpio_n);
+ if (err < 0) {
+ dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
+ irqd_to_hwirq(d));
+ return err;
+ }
+
+ err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
+ if (err < 0) {
+ dev_err(eint->dev, "Can not eint mode\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void mtk_eint_irq_release_resources(struct irq_data *d)
+{
+ struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gpio_c;
+ unsigned int gpio_n;
+
+ eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
+ &gpio_c);
+
+ gpiochip_unlock_as_irq(gpio_c, gpio_n);
+}
+
+static struct irq_chip mtk_eint_irq_chip = {
+ .name = "mt-eint",
+ .irq_disable = mtk_eint_mask,
+ .irq_mask = mtk_eint_mask,
+ .irq_unmask = mtk_eint_unmask,
+ .irq_ack = mtk_eint_ack,
+ .irq_set_type = mtk_eint_set_type,
+ .irq_set_wake = mtk_eint_irq_set_wake,
+ .irq_request_resources = mtk_eint_irq_request_resources,
+ .irq_release_resources = mtk_eint_irq_release_resources,
+};
+
+static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
+{
+ void __iomem *reg = eint->base + eint->regs->dom_en;
+ unsigned int i;
+
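+	/* Enable all EINTs for this (AP) interrupt domain */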
+ for (i = 0; i < eint->hw->ap_num; i += 32) {
+ writel(0xffffffff, reg);
+ reg += 4;
+ }
+
+ return 0;
+}
+
+static inline void
+mtk_eint_debounce_process(struct mtk_eint *eint, int index)
+{
+ unsigned int rst, ctrl_offset;
+ unsigned int bit, dbnc;
+
+ ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl;
+ dbnc = readl(eint->base + ctrl_offset);
+ bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8);
+ if ((bit & dbnc) > 0) {
+ ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set;
+ rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8);
+ writel(rst, eint->base + ctrl_offset);
+ }
+}
+
+static void mtk_eint_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mtk_eint *eint = irq_desc_get_handler_data(desc);
+ unsigned int status, eint_num;
+ int offset, index, virq;
+ void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
+ int dual_edge, start_level, curr_level;
+
+ chained_irq_enter(chip, desc);
+ for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32,
+ reg += 4) {
+ status = readl(reg);
+ while (status) {
+ offset = __ffs(status);
+ index = eint_num + offset;
+ virq = irq_find_mapping(eint->domain, index);
+ status &= ~BIT(offset);
+
+ dual_edge = eint->dual_edge[index];
+ if (dual_edge) {
+ /*
+ * Clear soft-irq in case we raised it last
+ * time.
+ */
+ writel(BIT(offset), reg - eint->regs->stat +
+ eint->regs->soft_clr);
+
+ start_level =
+ eint->gpio_xlate->get_gpio_state(eint->pctl,
+ index);
+ }
+
+ generic_handle_irq(virq);
+
+ if (dual_edge) {
+ curr_level = mtk_eint_flip_edge(eint, index);
+
+				/*
+				 * If the level changed, we might have lost an
+				 * edge interrupt; raise it via the soft-irq.
+				 */
+ if (start_level != curr_level)
+ writel(BIT(offset), reg -
+ eint->regs->stat +
+ eint->regs->soft_set);
+ }
+
+ if (index < eint->hw->db_cnt)
+ mtk_eint_debounce_process(eint, index);
+ }
+ }
+ chained_irq_exit(chip, desc);
+}
+
+int mtk_eint_do_suspend(struct mtk_eint *eint)
+{
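+	/* Save the runtime mask, then apply the wake mask while suspended */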
+ mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
+ mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);
+
+ return 0;
+}
+
+int mtk_eint_do_resume(struct mtk_eint *eint)
+{
+ mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);
+
+ return 0;
+}
+
+int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
+ unsigned int debounce)
+{
+ int virq, eint_offset;
+ unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
+ dbnc;
+ static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
+ 64000, 128000, 256000};
+ struct irq_data *d;
+
+ virq = irq_find_mapping(eint->domain, eint_num);
+ eint_offset = (eint_num % 4) * 8;
+ d = irq_get_irq_data(virq);
+
+ set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set;
+ clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr;
+
+ if (!mtk_eint_can_en_debounce(eint, eint_num))
+ return -EINVAL;
+
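+ /* Pick the smallest supported debounce period that covers the request */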
+ dbnc = ARRAY_SIZE(debounce_time);
+ for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+ if (debounce <= debounce_time[i]) {
+ dbnc = i;
+ break;
+ }
+ }
+
+ if (!mtk_eint_get_mask(eint, eint_num)) {
+ mtk_eint_mask(d);
+ unmask = 1;
+ } else {
+ unmask = 0;
+ }
+
+ clr_bit = 0xff << eint_offset;
+ writel(clr_bit, eint->base + clr_offset);
+
+ bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
+ eint_offset;
+ rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
+ writel(rst | bit, eint->base + set_offset);
+
+ /*
+ * Delay a while (more than 2T) to let the HW debounce counter reset
+ * correctly.
+ */
+ udelay(1);
+ if (unmask == 1)
+ mtk_eint_unmask(d);
+
+ return 0;
+}
+
+int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
+{
+ int irq;
+
+ irq = irq_find_mapping(eint->domain, eint_n);
+ if (!irq)
+ return -EINVAL;
+
+ return irq;
+}
+
+int mtk_eint_do_init(struct mtk_eint *eint)
+{
+ int i;
+
+ /* If the client doesn't assign a specific register map, use the generic one */
+ if (!eint->regs)
+ eint->regs = &mtk_generic_eint_regs;
+
+ eint->wake_mask = devm_kcalloc(eint->dev, eint->hw->ports,
+ sizeof(*eint->wake_mask), GFP_KERNEL);
+ if (!eint->wake_mask)
+ return -ENOMEM;
+
+ eint->cur_mask = devm_kcalloc(eint->dev, eint->hw->ports,
+ sizeof(*eint->cur_mask), GFP_KERNEL);
+ if (!eint->cur_mask)
+ return -ENOMEM;
+
+ eint->dual_edge = devm_kcalloc(eint->dev, eint->hw->ap_num,
+ sizeof(int), GFP_KERNEL);
+ if (!eint->dual_edge)
+ return -ENOMEM;
+
+ eint->domain = irq_domain_add_linear(eint->dev->of_node,
+ eint->hw->ap_num,
+ &irq_domain_simple_ops, NULL);
+ if (!eint->domain)
+ return -ENOMEM;
+
+ mtk_eint_hw_init(eint);
+ for (i = 0; i < eint->hw->ap_num; i++) {
+ int virq = irq_create_mapping(eint->domain, i);
+
+ irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, eint);
+ }
+
+ irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
+ eint);
+
+ return 0;
+}
diff --git a/drivers/pinctrl/mediatek/mtk-eint.h b/drivers/pinctrl/mediatek/mtk-eint.h
new file mode 100644
index 0000000000000..c286a9b940f26
--- /dev/null
+++ b/drivers/pinctrl/mediatek/mtk-eint.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2018 MediaTek Inc.
+ *
+ * Author: Maoguang Meng <maoguang.meng@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+#ifndef __MTK_EINT_H
+#define __MTK_EINT_H
+
+#include <linux/irqdomain.h>
+
+struct mtk_eint_regs {
+ unsigned int stat;
+ unsigned int ack;
+ unsigned int mask;
+ unsigned int mask_set;
+ unsigned int mask_clr;
+ unsigned int sens;
+ unsigned int sens_set;
+ unsigned int sens_clr;
+ unsigned int soft;
+ unsigned int soft_set;
+ unsigned int soft_clr;
+ unsigned int pol;
+ unsigned int pol_set;
+ unsigned int pol_clr;
+ unsigned int dom_en;
+ unsigned int dbnc_ctrl;
+ unsigned int dbnc_set;
+ unsigned int dbnc_clr;
+};
+
+struct mtk_eint_hw {
+ u8 port_mask;
+ u8 ports;
+ unsigned int ap_num;
+ unsigned int db_cnt;
+};
+
+struct mtk_eint;
+
+struct mtk_eint_xt {
+ int (*get_gpio_n)(void *data, unsigned long eint_n,
+ unsigned int *gpio_n,
+ struct gpio_chip **gpio_chip);
+ int (*get_gpio_state)(void *data, unsigned long eint_n);
+ int (*set_gpio_as_eint)(void *data, unsigned long eint_n);
+};
+
+struct mtk_eint {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *domain;
+ int irq;
+
+ int *dual_edge;
+ u32 *wake_mask;
+ u32 *cur_mask;
+
+ /* Used to fit into various EINT devices */
+ const struct mtk_eint_hw *hw;
+ const struct mtk_eint_regs *regs;
+
+ /* Used to fit into various pinctrl devices */
+ void *pctl;
+ const struct mtk_eint_xt *gpio_xlate;
+};
+
+#if IS_ENABLED(CONFIG_EINT_MTK)
+int mtk_eint_do_init(struct mtk_eint *eint);
+int mtk_eint_do_suspend(struct mtk_eint *eint);
+int mtk_eint_do_resume(struct mtk_eint *eint);
+int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n,
+ unsigned int debounce);
+int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n);
+
+#else
+static inline int mtk_eint_do_init(struct mtk_eint *eint)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mtk_eint_do_suspend(struct mtk_eint *eint)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mtk_eint_do_resume(struct mtk_eint *eint)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n,
+ unsigned int debounce)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif /* __MTK_EINT_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2701.c b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
index f86f3b379607d..e91c314f3b750 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt2701.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
@@ -531,31 +531,12 @@ static const struct mtk_pinctrl_devdata mt2701_pinctrl_data = {
.port_shf = 4,
.port_mask = 0x1f,
.port_align = 4,
- .eint_offsets = {
- .name = "mt2701_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 6,
.ports = 6,
+ .ap_num = 169,
+ .db_cnt = 16,
},
- .ap_num = 169,
- .db_cnt = 16,
};
static int mt2701_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2712.c b/drivers/pinctrl/mediatek/pinctrl-mt2712.c
index 81e11f9e70f11..8398d55c01cb4 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt2712.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2712.c
@@ -576,31 +576,12 @@ static const struct mtk_pinctrl_devdata mt2712_pinctrl_data = {
.port_shf = 4,
.port_mask = 0xf,
.port_align = 4,
- .eint_offsets = {
- .name = "mt2712_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 0xf,
.ports = 8,
+ .ap_num = 229,
+ .db_cnt = 40,
},
- .ap_num = 229,
- .db_cnt = 40,
};
static int mt2712_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 06e8406c44403..ad6da1184c9f0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -30,6 +31,7 @@
#include "../core.h"
#include "../pinconf.h"
#include "../pinmux.h"
+#include "mtk-eint.h"
#define PINCTRL_PINCTRL_DEV KBUILD_MODNAME
#define MTK_RANGE(_a) { .range = (_a), .nranges = ARRAY_SIZE(_a), }
@@ -123,6 +125,8 @@ struct mtk_pin_soc {
unsigned int ngrps;
const struct function_desc *funcs;
unsigned int nfuncs;
+ const struct mtk_eint_regs *eint_regs;
+ const struct mtk_eint_hw *eint_hw;
};
struct mtk_pinctrl {
@@ -131,6 +135,7 @@ struct mtk_pinctrl {
struct device *dev;
struct gpio_chip chip;
const struct mtk_pin_soc *soc;
+ struct mtk_eint *eint;
};
static const struct mtk_pin_field_calc mt7622_pin_mode_range[] = {
@@ -913,6 +918,13 @@ static const struct pin_config_item mtk_conf_items[] = {
};
#endif
+static const struct mtk_eint_hw mt7622_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = ARRAY_SIZE(mt7622_pins),
+ .db_cnt = 20,
+};
+
static const struct mtk_pin_soc mt7622_data = {
.reg_cal = mt7622_reg_cals,
.pins = mt7622_pins,
@@ -921,6 +933,7 @@ static const struct mtk_pin_soc mt7622_data = {
.ngrps = ARRAY_SIZE(mt7622_groups),
.funcs = mt7622_functions,
.nfuncs = ARRAY_SIZE(mt7622_functions),
+ .eint_hw = &mt7622_eint_hw,
};
static void mtk_w32(struct mtk_pinctrl *pctl, u32 reg, u32 val)
@@ -1441,6 +1454,32 @@ static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
return pinctrl_gpio_direction_output(chip->base + gpio);
}
+static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ unsigned long eint_n;
+
+ eint_n = offset;
+
+ return mtk_eint_find_irq(hw->eint, eint_n);
+}
+
+static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ unsigned long eint_n;
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ eint_n = offset;
+
+ return mtk_eint_set_debounce(hw->eint, eint_n, debounce);
+}
+
static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
{
struct gpio_chip *chip = &hw->chip;
@@ -1454,6 +1493,8 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
chip->set = mtk_gpio_set;
+ chip->to_irq = mtk_gpio_to_irq,
+ chip->set_config = mtk_gpio_set_config,
chip->base = -1;
chip->ngpio = hw->soc->npins;
chip->of_node = np;
@@ -1514,6 +1555,103 @@ static int mtk_build_functions(struct mtk_pinctrl *hw)
return 0;
}
+static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
+ unsigned int *gpio_n,
+ struct gpio_chip **gpio_chip)
+{
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+
+ *gpio_chip = &hw->chip;
+ *gpio_n = eint_n;
+
+ return 0;
+}
+
+static int mtk_xt_get_gpio_state(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+ struct gpio_chip *gpio_chip;
+ unsigned int gpio_n;
+ int err;
+
+ err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
+ if (err)
+ return err;
+
+ return mtk_gpio_get(gpio_chip, gpio_n);
+}
+
+static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+ struct gpio_chip *gpio_chip;
+ unsigned int gpio_n;
+ int err;
+
+ err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_MODE,
+ MTK_GPIO_MODE);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_DIR, MTK_INPUT);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_SMT, MTK_ENABLE);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct mtk_eint_xt mtk_eint_xt = {
+ .get_gpio_n = mtk_xt_get_gpio_n,
+ .get_gpio_state = mtk_xt_get_gpio_state,
+ .set_gpio_as_eint = mtk_xt_set_gpio_as_eint,
+};
+
+static int
+mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+
+ if (!IS_ENABLED(CONFIG_EINT_MTK))
+ return 0;
+
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ return -ENODEV;
+
+ hw->eint = devm_kzalloc(hw->dev, sizeof(*hw->eint), GFP_KERNEL);
+ if (!hw->eint)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eint");
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get eint resource\n");
+ return -ENODEV;
+ }
+
+ hw->eint->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->eint->base))
+ return PTR_ERR(hw->eint->base);
+
+ hw->eint->irq = irq_of_parse_and_map(np, 0);
+ if (!hw->eint->irq)
+ return -EINVAL;
+
+ hw->eint->dev = &pdev->dev;
+ hw->eint->hw = hw->soc->eint_hw;
+ hw->eint->pctl = hw;
+ hw->eint->gpio_xlate = &mtk_eint_xt;
+
+ return mtk_eint_do_init(hw->eint);
+}
+
static const struct of_device_id mtk_pinctrl_of_match[] = {
{ .compatible = "mediatek,mt7622-pinctrl", .data = &mt7622_data},
{ }
@@ -1577,6 +1715,11 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
return err;
}
+ err = mtk_build_eint(hw, pdev);
+ if (err)
+ dev_warn(&pdev->dev,
+ "Failed to add EINT, but pinctrl still can work\n");
+
platform_set_drvdata(pdev, hw);
return 0;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8127.c b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
index d76491574841f..2e4cc9257e00c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8127.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
@@ -300,31 +300,12 @@ static const struct mtk_pinctrl_devdata mt8127_pinctrl_data = {
.port_shf = 4,
.port_mask = 0xf,
.port_align = 4,
- .eint_offsets = {
- .name = "mt8127_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 7,
.ports = 6,
+ .ap_num = 143,
+ .db_cnt = 16,
},
- .ap_num = 143,
- .db_cnt = 16,
};
static int mt8127_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8135.c b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
index d8c645f16f21e..7f5edfaffdc57 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8135.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
@@ -313,31 +313,12 @@ static const struct mtk_pinctrl_devdata mt8135_pinctrl_data = {
.port_shf = 4,
.port_mask = 0xf,
.port_align = 4,
- .eint_offsets = {
- .name = "mt8135_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 7,
.ports = 6,
+ .ap_num = 192,
+ .db_cnt = 16,
},
- .ap_num = 192,
- .db_cnt = 16,
};
static int mt8135_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index 8bfd427b9135b..c449c9a043da9 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -340,31 +340,12 @@ static const struct mtk_pinctrl_devdata mt8173_pinctrl_data = {
.port_shf = 4,
.port_mask = 0xf,
.port_align = 4,
- .eint_offsets = {
- .name = "mt8173_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
+ .eint_hw = {
.port_mask = 7,
.ports = 6,
+ .ap_num = 224,
+ .db_cnt = 16,
},
- .ap_num = 224,
- .db_cnt = 16,
};
static int mt8173_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index c3975a04d1cdd..b3799695d8db8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -38,6 +38,7 @@
#include "../core.h"
#include "../pinconf.h"
#include "../pinctrl-utils.h"
+#include "mtk-eint.h"
#include "pinctrl-mtk-common.h"
#define MAX_GPIO_MODE_PER_REG 5
@@ -831,243 +832,38 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- const struct mtk_desc_pin *pin;
struct mtk_pinctrl *pctl = gpiochip_get_data(chip);
- int irq;
-
- pin = pctl->devdata->pins + offset;
- if (pin->eint.eintnum == NO_EINT_SUPPORT)
- return -EINVAL;
-
- irq = irq_find_mapping(pctl->domain, pin->eint.eintnum);
- if (!irq)
- return -EINVAL;
-
- return irq;
-}
-
-static int mtk_pinctrl_irq_request_resources(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_desc_pin *pin;
- int ret;
-
- pin = mtk_find_pin_by_eint_num(pctl, d->hwirq);
-
- if (!pin) {
- dev_err(pctl->dev, "Can not find pin\n");
- return -EINVAL;
- }
-
- ret = gpiochip_lock_as_irq(pctl->chip, pin->pin.number);
- if (ret) {
- dev_err(pctl->dev, "unable to lock HW IRQ %lu for IRQ\n",
- irqd_to_hwirq(d));
- return ret;
- }
-
- /* set mux to INT mode */
- mtk_pmx_set_mode(pctl->pctl_dev, pin->pin.number, pin->eint.eintmux);
- /* set gpio direction to input */
- mtk_pmx_gpio_set_direction(pctl->pctl_dev, NULL, pin->pin.number, true);
- /* set input-enable */
- mtk_pconf_set_ies_smt(pctl, pin->pin.number, 1, PIN_CONFIG_INPUT_ENABLE);
-
- return 0;
-}
-
-static void mtk_pinctrl_irq_release_resources(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_desc_pin *pin;
-
- pin = mtk_find_pin_by_eint_num(pctl, d->hwirq);
-
- if (!pin) {
- dev_err(pctl->dev, "Can not find pin\n");
- return;
- }
-
- gpiochip_unlock_as_irq(pctl->chip, pin->pin.number);
-}
-
-static void __iomem *mtk_eint_get_offset(struct mtk_pinctrl *pctl,
- unsigned int eint_num, unsigned int offset)
-{
- unsigned int eint_base = 0;
- void __iomem *reg;
-
- if (eint_num >= pctl->devdata->ap_num)
- eint_base = pctl->devdata->ap_num;
-
- reg = pctl->eint_reg_base + offset + ((eint_num - eint_base) / 32) * 4;
-
- return reg;
-}
-
-/*
- * mtk_can_en_debounce: Check the EINT number is able to enable debounce or not
- * @eint_num: the EINT number to setmtk_pinctrl
- */
-static unsigned int mtk_eint_can_en_debounce(struct mtk_pinctrl *pctl,
- unsigned int eint_num)
-{
- unsigned int sens;
- unsigned int bit = BIT(eint_num % 32);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- void __iomem *reg = mtk_eint_get_offset(pctl, eint_num,
- eint_offsets->sens);
-
- if (readl(reg) & bit)
- sens = MT_LEVEL_SENSITIVE;
- else
- sens = MT_EDGE_SENSITIVE;
-
- if ((eint_num < pctl->devdata->db_cnt) && (sens != MT_EDGE_SENSITIVE))
- return 1;
- else
- return 0;
-}
-
-/*
- * mtk_eint_get_mask: To get the eint mask
- * @eint_num: the EINT number to get
- */
-static unsigned int mtk_eint_get_mask(struct mtk_pinctrl *pctl,
- unsigned int eint_num)
-{
- unsigned int bit = BIT(eint_num % 32);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- void __iomem *reg = mtk_eint_get_offset(pctl, eint_num,
- eint_offsets->mask);
-
- return !!(readl(reg) & bit);
-}
-
-static int mtk_eint_flip_edge(struct mtk_pinctrl *pctl, int hwirq)
-{
- int start_level, curr_level;
- unsigned int reg_offset;
- const struct mtk_eint_offsets *eint_offsets = &(pctl->devdata->eint_offsets);
- u32 mask = BIT(hwirq & 0x1f);
- u32 port = (hwirq >> 5) & eint_offsets->port_mask;
- void __iomem *reg = pctl->eint_reg_base + (port << 2);
const struct mtk_desc_pin *pin;
-
- pin = mtk_find_pin_by_eint_num(pctl, hwirq);
- curr_level = mtk_gpio_get(pctl->chip, pin->pin.number);
- do {
- start_level = curr_level;
- if (start_level)
- reg_offset = eint_offsets->pol_clr;
- else
- reg_offset = eint_offsets->pol_set;
- writel(mask, reg + reg_offset);
-
- curr_level = mtk_gpio_get(pctl->chip, pin->pin.number);
- } while (start_level != curr_level);
-
- return start_level;
-}
-
-static void mtk_eint_mask(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- u32 mask = BIT(d->hwirq & 0x1f);
- void __iomem *reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->mask_set);
-
- writel(mask, reg);
-}
-
-static void mtk_eint_unmask(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- u32 mask = BIT(d->hwirq & 0x1f);
- void __iomem *reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->mask_clr);
-
- writel(mask, reg);
-
- if (pctl->eint_dual_edges[d->hwirq])
- mtk_eint_flip_edge(pctl, d->hwirq);
-}
-
-static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
- unsigned debounce)
-{
- struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
- int eint_num, virq, eint_offset;
- unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
- static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
- 128000, 256000};
- const struct mtk_desc_pin *pin;
- struct irq_data *d;
+ unsigned long eint_n;
pin = pctl->devdata->pins + offset;
if (pin->eint.eintnum == NO_EINT_SUPPORT)
return -EINVAL;
- eint_num = pin->eint.eintnum;
- virq = irq_find_mapping(pctl->domain, eint_num);
- eint_offset = (eint_num % 4) * 8;
- d = irq_get_irq_data(virq);
+ eint_n = pin->eint.eintnum;
- set_offset = (eint_num / 4) * 4 + pctl->devdata->eint_offsets.dbnc_set;
- clr_offset = (eint_num / 4) * 4 + pctl->devdata->eint_offsets.dbnc_clr;
- if (!mtk_eint_can_en_debounce(pctl, eint_num))
- return -ENOSYS;
-
- dbnc = ARRAY_SIZE(debounce_time);
- for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
- if (debounce <= debounce_time[i]) {
- dbnc = i;
- break;
- }
- }
-
- if (!mtk_eint_get_mask(pctl, eint_num)) {
- mtk_eint_mask(d);
- unmask = 1;
- } else {
- unmask = 0;
- }
-
- clr_bit = 0xff << eint_offset;
- writel(clr_bit, pctl->eint_reg_base + clr_offset);
-
- bit = ((dbnc << EINT_DBNC_SET_DBNC_BITS) | EINT_DBNC_SET_EN) <<
- eint_offset;
- rst = EINT_DBNC_RST_BIT << eint_offset;
- writel(rst | bit, pctl->eint_reg_base + set_offset);
-
- /* Delay a while (more than 2T) to wait for hw debounce counter reset
- work correctly */
- udelay(1);
- if (unmask == 1)
- mtk_eint_unmask(d);
-
- return 0;
+ return mtk_eint_find_irq(pctl->eint, eint_n);
}
static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned offset,
unsigned long config)
{
+ struct mtk_pinctrl *pctl = gpiochip_get_data(chip);
+ const struct mtk_desc_pin *pin;
+ unsigned long eint_n;
u32 debounce;
if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
return -ENOTSUPP;
+ pin = pctl->devdata->pins + offset;
+ if (pin->eint.eintnum == NO_EINT_SUPPORT)
+ return -EINVAL;
+
debounce = pinconf_to_config_argument(config);
- return mtk_gpio_set_debounce(chip, offset, debounce);
+ eint_n = pin->eint.eintnum;
+
+ return mtk_eint_set_debounce(pctl->eint, eint_n, debounce);
}
static const struct gpio_chip mtk_gpio_chip = {
@@ -1084,117 +880,18 @@ static const struct gpio_chip mtk_gpio_chip = {
.of_gpio_n_cells = 2,
};
-static int mtk_eint_set_type(struct irq_data *d,
- unsigned int type)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- u32 mask = BIT(d->hwirq & 0x1f);
- void __iomem *reg;
-
- if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
- ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
- dev_err(pctl->dev, "Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
- d->irq, d->hwirq, type);
- return -EINVAL;
- }
-
- if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
- pctl->eint_dual_edges[d->hwirq] = 1;
- else
- pctl->eint_dual_edges[d->hwirq] = 0;
-
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
- reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->pol_clr);
- writel(mask, reg);
- } else {
- reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->pol_set);
- writel(mask, reg);
- }
-
- if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
- reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->sens_clr);
- writel(mask, reg);
- } else {
- reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->sens_set);
- writel(mask, reg);
- }
-
- if (pctl->eint_dual_edges[d->hwirq])
- mtk_eint_flip_edge(pctl, d->hwirq);
-
- return 0;
-}
-
-static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- int shift = d->hwirq & 0x1f;
- int reg = d->hwirq >> 5;
-
- if (on)
- pctl->wake_mask[reg] |= BIT(shift);
- else
- pctl->wake_mask[reg] &= ~BIT(shift);
-
- return 0;
-}
-
-static void mtk_eint_chip_write_mask(const struct mtk_eint_offsets *chip,
- void __iomem *eint_reg_base, u32 *buf)
-{
- int port;
- void __iomem *reg;
-
- for (port = 0; port < chip->ports; port++) {
- reg = eint_reg_base + (port << 2);
- writel_relaxed(~buf[port], reg + chip->mask_set);
- writel_relaxed(buf[port], reg + chip->mask_clr);
- }
-}
-
-static void mtk_eint_chip_read_mask(const struct mtk_eint_offsets *chip,
- void __iomem *eint_reg_base, u32 *buf)
-{
- int port;
- void __iomem *reg;
-
- for (port = 0; port < chip->ports; port++) {
- reg = eint_reg_base + chip->mask + (port << 2);
- buf[port] = ~readl_relaxed(reg);
- /* Mask is 0 when irq is enabled, and 1 when disabled. */
- }
-}
-
static int mtk_eint_suspend(struct device *device)
{
- void __iomem *reg;
struct mtk_pinctrl *pctl = dev_get_drvdata(device);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- reg = pctl->eint_reg_base;
- mtk_eint_chip_read_mask(eint_offsets, reg, pctl->cur_mask);
- mtk_eint_chip_write_mask(eint_offsets, reg, pctl->wake_mask);
- return 0;
+ return mtk_eint_do_suspend(pctl->eint);
}
static int mtk_eint_resume(struct device *device)
{
struct mtk_pinctrl *pctl = dev_get_drvdata(device);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- mtk_eint_chip_write_mask(eint_offsets,
- pctl->eint_reg_base, pctl->cur_mask);
- return 0;
+ return mtk_eint_do_resume(pctl->eint);
}
const struct dev_pm_ops mtk_eint_pm_ops = {
@@ -1202,117 +899,6 @@ const struct dev_pm_ops mtk_eint_pm_ops = {
.resume_noirq = mtk_eint_resume,
};
-static void mtk_eint_ack(struct irq_data *d)
-{
- struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- u32 mask = BIT(d->hwirq & 0x1f);
- void __iomem *reg = mtk_eint_get_offset(pctl, d->hwirq,
- eint_offsets->ack);
-
- writel(mask, reg);
-}
-
-static struct irq_chip mtk_pinctrl_irq_chip = {
- .name = "mt-eint",
- .irq_disable = mtk_eint_mask,
- .irq_mask = mtk_eint_mask,
- .irq_unmask = mtk_eint_unmask,
- .irq_ack = mtk_eint_ack,
- .irq_set_type = mtk_eint_set_type,
- .irq_set_wake = mtk_eint_irq_set_wake,
- .irq_request_resources = mtk_pinctrl_irq_request_resources,
- .irq_release_resources = mtk_pinctrl_irq_release_resources,
-};
-
-static unsigned int mtk_eint_init(struct mtk_pinctrl *pctl)
-{
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- void __iomem *reg = pctl->eint_reg_base + eint_offsets->dom_en;
- unsigned int i;
-
- for (i = 0; i < pctl->devdata->ap_num; i += 32) {
- writel(0xffffffff, reg);
- reg += 4;
- }
- return 0;
-}
-
-static inline void
-mtk_eint_debounce_process(struct mtk_pinctrl *pctl, int index)
-{
- unsigned int rst, ctrl_offset;
- unsigned int bit, dbnc;
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
-
- ctrl_offset = (index / 4) * 4 + eint_offsets->dbnc_ctrl;
- dbnc = readl(pctl->eint_reg_base + ctrl_offset);
- bit = EINT_DBNC_SET_EN << ((index % 4) * 8);
- if ((bit & dbnc) > 0) {
- ctrl_offset = (index / 4) * 4 + eint_offsets->dbnc_set;
- rst = EINT_DBNC_RST_BIT << ((index % 4) * 8);
- writel(rst, pctl->eint_reg_base + ctrl_offset);
- }
-}
-
-static void mtk_eint_irq_handler(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc);
- unsigned int status, eint_num;
- int offset, index, virq;
- const struct mtk_eint_offsets *eint_offsets =
- &pctl->devdata->eint_offsets;
- void __iomem *reg = mtk_eint_get_offset(pctl, 0, eint_offsets->stat);
- int dual_edges, start_level, curr_level;
- const struct mtk_desc_pin *pin;
-
- chained_irq_enter(chip, desc);
- for (eint_num = 0;
- eint_num < pctl->devdata->ap_num;
- eint_num += 32, reg += 4) {
- status = readl(reg);
- while (status) {
- offset = __ffs(status);
- index = eint_num + offset;
- virq = irq_find_mapping(pctl->domain, index);
- status &= ~BIT(offset);
-
- dual_edges = pctl->eint_dual_edges[index];
- if (dual_edges) {
- /* Clear soft-irq in case we raised it
- last time */
- writel(BIT(offset), reg - eint_offsets->stat +
- eint_offsets->soft_clr);
-
- pin = mtk_find_pin_by_eint_num(pctl, index);
- start_level = mtk_gpio_get(pctl->chip,
- pin->pin.number);
- }
-
- generic_handle_irq(virq);
-
- if (dual_edges) {
- curr_level = mtk_eint_flip_edge(pctl, index);
-
- /* If level changed, we might lost one edge
- interrupt, raised it through soft-irq */
- if (start_level != curr_level)
- writel(BIT(offset), reg -
- eint_offsets->stat +
- eint_offsets->soft_set);
- }
-
- if (index < pctl->devdata->db_cnt)
- mtk_eint_debounce_process(pctl , index);
- }
- }
- chained_irq_exit(chip, desc);
-}
-
static int mtk_pctrl_build_state(struct platform_device *pdev)
{
struct mtk_pinctrl *pctl = platform_get_drvdata(pdev);
@@ -1345,6 +931,101 @@ static int mtk_pctrl_build_state(struct platform_device *pdev)
return 0;
}
+static int
+mtk_xt_get_gpio_n(void *data, unsigned long eint_n, unsigned int *gpio_n,
+ struct gpio_chip **gpio_chip)
+{
+ struct mtk_pinctrl *pctl = (struct mtk_pinctrl *)data;
+ const struct mtk_desc_pin *pin;
+
+ pin = mtk_find_pin_by_eint_num(pctl, eint_n);
+ if (!pin)
+ return -EINVAL;
+
+ *gpio_chip = pctl->chip;
+ *gpio_n = pin->pin.number;
+
+ return 0;
+}
+
+static int mtk_xt_get_gpio_state(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *pctl = (struct mtk_pinctrl *)data;
+ const struct mtk_desc_pin *pin;
+
+ pin = mtk_find_pin_by_eint_num(pctl, eint_n);
+ if (!pin)
+ return -EINVAL;
+
+ return mtk_gpio_get(pctl->chip, pin->pin.number);
+}
+
+static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *pctl = (struct mtk_pinctrl *)data;
+ const struct mtk_desc_pin *pin;
+
+ pin = mtk_find_pin_by_eint_num(pctl, eint_n);
+ if (!pin)
+ return -EINVAL;
+
+ /* set mux to INT mode */
+ mtk_pmx_set_mode(pctl->pctl_dev, pin->pin.number, pin->eint.eintmux);
+ /* set gpio direction to input */
+ mtk_pmx_gpio_set_direction(pctl->pctl_dev, NULL, pin->pin.number,
+ true);
+ /* set input-enable */
+ mtk_pconf_set_ies_smt(pctl, pin->pin.number, 1,
+ PIN_CONFIG_INPUT_ENABLE);
+
+ return 0;
+}
+
+static const struct mtk_eint_xt mtk_eint_xt = {
+ .get_gpio_n = mtk_xt_get_gpio_n,
+ .get_gpio_state = mtk_xt_get_gpio_state,
+ .set_gpio_as_eint = mtk_xt_set_gpio_as_eint,
+};
+
+static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ return -ENODEV;
+
+ pctl->eint = devm_kzalloc(pctl->dev, sizeof(*pctl->eint), GFP_KERNEL);
+ if (!pctl->eint)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get eint resource\n");
+ return -ENODEV;
+ }
+
+ pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctl->eint->base))
+ return PTR_ERR(pctl->eint->base);
+
+ pctl->eint->irq = irq_of_parse_and_map(np, 0);
+ if (!pctl->eint->irq)
+ return -EINVAL;
+
+ pctl->eint->dev = &pdev->dev;
+ /*
+ * If pctl->eint->regs == NULL, it would fall back into using a generic
+ * register map in mtk_eint_do_init calls.
+ */
+ pctl->eint->regs = pctl->devdata->eint_regs;
+ pctl->eint->hw = &pctl->devdata->eint_hw;
+ pctl->eint->pctl = pctl;
+ pctl->eint->gpio_xlate = &mtk_eint_xt;
+
+ return mtk_eint_do_init(pctl->eint);
+}
+
int mtk_pctrl_init(struct platform_device *pdev,
const struct mtk_pinctrl_devdata *data,
struct regmap *regmap)
@@ -1353,8 +1034,7 @@ int mtk_pctrl_init(struct platform_device *pdev,
struct mtk_pinctrl *pctl;
struct device_node *np = pdev->dev.of_node, *node;
struct property *prop;
- struct resource *res;
- int i, ret, irq, ports_buf;
+ int ret, i;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
@@ -1441,70 +1121,10 @@ int mtk_pctrl_init(struct platform_device *pdev,
goto chip_error;
}
- if (!of_property_read_bool(np, "interrupt-controller"))
- return 0;
-
- /* Get EINT register base from dts. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get Pinctrl resource\n");
- ret = -EINVAL;
- goto chip_error;
- }
-
- pctl->eint_reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pctl->eint_reg_base)) {
- ret = -EINVAL;
- goto chip_error;
- }
-
- ports_buf = pctl->devdata->eint_offsets.ports;
- pctl->wake_mask = devm_kcalloc(&pdev->dev, ports_buf,
- sizeof(*pctl->wake_mask), GFP_KERNEL);
- if (!pctl->wake_mask) {
- ret = -ENOMEM;
- goto chip_error;
- }
-
- pctl->cur_mask = devm_kcalloc(&pdev->dev, ports_buf,
- sizeof(*pctl->cur_mask), GFP_KERNEL);
- if (!pctl->cur_mask) {
- ret = -ENOMEM;
- goto chip_error;
- }
-
- pctl->eint_dual_edges = devm_kcalloc(&pdev->dev, pctl->devdata->ap_num,
- sizeof(int), GFP_KERNEL);
- if (!pctl->eint_dual_edges) {
- ret = -ENOMEM;
- goto chip_error;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- dev_err(&pdev->dev, "couldn't parse and map irq\n");
- ret = -EINVAL;
- goto chip_error;
- }
-
- pctl->domain = irq_domain_add_linear(np,
- pctl->devdata->ap_num, &irq_domain_simple_ops, NULL);
- if (!pctl->domain) {
- dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
- ret = -ENOMEM;
+ ret = mtk_eint_init(pctl, pdev);
+ if (ret)
goto chip_error;
- }
-
- mtk_eint_init(pctl);
- for (i = 0; i < pctl->devdata->ap_num; i++) {
- int virq = irq_create_mapping(pctl->domain, i);
-
- irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip,
- handle_level_irq);
- irq_set_chip_data(virq, pctl);
- }
- irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl);
return 0;
chip_error:
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
index 8543bc478a1ec..bf13eb0a68d62 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
@@ -19,6 +19,8 @@
#include <linux/regmap.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include "mtk-eint.h"
+
#define NO_EINT_SUPPORT 255
#define MT_EDGE_SENSITIVE 0
#define MT_LEVEL_SENSITIVE 1
@@ -258,9 +260,8 @@ struct mtk_pinctrl_devdata {
unsigned char port_shf;
unsigned char port_mask;
unsigned char port_align;
- struct mtk_eint_offsets eint_offsets;
- unsigned int ap_num;
- unsigned int db_cnt;
+ struct mtk_eint_hw eint_hw;
+ struct mtk_eint_regs *eint_regs;
};
struct mtk_pinctrl {
@@ -274,11 +275,7 @@ struct mtk_pinctrl {
const char **grp_names;
struct pinctrl_dev *pctl_dev;
const struct mtk_pinctrl_devdata *devdata;
- void __iomem *eint_reg_base;
- struct irq_domain *domain;
- int *eint_dual_edges;
- u32 *wake_mask;
- u32 *cur_mask;
+ struct mtk_eint *eint;
};
int mtk_pctrl_init(struct platform_device *pdev,
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 99a6ceac8e53c..46a0918bd284c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -312,6 +312,47 @@ static const unsigned int pdm_din1_pins[] = {GPIOA_16};
static const unsigned int pdm_din2_pins[] = {GPIOA_17};
static const unsigned int pdm_din3_pins[] = {GPIOA_18};
+/* mclk */
+static const unsigned int mclk_c_pins[] = {GPIOA_0};
+static const unsigned int mclk_b_pins[] = {GPIOA_1};
+
+/* tdm */
+static const unsigned int tdma_sclk_pins[] = {GPIOX_12};
+static const unsigned int tdma_sclk_slv_pins[] = {GPIOX_12};
+static const unsigned int tdma_fs_pins[] = {GPIOX_13};
+static const unsigned int tdma_fs_slv_pins[] = {GPIOX_13};
+static const unsigned int tdma_din0_pins[] = {GPIOX_14};
+static const unsigned int tdma_dout0_x14_pins[] = {GPIOX_14};
+static const unsigned int tdma_dout0_x15_pins[] = {GPIOX_15};
+static const unsigned int tdma_dout1_pins[] = {GPIOX_15};
+static const unsigned int tdma_din1_pins[] = {GPIOX_15};
+
+static const unsigned int tdmc_sclk_pins[] = {GPIOA_2};
+static const unsigned int tdmc_sclk_slv_pins[] = {GPIOA_2};
+static const unsigned int tdmc_fs_pins[] = {GPIOA_3};
+static const unsigned int tdmc_fs_slv_pins[] = {GPIOA_3};
+static const unsigned int tdmc_din0_pins[] = {GPIOA_4};
+static const unsigned int tdmc_dout0_pins[] = {GPIOA_4};
+static const unsigned int tdmc_din1_pins[] = {GPIOA_5};
+static const unsigned int tdmc_dout1_pins[] = {GPIOA_5};
+static const unsigned int tdmc_din2_pins[] = {GPIOA_6};
+static const unsigned int tdmc_dout2_pins[] = {GPIOA_6};
+static const unsigned int tdmc_din3_pins[] = {GPIOA_7};
+static const unsigned int tdmc_dout3_pins[] = {GPIOA_7};
+
+static const unsigned int tdmb_sclk_pins[] = {GPIOA_8};
+static const unsigned int tdmb_sclk_slv_pins[] = {GPIOA_8};
+static const unsigned int tdmb_fs_pins[] = {GPIOA_9};
+static const unsigned int tdmb_fs_slv_pins[] = {GPIOA_9};
+static const unsigned int tdmb_din0_pins[] = {GPIOA_10};
+static const unsigned int tdmb_dout0_pins[] = {GPIOA_10};
+static const unsigned int tdmb_din1_pins[] = {GPIOA_11};
+static const unsigned int tdmb_dout1_pins[] = {GPIOA_11};
+static const unsigned int tdmb_din2_pins[] = {GPIOA_12};
+static const unsigned int tdmb_dout2_pins[] = {GPIOA_12};
+static const unsigned int tdmb_din3_pins[] = {GPIOA_13};
+static const unsigned int tdmb_dout3_pins[] = {GPIOA_13};
+
static struct meson_pmx_group meson_axg_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
@@ -495,6 +536,15 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
GROUP(eth_rx_dv_x, 4),
GROUP(eth_mdio_x, 4),
GROUP(eth_mdc_x, 4),
+ GROUP(tdma_sclk, 1),
+ GROUP(tdma_sclk_slv, 2),
+ GROUP(tdma_fs, 1),
+ GROUP(tdma_fs_slv, 2),
+ GROUP(tdma_din0, 1),
+ GROUP(tdma_dout0_x14, 2),
+ GROUP(tdma_dout0_x15, 1),
+ GROUP(tdma_dout1, 2),
+ GROUP(tdma_din1, 3),
/* bank GPIOY */
GROUP(eth_txd0_y, 1),
@@ -544,6 +594,32 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
GROUP(pdm_din1, 1),
GROUP(pdm_din2, 1),
GROUP(pdm_din3, 1),
+ GROUP(mclk_c, 1),
+ GROUP(mclk_b, 1),
+ GROUP(tdmc_sclk, 1),
+ GROUP(tdmc_sclk_slv, 2),
+ GROUP(tdmc_fs, 1),
+ GROUP(tdmc_fs_slv, 2),
+ GROUP(tdmc_din0, 2),
+ GROUP(tdmc_dout0, 1),
+ GROUP(tdmc_din1, 2),
+ GROUP(tdmc_dout1, 1),
+ GROUP(tdmc_din2, 2),
+ GROUP(tdmc_dout2, 1),
+ GROUP(tdmc_din3, 2),
+ GROUP(tdmc_dout3, 1),
+ GROUP(tdmb_sclk, 1),
+ GROUP(tdmb_sclk_slv, 2),
+ GROUP(tdmb_fs, 1),
+ GROUP(tdmb_fs_slv, 2),
+ GROUP(tdmb_din0, 2),
+ GROUP(tdmb_dout0, 1),
+ GROUP(tdmb_din1, 2),
+ GROUP(tdmb_dout1, 1),
+ GROUP(tdmb_din2, 2),
+ GROUP(tdmb_dout2, 1),
+ GROUP(tdmb_din3, 2),
+ GROUP(tdmb_dout3, 1),
};
/* uart_ao_a */
@@ -845,6 +921,32 @@ static const char * const jtag_ao_groups[] = {
"jtag_ao_tdi", "jtag_ao_tdo", "jtag_ao_clk", "jtag_ao_tms",
};
+static const char * const mclk_c_groups[] = {
+ "mclk_c",
+};
+
+static const char * const mclk_b_groups[] = {
+ "mclk_b",
+};
+
+static const char * const tdma_groups[] = {
+ "tdma_sclk", "tdma_sclk_slv", "tdma_fs", "tdma_fs_slv",
+ "tdma_din0", "tdma_dout0_x14", "tdma_dout0_x15", "tdma_dout1",
+ "tdma_din1",
+};
+
+static const char * const tdmc_groups[] = {
+ "tdmc_sclk", "tdmc_sclk_slv", "tdmc_fs", "tdmc_fs_slv",
+ "tdmc_din0", "tdmc_dout0", "tdmc_din1", "tdmc_dout1",
+ "tdmc_din2", "tdmc_dout2", "tdmc_din3", "tdmc_dout3",
+};
+
+static const char * const tdmb_groups[] = {
+ "tdmb_sclk", "tdmb_sclk_slv", "tdmb_fs", "tdmb_fs_slv",
+ "tdmb_din0", "tdmb_dout0", "tdmb_din1", "tdmb_dout1",
+ "tdmb_din2", "tdmb_dout2", "tdmb_din3", "tdmb_dout3",
+};
+
static struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
@@ -870,6 +972,11 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = {
FUNCTION(spdif_in),
FUNCTION(jtag_ee),
FUNCTION(pdm),
+ FUNCTION(mclk_b),
+ FUNCTION(mclk_c),
+ FUNCTION(tdma),
+ FUNCTION(tdmb),
+ FUNCTION(tdmc),
};
static struct meson_pmx_func meson_axg_aobus_functions[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 9079020259c5a..2c97a2e07a5fc 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -627,8 +627,8 @@ static const char * const sdio_groups[] = {
};
static const char * const nand_groups[] = {
- "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale", "nand_cle",
- "nand_wen_clk", "nand_ren_wr", "nand_dqs",
+ "emmc_nand_d07", "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale",
+ "nand_cle", "nand_wen_clk", "nand_ren_wr", "nand_dqs",
};
static const char * const uart_a_groups[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index b3786cde963d5..7dae1d7bf6b0a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -617,8 +617,8 @@ static const char * const sdio_groups[] = {
};
static const char * const nand_groups[] = {
- "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale", "nand_cle",
- "nand_wen_clk", "nand_ren_wr", "nand_dqs",
+ "emmc_nand_d07", "nand_ce0", "nand_ce1", "nand_rb0", "nand_ale",
+ "nand_cle", "nand_wen_clk", "nand_ren_wr", "nand_dqs",
};
static const char * const uart_a_groups[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 49c7ce03547bf..c6d79315218fa 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -1,5 +1,5 @@
/*
- * Pin controller and GPIO driver for Amlogic Meson8.
+ * Pin controller and GPIO driver for Amlogic Meson8 and Meson8m2.
*
* Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
*
@@ -299,6 +299,10 @@ static const unsigned int spi_mosi_1_pins[] = { GPIOZ_12 };
static const unsigned int spi_miso_1_pins[] = { GPIOZ_13 };
static const unsigned int spi_ss2_1_pins[] = { GPIOZ_14 };
+static const unsigned int eth_txd3_pins[] = { GPIOZ_0 };
+static const unsigned int eth_txd2_pins[] = { GPIOZ_1 };
+static const unsigned int eth_rxd3_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rxd2_pins[] = { GPIOZ_3 };
static const unsigned int eth_tx_clk_50m_pins[] = { GPIOZ_4 };
static const unsigned int eth_tx_en_pins[] = { GPIOZ_5 };
static const unsigned int eth_txd1_pins[] = { GPIOZ_6 };
@@ -650,6 +654,12 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
GROUP(eth_mdio, 6, 6),
GROUP(eth_mdc, 6, 5),
+ /* NOTE: the following four groups are only available on Meson8m2: */
+ GROUP(eth_rxd2, 6, 3),
+ GROUP(eth_rxd3, 6, 2),
+ GROUP(eth_txd2, 6, 1),
+ GROUP(eth_txd3, 6, 0),
+
GROUP(i2c_sda_a0, 5, 31),
GROUP(i2c_sck_a0, 5, 30),
@@ -877,7 +887,8 @@ static const char * const spi_groups[] = {
static const char * const ethernet_groups[] = {
"eth_tx_clk_50m", "eth_tx_en", "eth_txd1",
"eth_txd0", "eth_rx_clk_in", "eth_rx_dv",
- "eth_rxd1", "eth_rxd0", "eth_mdio", "eth_mdc"
+ "eth_rxd1", "eth_rxd0", "eth_mdio", "eth_mdc", "eth_rxd2",
+ "eth_rxd3", "eth_txd2", "eth_txd3"
};
static const char * const i2c_a_groups[] = {
@@ -1080,6 +1091,14 @@ static const struct of_device_id meson8_pinctrl_dt_match[] = {
.compatible = "amlogic,meson8-aobus-pinctrl",
.data = &meson8_aobus_pinctrl_data,
},
+ {
+ .compatible = "amlogic,meson8m2-cbus-pinctrl",
+ .data = &meson8_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8m2-aobus-pinctrl",
+ .data = &meson8_aobus_pinctrl_data,
+ },
{ },
};
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 5b63248c82090..674ffdf8103c3 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -214,18 +214,6 @@ static inline void armada_37xx_update_reg(unsigned int *reg,
}
}
-static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp,
- const char *func)
-{
- int f;
-
- for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++)
- if (!strcmp(grp->funcs[f], func))
- return f;
-
- return -ENOTSUPP;
-}
-
static struct armada_37xx_pin_group *armada_37xx_find_next_grp_by_pin(
struct armada_37xx_pinctrl *info, int pin, int *grp)
{
@@ -344,10 +332,9 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev,
dev_dbg(info->dev, "enable function %s group %s\n",
name, grp->name);
- func = armada_37xx_get_func_reg(grp, name);
-
+ func = match_string(grp->funcs, NB_FUNCS, name);
if (func < 0)
- return func;
+ return -ENOTSUPP;
val = grp->val[func];
@@ -679,12 +666,13 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
writel(1 << hwirq,
info->base +
IRQ_STATUS + 4 * i);
- continue;
+ goto update_status;
}
}
generic_handle_irq(virq);
+update_status:
/* Update status in case a new IRQ appears */
spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base +
@@ -932,12 +920,12 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
struct armada_37xx_pin_group *gp = &info->groups[g];
int f;
- for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) {
- if (strcmp(gp->funcs[f], name) == 0) {
- *groups = gp->name;
- groups++;
- }
- }
+ f = match_string(gp->funcs, NB_FUNCS, name);
+ if (f < 0)
+ continue;
+
+ *groups = gp->name;
+ groups++;
}
}
return 0;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index b854f1ee5de57..5e828468e43da 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -431,40 +431,40 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = {
MPP_MODE(19,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x4, "dev", "rb", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "nand", "rb", V_98DX3236_PLUS)),
MPP_MODE(20,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
MPP_VAR_FUNCTION(0x4, "dev", "we0", V_98DX3236_PLUS)),
MPP_MODE(21,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad0", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad0", V_98DX3236_PLUS)),
MPP_MODE(22,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad1", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad1", V_98DX3236_PLUS)),
MPP_MODE(23,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad2", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad2", V_98DX3236_PLUS)),
MPP_MODE(24,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad3", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad3", V_98DX3236_PLUS)),
MPP_MODE(25,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad4", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad4", V_98DX3236_PLUS)),
MPP_MODE(26,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad5", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad5", V_98DX3236_PLUS)),
MPP_MODE(27,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad6", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad6", V_98DX3236_PLUS)),
MPP_MODE(28,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "ad7", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "ad7", V_98DX3236_PLUS)),
MPP_MODE(29,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "a0", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "a0", V_98DX3236_PLUS)),
MPP_MODE(30,
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS),
- MPP_VAR_FUNCTION(0x1, "dev", "a1", V_98DX3236_PLUS)),
+ MPP_VAR_FUNCTION(0x4, "dev", "a1", V_98DX3236_PLUS)),
MPP_MODE(31,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS),
MPP_VAR_FUNCTION(0x1, "slv_smi", "mdc", V_98DX3236_PLUS),
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 4b57a13758a43..bafb3d40545e4 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -576,8 +576,10 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
for_each_child_of_node(np_config, np) {
ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(np);
break;
+ }
}
}
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 3924779f55785..1882713e68f93 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -59,6 +59,7 @@
#define GPIO_LS_SYNC 0x60
enum rockchip_pinctrl_type {
+ PX30,
RV1108,
RK2928,
RK3066B,
@@ -701,6 +702,66 @@ static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin,
*bit = data->bit;
}
+static struct rockchip_mux_route_data px30_mux_route_data[] = {
+ {
+ /* cif-d2m0 */
+ .bank_num = 2,
+ .pin = 0,
+ .func = 1,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 7),
+ }, {
+ /* cif-d2m1 */
+ .bank_num = 3,
+ .pin = 3,
+ .func = 3,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 7) | BIT(7),
+ }, {
+ /* pdm-m0 */
+ .bank_num = 3,
+ .pin = 22,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 8),
+ }, {
+ /* pdm-m1 */
+ .bank_num = 2,
+ .pin = 22,
+ .func = 1,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 8) | BIT(8),
+ }, {
+ /* uart2-rxm0 */
+ .bank_num = 1,
+ .pin = 27,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 10),
+ }, {
+ /* uart2-rxm1 */
+ .bank_num = 2,
+ .pin = 14,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 10) | BIT(10),
+ }, {
+ /* uart3-rxm0 */
+ .bank_num = 0,
+ .pin = 17,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 9),
+ }, {
+ /* uart3-rxm1 */
+ .bank_num = 1,
+ .pin = 15,
+ .func = 2,
+ .route_offset = 0x184,
+ .route_val = BIT(16 + 9) | BIT(9),
+ },
+};
+
static struct rockchip_mux_route_data rk3128_mux_route_data[] = {
{
/* spi-0 */
@@ -1202,6 +1263,97 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
return ret;
}
+#define PX30_PULL_PMU_OFFSET 0x10
+#define PX30_PULL_GRF_OFFSET 0x60
+#define PX30_PULL_BITS_PER_PIN 2
+#define PX30_PULL_PINS_PER_REG 8
+#define PX30_PULL_BANK_STRIDE 16
+
+static void px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 32 pins of the first bank are located in PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = PX30_PULL_PMU_OFFSET;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = PX30_PULL_GRF_OFFSET;
+
+ /* correct the offset, as we're starting with the 2nd bank */
+ *reg -= 0x10;
+ *reg += bank->bank_num * PX30_PULL_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / PX30_PULL_PINS_PER_REG) * 4);
+ *bit = (pin_num % PX30_PULL_PINS_PER_REG);
+ *bit *= PX30_PULL_BITS_PER_PIN;
+}
+
+#define PX30_DRV_PMU_OFFSET 0x20
+#define PX30_DRV_GRF_OFFSET 0xf0
+#define PX30_DRV_BITS_PER_PIN 2
+#define PX30_DRV_PINS_PER_REG 8
+#define PX30_DRV_BANK_STRIDE 16
+
+static void px30_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 32 pins of the first bank are located in PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = PX30_DRV_PMU_OFFSET;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = PX30_DRV_GRF_OFFSET;
+
+ /* correct the offset, as we're starting with the 2nd bank */
+ *reg -= 0x10;
+ *reg += bank->bank_num * PX30_DRV_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / PX30_DRV_PINS_PER_REG) * 4);
+ *bit = (pin_num % PX30_DRV_PINS_PER_REG);
+ *bit *= PX30_DRV_BITS_PER_PIN;
+}
+
+#define PX30_SCHMITT_PMU_OFFSET 0x38
+#define PX30_SCHMITT_GRF_OFFSET 0xc0
+#define PX30_SCHMITT_PINS_PER_PMU_REG 16
+#define PX30_SCHMITT_BANK_STRIDE 16
+#define PX30_SCHMITT_PINS_PER_GRF_REG 8
+
+static int px30_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num,
+ struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ int pins_per_reg;
+
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = PX30_SCHMITT_PMU_OFFSET;
+ pins_per_reg = PX30_SCHMITT_PINS_PER_PMU_REG;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = PX30_SCHMITT_GRF_OFFSET;
+ pins_per_reg = PX30_SCHMITT_PINS_PER_GRF_REG;
+ *reg += (bank->bank_num - 1) * PX30_SCHMITT_BANK_STRIDE;
+ }
+
+ *reg += ((pin_num / pins_per_reg) * 4);
+ *bit = pin_num % pins_per_reg;
+
+ return 0;
+}
+
#define RV1108_PULL_PMU_OFFSET 0x10
#define RV1108_PULL_OFFSET 0x110
#define RV1108_PULL_PINS_PER_REG 8
@@ -1798,6 +1950,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
return !(data & BIT(bit))
? PIN_CONFIG_BIAS_PULL_PIN_DEFAULT
: PIN_CONFIG_BIAS_DISABLE;
+ case PX30:
case RV1108:
case RK3188:
case RK3288:
@@ -1841,6 +1994,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
data |= BIT(bit);
ret = regmap_write(regmap, reg, data);
break;
+ case PX30:
case RV1108:
case RK3188:
case RK3288:
@@ -2103,6 +2257,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
pull == PIN_CONFIG_BIAS_DISABLE);
case RK3066B:
return pull ? false : true;
+ case PX30:
case RV1108:
case RK3188:
case RK3288:
@@ -2555,6 +2710,57 @@ static int rockchip_gpio_direction_output(struct gpio_chip *gc,
return pinctrl_gpio_direction_output(gc->base + offset);
}
+static void rockchip_gpio_set_debounce(struct gpio_chip *gc,
+ unsigned int offset, bool enable)
+{
+ struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
+ void __iomem *reg = bank->reg_base + GPIO_DEBOUNCE;
+ unsigned long flags;
+ u32 data;
+
+ clk_enable(bank->clk);
+ raw_spin_lock_irqsave(&bank->slock, flags);
+
+ data = readl(reg);
+ if (enable)
+ data |= BIT(offset);
+ else
+ data &= ~BIT(offset);
+ writel(data, reg);
+
+ raw_spin_unlock_irqrestore(&bank->slock, flags);
+ clk_disable(bank->clk);
+}
+
+/*
+ * gpiolib set_config callback function. Only input debounce is
+ * handled here; pin mux configuration (e.g. 'gpio output') is
+ * handled by the pinctrl subsystem interface.
+ */
+static int rockchip_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ enum pin_config_param param = pinconf_to_config_param(config);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ rockchip_gpio_set_debounce(gc, offset, true);
+ /*
+ * Rockchip's GPIO can only debounce for up to one period of
+ * the debounce clock (pclk), which falls far short of typical
+ * requirements, as pclk is usually near 100 MHz and shared by
+ * all peripherals. In practice this crippled debounce capability
+ * is only useful for filtering spurious glitches that could
+ * wake the system when the GPIO is configured as a wakeup
+ * interrupt source. So keep returning -ENOTSUPP as before, to
+ * make sure callers of gpiod_set_debounce() don't see a change
+ * in behaviour.
+ */
+ default:
+ return -ENOTSUPP;
+ }
+}
+
/*
* gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
* and a virtual IRQ, if not already present.
@@ -2580,6 +2786,7 @@ static const struct gpio_chip rockchip_gpiolib_chip = {
.get_direction = rockchip_gpio_get_direction,
.direction_input = rockchip_gpio_direction_input,
.direction_output = rockchip_gpio_direction_output,
+ .set_config = rockchip_gpio_set_config,
.to_irq = rockchip_gpio_to_irq,
.owner = THIS_MODULE,
};
@@ -3237,6 +3444,43 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static struct rockchip_pin_bank px30_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU
+ ),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT
+ ),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT
+ ),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT
+ ),
+};
+
+static struct rockchip_pin_ctrl px30_pin_ctrl = {
+ .pin_banks = px30_pin_banks,
+ .nr_banks = ARRAY_SIZE(px30_pin_banks),
+ .label = "PX30-GPIO",
+ .type = PX30,
+ .grf_mux_offset = 0x0,
+ .pmu_mux_offset = 0x0,
+ .iomux_routes = px30_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(px30_mux_route_data),
+ .pull_calc_reg = px30_calc_pull_reg_and_bit,
+ .drv_calc_reg = px30_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = px30_calc_schmitt_reg_and_bit,
+};
+
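As a side note, a minimal sketch (assuming the usual OF match-data flow; example_probe() is hypothetical and relies on the driver's struct rockchip_pin_ctrl) of how the new "rockchip,px30-pinctrl" compatible, added to the of_device_id table further below, selects this per-SoC data at probe time:

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		/* Yields &px30_pin_ctrl for a "rockchip,px30-pinctrl" node. */
		const struct rockchip_pin_ctrl *ctrl =
			of_device_get_match_data(&pdev->dev);

		if (!ctrl)
			return -ENODEV;

		/*
		 * ctrl->pull_calc_reg, ctrl->drv_calc_reg and
		 * ctrl->schmitt_calc_reg then provide the PX30-specific
		 * register/bit lookups for every pinconf access.
		 */
		return 0;
	}
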
static struct rockchip_pin_bank rv1108_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
IOMUX_SOURCE_PMU,
@@ -3545,6 +3789,8 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
};
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
+ { .compatible = "rockchip,px30-pinctrl",
+ .data = &px30_pin_ctrl },
{ .compatible = "rockchip,rv1108-pinctrl",
.data = &rv1108_pin_ctrl },
{ .compatible = "rockchip,rk2928-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index a7c5eb39b1eb4..9c3c00515aa0f 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -144,6 +144,7 @@ struct pcs_soc_data {
* struct pcs_device - pinctrl device instance
* @res: resources
* @base: virtual address of the controller
+ * @saved_vals: saved values for the controller
* @size: size of the ioremapped area
* @dev: device entry
* @np: device tree node
@@ -172,11 +173,13 @@ struct pcs_soc_data {
struct pcs_device {
struct resource *res;
void __iomem *base;
+ void *saved_vals;
unsigned size;
struct device *dev;
struct device_node *np;
struct pinctrl_dev *pctl;
unsigned flags;
+#define PCS_CONTEXT_LOSS_OFF (1 << 3)
#define PCS_QUIRK_SHARED_IRQ (1 << 2)
#define PCS_FEAT_IRQ (1 << 1)
#define PCS_FEAT_PINCONF (1 << 0)
@@ -1576,6 +1579,67 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
}
#ifdef CONFIG_PM
+static int pcs_save_context(struct pcs_device *pcs)
+{
+ int i, mux_bytes;
+ u64 *regsl;
+ u32 *regsw;
+ u16 *regshw;
+
+ mux_bytes = pcs->width / BITS_PER_BYTE;
+
+	if (!pcs->saved_vals) {
+		pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC);
+		if (!pcs->saved_vals)
+			return -ENOMEM;
+	}
+
+ switch (pcs->width) {
+ case 64:
+ regsl = (u64 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ regsl[i] = pcs->read(pcs->base + i * mux_bytes);
+ break;
+ case 32:
+ regsw = (u32 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ regsw[i] = pcs->read(pcs->base + i * mux_bytes);
+ break;
+ case 16:
+ regshw = (u16 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ regshw[i] = pcs->read(pcs->base + i * mux_bytes);
+ break;
+ }
+
+ return 0;
+}
+
+static void pcs_restore_context(struct pcs_device *pcs)
+{
+ int i, mux_bytes;
+ u64 *regsl;
+ u32 *regsw;
+ u16 *regshw;
+
+ mux_bytes = pcs->width / BITS_PER_BYTE;
+
+ switch (pcs->width) {
+ case 64:
+ regsl = (u64 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ pcs->write(regsl[i], pcs->base + i * mux_bytes);
+ break;
+ case 32:
+ regsw = (u32 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ pcs->write(regsw[i], pcs->base + i * mux_bytes);
+ break;
+ case 16:
+ regshw = (u16 *)pcs->saved_vals;
+ for (i = 0; i < pcs->size / mux_bytes; i++)
+ pcs->write(regshw[i], pcs->base + i * mux_bytes);
+ break;
+ }
+}
+
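To make the loop bounds above concrete, a worked example with purely hypothetical numbers: for an instance with pcs->width = 32 and pcs->size = 0x300 bytes, mux_bytes = 32 / BITS_PER_BYTE = 4, so 0x300 / 4 = 192 registers are read into saved_vals on suspend and written back on resume. A trivial standalone sketch of the same arithmetic:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical pinctrl-single instance. */
		unsigned int width = 32;	/* register width in bits */
		unsigned int size = 0x300;	/* ioremapped area in bytes */
		unsigned int mux_bytes = width / 8;

		printf("registers saved/restored: %u\n", size / mux_bytes);
		return 0;
	}
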
static int pinctrl_single_suspend(struct platform_device *pdev,
pm_message_t state)
{
@@ -1585,6 +1649,9 @@ static int pinctrl_single_suspend(struct platform_device *pdev,
if (!pcs)
return -EINVAL;
+	if (pcs->flags & PCS_CONTEXT_LOSS_OFF) {
+		int ret = pcs_save_context(pcs);
+
+		if (ret < 0)
+			return ret;
+	}
+
return pinctrl_force_sleep(pcs->pctl);
}
@@ -1596,6 +1663,9 @@ static int pinctrl_single_resume(struct platform_device *pdev)
if (!pcs)
return -EINVAL;
+ if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
+ pcs_restore_context(pcs);
+
return pinctrl_force_default(pcs->pctl);
}
#endif
@@ -1824,7 +1894,7 @@ static const struct pcs_soc_data pinctrl_single_dra7 = {
};
static const struct pcs_soc_data pinctrl_single_am437x = {
- .flags = PCS_QUIRK_SHARED_IRQ,
+ .flags = PCS_QUIRK_SHARED_IRQ | PCS_CONTEXT_LOSS_OFF,
.irq_enable_mask = (1 << 29), /* OMAP_WAKEUP_EN */
.irq_status_mask = (1 << 30), /* OMAP_WAKEUP_EVENT */
};
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index ad80a17c9990d..0e22f52b2a199 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -58,7 +58,10 @@ struct msm_pinctrl {
struct device *dev;
struct pinctrl_dev *pctrl;
struct gpio_chip chip;
+ struct pinctrl_desc desc;
struct notifier_block restart_nb;
+
+ struct irq_chip irq_chip;
int irq;
raw_spinlock_t lock;
@@ -390,13 +393,6 @@ static const struct pinconf_ops msm_pinconf_ops = {
.pin_config_group_set = msm_config_group_set,
};
-static struct pinctrl_desc msm_pinctrl_desc = {
- .pctlops = &msm_pinctrl_ops,
- .pmxops = &msm_pinmux_ops,
- .confops = &msm_pinconf_ops,
- .owner = THIS_MODULE,
-};
-
static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
const struct msm_pingroup *g;
@@ -506,29 +502,46 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
int is_out;
int drive;
int pull;
- u32 ctl_reg;
+ int val;
+ u32 ctl_reg, io_reg;
- static const char * const pulls[] = {
+ static const char * const pulls_keeper[] = {
"no pull",
"pull down",
"keeper",
"pull up"
};
+ static const char * const pulls_no_keeper[] = {
+ "no pull",
+ "pull down",
+ "pull up",
+ };
+
if (!gpiochip_line_is_valid(chip, offset))
return;
g = &pctrl->soc->groups[offset];
ctl_reg = readl(pctrl->regs + g->ctl_reg);
+ io_reg = readl(pctrl->regs + g->io_reg);
is_out = !!(ctl_reg & BIT(g->oe_bit));
func = (ctl_reg >> g->mux_bit) & 7;
drive = (ctl_reg >> g->drv_bit) & 7;
pull = (ctl_reg >> g->pull_bit) & 3;
- seq_printf(s, " %-8s: %-3s %d", g->name, is_out ? "out" : "in", func);
+ if (is_out)
+ val = !!(io_reg & BIT(g->out_bit));
+ else
+ val = !!(io_reg & BIT(g->in_bit));
+
+ seq_printf(s, " %-8s: %-3s", g->name, is_out ? "out" : "in");
+ seq_printf(s, " %-4s func%d", val ? "high" : "low", func);
seq_printf(s, " %dmA", msm_regval_to_drive(drive));
- seq_printf(s, " %s", pulls[pull]);
+ if (pctrl->soc->pull_no_keeper)
+ seq_printf(s, " %s", pulls_no_keeper[pull]);
+ else
+ seq_printf(s, " %s", pulls_keeper[pull]);
seq_puts(s, "\n");
}
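With the added io_reg read and the keeper/no-keeper pull tables, a debugfs line produced by the format strings above now looks roughly like this (values are invented; spacing follows the %-8s/%-3s/%-4s field widths and the drive value is converted by msm_regval_to_drive()):

	 gpio12  : out high func1 8mA pull up
	 gpio37  : in  low  func0 2mA no pull
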
@@ -776,15 +789,6 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-static struct irq_chip msm_gpio_irq_chip = {
- .name = "msmgpio",
- .irq_mask = msm_gpio_irq_mask,
- .irq_unmask = msm_gpio_irq_unmask,
- .irq_ack = msm_gpio_irq_ack,
- .irq_set_type = msm_gpio_irq_set_type,
- .irq_set_wake = msm_gpio_irq_set_wake,
-};
-
static void msm_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -877,6 +881,13 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
chip->of_node = pctrl->dev->of_node;
chip->need_valid_mask = msm_gpio_needs_valid_mask(pctrl);
+ pctrl->irq_chip.name = "msmgpio";
+ pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
+ pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
+ pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
+ pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
+ pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
+
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "Failed register gpiochip\n");
@@ -890,15 +901,28 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return ret;
}
- ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio);
- if (ret) {
- dev_err(pctrl->dev, "Failed to add pin range\n");
- gpiochip_remove(&pctrl->chip);
- return ret;
+ /*
+ * For DeviceTree-supported systems, the gpio core checks the
+ * pinctrl's device node for the "gpio-ranges" property.
+ * If it is present, it takes care of adding the pin ranges
+ * for the driver. In this case the driver can skip ahead.
+ *
+	 * In order to remain compatible with older, existing DeviceTree
+	 * files which don't set the "gpio-ranges" property, or with systems
+	 * that use ACPI, the driver has to call gpiochip_add_pin_range().
+ */
+ if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) {
+ ret = gpiochip_add_pin_range(&pctrl->chip,
+ dev_name(pctrl->dev), 0, 0, chip->ngpio);
+ if (ret) {
+ dev_err(pctrl->dev, "Failed to add pin range\n");
+ gpiochip_remove(&pctrl->chip);
+ return ret;
+ }
}
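For reference, a heavily simplified sketch of what the gpio core does when the property is present, modelled on of_gpiochip_add_pin_range() in gpiolib-of.c (lookup of the pinctrl device and error handling omitted): it parses each "gpio-ranges" tuple itself and registers the corresponding pin range, which is why the driver may skip its own gpiochip_add_pin_range() call in that case:

	/* one <&pinctrl gpio_offset pin_offset count> tuple per index i */
	ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, i,
					       &pinspec);
	if (ret)
		break;
	ret = gpiochip_add_pin_range(chip, pinctrl_dev_get_devname(pctldev),
				     pinspec.args[0], pinspec.args[1],
				     pinspec.args[2]);
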
ret = gpiochip_irqchip_add(chip,
- &msm_gpio_irq_chip,
+ &pctrl->irq_chip,
0,
handle_edge_irq,
IRQ_TYPE_NONE);
@@ -908,7 +932,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return -ENOSYS;
}
- gpiochip_set_chained_irqchip(chip, &msm_gpio_irq_chip, pctrl->irq,
+ gpiochip_set_chained_irqchip(chip, &pctrl->irq_chip, pctrl->irq,
msm_gpio_irq_handler);
return 0;
@@ -979,11 +1003,15 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return pctrl->irq;
}
- msm_pinctrl_desc.name = dev_name(&pdev->dev);
- msm_pinctrl_desc.pins = pctrl->soc->pins;
- msm_pinctrl_desc.npins = pctrl->soc->npins;
- pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &msm_pinctrl_desc,
- pctrl);
+ pctrl->desc.owner = THIS_MODULE;
+ pctrl->desc.pctlops = &msm_pinctrl_ops;
+ pctrl->desc.pmxops = &msm_pinmux_ops;
+ pctrl->desc.confops = &msm_pinconf_ops;
+ pctrl->desc.name = dev_name(&pdev->dev);
+ pctrl->desc.pins = pctrl->soc->pins;
+ pctrl->desc.npins = pctrl->soc->npins;
+
+ pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &pctrl->desc, pctrl);
if (IS_ERR(pctrl->pctrl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(pctrl->pctrl);
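A brief sketch of why the file-scope msm_pinctrl_desc and msm_gpio_irq_chip are folded into struct msm_pinctrl (device names below are hypothetical): with a single shared descriptor, a second TLMM instance probing would overwrite the fields the first instance had already registered with:

	/* old code, two probes sharing one static descriptor */
	msm_pinctrl_desc.name = dev_name(&pdev_a->dev);	/* instance A */
	devm_pinctrl_register(&pdev_a->dev, &msm_pinctrl_desc, pctrl_a);
	msm_pinctrl_desc.name = dev_name(&pdev_b->dev);	/* clobbers A */

Embedding the descriptor and the irq_chip in struct msm_pinctrl gives every controller instance its own copy, so multiple pinctrl blocks can coexist safely.
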
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
index bb3ce5c3e18b7..1dfbe42dd8956 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -30,9 +30,7 @@
#include "pinctrl-msm.h"
-static struct msm_pinctrl_soc_data qdf2xxx_pinctrl;
-
-/* A reasonable limit to the number of GPIOS */
+/* A maximum of 256 allows us to use a u8 array to hold the GPIO numbers */
#define MAX_GPIOS 256
/* maximum size of each gpio name (enough room for "gpioXXX" + null) */
@@ -40,77 +38,111 @@ static struct msm_pinctrl_soc_data qdf2xxx_pinctrl;
static int qdf2xxx_pinctrl_probe(struct platform_device *pdev)
{
+ struct msm_pinctrl_soc_data *pinctrl;
struct pinctrl_pin_desc *pins;
struct msm_pingroup *groups;
char (*names)[NAME_SIZE];
unsigned int i;
u32 num_gpios;
+ unsigned int avail_gpios; /* The number of GPIOs we support */
+ u8 gpios[MAX_GPIOS]; /* An array of supported GPIOs */
int ret;
/* Query the number of GPIOs from ACPI */
ret = device_property_read_u32(&pdev->dev, "num-gpios", &num_gpios);
if (ret < 0) {
- dev_warn(&pdev->dev, "missing num-gpios property\n");
+ dev_err(&pdev->dev, "missing 'num-gpios' property\n");
return ret;
}
-
if (!num_gpios || num_gpios > MAX_GPIOS) {
- dev_warn(&pdev->dev, "invalid num-gpios property\n");
+ dev_err(&pdev->dev, "invalid 'num-gpios' property\n");
+ return -ENODEV;
+ }
+
+ /* The number of GPIOs in the approved list */
+ ret = device_property_read_u8_array(&pdev->dev, "gpios", NULL, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "missing 'gpios' property\n");
+ return ret;
+ }
+ /*
+ * The number of available GPIOs should be non-zero, and no
+	 * more than the total number of GPIOs.
+ */
+ if (!ret || ret > num_gpios) {
+ dev_err(&pdev->dev, "invalid 'gpios' property\n");
return -ENODEV;
}
+ avail_gpios = ret;
+ ret = device_property_read_u8_array(&pdev->dev, "gpios", gpios,
+ avail_gpios);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not read list of GPIOs\n");
+ return ret;
+ }
+
+ pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
pins = devm_kcalloc(&pdev->dev, num_gpios,
sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
groups = devm_kcalloc(&pdev->dev, num_gpios,
sizeof(struct msm_pingroup), GFP_KERNEL);
- names = devm_kcalloc(&pdev->dev, num_gpios, NAME_SIZE, GFP_KERNEL);
+ names = devm_kcalloc(&pdev->dev, avail_gpios, NAME_SIZE, GFP_KERNEL);
- if (!pins || !groups || !names)
+ if (!pinctrl || !pins || !groups || !names)
return -ENOMEM;
+ /*
+ * Initialize the array. GPIOs not listed in the 'gpios' array
+ * still need a number, but nothing else.
+ */
for (i = 0; i < num_gpios; i++) {
- snprintf(names[i], NAME_SIZE, "gpio%u", i);
-
pins[i].number = i;
- pins[i].name = names[i];
-
- groups[i].npins = 1;
- groups[i].name = names[i];
groups[i].pins = &pins[i].number;
+ }
- groups[i].ctl_reg = 0x10000 * i;
- groups[i].io_reg = 0x04 + 0x10000 * i;
- groups[i].intr_cfg_reg = 0x08 + 0x10000 * i;
- groups[i].intr_status_reg = 0x0c + 0x10000 * i;
- groups[i].intr_target_reg = 0x08 + 0x10000 * i;
-
- groups[i].mux_bit = 2;
- groups[i].pull_bit = 0;
- groups[i].drv_bit = 6;
- groups[i].oe_bit = 9;
- groups[i].in_bit = 0;
- groups[i].out_bit = 1;
- groups[i].intr_enable_bit = 0;
- groups[i].intr_status_bit = 0;
- groups[i].intr_target_bit = 5;
- groups[i].intr_target_kpss_val = 1;
- groups[i].intr_raw_status_bit = 4;
- groups[i].intr_polarity_bit = 1;
- groups[i].intr_detection_bit = 2;
- groups[i].intr_detection_width = 2;
+ /* Populate the entries that are meant to be exposed as GPIOs. */
+ for (i = 0; i < avail_gpios; i++) {
+ unsigned int gpio = gpios[i];
+
+ groups[gpio].npins = 1;
+ snprintf(names[i], NAME_SIZE, "gpio%u", gpio);
+ pins[gpio].name = names[i];
+ groups[gpio].name = names[i];
+
+ groups[gpio].ctl_reg = 0x10000 * gpio;
+ groups[gpio].io_reg = 0x04 + 0x10000 * gpio;
+ groups[gpio].intr_cfg_reg = 0x08 + 0x10000 * gpio;
+ groups[gpio].intr_status_reg = 0x0c + 0x10000 * gpio;
+ groups[gpio].intr_target_reg = 0x08 + 0x10000 * gpio;
+
+ groups[gpio].mux_bit = 2;
+ groups[gpio].pull_bit = 0;
+ groups[gpio].drv_bit = 6;
+ groups[gpio].oe_bit = 9;
+ groups[gpio].in_bit = 0;
+ groups[gpio].out_bit = 1;
+ groups[gpio].intr_enable_bit = 0;
+ groups[gpio].intr_status_bit = 0;
+ groups[gpio].intr_target_bit = 5;
+ groups[gpio].intr_target_kpss_val = 1;
+ groups[gpio].intr_raw_status_bit = 4;
+ groups[gpio].intr_polarity_bit = 1;
+ groups[gpio].intr_detection_bit = 2;
+ groups[gpio].intr_detection_width = 2;
}
- qdf2xxx_pinctrl.pins = pins;
- qdf2xxx_pinctrl.groups = groups;
- qdf2xxx_pinctrl.npins = num_gpios;
- qdf2xxx_pinctrl.ngroups = num_gpios;
- qdf2xxx_pinctrl.ngpios = num_gpios;
+ pinctrl->pins = pins;
+ pinctrl->groups = groups;
+ pinctrl->npins = num_gpios;
+ pinctrl->ngroups = num_gpios;
+ pinctrl->ngpios = num_gpios;
- return msm_pinctrl_probe(pdev, &qdf2xxx_pinctrl);
+ return msm_pinctrl_probe(pdev, pinctrl);
}
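Two notes on the flow above. First, device_property_read_u8_array() called with a NULL buffer and a count of 0 returns the number of elements, which is how avail_gpios is obtained before the actual read. Second, a worked illustration of the sparse handling with purely hypothetical property values, mimicking the two population loops:

	#include <stdio.h>

	/* Hypothetical firmware-provided values, for illustration only. */
	#define NUM_GPIOS	8
	static const unsigned char gpios[] = { 1, 3, 4 };	/* approved list */

	int main(void)
	{
		unsigned int i;

		/* First loop: every pin gets a number (placeholder entry). */
		for (i = 0; i < NUM_GPIOS; i++)
			printf("pin %u: numbered\n", i);

		/* Second loop: only approved pins get a name, a one-pin
		 * group and register offsets, so only these are usable. */
		for (i = 0; i < sizeof(gpios); i++)
			printf("pin %u: exposed as gpio%u\n",
			       gpios[i], gpios[i]);

		return 0;
	}
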
static const struct acpi_device_id qdf2xxx_acpi_ids[] = {
- {"QCOM8001"},
+ {"QCOM8002"},
{},
};
MODULE_DEVICE_TABLE(acpi, qdf2xxx_acpi_ids);
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 11b5eeb14c4a0..425fadd6c346b 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -8,26 +8,20 @@ config PINCTRL_SAMSUNG
select PINCONF
config PINCTRL_EXYNOS
- bool "Pinctrl driver data for Samsung EXYNOS SoCs other than 5440"
+ bool "Pinctrl driver data for Samsung EXYNOS SoCs"
depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_SAMSUNG
select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
config PINCTRL_EXYNOS_ARM
- bool "ARMv7-specific pinctrl driver data for Exynos (except Exynos5440)" if COMPILE_TEST
+ bool "ARMv7-specific pinctrl driver data for Exynos" if COMPILE_TEST
depends on PINCTRL_EXYNOS
config PINCTRL_EXYNOS_ARM64
bool "ARMv8-specific pinctrl driver data for Exynos" if COMPILE_TEST
depends on PINCTRL_EXYNOS
-config PINCTRL_EXYNOS5440
- bool "Samsung EXYNOS5440 SoC pinctrl driver"
- depends on SOC_EXYNOS5440
- select PINMUX
- select PINCONF
-
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
depends on ARCH_S3C24XX && OF
diff --git a/drivers/pinctrl/samsung/Makefile b/drivers/pinctrl/samsung/Makefile
index df426561d067c..ed951df6a1128 100644
--- a/drivers/pinctrl/samsung/Makefile
+++ b/drivers/pinctrl/samsung/Makefile
@@ -5,6 +5,5 @@ obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
obj-$(CONFIG_PINCTRL_EXYNOS_ARM) += pinctrl-exynos-arm.o
obj-$(CONFIG_PINCTRL_EXYNOS_ARM64) += pinctrl-exynos-arm64.o
-obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o
obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o
obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index 90c274490181a..d82820fc349a9 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -88,6 +88,7 @@ static const struct samsung_retention_data s5pv210_retention_data __initconst =
/* pin banks of s5pv210 pin-controller */
static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -105,12 +106,12 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40),
- EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44),
EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48),
EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c),
EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50),
EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54),
+ EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"),
EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"),
EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"),
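The new "Must start with EINTG banks" comments, and the relocated EINTN entries such as "gpi" here (and "gpm5" in the exynos5410 bank list below), reflect how the muxed EINT service register is decoded: the group number is used as a direct index into the bank array. A simplified sketch of that lookup, based on exynos_eint_gpio_irq() in pinctrl-exynos.c (eint_base stands in for the controller's interrupt register base):

	svc = readl(eint_base + EXYNOS_SVC_OFFSET);
	group = EXYNOS_SVC_GROUP(svc);		/* 1-based EINT group number */
	pin = svc & EXYNOS_SVC_NUM_MASK;
	if (!group)
		goto out;
	bank += group - 1;			/* plain indexing, no search */

Hence the EINTG banks must come first and in EINT group order, with the EINTN banks collected at the end.
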
@@ -147,6 +148,7 @@ static atomic_t exynos_shared_retention_refcnt;
/* pin banks of exynos3250 pin-controller 0 */
static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -158,6 +160,7 @@ static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst =
/* pin banks of exynos3250 pin-controller 1 */
static const struct samsung_pin_bank_data exynos3250_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTN(8, 0x120, "gpe0"),
EXYNOS_PIN_BANK_EINTN(8, 0x140, "gpe1"),
EXYNOS_PIN_BANK_EINTN(3, 0x180, "gpe2"),
@@ -232,6 +235,7 @@ const struct samsung_pinctrl_of_match_data exynos3250_of_data __initconst = {
/* pin banks of exynos4210 pin-controller 0 */
static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -252,6 +256,7 @@ static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst =
/* pin banks of exynos4210 pin-controller 1 */
static const struct samsung_pin_bank_data exynos4210_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpj0", 0x00),
EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpj1", 0x04),
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
@@ -276,6 +281,7 @@ static const struct samsung_pin_bank_data exynos4210_pin_banks1[] __initconst =
/* pin banks of exynos4210 pin-controller 2 */
static const struct samsung_pin_bank_data exynos4210_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTN(7, 0x000, "gpz"),
};
@@ -346,6 +352,7 @@ const struct samsung_pinctrl_of_match_data exynos4210_of_data __initconst = {
/* pin banks of exynos4x12 pin-controller 0 */
static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
@@ -363,6 +370,7 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst =
/* pin banks of exynos4x12 pin-controller 1 */
static const struct samsung_pin_bank_data exynos4x12_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
@@ -390,11 +398,13 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks1[] __initconst =
/* pin banks of exynos4x12 pin-controller 2 */
static const struct samsung_pin_bank_data exynos4x12_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
/* pin banks of exynos4x12 pin-controller 3 */
static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpv2", 0x08),
@@ -449,6 +459,7 @@ const struct samsung_pinctrl_of_match_data exynos4x12_of_data __initconst = {
/* pin banks of exynos5250 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -478,6 +489,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst =
/* pin banks of exynos5250 pin-controller 1 */
static const struct samsung_pin_bank_data exynos5250_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
EXYNOS_PIN_BANK_EINTG(4, 0x040, "gpf0", 0x08),
@@ -491,6 +503,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks1[] __initconst =
/* pin banks of exynos5250 pin-controller 2 */
static const struct samsung_pin_bank_data exynos5250_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
@@ -500,6 +513,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks2[] __initconst =
/* pin banks of exynos5250 pin-controller 3 */
static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
@@ -550,6 +564,7 @@ const struct samsung_pinctrl_of_match_data exynos5250_of_data __initconst = {
/* pin banks of exynos5260 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -575,6 +590,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst =
/* pin banks of exynos5260 pin-controller 1 */
static const struct samsung_pin_bank_data exynos5260_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04),
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
@@ -584,6 +600,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks1[] __initconst =
/* pin banks of exynos5260 pin-controller 2 */
static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
@@ -619,6 +636,7 @@ const struct samsung_pinctrl_of_match_data exynos5260_of_data __initconst = {
/* pin banks of exynos5410 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -630,7 +648,6 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20),
EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24),
EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28),
- EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c),
EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30),
EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34),
@@ -641,6 +658,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48),
EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c),
EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50),
+ EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"),
EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"),
EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"),
@@ -658,6 +676,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
/* pin banks of exynos5410 pin-controller 1 */
static const struct samsung_pin_bank_data exynos5410_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpj0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpj1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpj2", 0x08),
@@ -671,6 +690,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks1[] __initconst =
/* pin banks of exynos5410 pin-controller 2 */
static const struct samsung_pin_bank_data exynos5410_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
@@ -680,6 +700,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks2[] __initconst =
/* pin banks of exynos5410 pin-controller 3 */
static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
@@ -727,6 +748,7 @@ const struct samsung_pinctrl_of_match_data exynos5410_of_data __initconst = {
/* pin banks of exynos5420 pin-controller 0 */
static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
@@ -736,6 +758,7 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst =
/* pin banks of exynos5420 pin-controller 1 */
static const struct samsung_pin_bank_data exynos5420_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpc0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc1", 0x04),
EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
@@ -753,6 +776,7 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks1[] __initconst =
/* pin banks of exynos5420 pin-controller 2 */
static const struct samsung_pin_bank_data exynos5420_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
EXYNOS_PIN_BANK_EINTG(6, 0x040, "gpf0", 0x08),
@@ -765,6 +789,7 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks2[] __initconst =
/* pin banks of exynos5420 pin-controller 3 */
static const struct samsung_pin_bank_data exynos5420_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
@@ -778,6 +803,7 @@ static const struct samsung_pin_bank_data exynos5420_pin_banks3[] __initconst =
/* pin banks of exynos5420 pin-controller 4 */
static const struct samsung_pin_bank_data exynos5420_pin_banks4[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index 71c9d1d9f3451..b6e56422a700e 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -45,6 +45,7 @@ static atomic_t exynos_shared_retention_refcnt;
/* pin banks of exynos5433 pin-controller - ALIVE */
static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
@@ -58,27 +59,32 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst =
/* pin banks of exynos5433 pin-controller - AUD */
static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
/* pin banks of exynos5433 pin-controller - CPIF */
static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
};
/* pin banks of exynos5433 pin-controller - eSE */
static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
};
/* pin banks of exynos5433 pin-controller - FINGER */
static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
};
/* pin banks of exynos5433 pin-controller - FSYS */
static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
@@ -89,16 +95,19 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst =
/* pin banks of exynos5433 pin-controller - IMEM */
static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
};
/* pin banks of exynos5433 pin-controller - NFC */
static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
};
/* pin banks of exynos5433 pin-controller - PERIC */
static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
@@ -120,6 +129,7 @@ static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst =
/* pin banks of exynos5433 pin-controller - TOUCH */
static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
};
@@ -267,6 +277,7 @@ const struct samsung_pinctrl_of_match_data exynos5433_of_data __initconst = {
/* pin banks of exynos7 pin-controller - ALIVE */
static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
@@ -275,6 +286,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
/* pin banks of exynos7 pin-controller - BUS0 */
static const struct samsung_pin_bank_data exynos7_pin_banks1[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc0", 0x04),
EXYNOS_PIN_BANK_EINTG(2, 0x040, "gpc1", 0x08),
@@ -294,31 +306,37 @@ static const struct samsung_pin_bank_data exynos7_pin_banks1[] __initconst = {
/* pin banks of exynos7 pin-controller - NFC */
static const struct samsung_pin_bank_data exynos7_pin_banks2[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
};
/* pin banks of exynos7 pin-controller - TOUCH */
static const struct samsung_pin_bank_data exynos7_pin_banks3[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
};
/* pin banks of exynos7 pin-controller - FF */
static const struct samsung_pin_bank_data exynos7_pin_banks4[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpg4", 0x00),
};
/* pin banks of exynos7 pin-controller - ESE */
static const struct samsung_pin_bank_data exynos7_pin_banks5[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpv7", 0x00),
};
/* pin banks of exynos7 pin-controller - FSYS0 */
static const struct samsung_pin_bank_data exynos7_pin_banks6[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpr4", 0x00),
};
/* pin banks of exynos7 pin-controller - FSYS1 */
static const struct samsung_pin_bank_data exynos7_pin_banks7[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpr0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpr1", 0x04),
EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr2", 0x08),
@@ -327,6 +345,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks7[] __initconst = {
/* pin banks of exynos7 pin-controller - BUS1 */
static const struct samsung_pin_bank_data exynos7_pin_banks8[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpf0", 0x00),
EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpf1", 0x04),
EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpf2", 0x08),
@@ -340,6 +359,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks8[] __initconst = {
};
static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = {
+ /* Must start with EINTG banks, ordered by EINT group number. */
EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index abd43aa7eb0d9..da1ec13697e77 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -99,7 +99,7 @@
#define EXYNOS5433_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
{ \
- .type = &exynos5433_bank_type_alive, \
+ .type = &exynos5433_bank_type_off, \
.pctl_offset = reg, \
.nr_pins = pins, \
.eint_type = EINT_TYPE_WKUP, \
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
deleted file mode 100644
index 3d8d5e8128398..0000000000000
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-//
-// pin-controller/pin-mux/pin-config/gpio-driver for Samsung's EXYNOS5440 SoC.
-//
-// Author: Thomas Abraham <thomas.ab@samsung.com>
-//
-// Copyright (c) 2012 Samsung Electronics Co., Ltd.
-// http://www.samsung.com
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/gpio/driver.h>
-#include <linux/device.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/of_irq.h>
-#include "../core.h"
-
-/* EXYNOS5440 GPIO and Pinctrl register offsets */
-#define GPIO_MUX 0x00
-#define GPIO_IE 0x04
-#define GPIO_INT 0x08
-#define GPIO_TYPE 0x0C
-#define GPIO_VAL 0x10
-#define GPIO_OE 0x14
-#define GPIO_IN 0x18
-#define GPIO_PE 0x1C
-#define GPIO_PS 0x20
-#define GPIO_SR 0x24
-#define GPIO_DS0 0x28
-#define GPIO_DS1 0x2C
-
-#define EXYNOS5440_MAX_PINS 23
-#define EXYNOS5440_MAX_GPIO_INT 8
-#define PIN_NAME_LENGTH 10
-
-#define GROUP_SUFFIX "-grp"
-#define FUNCTION_SUFFIX "-mux"
-
-/*
- * pin configuration type and its value are packed together into a 16-bits.
- * The upper 8-bits represent the configuration type and the lower 8-bits
- * hold the value of the configuration type.
- */
-#define PINCFG_TYPE_MASK 0xFF
-#define PINCFG_VALUE_SHIFT 8
-#define PINCFG_VALUE_MASK (0xFF << PINCFG_VALUE_SHIFT)
-#define PINCFG_PACK(type, value) (((value) << PINCFG_VALUE_SHIFT) | type)
-#define PINCFG_UNPACK_TYPE(cfg) ((cfg) & PINCFG_TYPE_MASK)
-#define PINCFG_UNPACK_VALUE(cfg) (((cfg) & PINCFG_VALUE_MASK) >> \
- PINCFG_VALUE_SHIFT)
-
-/**
- * enum pincfg_type - possible pin configuration types supported.
- * @PINCFG_TYPE_PUD: Pull up/down configuration.
- * @PINCFG_TYPE_DRV: Drive strength configuration.
- * @PINCFG_TYPE_SKEW_RATE: Skew rate configuration.
- * @PINCFG_TYPE_INPUT_TYPE: Pin input type configuration.
- */
-enum pincfg_type {
- PINCFG_TYPE_PUD,
- PINCFG_TYPE_DRV,
- PINCFG_TYPE_SKEW_RATE,
- PINCFG_TYPE_INPUT_TYPE
-};
-
-/**
- * struct exynos5440_pin_group: represent group of pins for pincfg setting.
- * @name: name of the pin group, used to lookup the group.
- * @pins: the pins included in this group.
- * @num_pins: number of pins included in this group.
- */
-struct exynos5440_pin_group {
- const char *name;
- const unsigned int *pins;
- u8 num_pins;
-};
-
-/**
- * struct exynos5440_pmx_func: represent a pin function.
- * @name: name of the pin function, used to lookup the function.
- * @groups: one or more names of pin groups that provide this function.
- * @num_groups: number of groups included in @groups.
- * @function: the function number to be programmed when selected.
- */
-struct exynos5440_pmx_func {
- const char *name;
- const char **groups;
- u8 num_groups;
- unsigned long function;
-};
-
-/**
- * struct exynos5440_pinctrl_priv_data: driver's private runtime data.
- * @reg_base: ioremapped based address of the register space.
- * @gc: gpio chip registered with gpiolib.
- * @pin_groups: list of pin groups parsed from device tree.
- * @nr_groups: number of pin groups available.
- * @pmx_functions: list of pin functions parsed from device tree.
- * @nr_functions: number of pin functions available.
- * @range: gpio range to register with pinctrl
- */
-struct exynos5440_pinctrl_priv_data {
- void __iomem *reg_base;
- struct gpio_chip *gc;
- struct irq_domain *irq_domain;
-
- const struct exynos5440_pin_group *pin_groups;
- unsigned int nr_groups;
- const struct exynos5440_pmx_func *pmx_functions;
- unsigned int nr_functions;
- struct pinctrl_gpio_range range;
-};
-
-/**
- * struct exynos5440_gpio_intr_data: private data for gpio interrupts.
- * @priv: driver's private runtime data.
- * @gpio_int: gpio interrupt number.
- */
-struct exynos5440_gpio_intr_data {
- struct exynos5440_pinctrl_priv_data *priv;
- unsigned int gpio_int;
-};
-
-/* list of all possible config options supported */
-static struct pin_config {
- char *prop_cfg;
- unsigned int cfg_type;
-} pcfgs[] = {
- { "samsung,exynos5440-pin-pud", PINCFG_TYPE_PUD },
- { "samsung,exynos5440-pin-drv", PINCFG_TYPE_DRV },
- { "samsung,exynos5440-pin-skew-rate", PINCFG_TYPE_SKEW_RATE },
- { "samsung,exynos5440-pin-input-type", PINCFG_TYPE_INPUT_TYPE },
-};
-
-/* check if the selector is a valid pin group selector */
-static int exynos5440_get_group_count(struct pinctrl_dev *pctldev)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- return priv->nr_groups;
-}
-
-/* return the name of the group selected by the group selector */
-static const char *exynos5440_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- return priv->pin_groups[selector].name;
-}
-
-/* return the pin numbers associated with the specified group */
-static int exynos5440_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned selector, const unsigned **pins, unsigned *num_pins)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- *pins = priv->pin_groups[selector].pins;
- *num_pins = priv->pin_groups[selector].num_pins;
- return 0;
-}
-
-/* create pinctrl_map entries by parsing device tree nodes */
-static int exynos5440_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np, struct pinctrl_map **maps,
- unsigned *nmaps)
-{
- struct device *dev = pctldev->dev;
- struct pinctrl_map *map;
- unsigned long *cfg = NULL;
- char *gname, *fname;
- int cfg_cnt = 0, map_cnt = 0, idx = 0;
-
- /* count the number of config options specfied in the node */
- for (idx = 0; idx < ARRAY_SIZE(pcfgs); idx++)
- if (of_find_property(np, pcfgs[idx].prop_cfg, NULL))
- cfg_cnt++;
-
- /*
- * Find out the number of map entries to create. All the config options
- * can be accomadated into a single config map entry.
- */
- if (cfg_cnt)
- map_cnt = 1;
- if (of_find_property(np, "samsung,exynos5440-pin-function", NULL))
- map_cnt++;
- if (!map_cnt) {
- dev_err(dev, "node %s does not have either config or function "
- "configurations\n", np->name);
- return -EINVAL;
- }
-
- /* Allocate memory for pin-map entries */
- map = kzalloc(sizeof(*map) * map_cnt, GFP_KERNEL);
- if (!map)
- return -ENOMEM;
- *nmaps = 0;
-
- /*
- * Allocate memory for pin group name. The pin group name is derived
- * from the node name from which these map entries are be created.
- */
- gname = kasprintf(GFP_KERNEL, "%s%s", np->name, GROUP_SUFFIX);
- if (!gname)
- goto free_map;
-
- /*
- * don't have config options? then skip over to creating function
- * map entries.
- */
- if (!cfg_cnt)
- goto skip_cfgs;
-
- /* Allocate memory for config entries */
- cfg = kzalloc(sizeof(*cfg) * cfg_cnt, GFP_KERNEL);
- if (!cfg)
- goto free_gname;
-
- /* Prepare a list of config settings */
- for (idx = 0, cfg_cnt = 0; idx < ARRAY_SIZE(pcfgs); idx++) {
- u32 value;
- if (!of_property_read_u32(np, pcfgs[idx].prop_cfg, &value))
- cfg[cfg_cnt++] =
- PINCFG_PACK(pcfgs[idx].cfg_type, value);
- }
-
- /* create the config map entry */
- map[*nmaps].data.configs.group_or_pin = gname;
- map[*nmaps].data.configs.configs = cfg;
- map[*nmaps].data.configs.num_configs = cfg_cnt;
- map[*nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
- *nmaps += 1;
-
-skip_cfgs:
- /* create the function map entry */
- if (of_find_property(np, "samsung,exynos5440-pin-function", NULL)) {
- fname = kasprintf(GFP_KERNEL,
- "%s%s", np->name, FUNCTION_SUFFIX);
- if (!fname)
- goto free_cfg;
-
- map[*nmaps].data.mux.group = gname;
- map[*nmaps].data.mux.function = fname;
- map[*nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
- *nmaps += 1;
- }
-
- *maps = map;
- return 0;
-
-free_cfg:
- kfree(cfg);
-free_gname:
- kfree(gname);
-free_map:
- kfree(map);
- return -ENOMEM;
-}
-
-/* free the memory allocated to hold the pin-map table */
-static void exynos5440_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps)
-{
- int idx;
-
- for (idx = 0; idx < num_maps; idx++) {
- if (map[idx].type == PIN_MAP_TYPE_MUX_GROUP) {
- kfree(map[idx].data.mux.function);
- if (!idx)
- kfree(map[idx].data.mux.group);
- } else if (map->type == PIN_MAP_TYPE_CONFIGS_GROUP) {
- kfree(map[idx].data.configs.configs);
- if (!idx)
- kfree(map[idx].data.configs.group_or_pin);
- }
- }
-
- kfree(map);
-}
-
-/* list of pinctrl callbacks for the pinctrl core */
-static const struct pinctrl_ops exynos5440_pctrl_ops = {
- .get_groups_count = exynos5440_get_group_count,
- .get_group_name = exynos5440_get_group_name,
- .get_group_pins = exynos5440_get_group_pins,
- .dt_node_to_map = exynos5440_dt_node_to_map,
- .dt_free_map = exynos5440_dt_free_map,
-};
-
-/* check if the selector is a valid pin function selector */
-static int exynos5440_get_functions_count(struct pinctrl_dev *pctldev)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- return priv->nr_functions;
-}
-
-/* return the name of the pin function specified */
-static const char *exynos5440_pinmux_get_fname(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- return priv->pmx_functions[selector].name;
-}
-
-/* return the groups associated for the specified function selector */
-static int exynos5440_pinmux_get_groups(struct pinctrl_dev *pctldev,
- unsigned selector, const char * const **groups,
- unsigned * const num_groups)
-{
- struct exynos5440_pinctrl_priv_data *priv;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- *groups = priv->pmx_functions[selector].groups;
- *num_groups = priv->pmx_functions[selector].num_groups;
- return 0;
-}
-
-/* enable or disable a pinmux function */
-static void exynos5440_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group, bool enable)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- void __iomem *base;
- u32 function;
- u32 data;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- base = priv->reg_base;
- function = priv->pmx_functions[selector].function;
-
- data = readl(base + GPIO_MUX);
- if (enable)
- data |= (1 << function);
- else
- data &= ~(1 << function);
- writel(data, base + GPIO_MUX);
-}
-
-/* enable a specified pinmux by writing to registers */
-static int exynos5440_pinmux_set_mux(struct pinctrl_dev *pctldev,
- unsigned selector,
- unsigned group)
-{
- exynos5440_pinmux_setup(pctldev, selector, group, true);
- return 0;
-}
-
-/*
- * The calls to gpio_direction_output() and gpio_direction_input()
- * leads to this function call (via the pinctrl_gpio_direction_{input|output}()
- * function called from the gpiolib interface).
- */
-static int exynos5440_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range, unsigned offset, bool input)
-{
- return 0;
-}
-
-/* list of pinmux callbacks for the pinmux vertical in pinctrl core */
-static const struct pinmux_ops exynos5440_pinmux_ops = {
- .get_functions_count = exynos5440_get_functions_count,
- .get_function_name = exynos5440_pinmux_get_fname,
- .get_function_groups = exynos5440_pinmux_get_groups,
- .set_mux = exynos5440_pinmux_set_mux,
- .gpio_set_direction = exynos5440_pinmux_gpio_set_direction,
-};
-
-/* set the pin config settings for a specified pin */
-static int exynos5440_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *configs,
- unsigned num_configs)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- void __iomem *base;
- enum pincfg_type cfg_type;
- u32 cfg_value;
- u32 data;
- int i;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- base = priv->reg_base;
-
- for (i = 0; i < num_configs; i++) {
- cfg_type = PINCFG_UNPACK_TYPE(configs[i]);
- cfg_value = PINCFG_UNPACK_VALUE(configs[i]);
-
- switch (cfg_type) {
- case PINCFG_TYPE_PUD:
- /* first set pull enable/disable bit */
- data = readl(base + GPIO_PE);
- data &= ~(1 << pin);
- if (cfg_value)
- data |= (1 << pin);
- writel(data, base + GPIO_PE);
-
- /* then set pull up/down bit */
- data = readl(base + GPIO_PS);
- data &= ~(1 << pin);
- if (cfg_value == 2)
- data |= (1 << pin);
- writel(data, base + GPIO_PS);
- break;
-
- case PINCFG_TYPE_DRV:
- /* set the first bit of the drive strength */
- data = readl(base + GPIO_DS0);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_DS0);
- cfg_value >>= 1;
-
- /* set the second bit of the driver strength */
- data = readl(base + GPIO_DS1);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_DS1);
- break;
- case PINCFG_TYPE_SKEW_RATE:
- data = readl(base + GPIO_SR);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_SR);
- break;
- case PINCFG_TYPE_INPUT_TYPE:
- data = readl(base + GPIO_TYPE);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_TYPE);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
- } /* for each config */
-
- return 0;
-}
-
-/* get the pin config settings for a specified pin */
-static int exynos5440_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *config)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- void __iomem *base;
- enum pincfg_type cfg_type = PINCFG_UNPACK_TYPE(*config);
- u32 data;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- base = priv->reg_base;
-
- switch (cfg_type) {
- case PINCFG_TYPE_PUD:
- data = readl(base + GPIO_PE);
- data = (data >> pin) & 1;
- if (!data)
- *config = 0;
- else
- *config = ((readl(base + GPIO_PS) >> pin) & 1) + 1;
- break;
- case PINCFG_TYPE_DRV:
- data = readl(base + GPIO_DS0);
- data = (data >> pin) & 1;
- *config = data;
- data = readl(base + GPIO_DS1);
- data = (data >> pin) & 1;
- *config |= (data << 1);
- break;
- case PINCFG_TYPE_SKEW_RATE:
- data = readl(base + GPIO_SR);
- *config = (data >> pin) & 1;
- break;
- case PINCFG_TYPE_INPUT_TYPE:
- data = readl(base + GPIO_TYPE);
- *config = (data >> pin) & 1;
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* set the pin config settings for a specified pin group */
-static int exynos5440_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long *configs,
- unsigned num_configs)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- const unsigned int *pins;
- unsigned int cnt;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- pins = priv->pin_groups[group].pins;
-
- for (cnt = 0; cnt < priv->pin_groups[group].num_pins; cnt++)
- exynos5440_pinconf_set(pctldev, pins[cnt], configs,
- num_configs);
-
- return 0;
-}
-
-/* get the pin config settings for a specified pin group */
-static int exynos5440_pinconf_group_get(struct pinctrl_dev *pctldev,
- unsigned int group, unsigned long *config)
-{
- struct exynos5440_pinctrl_priv_data *priv;
- const unsigned int *pins;
-
- priv = pinctrl_dev_get_drvdata(pctldev);
- pins = priv->pin_groups[group].pins;
- exynos5440_pinconf_get(pctldev, pins[0], config);
- return 0;
-}
-
-/* list of pinconfig callbacks for pinconfig vertical in the pinctrl code */
-static const struct pinconf_ops exynos5440_pinconf_ops = {
- .pin_config_get = exynos5440_pinconf_get,
- .pin_config_set = exynos5440_pinconf_set,
- .pin_config_group_get = exynos5440_pinconf_group_get,
- .pin_config_group_set = exynos5440_pinconf_group_set,
-};
-
-/* gpiolib gpio_set callback function */
-static void exynos5440_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- void __iomem *base = priv->reg_base;
- u32 data;
-
- data = readl(base + GPIO_VAL);
- data &= ~(1 << offset);
- if (value)
- data |= 1 << offset;
- writel(data, base + GPIO_VAL);
-}
-
-/* gpiolib gpio_get callback function */
-static int exynos5440_gpio_get(struct gpio_chip *gc, unsigned offset)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- void __iomem *base = priv->reg_base;
- u32 data;
-
- data = readl(base + GPIO_IN);
- data >>= offset;
- data &= 1;
- return data;
-}
-
-/* gpiolib gpio_direction_input callback function */
-static int exynos5440_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- void __iomem *base = priv->reg_base;
- u32 data;
-
- /* first disable the data output enable on this pin */
- data = readl(base + GPIO_OE);
- data &= ~(1 << offset);
- writel(data, base + GPIO_OE);
-
- /* now enable input on this pin */
- data = readl(base + GPIO_IE);
- data |= 1 << offset;
- writel(data, base + GPIO_IE);
- return 0;
-}
-
-/* gpiolib gpio_direction_output callback function */
-static int exynos5440_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
- int value)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- void __iomem *base = priv->reg_base;
- u32 data;
-
- exynos5440_gpio_set(gc, offset, value);
-
- /* first disable the data input enable on this pin */
- data = readl(base + GPIO_IE);
- data &= ~(1 << offset);
- writel(data, base + GPIO_IE);
-
- /* now enable output on this pin */
- data = readl(base + GPIO_OE);
- data |= 1 << offset;
- writel(data, base + GPIO_OE);
- return 0;
-}
-
-/* gpiolib gpio_to_irq callback function */
-static int exynos5440_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
-{
- struct exynos5440_pinctrl_priv_data *priv = gpiochip_get_data(gc);
- unsigned int virq;
-
- if (offset < 16 || offset > 23)
- return -ENXIO;
-
- if (!priv->irq_domain)
- return -ENXIO;
-
- virq = irq_create_mapping(priv->irq_domain, offset - 16);
- return virq ? : -ENXIO;
-}
-
-/* parse the pin numbers listed in the 'samsung,exynos5440-pins' property */
-static int exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
- struct device_node *cfg_np, unsigned int **pin_list,
- unsigned int *npins)
-{
- struct device *dev = &pdev->dev;
- struct property *prop;
-
- prop = of_find_property(cfg_np, "samsung,exynos5440-pins", NULL);
- if (!prop)
- return -ENOENT;
-
- *npins = prop->length / sizeof(unsigned long);
- if (!*npins) {
- dev_err(dev, "invalid pin list in %s node", cfg_np->name);
- return -EINVAL;
- }
-
- *pin_list = devm_kzalloc(dev, *npins * sizeof(**pin_list), GFP_KERNEL);
- if (!*pin_list)
- return -ENOMEM;
-
- return of_property_read_u32_array(cfg_np, "samsung,exynos5440-pins",
- *pin_list, *npins);
-}
-
-/*
- * Parse the information about all the available pin groups and pin functions
- * from device node of the pin-controller.
- */
-static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- struct device *dev = &pdev->dev;
- struct device_node *dev_np = dev->of_node;
- struct device_node *cfg_np;
- struct exynos5440_pin_group *groups, *grp;
- struct exynos5440_pmx_func *functions, *func;
- unsigned *pin_list;
- unsigned int npins, grp_cnt, func_idx = 0;
- char *gname, *fname;
- int ret;
-
- grp_cnt = of_get_child_count(dev_np);
- if (!grp_cnt)
- return -EINVAL;
-
- groups = devm_kzalloc(dev, grp_cnt * sizeof(*groups), GFP_KERNEL);
- if (!groups)
- return -EINVAL;
-
- grp = groups;
-
- functions = devm_kzalloc(dev, grp_cnt * sizeof(*functions), GFP_KERNEL);
- if (!functions)
- return -EINVAL;
-
- func = functions;
-
- /*
- * Iterate over all the child nodes of the pin controller node
- * and create pin groups and pin function lists.
- */
- for_each_child_of_node(dev_np, cfg_np) {
- u32 function;
-
- ret = exynos5440_pinctrl_parse_dt_pins(pdev, cfg_np,
- &pin_list, &npins);
- if (ret) {
- gname = NULL;
- goto skip_to_pin_function;
- }
-
- /* derive pin group name from the node name */
- gname = devm_kasprintf(dev, GFP_KERNEL,
- "%s%s", cfg_np->name, GROUP_SUFFIX);
- if (!gname)
- return -ENOMEM;
-
- grp->name = gname;
- grp->pins = pin_list;
- grp->num_pins = npins;
- grp++;
-
-skip_to_pin_function:
- ret = of_property_read_u32(cfg_np, "samsung,exynos5440-pin-function",
- &function);
- if (ret)
- continue;
-
- /* derive function name from the node name */
- fname = devm_kasprintf(dev, GFP_KERNEL,
- "%s%s", cfg_np->name, FUNCTION_SUFFIX);
- if (!fname)
- return -ENOMEM;
-
- func->name = fname;
- func->groups = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
- if (!func->groups)
- return -ENOMEM;
- func->groups[0] = gname;
- func->num_groups = gname ? 1 : 0;
- func->function = function;
- func++;
- func_idx++;
- }
-
- priv->pin_groups = groups;
- priv->nr_groups = grp_cnt;
- priv->pmx_functions = functions;
- priv->nr_functions = func_idx;
- return 0;
-}
-
-/* register the pinctrl interface with the pinctrl subsystem */
-static int exynos5440_pinctrl_register(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- struct device *dev = &pdev->dev;
- struct pinctrl_desc *ctrldesc;
- struct pinctrl_dev *pctl_dev;
- struct pinctrl_pin_desc *pindesc, *pdesc;
- char *pin_names;
- int pin, ret;
-
- ctrldesc = devm_kzalloc(dev, sizeof(*ctrldesc), GFP_KERNEL);
- if (!ctrldesc)
- return -ENOMEM;
-
- ctrldesc->name = "exynos5440-pinctrl";
- ctrldesc->owner = THIS_MODULE;
- ctrldesc->pctlops = &exynos5440_pctrl_ops;
- ctrldesc->pmxops = &exynos5440_pinmux_ops;
- ctrldesc->confops = &exynos5440_pinconf_ops;
-
- pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
- EXYNOS5440_MAX_PINS, GFP_KERNEL);
- if (!pindesc)
- return -ENOMEM;
- ctrldesc->pins = pindesc;
- ctrldesc->npins = EXYNOS5440_MAX_PINS;
-
- /* dynamically populate the pin number and pin name for pindesc */
- for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++)
- pdesc->number = pin;
-
- /*
- * allocate space for storing the dynamically generated names for all
- * the pins which belong to this pin-controller.
- */
- pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
- ctrldesc->npins, GFP_KERNEL);
- if (!pin_names)
- return -ENOMEM;
-
- /* for each pin, set the name of the pin */
- for (pin = 0; pin < ctrldesc->npins; pin++) {
- snprintf(pin_names, 6, "gpio%02d", pin);
- pdesc = pindesc + pin;
- pdesc->name = pin_names;
- pin_names += PIN_NAME_LENGTH;
- }
-
- ret = exynos5440_pinctrl_parse_dt(pdev, priv);
- if (ret)
- return ret;
-
- pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, priv);
- if (IS_ERR(pctl_dev)) {
- dev_err(&pdev->dev, "could not register pinctrl driver\n");
- return PTR_ERR(pctl_dev);
- }
-
- priv->range.name = "exynos5440-pctrl-gpio-range";
- priv->range.id = 0;
- priv->range.base = 0;
- priv->range.npins = EXYNOS5440_MAX_PINS;
- priv->range.gc = priv->gc;
- pinctrl_add_gpio_range(pctl_dev, &priv->range);
- return 0;
-}
-
-/* register the gpiolib interface with the gpiolib subsystem */
-static int exynos5440_gpiolib_register(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- struct gpio_chip *gc;
- int ret;
-
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
- return -ENOMEM;
-
- priv->gc = gc;
- gc->base = 0;
- gc->ngpio = EXYNOS5440_MAX_PINS;
- gc->parent = &pdev->dev;
- gc->set = exynos5440_gpio_set;
- gc->get = exynos5440_gpio_get;
- gc->direction_input = exynos5440_gpio_direction_input;
- gc->direction_output = exynos5440_gpio_direction_output;
- gc->to_irq = exynos5440_gpio_to_irq;
- gc->label = "gpiolib-exynos5440";
- gc->owner = THIS_MODULE;
- ret = gpiochip_add_data(gc, priv);
- if (ret) {
- dev_err(&pdev->dev, "failed to register gpio_chip %s, error "
- "code: %d\n", gc->label, ret);
- return ret;
- }
-
- return 0;
-}
-
-/* unregister the gpiolib interface with the gpiolib subsystem */
-static int exynos5440_gpiolib_unregister(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- gpiochip_remove(priv->gc);
- return 0;
-}
-
-static void exynos5440_gpio_irq_unmask(struct irq_data *irqd)
-{
- struct exynos5440_pinctrl_priv_data *d;
- unsigned long gpio_int;
-
- d = irq_data_get_irq_chip_data(irqd);
- gpio_int = readl(d->reg_base + GPIO_INT);
- gpio_int |= 1 << irqd->hwirq;
- writel(gpio_int, d->reg_base + GPIO_INT);
-}
-
-static void exynos5440_gpio_irq_mask(struct irq_data *irqd)
-{
- struct exynos5440_pinctrl_priv_data *d;
- unsigned long gpio_int;
-
- d = irq_data_get_irq_chip_data(irqd);
- gpio_int = readl(d->reg_base + GPIO_INT);
- gpio_int &= ~(1 << irqd->hwirq);
- writel(gpio_int, d->reg_base + GPIO_INT);
-}
-
-/* irq_chip for gpio interrupts */
-static struct irq_chip exynos5440_gpio_irq_chip = {
- .name = "exynos5440_gpio_irq_chip",
- .irq_unmask = exynos5440_gpio_irq_unmask,
- .irq_mask = exynos5440_gpio_irq_mask,
-};
-
-/* interrupt handler for GPIO interrupts 0..7 */
-static irqreturn_t exynos5440_gpio_irq(int irq, void *data)
-{
- struct exynos5440_gpio_intr_data *intd = data;
- struct exynos5440_pinctrl_priv_data *d = intd->priv;
- int virq;
-
- virq = irq_linear_revmap(d->irq_domain, intd->gpio_int);
- if (!virq)
- return IRQ_NONE;
- generic_handle_irq(virq);
- return IRQ_HANDLED;
-}
-
-static int exynos5440_gpio_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct exynos5440_pinctrl_priv_data *d = h->host_data;
-
- irq_set_chip_data(virq, d);
- irq_set_chip_and_handler(virq, &exynos5440_gpio_irq_chip,
- handle_level_irq);
- return 0;
-}
-
-/* irq domain callbacks for gpio interrupt controller */
-static const struct irq_domain_ops exynos5440_gpio_irqd_ops = {
- .map = exynos5440_gpio_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-/* setup handling of gpio interrupts */
-static int exynos5440_gpio_irq_init(struct platform_device *pdev,
- struct exynos5440_pinctrl_priv_data *priv)
-{
- struct device *dev = &pdev->dev;
- struct exynos5440_gpio_intr_data *intd;
- int i, irq, ret;
-
- intd = devm_kzalloc(dev, sizeof(*intd) * EXYNOS5440_MAX_GPIO_INT,
- GFP_KERNEL);
- if (!intd)
- return -ENOMEM;
-
- for (i = 0; i < EXYNOS5440_MAX_GPIO_INT; i++) {
- irq = irq_of_parse_and_map(dev->of_node, i);
- if (irq <= 0) {
- dev_err(dev, "irq parsing failed\n");
- return -EINVAL;
- }
-
- intd->gpio_int = i;
- intd->priv = priv;
- ret = devm_request_irq(dev, irq, exynos5440_gpio_irq,
- 0, dev_name(dev), intd++);
- if (ret) {
- dev_err(dev, "irq request failed\n");
- return -ENXIO;
- }
- }
-
- priv->irq_domain = irq_domain_add_linear(dev->of_node,
- EXYNOS5440_MAX_GPIO_INT,
- &exynos5440_gpio_irqd_ops, priv);
- if (!priv->irq_domain) {
- dev_err(dev, "failed to create irq domain\n");
- return -ENXIO;
- }
-
- return 0;
-}
-
-static int exynos5440_pinctrl_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct exynos5440_pinctrl_priv_data *priv;
- struct resource *res;
- int ret;
-
- if (!dev->of_node) {
- dev_err(dev, "device tree node not found\n");
- return -ENODEV;
- }
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->reg_base))
- return PTR_ERR(priv->reg_base);
-
- ret = exynos5440_gpiolib_register(pdev, priv);
- if (ret)
- return ret;
-
- ret = exynos5440_pinctrl_register(pdev, priv);
- if (ret) {
- exynos5440_gpiolib_unregister(pdev, priv);
- return ret;
- }
-
- ret = exynos5440_gpio_irq_init(pdev, priv);
- if (ret) {
- dev_err(dev, "failed to setup gpio interrupts\n");
- return ret;
- }
-
- platform_set_drvdata(pdev, priv);
- dev_info(dev, "EXYNOS5440 pinctrl driver registered\n");
- return 0;
-}
-
-static const struct of_device_id exynos5440_pinctrl_dt_match[] = {
- { .compatible = "samsung,exynos5440-pinctrl" },
- {},
-};
-
-static struct platform_driver exynos5440_pinctrl_driver = {
- .probe = exynos5440_pinctrl_probe,
- .driver = {
- .name = "exynos5440-pinctrl",
- .of_match_table = exynos5440_pinctrl_dt_match,
- .suppress_bind_attrs = true,
- },
-};
-
-static int __init exynos5440_pinctrl_drv_register(void)
-{
- return platform_driver_register(&exynos5440_pinctrl_driver);
-}
-postcore_initcall(exynos5440_pinctrl_drv_register);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 336e88d7bdb9b..618945a0fd380 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -279,6 +279,32 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+/* Forward declaration which can be used by samsung_pin_dbg_show */
+static int samsung_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config);
+static const char * const reg_names[] = {"CON", "DAT", "PUD", "DRV", "CON_PDN",
+ "PUD_PDN"};
+
+static void samsung_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int pin)
+{
+ enum pincfg_type cfg_type;
+ unsigned long config;
+ int ret;
+
+ for (cfg_type = 0; cfg_type < PINCFG_TYPE_NUM; cfg_type++) {
+ config = PINCFG_PACK(cfg_type, 0);
+ ret = samsung_pinconf_get(pctldev, pin, &config);
+ if (ret < 0)
+ continue;
+
+ seq_printf(s, " %s(0x%lx)", reg_names[cfg_type],
+ PINCFG_UNPACK_VALUE(config));
+ }
+}
+#endif
+
/* list of pinctrl callbacks for the pinctrl core */
static const struct pinctrl_ops samsung_pctrl_ops = {
.get_groups_count = samsung_get_group_count,
@@ -286,6 +312,9 @@ static const struct pinctrl_ops samsung_pctrl_ops = {
.get_group_pins = samsung_get_group_pins,
.dt_node_to_map = samsung_dt_node_to_map,
.dt_free_map = samsung_dt_free_map,
+#ifdef CONFIG_DEBUG_FS
+ .pin_dbg_show = samsung_pin_dbg_show,
+#endif
};
/* check if the selector is a valid pin function selector */
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index c11b789ec5831..43d950c165289 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -44,6 +44,11 @@ config PINCTRL_PFC_R8A7745
depends on ARCH_R8A7745
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A77470
+ def_bool y
+ depends on ARCH_R8A77470
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7778
def_bool y
depends on ARCH_R8A7778
@@ -104,6 +109,11 @@ config PINCTRL_PFC_R8A77980
depends on ARCH_R8A77980
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A77990
+ def_bool y
+ depends on ARCH_R8A77990
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A77995
def_bool y
depends on ARCH_R8A77995
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 463775f28cf10..d0b29c51c1597 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
obj-$(CONFIG_PINCTRL_PFC_R8A7743) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77470) += pfc-r8a77470.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
obj-$(CONFIG_PINCTRL_PFC_R8A7790) += pfc-r8a7790.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7796) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77965) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A77970) += pfc-r8a77970.o
obj-$(CONFIG_PINCTRL_PFC_R8A77980) += pfc-r8a77980.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77990) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A77995) += pfc-r8a77995.o
obj-$(CONFIG_PINCTRL_PFC_SH7203) += pfc-sh7203.o
obj-$(CONFIG_PINCTRL_PFC_SH7264) += pfc-sh7264.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 74861b7b5b0d7..eb06981538b42 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -503,6 +503,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7745_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77470
+ {
+ .compatible = "renesas,pfc-r8a77470",
+ .data = &r8a77470_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7778
{
.compatible = "renesas,pfc-r8a7778",
@@ -575,6 +581,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a77980_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77990
+ {
+ .compatible = "renesas,pfc-r8a77990",
+ .data = &r8a77990_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77995
{
.compatible = "renesas,pfc-r8a77995",
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77470.c b/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
new file mode 100644
index 0000000000000..9d3ed438ec7b1
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
@@ -0,0 +1,2343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A77470 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+
+#include <linux/kernel.h>
+
+#include "sh_pfc.h"
+
+#define CPU_ALL_PORT(fn, sfx) \
+ PORT_GP_23(0, fn, sfx), \
+ PORT_GP_23(1, fn, sfx), \
+ PORT_GP_32(2, fn, sfx), \
+ PORT_GP_17(3, fn, sfx), \
+ PORT_GP_1(3, 27, fn, sfx), \
+ PORT_GP_1(3, 28, fn, sfx), \
+ PORT_GP_1(3, 29, fn, sfx), \
+ PORT_GP_26(4, fn, sfx), \
+ PORT_GP_32(5, fn, sfx)
+
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+
+ /* GPSR0 */
+ FN_USB0_PWEN, FN_USB0_OVC, FN_USB1_PWEN, FN_USB1_OVC, FN_CLKOUT,
+ FN_IP0_3_0, FN_IP0_7_4, FN_IP0_11_8, FN_IP0_15_12, FN_IP0_19_16,
+ FN_IP0_23_20, FN_IP0_27_24, FN_IP0_31_28, FN_MMC0_CLK_SDHI1_CLK,
+ FN_MMC0_CMD_SDHI1_CMD, FN_MMC0_D0_SDHI1_D0, FN_MMC0_D1_SDHI1_D1,
+ FN_MMC0_D2_SDHI1_D2, FN_MMC0_D3_SDHI1_D3, FN_IP1_3_0,
+ FN_IP1_7_4, FN_MMC0_D6, FN_MMC0_D7,
+
+ /* GPSR1 */
+ FN_IP1_11_8, FN_IP1_15_12, FN_IP1_19_16, FN_IP1_23_20, FN_IP1_27_24,
+ FN_IP1_31_28, FN_IP2_3_0, FN_IP2_7_4, FN_IP2_11_8, FN_IP2_15_12,
+ FN_IP2_19_16, FN_IP2_23_20, FN_IP2_27_24, FN_IP2_31_28, FN_IP3_3_0,
+ FN_IP3_7_4, FN_IP3_11_8, FN_IP3_15_12, FN_IP3_19_16, FN_IP3_23_20,
+ FN_IP3_27_24, FN_IP3_31_28, FN_IP4_3_0,
+
+ /* GPSR2 */
+ FN_IP4_7_4, FN_IP4_11_8, FN_IP4_15_12, FN_IP4_19_16, FN_IP4_23_20,
+ FN_IP4_27_24, FN_IP4_31_28, FN_IP5_3_0, FN_IP5_7_4, FN_IP5_11_8,
+ FN_IP5_15_12, FN_IP5_19_16, FN_IP5_23_20, FN_IP5_27_24, FN_IP5_31_28,
+ FN_IP6_3_0, FN_IP6_7_4, FN_IP6_11_8, FN_IP6_15_12, FN_IP6_19_16,
+ FN_IP6_23_20, FN_IP6_27_24, FN_IP6_31_28, FN_IP7_3_0, FN_IP7_7_4,
+ FN_IP7_11_8, FN_IP7_15_12, FN_IP7_19_16, FN_IP7_23_20, FN_IP7_27_24,
+ FN_IP7_31_28, FN_IP8_3_0,
+
+ /* GPSR3 */
+ FN_IP8_7_4, FN_IP8_11_8, FN_IP8_15_12, FN_IP8_19_16, FN_IP8_23_20,
+ FN_IP8_27_24, FN_IP8_31_28, FN_IP9_3_0, FN_IP9_7_4, FN_IP9_11_8,
+ FN_IP9_15_12, FN_IP9_19_16, FN_IP9_23_20, FN_IP9_27_24, FN_IP9_31_28,
+ FN_IP10_3_0, FN_IP10_7_4, FN_IP10_11_8, FN_IP10_15_12, FN_IP10_19_16,
+
+ /* GPSR4 */
+ FN_IP10_23_20, FN_IP10_27_24, FN_IP10_31_28, FN_IP11_3_0, FN_IP11_7_4,
+ FN_IP11_11_8, FN_IP11_15_12, FN_IP11_19_16, FN_IP11_23_20,
+ FN_IP11_27_24, FN_IP11_31_28, FN_IP12_3_0, FN_IP12_7_4, FN_IP12_11_8,
+ FN_IP12_15_12, FN_IP12_19_16, FN_IP12_23_20, FN_IP12_27_24,
+ FN_IP12_31_28, FN_IP13_3_0, FN_IP13_7_4, FN_IP13_11_8, FN_IP13_15_12,
+ FN_IP13_19_16, FN_IP13_23_20, FN_IP13_27_24,
+
+ /* GPSR5 */
+ FN_IP13_31_28, FN_IP14_3_0, FN_IP14_7_4, FN_IP14_11_8, FN_IP14_15_12,
+ FN_IP14_19_16, FN_IP14_23_20, FN_IP14_27_24, FN_IP14_31_28,
+ FN_IP15_3_0, FN_IP15_7_4, FN_IP15_11_8, FN_IP15_15_12, FN_IP15_19_16,
+ FN_IP15_23_20, FN_IP15_27_24, FN_IP15_31_28, FN_IP16_3_0, FN_IP16_7_4,
+ FN_IP16_11_8, FN_IP16_15_12, FN_IP16_19_16, FN_IP16_23_20,
+ FN_IP16_27_24, FN_IP16_31_28, FN_IP17_3_0, FN_IP17_7_4, FN_IP17_11_8,
+ FN_IP17_15_12, FN_IP17_19_16, FN_IP17_23_20, FN_IP17_27_24,
+
+ /* IPSR0 */
+ FN_SD0_CLK, FN_SSI_SCK1_C, FN_RX3_C,
+ FN_SD0_CMD, FN_SSI_WS1_C, FN_TX3_C,
+ FN_SD0_DAT0, FN_SSI_SDATA1_C, FN_RX4_E,
+ FN_SD0_DAT1, FN_SSI_SCK0129_B, FN_TX4_E,
+ FN_SD0_DAT2, FN_SSI_WS0129_B, FN_RX5_E,
+ FN_SD0_DAT3, FN_SSI_SDATA0_B, FN_TX5_E,
+ FN_SD0_CD, FN_CAN0_RX_A,
+ FN_SD0_WP, FN_IRQ7, FN_CAN0_TX_A,
+
+ /* IPSR1 */
+ FN_MMC0_D4, FN_SD1_CD,
+ FN_MMC0_D5, FN_SD1_WP,
+ FN_D0, FN_SCL3_B, FN_RX5_B, FN_IRQ4, FN_MSIOF2_RXD_C, FN_SSI_SDATA5_B,
+ FN_D1, FN_SDA3_B, FN_TX5_B, FN_MSIOF2_TXD_C, FN_SSI_WS5_B,
+ FN_D2, FN_RX4_B, FN_SCL0_D, FN_PWM1_C, FN_MSIOF2_SCK_C, FN_SSI_SCK5_B,
+ FN_D3, FN_TX4_B, FN_SDA0_D, FN_PWM0_A, FN_MSIOF2_SYNC_C,
+ FN_D4, FN_IRQ3, FN_TCLK1_A, FN_PWM6_C,
+ FN_D5, FN_HRX2, FN_SCL1_B, FN_PWM2_C, FN_TCLK2_B,
+
+ /* IPSR2 */
+ FN_D6, FN_HTX2, FN_SDA1_B, FN_PWM4_C,
+ FN_D7, FN_HSCK2, FN_SCIF1_SCK_C, FN_IRQ6, FN_PWM5_C,
+ FN_D8, FN_HCTS2_N, FN_RX1_C, FN_SCL1_D, FN_PWM3_C,
+ FN_D9, FN_HRTS2_N, FN_TX1_C, FN_SDA1_D,
+ FN_D10, FN_MSIOF2_RXD_A, FN_HRX0_B,
+ FN_D11, FN_MSIOF2_TXD_A, FN_HTX0_B,
+ FN_D12, FN_MSIOF2_SCK_A, FN_HSCK0, FN_CAN_CLK_C,
+ FN_D13, FN_MSIOF2_SYNC_A, FN_RX4_C,
+
+ /* IPSR3 */
+ FN_D14, FN_MSIOF2_SS1, FN_TX4_C, FN_CAN1_RX_B, FN_AVB_AVTP_CAPTURE_A,
+ FN_D15, FN_MSIOF2_SS2, FN_PWM4_A, FN_CAN1_TX_B, FN_IRQ2, FN_AVB_AVTP_MATCH_A,
+ FN_QSPI0_SPCLK, FN_WE0_N,
+ FN_QSPI0_MOSI_QSPI0_IO0, FN_BS_N,
+ FN_QSPI0_MISO_QSPI0_IO1, FN_RD_WR_N,
+ FN_QSPI0_IO2, FN_CS0_N,
+ FN_QSPI0_IO3, FN_RD_N,
+ FN_QSPI0_SSL, FN_WE1_N,
+
+ /* IPSR4 */
+ FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK_A,
+ FN_DU0_DR0, FN_RX5_C, FN_SCL2_D, FN_A0,
+ FN_DU0_DR1, FN_TX5_C, FN_SDA2_D, FN_A1,
+ FN_DU0_DR2, FN_RX0_D, FN_SCL0_E, FN_A2,
+ FN_DU0_DR3, FN_TX0_D, FN_SDA0_E, FN_PWM0_B, FN_A3,
+ FN_DU0_DR4, FN_RX1_D, FN_A4,
+ FN_DU0_DR5, FN_TX1_D, FN_PWM1_B, FN_A5,
+ FN_DU0_DR6, FN_RX2_C, FN_A6,
+
+ /* IPSR5 */
+ FN_DU0_DR7, FN_TX2_C, FN_PWM2_B, FN_A7,
+ FN_DU0_DG0, FN_RX3_B, FN_SCL3_D, FN_A8,
+ FN_DU0_DG1, FN_TX3_B, FN_SDA3_D, FN_PWM3_B, FN_A9,
+ FN_DU0_DG2, FN_RX4_D, FN_A10,
+ FN_DU0_DG3, FN_TX4_D, FN_PWM4_B, FN_A11,
+ FN_DU0_DG4, FN_HRX0_A, FN_A12,
+ FN_DU0_DG5, FN_HTX0_A, FN_PWM5_B, FN_A13,
+ FN_DU0_DG6, FN_HRX1_C, FN_A14,
+
+ /* IPSR6 */
+ FN_DU0_DG7, FN_HTX1_C, FN_PWM6_B, FN_A15,
+ FN_DU0_DB0, FN_SCL4_D, FN_CAN0_RX_C, FN_A16,
+ FN_DU0_DB1, FN_SDA4_D, FN_CAN0_TX_C, FN_A17,
+ FN_DU0_DB2, FN_HCTS0_N, FN_A18,
+ FN_DU0_DB3, FN_HRTS0_N, FN_A19,
+ FN_DU0_DB4, FN_HCTS1_N_C, FN_A20,
+ FN_DU0_DB5, FN_HRTS1_N_C, FN_A21,
+ FN_DU0_DB6, FN_A22,
+
+ /* IPSR7 */
+ FN_DU0_DB7, FN_A23,
+ FN_DU0_DOTCLKIN, FN_A24,
+ FN_DU0_DOTCLKOUT0, FN_A25,
+ FN_DU0_DOTCLKOUT1, FN_MSIOF2_RXD_B, FN_CS1_N_A26,
+ FN_DU0_EXHSYNC_DU0_HSYNC, FN_MSIOF2_TXD_B, FN_DREQ0_N,
+ FN_DU0_EXVSYNC_DU0_VSYNC, FN_MSIOF2_SYNC_B, FN_DACK0,
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_MSIOF2_SCK_B, FN_DRACK0,
+ FN_DU0_DISP, FN_CAN1_RX_C,
+
+ /* IPSR8 */
+ FN_DU0_CDE, FN_CAN1_TX_C,
+ FN_VI1_CLK, FN_AVB_RX_CLK, FN_ETH_REF_CLK,
+ FN_VI1_DATA0, FN_AVB_RX_DV, FN_ETH_CRS_DV,
+ FN_VI1_DATA1, FN_AVB_RXD0, FN_ETH_RXD0,
+ FN_VI1_DATA2, FN_AVB_RXD1, FN_ETH_RXD1,
+ FN_VI1_DATA3, FN_AVB_RXD2, FN_ETH_MDIO,
+ FN_VI1_DATA4, FN_AVB_RXD3, FN_ETH_RX_ER,
+ FN_VI1_DATA5, FN_AVB_RXD4, FN_ETH_LINK,
+
+ /* IPSR9 */
+ FN_VI1_DATA6, FN_AVB_RXD5, FN_ETH_TXD1,
+ FN_VI1_DATA7, FN_AVB_RXD6, FN_ETH_TX_EN,
+ FN_VI1_CLKENB, FN_SCL3_A, FN_AVB_RXD7, FN_ETH_MAGIC,
+ FN_VI1_FIELD, FN_SDA3_A, FN_AVB_RX_ER, FN_ETH_TXD0,
+ FN_VI1_HSYNC_N, FN_RX0_B, FN_SCL0_C, FN_AVB_GTXREFCLK, FN_ETH_MDC,
+ FN_VI1_VSYNC_N, FN_TX0_B, FN_SDA0_C, FN_AUDIO_CLKOUT_B, FN_AVB_TX_CLK,
+ FN_VI1_DATA8, FN_SCL2_B, FN_AVB_TX_EN,
+ FN_VI1_DATA9, FN_SDA2_B, FN_AVB_TXD0,
+
+ /* IPSR10 */
+ FN_VI1_DATA10, FN_CAN0_RX_B, FN_AVB_TXD1,
+ FN_VI1_DATA11, FN_CAN0_TX_B, FN_AVB_TXD2,
+ FN_AVB_TXD3, FN_AUDIO_CLKA_B, FN_SSI_SCK1_D, FN_RX5_F, FN_MSIOF0_RXD_B,
+ FN_AVB_TXD4, FN_AUDIO_CLKB_B, FN_SSI_WS1_D, FN_TX5_F, FN_MSIOF0_TXD_B,
+ FN_AVB_TXD5, FN_SCIF_CLK_B, FN_AUDIO_CLKC_B, FN_SSI_SDATA1_D, FN_MSIOF0_SCK_B,
+ FN_SCL0_A, FN_RX0_C, FN_PWM5_A, FN_TCLK1_B, FN_AVB_TXD6, FN_CAN1_RX_D, FN_MSIOF0_SYNC_B,
+ FN_SDA0_A, FN_TX0_C, FN_IRQ5, FN_CAN_CLK_A, FN_AVB_GTX_CLK, FN_CAN1_TX_D, FN_DVC_MUTE,
+ FN_SCL1_A, FN_RX4_A, FN_PWM5_D, FN_DU1_DR0, FN_SSI_SCK6_B, FN_VI0_G0,
+
+ /* IPSR11 */
+ FN_SDA1_A, FN_TX4_A, FN_DU1_DR1, FN_SSI_WS6_B, FN_VI0_G1,
+ FN_MSIOF0_RXD_A, FN_RX5_A, FN_SCL2_C, FN_DU1_DR2, FN_QSPI1_MOSI_QSPI1_IO0, FN_SSI_SDATA6_B, FN_VI0_G2,
+ FN_MSIOF0_TXD_A, FN_TX5_A, FN_SDA2_C, FN_DU1_DR3, FN_QSPI1_MISO_QSPI1_IO1, FN_SSI_WS78_B, FN_VI0_G3,
+ FN_MSIOF0_SCK_A, FN_IRQ0, FN_DU1_DR4, FN_QSPI1_SPCLK, FN_SSI_SCK78_B, FN_VI0_G4,
+ FN_MSIOF0_SYNC_A, FN_PWM1_A, FN_DU1_DR5, FN_QSPI1_IO2, FN_SSI_SDATA7_B,
+ FN_MSIOF0_SS1_A, FN_DU1_DR6, FN_QSPI1_IO3, FN_SSI_SDATA8_B,
+ FN_MSIOF0_SS2_A, FN_DU1_DR7, FN_QSPI1_SSL,
+ FN_HRX1_A, FN_SCL4_A, FN_PWM6_A, FN_DU1_DG0, FN_RX0_A,
+
+ /* IPSR12 */
+ FN_HTX1_A, FN_SDA4_A, FN_DU1_DG1, FN_TX0_A,
+ FN_HCTS1_N_A, FN_PWM2_A, FN_DU1_DG2, FN_REMOCON_B,
+ FN_HRTS1_N_A, FN_DU1_DG3, FN_SSI_WS1_B, FN_IRQ1,
+ FN_SD2_CLK, FN_HSCK1, FN_DU1_DG4, FN_SSI_SCK1_B,
+ FN_SD2_CMD, FN_SCIF1_SCK_A, FN_TCLK2_A, FN_DU1_DG5, FN_SSI_SCK2_B, FN_PWM3_A,
+ FN_SD2_DAT0, FN_RX1_A, FN_SCL1_E, FN_DU1_DG6, FN_SSI_SDATA1_B,
+ FN_SD2_DAT1, FN_TX1_A, FN_SDA1_E, FN_DU1_DG7, FN_SSI_WS2_B,
+ FN_SD2_DAT2, FN_RX2_A, FN_DU1_DB0, FN_SSI_SDATA2_B,
+
+ /* IPSR13 */
+ FN_SD2_DAT3, FN_TX2_A, FN_DU1_DB1, FN_SSI_WS9_B,
+ FN_SD2_CD, FN_SCIF2_SCK_A, FN_DU1_DB2, FN_SSI_SCK9_B,
+ FN_SD2_WP, FN_SCIF3_SCK, FN_DU1_DB3, FN_SSI_SDATA9_B,
+ FN_RX3_A, FN_SCL1_C, FN_MSIOF1_RXD_B, FN_DU1_DB4, FN_AUDIO_CLKA_C, FN_SSI_SDATA4_B,
+ FN_TX3_A, FN_SDA1_C, FN_MSIOF1_TXD_B, FN_DU1_DB5, FN_AUDIO_CLKB_C, FN_SSI_WS4_B,
+ FN_SCL2_A, FN_MSIOF1_SCK_B, FN_DU1_DB6, FN_AUDIO_CLKC_C, FN_SSI_SCK4_B,
+ FN_SDA2_A, FN_MSIOF1_SYNC_B, FN_DU1_DB7, FN_AUDIO_CLKOUT_C,
+ FN_SSI_SCK5_A, FN_DU1_DOTCLKOUT1,
+
+ /* IPSR14 */
+ FN_SSI_WS5_A, FN_SCL3_C, FN_DU1_DOTCLKIN,
+ FN_SSI_SDATA5_A, FN_SDA3_C, FN_DU1_DOTCLKOUT0,
+ FN_SSI_SCK6_A, FN_DU1_EXODDF_DU1_ODDF_DISP_CDE,
+ FN_SSI_WS6_A, FN_SCL4_C, FN_DU1_EXHSYNC_DU1_HSYNC,
+ FN_SSI_SDATA6_A, FN_SDA4_C, FN_DU1_EXVSYNC_DU1_VSYNC,
+ FN_SSI_SCK78_A, FN_SDA4_E, FN_DU1_DISP,
+ FN_SSI_WS78_A, FN_SCL4_E, FN_DU1_CDE,
+ FN_SSI_SDATA7_A, FN_IRQ8, FN_AUDIO_CLKA_D, FN_CAN_CLK_D, FN_VI0_G5,
+
+ /* IPSR15 */
+ FN_SSI_SCK0129_A, FN_MSIOF1_RXD_A, FN_RX5_D, FN_VI0_G6,
+ FN_SSI_WS0129_A, FN_MSIOF1_TXD_A, FN_TX5_D, FN_VI0_G7,
+ FN_SSI_SDATA0_A, FN_MSIOF1_SYNC_A, FN_PWM0_C, FN_VI0_R0,
+ FN_SSI_SCK34, FN_MSIOF1_SCK_A, FN_AVB_MDC, FN_DACK1, FN_VI0_R1,
+ FN_SSI_WS34, FN_MSIOF1_SS1_A, FN_AVB_MDIO, FN_CAN1_RX_A, FN_DREQ1_N, FN_VI0_R2,
+ FN_SSI_SDATA3, FN_MSIOF1_SS2_A, FN_AVB_LINK, FN_CAN1_TX_A, FN_DREQ2_N, FN_VI0_R3,
+ FN_SSI_SCK4_A, FN_AVB_MAGIC, FN_VI0_R4,
+ FN_SSI_WS4_A, FN_AVB_PHY_INT, FN_VI0_R5,
+
+ /* IPSR16 */
+ FN_SSI_SDATA4_A, FN_AVB_CRS, FN_VI0_R6,
+ FN_SSI_SCK1_A, FN_SCIF1_SCK_B, FN_PWM1_D, FN_IRQ9, FN_REMOCON_A, FN_DACK2, FN_VI0_CLK, FN_AVB_COL,
+ FN_SSI_SDATA8_A, FN_RX1_B, FN_CAN0_RX_D, FN_AVB_AVTP_CAPTURE_B, FN_VI0_R7,
+ FN_SSI_WS1_A, FN_TX1_B, FN_CAN0_TX_D, FN_AVB_AVTP_MATCH_B, FN_VI0_DATA0_VI0_B0,
+ FN_SSI_SDATA1_A, FN_HRX1_B, FN_VI0_DATA1_VI0_B1,
+ FN_SSI_SCK2_A, FN_HTX1_B, FN_AVB_TXD7, FN_VI0_DATA2_VI0_B2,
+ FN_SSI_WS2_A, FN_HCTS1_N_B, FN_AVB_TX_ER, FN_VI0_DATA3_VI0_B3,
+ FN_SSI_SDATA2_A, FN_HRTS1_N_B, FN_VI0_DATA4_VI0_B4,
+
+ /* IPSR17 */
+ FN_SSI_SCK9_A, FN_RX2_B, FN_SCL3_E, FN_EX_WAIT1, FN_VI0_DATA5_VI0_B5,
+ FN_SSI_WS9_A, FN_TX2_B, FN_SDA3_E, FN_VI0_DATA6_VI0_B6,
+ FN_SSI_SDATA9_A, FN_SCIF2_SCK_B, FN_PWM2_D, FN_VI0_DATA7_VI0_B7,
+ FN_AUDIO_CLKA_A, FN_SCL0_B, FN_VI0_CLKENB,
+ FN_AUDIO_CLKB_A, FN_SDA0_B, FN_VI0_FIELD,
+ FN_AUDIO_CLKC_A, FN_SCL4_B, FN_VI0_HSYNC_N,
+ FN_AUDIO_CLKOUT_A, FN_SDA4_B, FN_VI0_VSYNC_N,
+
+ /* MOD_SEL0 */
+ FN_SEL_ADGA_0, FN_SEL_ADGA_1, FN_SEL_ADGA_2, FN_SEL_ADGA_3,
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2, FN_SEL_CANCLK_3,
+ FN_SEL_CAN1_0, FN_SEL_CAN1_1, FN_SEL_CAN1_2, FN_SEL_CAN1_3,
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1, FN_SEL_CAN0_2, FN_SEL_CAN0_3,
+ FN_SEL_I2C04_0, FN_SEL_I2C04_1, FN_SEL_I2C04_2, FN_SEL_I2C04_3, FN_SEL_I2C04_4,
+ FN_SEL_I2C03_0, FN_SEL_I2C03_1, FN_SEL_I2C03_2, FN_SEL_I2C03_3, FN_SEL_I2C03_4,
+ FN_SEL_I2C02_0, FN_SEL_I2C02_1, FN_SEL_I2C02_2, FN_SEL_I2C02_3,
+ FN_SEL_I2C01_0, FN_SEL_I2C01_1, FN_SEL_I2C01_2, FN_SEL_I2C01_3, FN_SEL_I2C01_4,
+ FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2, FN_SEL_I2C00_3, FN_SEL_I2C00_4,
+ FN_SEL_AVB_0, FN_SEL_AVB_1,
+
+ /* MOD_SEL1 */
+ FN_SEL_SCIFCLK_0, FN_SEL_SCIFCLK_1,
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3, FN_SEL_SCIF5_4, FN_SEL_SCIF5_5,
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3, FN_SEL_SCIF4_4,
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2,
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2,
+ FN_SEL_SCIF2_CLK_0, FN_SEL_SCIF2_CLK_1,
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
+ FN_SEL_MSIOF2_0, FN_SEL_MSIOF2_1, FN_SEL_MSIOF2_2,
+ FN_SEL_MSIOF1_0, FN_SEL_MSIOF1_1,
+ FN_SEL_MSIOF0_0, FN_SEL_MSIOF0_1,
+ FN_SEL_RCN_0, FN_SEL_RCN_1,
+ FN_SEL_TMU2_0, FN_SEL_TMU2_1,
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1,
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_HSCIF1_2,
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,
+
+ /* MOD_SEL2 */
+ FN_SEL_ADGB_0, FN_SEL_ADGB_1, FN_SEL_ADGB_2,
+ FN_SEL_ADGC_0, FN_SEL_ADGC_1, FN_SEL_ADGC_2,
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1,
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1,
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1,
+ FN_SEL_SSI6_0, FN_SEL_SSI6_1,
+ FN_SEL_SSI5_0, FN_SEL_SSI5_1,
+ FN_SEL_SSI4_0, FN_SEL_SSI4_1,
+ FN_SEL_SSI2_0, FN_SEL_SSI2_1,
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1, FN_SEL_SSI1_2, FN_SEL_SSI1_3,
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1,
+ PINMUX_FUNCTION_END,
+
+ PINMUX_MARK_BEGIN,
+
+ USB0_PWEN_MARK, USB0_OVC_MARK, USB1_PWEN_MARK, USB1_OVC_MARK,
+ CLKOUT_MARK, MMC0_CLK_SDHI1_CLK_MARK, MMC0_CMD_SDHI1_CMD_MARK,
+ MMC0_D0_SDHI1_D0_MARK, MMC0_D1_SDHI1_D1_MARK,
+ MMC0_D2_SDHI1_D2_MARK, MMC0_D3_SDHI1_D3_MARK, MMC0_D6_MARK,
+ MMC0_D7_MARK,
+
+ /* IPSR0 */
+ SD0_CLK_MARK, SSI_SCK1_C_MARK, RX3_C_MARK,
+ SD0_CMD_MARK, SSI_WS1_C_MARK, TX3_C_MARK,
+ SD0_DAT0_MARK, SSI_SDATA1_C_MARK, RX4_E_MARK,
+ SD0_DAT1_MARK, SSI_SCK0129_B_MARK, TX4_E_MARK,
+ SD0_DAT2_MARK, SSI_WS0129_B_MARK, RX5_E_MARK,
+ SD0_DAT3_MARK, SSI_SDATA0_B_MARK, TX5_E_MARK,
+ SD0_CD_MARK, CAN0_RX_A_MARK,
+ SD0_WP_MARK, IRQ7_MARK, CAN0_TX_A_MARK,
+
+ /* IPSR1 */
+ MMC0_D4_MARK, SD1_CD_MARK,
+ MMC0_D5_MARK, SD1_WP_MARK,
+ D0_MARK, SCL3_B_MARK, RX5_B_MARK, IRQ4_MARK, MSIOF2_RXD_C_MARK, SSI_SDATA5_B_MARK,
+ D1_MARK, SDA3_B_MARK, TX5_B_MARK, MSIOF2_TXD_C_MARK, SSI_WS5_B_MARK,
+ D2_MARK, RX4_B_MARK, SCL0_D_MARK, PWM1_C_MARK, MSIOF2_SCK_C_MARK, SSI_SCK5_B_MARK,
+ D3_MARK, TX4_B_MARK, SDA0_D_MARK, PWM0_A_MARK, MSIOF2_SYNC_C_MARK,
+ D4_MARK, IRQ3_MARK, TCLK1_A_MARK, PWM6_C_MARK,
+ D5_MARK, HRX2_MARK, SCL1_B_MARK, PWM2_C_MARK, TCLK2_B_MARK,
+
+ /* IPSR2 */
+ D6_MARK, HTX2_MARK, SDA1_B_MARK, PWM4_C_MARK,
+ D7_MARK, HSCK2_MARK, SCIF1_SCK_C_MARK, IRQ6_MARK, PWM5_C_MARK,
+ D8_MARK, HCTS2_N_MARK, RX1_C_MARK, SCL1_D_MARK, PWM3_C_MARK,
+ D9_MARK, HRTS2_N_MARK, TX1_C_MARK, SDA1_D_MARK,
+ D10_MARK, MSIOF2_RXD_A_MARK, HRX0_B_MARK,
+ D11_MARK, MSIOF2_TXD_A_MARK, HTX0_B_MARK,
+ D12_MARK, MSIOF2_SCK_A_MARK, HSCK0_MARK, CAN_CLK_C_MARK,
+ D13_MARK, MSIOF2_SYNC_A_MARK, RX4_C_MARK,
+
+ /* IPSR3 */
+ D14_MARK, MSIOF2_SS1_MARK, TX4_C_MARK, CAN1_RX_B_MARK, AVB_AVTP_CAPTURE_A_MARK,
+ D15_MARK, MSIOF2_SS2_MARK, PWM4_A_MARK, CAN1_TX_B_MARK, IRQ2_MARK, AVB_AVTP_MATCH_A_MARK,
+ QSPI0_SPCLK_MARK, WE0_N_MARK,
+ QSPI0_MOSI_QSPI0_IO0_MARK, BS_N_MARK,
+ QSPI0_MISO_QSPI0_IO1_MARK, RD_WR_N_MARK,
+ QSPI0_IO2_MARK, CS0_N_MARK,
+ QSPI0_IO3_MARK, RD_N_MARK,
+ QSPI0_SSL_MARK, WE1_N_MARK,
+
+ /* IPSR4 */
+ EX_WAIT0_MARK, CAN_CLK_B_MARK, SCIF_CLK_A_MARK,
+ DU0_DR0_MARK, RX5_C_MARK, SCL2_D_MARK, A0_MARK,
+ DU0_DR1_MARK, TX5_C_MARK, SDA2_D_MARK, A1_MARK,
+ DU0_DR2_MARK, RX0_D_MARK, SCL0_E_MARK, A2_MARK,
+ DU0_DR3_MARK, TX0_D_MARK, SDA0_E_MARK, PWM0_B_MARK, A3_MARK,
+ DU0_DR4_MARK, RX1_D_MARK, A4_MARK,
+ DU0_DR5_MARK, TX1_D_MARK, PWM1_B_MARK, A5_MARK,
+ DU0_DR6_MARK, RX2_C_MARK, A6_MARK,
+
+ /* IPSR5 */
+ DU0_DR7_MARK, TX2_C_MARK, PWM2_B_MARK, A7_MARK,
+ DU0_DG0_MARK, RX3_B_MARK, SCL3_D_MARK, A8_MARK,
+ DU0_DG1_MARK, TX3_B_MARK, SDA3_D_MARK, PWM3_B_MARK, A9_MARK,
+ DU0_DG2_MARK, RX4_D_MARK, A10_MARK,
+ DU0_DG3_MARK, TX4_D_MARK, PWM4_B_MARK, A11_MARK,
+ DU0_DG4_MARK, HRX0_A_MARK, A12_MARK,
+ DU0_DG5_MARK, HTX0_A_MARK, PWM5_B_MARK, A13_MARK,
+ DU0_DG6_MARK, HRX1_C_MARK, A14_MARK,
+
+ /* IPSR6 */
+ DU0_DG7_MARK, HTX1_C_MARK, PWM6_B_MARK, A15_MARK,
+ DU0_DB0_MARK, SCL4_D_MARK, CAN0_RX_C_MARK, A16_MARK,
+ DU0_DB1_MARK, SDA4_D_MARK, CAN0_TX_C_MARK, A17_MARK,
+ DU0_DB2_MARK, HCTS0_N_MARK, A18_MARK,
+ DU0_DB3_MARK, HRTS0_N_MARK, A19_MARK,
+ DU0_DB4_MARK, HCTS1_N_C_MARK, A20_MARK,
+ DU0_DB5_MARK, HRTS1_N_C_MARK, A21_MARK,
+ DU0_DB6_MARK, A22_MARK,
+
+ /* IPSR7 */
+ DU0_DB7_MARK, A23_MARK,
+ DU0_DOTCLKIN_MARK, A24_MARK,
+ DU0_DOTCLKOUT0_MARK, A25_MARK,
+ DU0_DOTCLKOUT1_MARK, MSIOF2_RXD_B_MARK, CS1_N_A26_MARK,
+ DU0_EXHSYNC_DU0_HSYNC_MARK, MSIOF2_TXD_B_MARK, DREQ0_N_MARK,
+ DU0_EXVSYNC_DU0_VSYNC_MARK, MSIOF2_SYNC_B_MARK, DACK0_MARK,
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK, MSIOF2_SCK_B_MARK, DRACK0_MARK,
+ DU0_DISP_MARK, CAN1_RX_C_MARK,
+
+ /* IPSR8 */
+ DU0_CDE_MARK, CAN1_TX_C_MARK,
+ VI1_CLK_MARK, AVB_RX_CLK_MARK, ETH_REF_CLK_MARK,
+ VI1_DATA0_MARK, AVB_RX_DV_MARK, ETH_CRS_DV_MARK,
+ VI1_DATA1_MARK, AVB_RXD0_MARK, ETH_RXD0_MARK,
+ VI1_DATA2_MARK, AVB_RXD1_MARK, ETH_RXD1_MARK,
+ VI1_DATA3_MARK, AVB_RXD2_MARK, ETH_MDIO_MARK,
+ VI1_DATA4_MARK, AVB_RXD3_MARK, ETH_RX_ER_MARK,
+ VI1_DATA5_MARK, AVB_RXD4_MARK, ETH_LINK_MARK,
+
+ /* IPSR9 */
+ VI1_DATA6_MARK, AVB_RXD5_MARK, ETH_TXD1_MARK,
+ VI1_DATA7_MARK, AVB_RXD6_MARK, ETH_TX_EN_MARK,
+ VI1_CLKENB_MARK, SCL3_A_MARK, AVB_RXD7_MARK, ETH_MAGIC_MARK,
+ VI1_FIELD_MARK, SDA3_A_MARK, AVB_RX_ER_MARK, ETH_TXD0_MARK,
+ VI1_HSYNC_N_MARK, RX0_B_MARK, SCL0_C_MARK, AVB_GTXREFCLK_MARK, ETH_MDC_MARK,
+ VI1_VSYNC_N_MARK, TX0_B_MARK, SDA0_C_MARK, AUDIO_CLKOUT_B_MARK, AVB_TX_CLK_MARK,
+ VI1_DATA8_MARK, SCL2_B_MARK, AVB_TX_EN_MARK,
+ VI1_DATA9_MARK, SDA2_B_MARK, AVB_TXD0_MARK,
+
+ /* IPSR10 */
+ VI1_DATA10_MARK, CAN0_RX_B_MARK, AVB_TXD1_MARK,
+ VI1_DATA11_MARK, CAN0_TX_B_MARK, AVB_TXD2_MARK,
+ AVB_TXD3_MARK, AUDIO_CLKA_B_MARK, SSI_SCK1_D_MARK, RX5_F_MARK, MSIOF0_RXD_B_MARK,
+ AVB_TXD4_MARK, AUDIO_CLKB_B_MARK, SSI_WS1_D_MARK, TX5_F_MARK, MSIOF0_TXD_B_MARK,
+ AVB_TXD5_MARK, SCIF_CLK_B_MARK, AUDIO_CLKC_B_MARK, SSI_SDATA1_D_MARK, MSIOF0_SCK_B_MARK,
+ SCL0_A_MARK, RX0_C_MARK, PWM5_A_MARK, TCLK1_B_MARK, AVB_TXD6_MARK, CAN1_RX_D_MARK, MSIOF0_SYNC_B_MARK,
+ SDA0_A_MARK, TX0_C_MARK, IRQ5_MARK, CAN_CLK_A_MARK, AVB_GTX_CLK_MARK, CAN1_TX_D_MARK, DVC_MUTE_MARK,
+ SCL1_A_MARK, RX4_A_MARK, PWM5_D_MARK, DU1_DR0_MARK, SSI_SCK6_B_MARK, VI0_G0_MARK,
+
+ /* IPSR11 */
+ SDA1_A_MARK, TX4_A_MARK, DU1_DR1_MARK, SSI_WS6_B_MARK, VI0_G1_MARK,
+ MSIOF0_RXD_A_MARK, RX5_A_MARK, SCL2_C_MARK, DU1_DR2_MARK, QSPI1_MOSI_QSPI1_IO0_MARK, SSI_SDATA6_B_MARK, VI0_G2_MARK,
+ MSIOF0_TXD_A_MARK, TX5_A_MARK, SDA2_C_MARK, DU1_DR3_MARK, QSPI1_MISO_QSPI1_IO1_MARK, SSI_WS78_B_MARK, VI0_G3_MARK,
+ MSIOF0_SCK_A_MARK, IRQ0_MARK, DU1_DR4_MARK, QSPI1_SPCLK_MARK, SSI_SCK78_B_MARK, VI0_G4_MARK,
+ MSIOF0_SYNC_A_MARK, PWM1_A_MARK, DU1_DR5_MARK, QSPI1_IO2_MARK, SSI_SDATA7_B_MARK,
+ MSIOF0_SS1_A_MARK, DU1_DR6_MARK, QSPI1_IO3_MARK, SSI_SDATA8_B_MARK,
+ MSIOF0_SS2_A_MARK, DU1_DR7_MARK, QSPI1_SSL_MARK,
+ HRX1_A_MARK, SCL4_A_MARK, PWM6_A_MARK, DU1_DG0_MARK, RX0_A_MARK,
+
+ /* IPSR12 */
+ HTX1_A_MARK, SDA4_A_MARK, DU1_DG1_MARK, TX0_A_MARK,
+ HCTS1_N_A_MARK, PWM2_A_MARK, DU1_DG2_MARK, REMOCON_B_MARK,
+ HRTS1_N_A_MARK, DU1_DG3_MARK, SSI_WS1_B_MARK, IRQ1_MARK,
+ SD2_CLK_MARK, HSCK1_MARK, DU1_DG4_MARK, SSI_SCK1_B_MARK,
+ SD2_CMD_MARK, SCIF1_SCK_A_MARK, TCLK2_A_MARK, DU1_DG5_MARK, SSI_SCK2_B_MARK, PWM3_A_MARK,
+ SD2_DAT0_MARK, RX1_A_MARK, SCL1_E_MARK, DU1_DG6_MARK, SSI_SDATA1_B_MARK,
+ SD2_DAT1_MARK, TX1_A_MARK, SDA1_E_MARK, DU1_DG7_MARK, SSI_WS2_B_MARK,
+ SD2_DAT2_MARK, RX2_A_MARK, DU1_DB0_MARK, SSI_SDATA2_B_MARK,
+
+ /* IPSR13 */
+ SD2_DAT3_MARK, TX2_A_MARK, DU1_DB1_MARK, SSI_WS9_B_MARK,
+ SD2_CD_MARK, SCIF2_SCK_A_MARK, DU1_DB2_MARK, SSI_SCK9_B_MARK,
+ SD2_WP_MARK, SCIF3_SCK_MARK, DU1_DB3_MARK, SSI_SDATA9_B_MARK,
+ RX3_A_MARK, SCL1_C_MARK, MSIOF1_RXD_B_MARK, DU1_DB4_MARK, AUDIO_CLKA_C_MARK, SSI_SDATA4_B_MARK,
+ TX3_A_MARK, SDA1_C_MARK, MSIOF1_TXD_B_MARK, DU1_DB5_MARK, AUDIO_CLKB_C_MARK, SSI_WS4_B_MARK,
+ SCL2_A_MARK, MSIOF1_SCK_B_MARK, DU1_DB6_MARK, AUDIO_CLKC_C_MARK, SSI_SCK4_B_MARK,
+ SDA2_A_MARK, MSIOF1_SYNC_B_MARK, DU1_DB7_MARK, AUDIO_CLKOUT_C_MARK,
+ SSI_SCK5_A_MARK, DU1_DOTCLKOUT1_MARK,
+
+ /* IPSR14 */
+ SSI_WS5_A_MARK, SCL3_C_MARK, DU1_DOTCLKIN_MARK,
+ SSI_SDATA5_A_MARK, SDA3_C_MARK, DU1_DOTCLKOUT0_MARK,
+ SSI_SCK6_A_MARK, DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK,
+ SSI_WS6_A_MARK, SCL4_C_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK,
+ SSI_SDATA6_A_MARK, SDA4_C_MARK, DU1_EXVSYNC_DU1_VSYNC_MARK,
+ SSI_SCK78_A_MARK, SDA4_E_MARK, DU1_DISP_MARK,
+ SSI_WS78_A_MARK, SCL4_E_MARK, DU1_CDE_MARK,
+ SSI_SDATA7_A_MARK, IRQ8_MARK, AUDIO_CLKA_D_MARK, CAN_CLK_D_MARK, VI0_G5_MARK,
+
+ /* IPSR15 */
+ SSI_SCK0129_A_MARK, MSIOF1_RXD_A_MARK, RX5_D_MARK, VI0_G6_MARK,
+ SSI_WS0129_A_MARK, MSIOF1_TXD_A_MARK, TX5_D_MARK, VI0_G7_MARK,
+ SSI_SDATA0_A_MARK, MSIOF1_SYNC_A_MARK, PWM0_C_MARK, VI0_R0_MARK,
+ SSI_SCK34_MARK, MSIOF1_SCK_A_MARK, AVB_MDC_MARK, DACK1_MARK, VI0_R1_MARK,
+ SSI_WS34_MARK, MSIOF1_SS1_A_MARK, AVB_MDIO_MARK, CAN1_RX_A_MARK, DREQ1_N_MARK, VI0_R2_MARK,
+ SSI_SDATA3_MARK, MSIOF1_SS2_A_MARK, AVB_LINK_MARK, CAN1_TX_A_MARK, DREQ2_N_MARK, VI0_R3_MARK,
+ SSI_SCK4_A_MARK, AVB_MAGIC_MARK, VI0_R4_MARK,
+ SSI_WS4_A_MARK, AVB_PHY_INT_MARK, VI0_R5_MARK,
+
+ /* IPSR16 */
+ SSI_SDATA4_A_MARK, AVB_CRS_MARK, VI0_R6_MARK,
+ SSI_SCK1_A_MARK, SCIF1_SCK_B_MARK, PWM1_D_MARK, IRQ9_MARK, REMOCON_A_MARK, DACK2_MARK, VI0_CLK_MARK, AVB_COL_MARK,
+ SSI_SDATA8_A_MARK, RX1_B_MARK, CAN0_RX_D_MARK, AVB_AVTP_CAPTURE_B_MARK, VI0_R7_MARK,
+ SSI_WS1_A_MARK, TX1_B_MARK, CAN0_TX_D_MARK, AVB_AVTP_MATCH_B_MARK, VI0_DATA0_VI0_B0_MARK,
+ SSI_SDATA1_A_MARK, HRX1_B_MARK, VI0_DATA1_VI0_B1_MARK,
+ SSI_SCK2_A_MARK, HTX1_B_MARK, AVB_TXD7_MARK, VI0_DATA2_VI0_B2_MARK,
+ SSI_WS2_A_MARK, HCTS1_N_B_MARK, AVB_TX_ER_MARK, VI0_DATA3_VI0_B3_MARK,
+ SSI_SDATA2_A_MARK, HRTS1_N_B_MARK, VI0_DATA4_VI0_B4_MARK,
+
+ /* IPSR17 */
+ SSI_SCK9_A_MARK, RX2_B_MARK, SCL3_E_MARK, EX_WAIT1_MARK, VI0_DATA5_VI0_B5_MARK,
+ SSI_WS9_A_MARK, TX2_B_MARK, SDA3_E_MARK, VI0_DATA6_VI0_B6_MARK,
+ SSI_SDATA9_A_MARK, SCIF2_SCK_B_MARK, PWM2_D_MARK, VI0_DATA7_VI0_B7_MARK,
+ AUDIO_CLKA_A_MARK, SCL0_B_MARK, VI0_CLKENB_MARK,
+ AUDIO_CLKB_A_MARK, SDA0_B_MARK, VI0_FIELD_MARK,
+ AUDIO_CLKC_A_MARK, SCL4_B_MARK, VI0_HSYNC_N_MARK,
+ AUDIO_CLKOUT_A_MARK, SDA4_B_MARK, VI0_VSYNC_N_MARK,
+
+ PINMUX_MARK_END,
+};
+
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
+
+ PINMUX_SINGLE(USB0_PWEN),
+ PINMUX_SINGLE(USB0_OVC),
+ PINMUX_SINGLE(USB1_PWEN),
+ PINMUX_SINGLE(USB1_OVC),
+ PINMUX_SINGLE(CLKOUT),
+ PINMUX_SINGLE(MMC0_CLK_SDHI1_CLK),
+ PINMUX_SINGLE(MMC0_CMD_SDHI1_CMD),
+ PINMUX_SINGLE(MMC0_D0_SDHI1_D0),
+ PINMUX_SINGLE(MMC0_D1_SDHI1_D1),
+ PINMUX_SINGLE(MMC0_D2_SDHI1_D2),
+ PINMUX_SINGLE(MMC0_D3_SDHI1_D3),
+ PINMUX_SINGLE(MMC0_D6),
+ PINMUX_SINGLE(MMC0_D7),
+
+ /* IPSR0 */
+ PINMUX_IPSR_GPSR(IP0_3_0, SD0_CLK),
+ PINMUX_IPSR_MSEL(IP0_3_0, SSI_SCK1_C, SEL_SSI1_2),
+ PINMUX_IPSR_MSEL(IP0_3_0, RX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_GPSR(IP0_7_4, SD0_CMD),
+ PINMUX_IPSR_MSEL(IP0_7_4, SSI_WS1_C, SEL_SSI1_2),
+ PINMUX_IPSR_MSEL(IP0_7_4, TX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_GPSR(IP0_11_8, SD0_DAT0),
+ PINMUX_IPSR_MSEL(IP0_11_8, SSI_SDATA1_C, SEL_SSI1_2),
+ PINMUX_IPSR_MSEL(IP0_11_8, RX4_E, SEL_SCIF4_4),
+ PINMUX_IPSR_GPSR(IP0_15_12, SD0_DAT1),
+ PINMUX_IPSR_MSEL(IP0_15_12, SSI_SCK0129_B, SEL_SSI0_1),
+ PINMUX_IPSR_MSEL(IP0_15_12, TX4_E, SEL_SCIF4_4),
+ PINMUX_IPSR_GPSR(IP0_19_16, SD0_DAT2),
+ PINMUX_IPSR_MSEL(IP0_19_16, SSI_WS0129_B, SEL_SSI0_1),
+ PINMUX_IPSR_MSEL(IP0_19_16, RX5_E, SEL_SCIF5_4),
+ PINMUX_IPSR_GPSR(IP0_23_20, SD0_DAT3),
+ PINMUX_IPSR_MSEL(IP0_23_20, SSI_SDATA0_B, SEL_SSI0_1),
+ PINMUX_IPSR_MSEL(IP0_23_20, TX5_E, SEL_SCIF5_4),
+ PINMUX_IPSR_GPSR(IP0_27_24, SD0_CD),
+ PINMUX_IPSR_MSEL(IP0_27_24, CAN0_RX_A, SEL_CAN0_0),
+ PINMUX_IPSR_GPSR(IP0_31_28, SD0_WP),
+ PINMUX_IPSR_GPSR(IP0_31_28, IRQ7),
+ PINMUX_IPSR_MSEL(IP0_31_28, CAN0_TX_A, SEL_CAN0_0),
+
+ /* IPSR1 */
+ PINMUX_IPSR_GPSR(IP1_3_0, MMC0_D4),
+ PINMUX_IPSR_GPSR(IP1_3_0, SD1_CD),
+ PINMUX_IPSR_GPSR(IP1_7_4, MMC0_D5),
+ PINMUX_IPSR_GPSR(IP1_7_4, SD1_WP),
+ PINMUX_IPSR_GPSR(IP1_11_8, D0),
+ PINMUX_IPSR_MSEL(IP1_11_8, SCL3_B, SEL_I2C03_1),
+ PINMUX_IPSR_MSEL(IP1_11_8, RX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_GPSR(IP1_11_8, IRQ4),
+ PINMUX_IPSR_MSEL(IP1_11_8, MSIOF2_RXD_C, SEL_MSIOF2_2),
+ PINMUX_IPSR_MSEL(IP1_11_8, SSI_SDATA5_B, SEL_SSI5_1),
+ PINMUX_IPSR_GPSR(IP1_15_12, D1),
+ PINMUX_IPSR_MSEL(IP1_15_12, SDA3_B, SEL_I2C03_1),
+ PINMUX_IPSR_MSEL(IP1_15_12, TX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MSEL(IP1_15_12, MSIOF2_TXD_C, SEL_MSIOF2_2),
+ PINMUX_IPSR_MSEL(IP1_15_12, SSI_WS5_B, SEL_SSI5_1),
+ PINMUX_IPSR_GPSR(IP1_19_16, D2),
+ PINMUX_IPSR_MSEL(IP1_19_16, RX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MSEL(IP1_19_16, SCL0_D, SEL_I2C00_3),
+ PINMUX_IPSR_GPSR(IP1_19_16, PWM1_C),
+ PINMUX_IPSR_MSEL(IP1_19_16, MSIOF2_SCK_C, SEL_MSIOF2_2),
+ PINMUX_IPSR_MSEL(IP1_19_16, SSI_SCK5_B, SEL_SSI5_1),
+ PINMUX_IPSR_GPSR(IP1_23_20, D3),
+ PINMUX_IPSR_MSEL(IP1_23_20, TX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MSEL(IP1_23_20, SDA0_D, SEL_I2C00_3),
+ PINMUX_IPSR_GPSR(IP1_23_20, PWM0_A),
+ PINMUX_IPSR_MSEL(IP1_23_20, MSIOF2_SYNC_C, SEL_MSIOF2_2),
+ PINMUX_IPSR_GPSR(IP1_27_24, D4),
+ PINMUX_IPSR_GPSR(IP1_27_24, IRQ3),
+ PINMUX_IPSR_MSEL(IP1_27_24, TCLK1_A, SEL_TMU1_0),
+ PINMUX_IPSR_GPSR(IP1_27_24, PWM6_C),
+ PINMUX_IPSR_GPSR(IP1_31_28, D5),
+ PINMUX_IPSR_GPSR(IP1_31_28, HRX2),
+ PINMUX_IPSR_MSEL(IP1_31_28, SCL1_B, SEL_I2C01_1),
+ PINMUX_IPSR_GPSR(IP1_31_28, PWM2_C),
+ PINMUX_IPSR_MSEL(IP1_31_28, TCLK2_B, SEL_TMU2_1),
+
+ /* IPSR2 */
+ PINMUX_IPSR_GPSR(IP2_3_0, D6),
+ PINMUX_IPSR_GPSR(IP2_3_0, HTX2),
+ PINMUX_IPSR_MSEL(IP2_3_0, SDA1_B, SEL_I2C01_1),
+ PINMUX_IPSR_GPSR(IP2_3_0, PWM4_C),
+ PINMUX_IPSR_GPSR(IP2_7_4, D7),
+ PINMUX_IPSR_GPSR(IP2_7_4, HSCK2),
+ PINMUX_IPSR_MSEL(IP2_7_4, SCIF1_SCK_C, SEL_SCIF1_2),
+ PINMUX_IPSR_GPSR(IP2_7_4, IRQ6),
+ PINMUX_IPSR_GPSR(IP2_7_4, PWM5_C),
+ PINMUX_IPSR_GPSR(IP2_11_8, D8),
+ PINMUX_IPSR_GPSR(IP2_11_8, HCTS2_N),
+ PINMUX_IPSR_MSEL(IP2_11_8, RX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MSEL(IP2_11_8, SCL1_D, SEL_I2C01_3),
+ PINMUX_IPSR_GPSR(IP2_11_8, PWM3_C),
+ PINMUX_IPSR_GPSR(IP2_15_12, D9),
+ PINMUX_IPSR_GPSR(IP2_15_12, HRTS2_N),
+ PINMUX_IPSR_MSEL(IP2_15_12, TX1_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MSEL(IP2_15_12, SDA1_D, SEL_I2C01_3),
+ PINMUX_IPSR_GPSR(IP2_19_16, D10),
+ PINMUX_IPSR_MSEL(IP2_19_16, MSIOF2_RXD_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP2_19_16, HRX0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_GPSR(IP2_23_20, D11),
+ PINMUX_IPSR_MSEL(IP2_23_20, MSIOF2_TXD_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP2_23_20, HTX0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_GPSR(IP2_27_24, D12),
+ PINMUX_IPSR_MSEL(IP2_27_24, MSIOF2_SCK_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_GPSR(IP2_27_24, HSCK0),
+ PINMUX_IPSR_MSEL(IP2_27_24, CAN_CLK_C, SEL_CANCLK_2),
+ PINMUX_IPSR_GPSR(IP2_31_28, D13),
+ PINMUX_IPSR_MSEL(IP2_31_28, MSIOF2_SYNC_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP2_31_28, RX4_C, SEL_SCIF4_2),
+
+ /* IPSR3 */
+ PINMUX_IPSR_GPSR(IP3_3_0, D14),
+ PINMUX_IPSR_GPSR(IP3_3_0, MSIOF2_SS1),
+ PINMUX_IPSR_MSEL(IP3_3_0, TX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_MSEL(IP3_3_0, CAN1_RX_B, SEL_CAN1_1),
+ PINMUX_IPSR_MSEL(IP3_3_0, AVB_AVTP_CAPTURE_A, SEL_AVB_0),
+ PINMUX_IPSR_GPSR(IP3_7_4, D15),
+ PINMUX_IPSR_GPSR(IP3_7_4, MSIOF2_SS2),
+ PINMUX_IPSR_GPSR(IP3_7_4, PWM4_A),
+ PINMUX_IPSR_MSEL(IP3_7_4, CAN1_TX_B, SEL_CAN1_1),
+ PINMUX_IPSR_GPSR(IP3_7_4, IRQ2),
+ PINMUX_IPSR_MSEL(IP3_7_4, AVB_AVTP_MATCH_A, SEL_AVB_0),
+ PINMUX_IPSR_GPSR(IP3_11_8, QSPI0_SPCLK),
+ PINMUX_IPSR_GPSR(IP3_11_8, WE0_N),
+ PINMUX_IPSR_GPSR(IP3_15_12, QSPI0_MOSI_QSPI0_IO0),
+ PINMUX_IPSR_GPSR(IP3_15_12, BS_N),
+ PINMUX_IPSR_GPSR(IP3_19_16, QSPI0_MISO_QSPI0_IO1),
+ PINMUX_IPSR_GPSR(IP3_19_16, RD_WR_N),
+ PINMUX_IPSR_GPSR(IP3_23_20, QSPI0_IO2),
+ PINMUX_IPSR_GPSR(IP3_23_20, CS0_N),
+ PINMUX_IPSR_GPSR(IP3_27_24, QSPI0_IO3),
+ PINMUX_IPSR_GPSR(IP3_27_24, RD_N),
+ PINMUX_IPSR_GPSR(IP3_31_28, QSPI0_SSL),
+ PINMUX_IPSR_GPSR(IP3_31_28, WE1_N),
+
+ /* IPSR4 */
+ PINMUX_IPSR_GPSR(IP4_3_0, EX_WAIT0),
+ PINMUX_IPSR_MSEL(IP4_3_0, CAN_CLK_B, SEL_CANCLK_1),
+ PINMUX_IPSR_MSEL(IP4_3_0, SCIF_CLK_A, SEL_SCIFCLK_0),
+ PINMUX_IPSR_GPSR(IP4_7_4, DU0_DR0),
+ PINMUX_IPSR_MSEL(IP4_7_4, RX5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_MSEL(IP4_7_4, SCL2_D, SEL_I2C02_3),
+ PINMUX_IPSR_GPSR(IP4_7_4, A0),
+ PINMUX_IPSR_GPSR(IP4_11_8, DU0_DR1),
+ PINMUX_IPSR_MSEL(IP4_11_8, TX5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_MSEL(IP4_11_8, SDA2_D, SEL_I2C02_3),
+ PINMUX_IPSR_GPSR(IP4_11_8, A1),
+ PINMUX_IPSR_GPSR(IP4_15_12, DU0_DR2),
+ PINMUX_IPSR_MSEL(IP4_15_12, RX0_D, SEL_SCIF0_3),
+ PINMUX_IPSR_MSEL(IP4_15_12, SCL0_E, SEL_I2C00_4),
+ PINMUX_IPSR_GPSR(IP4_15_12, A2),
+ PINMUX_IPSR_GPSR(IP4_19_16, DU0_DR3),
+ PINMUX_IPSR_MSEL(IP4_19_16, TX0_D, SEL_SCIF0_3),
+ PINMUX_IPSR_MSEL(IP4_19_16, SDA0_E, SEL_I2C00_4),
+ PINMUX_IPSR_GPSR(IP4_19_16, PWM0_B),
+ PINMUX_IPSR_GPSR(IP4_19_16, A3),
+ PINMUX_IPSR_GPSR(IP4_23_20, DU0_DR4),
+ PINMUX_IPSR_MSEL(IP4_23_20, RX1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_GPSR(IP4_23_20, A4),
+ PINMUX_IPSR_GPSR(IP4_27_24, DU0_DR5),
+ PINMUX_IPSR_MSEL(IP4_27_24, TX1_D, SEL_SCIF1_3),
+ PINMUX_IPSR_GPSR(IP4_27_24, PWM1_B),
+ PINMUX_IPSR_GPSR(IP4_27_24, A5),
+ PINMUX_IPSR_GPSR(IP4_31_28, DU0_DR6),
+ PINMUX_IPSR_MSEL(IP4_31_28, RX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_GPSR(IP4_31_28, A6),
+
+ /* IPSR5 */
+ PINMUX_IPSR_GPSR(IP5_3_0, DU0_DR7),
+ PINMUX_IPSR_MSEL(IP5_3_0, TX2_C, SEL_SCIF2_2),
+ PINMUX_IPSR_GPSR(IP5_3_0, PWM2_B),
+ PINMUX_IPSR_GPSR(IP5_3_0, A7),
+ PINMUX_IPSR_GPSR(IP5_7_4, DU0_DG0),
+ PINMUX_IPSR_MSEL(IP5_7_4, RX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_MSEL(IP5_7_4, SCL3_D, SEL_I2C03_3),
+ PINMUX_IPSR_GPSR(IP5_7_4, A8),
+ PINMUX_IPSR_GPSR(IP5_11_8, DU0_DG1),
+ PINMUX_IPSR_MSEL(IP5_11_8, TX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_MSEL(IP5_11_8, SDA3_D, SEL_I2C03_3),
+ PINMUX_IPSR_GPSR(IP5_11_8, PWM3_B),
+ PINMUX_IPSR_GPSR(IP5_11_8, A9),
+ PINMUX_IPSR_GPSR(IP5_15_12, DU0_DG2),
+ PINMUX_IPSR_MSEL(IP5_15_12, RX4_D, SEL_SCIF4_3),
+ PINMUX_IPSR_GPSR(IP5_15_12, A10),
+ PINMUX_IPSR_GPSR(IP5_19_16, DU0_DG3),
+ PINMUX_IPSR_MSEL(IP5_19_16, TX4_D, SEL_SCIF4_3),
+ PINMUX_IPSR_GPSR(IP5_19_16, PWM4_B),
+ PINMUX_IPSR_GPSR(IP5_19_16, A11),
+ PINMUX_IPSR_GPSR(IP5_23_20, DU0_DG4),
+ PINMUX_IPSR_MSEL(IP5_23_20, HRX0_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_GPSR(IP5_23_20, A12),
+ PINMUX_IPSR_GPSR(IP5_27_24, DU0_DG5),
+ PINMUX_IPSR_MSEL(IP5_27_24, HTX0_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_GPSR(IP5_27_24, PWM5_B),
+ PINMUX_IPSR_GPSR(IP5_27_24, A13),
+ PINMUX_IPSR_GPSR(IP5_31_28, DU0_DG6),
+ PINMUX_IPSR_MSEL(IP5_31_28, HRX1_C, SEL_HSCIF1_2),
+ PINMUX_IPSR_GPSR(IP5_31_28, A14),
+
+ /* IPSR6 */
+ PINMUX_IPSR_GPSR(IP6_3_0, DU0_DG7),
+ PINMUX_IPSR_MSEL(IP6_3_0, HTX1_C, SEL_HSCIF1_2),
+ PINMUX_IPSR_GPSR(IP6_3_0, PWM6_B),
+ PINMUX_IPSR_GPSR(IP6_3_0, A15),
+ PINMUX_IPSR_GPSR(IP6_7_4, DU0_DB0),
+ PINMUX_IPSR_MSEL(IP6_7_4, SCL4_D, SEL_I2C04_3),
+ PINMUX_IPSR_MSEL(IP6_7_4, CAN0_RX_C, SEL_CAN0_2),
+ PINMUX_IPSR_GPSR(IP6_7_4, A16),
+ PINMUX_IPSR_GPSR(IP6_11_8, DU0_DB1),
+ PINMUX_IPSR_MSEL(IP6_11_8, SDA4_D, SEL_I2C04_3),
+ PINMUX_IPSR_MSEL(IP6_11_8, CAN0_TX_C, SEL_CAN0_2),
+ PINMUX_IPSR_GPSR(IP6_11_8, A17),
+ PINMUX_IPSR_GPSR(IP6_15_12, DU0_DB2),
+ PINMUX_IPSR_GPSR(IP6_15_12, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP6_15_12, A18),
+ PINMUX_IPSR_GPSR(IP6_19_16, DU0_DB3),
+ PINMUX_IPSR_GPSR(IP6_19_16, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP6_19_16, A19),
+ PINMUX_IPSR_GPSR(IP6_23_20, DU0_DB4),
+ PINMUX_IPSR_MSEL(IP6_23_20, HCTS1_N_C, SEL_HSCIF1_2),
+ PINMUX_IPSR_GPSR(IP6_23_20, A20),
+ PINMUX_IPSR_GPSR(IP6_27_24, DU0_DB5),
+ PINMUX_IPSR_MSEL(IP6_27_24, HRTS1_N_C, SEL_HSCIF1_2),
+ PINMUX_IPSR_GPSR(IP6_27_24, A21),
+ PINMUX_IPSR_GPSR(IP6_31_28, DU0_DB6),
+ PINMUX_IPSR_GPSR(IP6_31_28, A22),
+
+ /* IPSR7 */
+ PINMUX_IPSR_GPSR(IP7_3_0, DU0_DB7),
+ PINMUX_IPSR_GPSR(IP7_3_0, A23),
+ PINMUX_IPSR_GPSR(IP7_7_4, DU0_DOTCLKIN),
+ PINMUX_IPSR_GPSR(IP7_7_4, A24),
+ PINMUX_IPSR_GPSR(IP7_11_8, DU0_DOTCLKOUT0),
+ PINMUX_IPSR_GPSR(IP7_11_8, A25),
+ PINMUX_IPSR_GPSR(IP7_15_12, DU0_DOTCLKOUT1),
+ PINMUX_IPSR_MSEL(IP7_15_12, MSIOF2_RXD_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP7_15_12, CS1_N_A26),
+ PINMUX_IPSR_GPSR(IP7_19_16, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_MSEL(IP7_19_16, MSIOF2_TXD_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP7_19_16, DREQ0_N),
+ PINMUX_IPSR_GPSR(IP7_23_20, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_MSEL(IP7_23_20, MSIOF2_SYNC_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP7_23_20, DACK0),
+ PINMUX_IPSR_GPSR(IP7_27_24, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+ PINMUX_IPSR_MSEL(IP7_27_24, MSIOF2_SCK_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP7_27_24, DRACK0),
+ PINMUX_IPSR_GPSR(IP7_31_28, DU0_DISP),
+ PINMUX_IPSR_MSEL(IP7_31_28, CAN1_RX_C, SEL_CAN1_2),
+
+ /* IPSR8 */
+ PINMUX_IPSR_GPSR(IP8_3_0, DU0_CDE),
+ PINMUX_IPSR_MSEL(IP8_3_0, CAN1_TX_C, SEL_CAN1_2),
+ PINMUX_IPSR_GPSR(IP8_7_4, VI1_CLK),
+ PINMUX_IPSR_GPSR(IP8_7_4, AVB_RX_CLK),
+ PINMUX_IPSR_GPSR(IP8_7_4, ETH_REF_CLK),
+ PINMUX_IPSR_GPSR(IP8_11_8, VI1_DATA0),
+ PINMUX_IPSR_GPSR(IP8_11_8, AVB_RX_DV),
+ PINMUX_IPSR_GPSR(IP8_11_8, ETH_CRS_DV),
+ PINMUX_IPSR_GPSR(IP8_15_12, VI1_DATA1),
+ PINMUX_IPSR_GPSR(IP8_15_12, AVB_RXD0),
+ PINMUX_IPSR_GPSR(IP8_15_12, ETH_RXD0),
+ PINMUX_IPSR_GPSR(IP8_19_16, VI1_DATA2),
+ PINMUX_IPSR_GPSR(IP8_19_16, AVB_RXD1),
+ PINMUX_IPSR_GPSR(IP8_19_16, ETH_RXD1),
+ PINMUX_IPSR_GPSR(IP8_23_20, VI1_DATA3),
+ PINMUX_IPSR_GPSR(IP8_23_20, AVB_RXD2),
+ PINMUX_IPSR_GPSR(IP8_23_20, ETH_MDIO),
+ PINMUX_IPSR_GPSR(IP8_27_24, VI1_DATA4),
+ PINMUX_IPSR_GPSR(IP8_27_24, AVB_RXD3),
+ PINMUX_IPSR_GPSR(IP8_27_24, ETH_RX_ER),
+ PINMUX_IPSR_GPSR(IP8_31_28, VI1_DATA5),
+ PINMUX_IPSR_GPSR(IP8_31_28, AVB_RXD4),
+ PINMUX_IPSR_GPSR(IP8_31_28, ETH_LINK),
+
+ /* IPSR9 */
+ PINMUX_IPSR_GPSR(IP9_3_0, VI1_DATA6),
+ PINMUX_IPSR_GPSR(IP9_3_0, AVB_RXD5),
+ PINMUX_IPSR_GPSR(IP9_3_0, ETH_TXD1),
+ PINMUX_IPSR_GPSR(IP9_7_4, VI1_DATA7),
+ PINMUX_IPSR_GPSR(IP9_7_4, AVB_RXD6),
+ PINMUX_IPSR_GPSR(IP9_7_4, ETH_TX_EN),
+ PINMUX_IPSR_GPSR(IP9_11_8, VI1_CLKENB),
+ PINMUX_IPSR_MSEL(IP9_11_8, SCL3_A, SEL_I2C03_0),
+ PINMUX_IPSR_GPSR(IP9_11_8, AVB_RXD7),
+ PINMUX_IPSR_GPSR(IP9_11_8, ETH_MAGIC),
+ PINMUX_IPSR_GPSR(IP9_15_12, VI1_FIELD),
+ PINMUX_IPSR_MSEL(IP9_15_12, SDA3_A, SEL_I2C03_0),
+ PINMUX_IPSR_GPSR(IP9_15_12, AVB_RX_ER),
+ PINMUX_IPSR_GPSR(IP9_15_12, ETH_TXD0),
+ PINMUX_IPSR_GPSR(IP9_19_16, VI1_HSYNC_N),
+ PINMUX_IPSR_MSEL(IP9_19_16, RX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MSEL(IP9_19_16, SCL0_C, SEL_I2C00_2),
+ PINMUX_IPSR_GPSR(IP9_19_16, AVB_GTXREFCLK),
+ PINMUX_IPSR_GPSR(IP9_19_16, ETH_MDC),
+ PINMUX_IPSR_GPSR(IP9_23_20, VI1_VSYNC_N),
+ PINMUX_IPSR_MSEL(IP9_23_20, TX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MSEL(IP9_23_20, SDA0_C, SEL_I2C00_2),
+ PINMUX_IPSR_GPSR(IP9_23_20, AUDIO_CLKOUT_B),
+ PINMUX_IPSR_GPSR(IP9_23_20, AVB_TX_CLK),
+ PINMUX_IPSR_GPSR(IP9_27_24, VI1_DATA8),
+ PINMUX_IPSR_MSEL(IP9_27_24, SCL2_B, SEL_I2C02_1),
+ PINMUX_IPSR_GPSR(IP9_27_24, AVB_TX_EN),
+ PINMUX_IPSR_GPSR(IP9_31_28, VI1_DATA9),
+ PINMUX_IPSR_MSEL(IP9_31_28, SDA2_B, SEL_I2C02_1),
+ PINMUX_IPSR_GPSR(IP9_31_28, AVB_TXD0),
+
+ /* IPSR10 */
+ PINMUX_IPSR_GPSR(IP10_3_0, VI1_DATA10),
+ PINMUX_IPSR_MSEL(IP10_3_0, CAN0_RX_B, SEL_CAN0_1),
+ PINMUX_IPSR_GPSR(IP10_3_0, AVB_TXD1),
+ PINMUX_IPSR_GPSR(IP10_7_4, VI1_DATA11),
+ PINMUX_IPSR_MSEL(IP10_7_4, CAN0_TX_B, SEL_CAN0_1),
+ PINMUX_IPSR_GPSR(IP10_7_4, AVB_TXD2),
+ PINMUX_IPSR_GPSR(IP10_11_8, AVB_TXD3),
+ PINMUX_IPSR_MSEL(IP10_11_8, AUDIO_CLKA_B, SEL_ADGA_1),
+ PINMUX_IPSR_MSEL(IP10_11_8, SSI_SCK1_D, SEL_SSI1_3),
+ PINMUX_IPSR_MSEL(IP10_11_8, RX5_F, SEL_SCIF5_5),
+ PINMUX_IPSR_MSEL(IP10_11_8, MSIOF0_RXD_B, SEL_MSIOF0_1),
+ PINMUX_IPSR_GPSR(IP10_15_12, AVB_TXD4),
+ PINMUX_IPSR_MSEL(IP10_15_12, AUDIO_CLKB_B, SEL_ADGB_1),
+ PINMUX_IPSR_MSEL(IP10_15_12, SSI_WS1_D, SEL_SSI1_3),
+ PINMUX_IPSR_MSEL(IP10_15_12, TX5_F, SEL_SCIF5_5),
+ PINMUX_IPSR_MSEL(IP10_15_12, MSIOF0_TXD_B, SEL_MSIOF0_1),
+ PINMUX_IPSR_GPSR(IP10_19_16, AVB_TXD5),
+ PINMUX_IPSR_MSEL(IP10_19_16, SCIF_CLK_B, SEL_SCIFCLK_1),
+ PINMUX_IPSR_MSEL(IP10_19_16, AUDIO_CLKC_B, SEL_ADGC_1),
+ PINMUX_IPSR_MSEL(IP10_19_16, SSI_SDATA1_D, SEL_SSI1_3),
+ PINMUX_IPSR_MSEL(IP10_19_16, MSIOF0_SCK_B, SEL_MSIOF0_1),
+ PINMUX_IPSR_MSEL(IP10_23_20, SCL0_A, SEL_I2C00_0),
+ PINMUX_IPSR_MSEL(IP10_23_20, RX0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_GPSR(IP10_23_20, PWM5_A),
+ PINMUX_IPSR_MSEL(IP10_23_20, TCLK1_B, SEL_TMU1_1),
+ PINMUX_IPSR_GPSR(IP10_23_20, AVB_TXD6),
+ PINMUX_IPSR_MSEL(IP10_23_20, CAN1_RX_D, SEL_CAN1_3),
+ PINMUX_IPSR_MSEL(IP10_23_20, MSIOF0_SYNC_B, SEL_MSIOF0_1),
+ PINMUX_IPSR_MSEL(IP10_27_24, SDA0_A, SEL_I2C00_0),
+ PINMUX_IPSR_MSEL(IP10_27_24, TX0_C, SEL_SCIF0_2),
+ PINMUX_IPSR_GPSR(IP10_27_24, IRQ5),
+ PINMUX_IPSR_MSEL(IP10_27_24, CAN_CLK_A, SEL_CANCLK_0),
+ PINMUX_IPSR_GPSR(IP10_27_24, AVB_GTX_CLK),
+ PINMUX_IPSR_MSEL(IP10_27_24, CAN1_TX_D, SEL_CAN1_3),
+ PINMUX_IPSR_GPSR(IP10_27_24, DVC_MUTE),
+ PINMUX_IPSR_MSEL(IP10_31_28, SCL1_A, SEL_I2C01_0),
+ PINMUX_IPSR_MSEL(IP10_31_28, RX4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_GPSR(IP10_31_28, PWM5_D),
+ PINMUX_IPSR_GPSR(IP10_31_28, DU1_DR0),
+ PINMUX_IPSR_MSEL(IP10_31_28, SSI_SCK6_B, SEL_SSI6_1),
+ PINMUX_IPSR_GPSR(IP10_31_28, VI0_G0),
+
+ /* IPSR11 */
+ PINMUX_IPSR_MSEL(IP11_3_0, SDA1_A, SEL_I2C01_0),
+ PINMUX_IPSR_MSEL(IP11_3_0, TX4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_GPSR(IP11_3_0, DU1_DR1),
+ PINMUX_IPSR_MSEL(IP11_3_0, SSI_WS6_B, SEL_SSI6_1),
+ PINMUX_IPSR_GPSR(IP11_3_0, VI0_G1),
+ PINMUX_IPSR_MSEL(IP11_7_4, MSIOF0_RXD_A, SEL_MSIOF0_0),
+ PINMUX_IPSR_MSEL(IP11_7_4, RX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_MSEL(IP11_7_4, SCL2_C, SEL_I2C02_2),
+ PINMUX_IPSR_GPSR(IP11_7_4, DU1_DR2),
+ PINMUX_IPSR_GPSR(IP11_7_4, QSPI1_MOSI_QSPI1_IO0),
+ PINMUX_IPSR_MSEL(IP11_7_4, SSI_SDATA6_B, SEL_SSI6_1),
+ PINMUX_IPSR_GPSR(IP11_7_4, VI0_G2),
+ PINMUX_IPSR_MSEL(IP11_11_8, MSIOF0_TXD_A, SEL_MSIOF0_0),
+ PINMUX_IPSR_MSEL(IP11_11_8, TX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_MSEL(IP11_11_8, SDA2_C, SEL_I2C02_2),
+ PINMUX_IPSR_GPSR(IP11_11_8, DU1_DR3),
+ PINMUX_IPSR_GPSR(IP11_11_8, QSPI1_MISO_QSPI1_IO1),
+ PINMUX_IPSR_MSEL(IP11_11_8, SSI_WS78_B, SEL_SSI7_1),
+ PINMUX_IPSR_GPSR(IP11_11_8, VI0_G3),
+ PINMUX_IPSR_MSEL(IP11_15_12, MSIOF0_SCK_A, SEL_MSIOF0_0),
+ PINMUX_IPSR_GPSR(IP11_15_12, IRQ0),
+ PINMUX_IPSR_GPSR(IP11_15_12, DU1_DR4),
+ PINMUX_IPSR_GPSR(IP11_15_12, QSPI1_SPCLK),
+ PINMUX_IPSR_MSEL(IP11_15_12, SSI_SCK78_B, SEL_SSI7_1),
+ PINMUX_IPSR_GPSR(IP11_15_12, VI0_G4),
+ PINMUX_IPSR_MSEL(IP11_19_16, MSIOF0_SYNC_A, SEL_MSIOF0_0),
+ PINMUX_IPSR_GPSR(IP11_19_16, PWM1_A),
+ PINMUX_IPSR_GPSR(IP11_19_16, DU1_DR5),
+ PINMUX_IPSR_GPSR(IP11_19_16, QSPI1_IO2),
+ PINMUX_IPSR_MSEL(IP11_19_16, SSI_SDATA7_B, SEL_SSI7_1),
+ PINMUX_IPSR_MSEL(IP11_23_20, MSIOF0_SS1_A, SEL_MSIOF0_0),
+ PINMUX_IPSR_GPSR(IP11_23_20, DU1_DR6),
+ PINMUX_IPSR_GPSR(IP11_23_20, QSPI1_IO3),
+ PINMUX_IPSR_MSEL(IP11_23_20, SSI_SDATA8_B, SEL_SSI8_1),
+ PINMUX_IPSR_MSEL(IP11_27_24, MSIOF0_SS2_A, SEL_MSIOF0_0),
+ PINMUX_IPSR_GPSR(IP11_27_24, DU1_DR7),
+ PINMUX_IPSR_GPSR(IP11_27_24, QSPI1_SSL),
+ PINMUX_IPSR_MSEL(IP11_31_28, HRX1_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_MSEL(IP11_31_28, SCL4_A, SEL_I2C04_0),
+ PINMUX_IPSR_GPSR(IP11_31_28, PWM6_A),
+ PINMUX_IPSR_GPSR(IP11_31_28, DU1_DG0),
+ PINMUX_IPSR_MSEL(IP11_31_28, RX0_A, SEL_SCIF0_0),
+
+ /* IPSR12 */
+ PINMUX_IPSR_MSEL(IP12_3_0, HTX1_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_3_0, SDA4_A, SEL_I2C04_0),
+ PINMUX_IPSR_GPSR(IP12_3_0, DU1_DG1),
+ PINMUX_IPSR_MSEL(IP12_3_0, TX0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP12_7_4, HCTS1_N_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_GPSR(IP12_7_4, PWM2_A),
+ PINMUX_IPSR_GPSR(IP12_7_4, DU1_DG2),
+ PINMUX_IPSR_MSEL(IP12_7_4, REMOCON_B, SEL_RCN_1),
+ PINMUX_IPSR_MSEL(IP12_11_8, HRTS1_N_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_GPSR(IP12_11_8, DU1_DG3),
+ PINMUX_IPSR_MSEL(IP12_11_8, SSI_WS1_B, SEL_SSI1_1),
+ PINMUX_IPSR_GPSR(IP12_11_8, IRQ1),
+ PINMUX_IPSR_GPSR(IP12_15_12, SD2_CLK),
+ PINMUX_IPSR_GPSR(IP12_15_12, HSCK1),
+ PINMUX_IPSR_GPSR(IP12_15_12, DU1_DG4),
+ PINMUX_IPSR_MSEL(IP12_15_12, SSI_SCK1_B, SEL_SSI1_1),
+ PINMUX_IPSR_GPSR(IP12_19_16, SD2_CMD),
+ PINMUX_IPSR_MSEL(IP12_19_16, SCIF1_SCK_A, SEL_SCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_19_16, TCLK2_A, SEL_TMU2_0),
+ PINMUX_IPSR_GPSR(IP12_19_16, DU1_DG5),
+ PINMUX_IPSR_MSEL(IP12_19_16, SSI_SCK2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP12_19_16, PWM3_A),
+ PINMUX_IPSR_GPSR(IP12_23_20, SD2_DAT0),
+ PINMUX_IPSR_MSEL(IP12_23_20, RX1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_23_20, SCL1_E, SEL_I2C01_4),
+ PINMUX_IPSR_GPSR(IP12_23_20, DU1_DG6),
+ PINMUX_IPSR_MSEL(IP12_23_20, SSI_SDATA1_B, SEL_SSI1_1),
+ PINMUX_IPSR_GPSR(IP12_27_24, SD2_DAT1),
+ PINMUX_IPSR_MSEL(IP12_27_24, TX1_A, SEL_SCIF1_0),
+ PINMUX_IPSR_MSEL(IP12_27_24, SDA1_E, SEL_I2C01_4),
+ PINMUX_IPSR_GPSR(IP12_27_24, DU1_DG7),
+ PINMUX_IPSR_MSEL(IP12_27_24, SSI_WS2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP12_31_28, SD2_DAT2),
+ PINMUX_IPSR_MSEL(IP12_31_28, RX2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_GPSR(IP12_31_28, DU1_DB0),
+ PINMUX_IPSR_MSEL(IP12_31_28, SSI_SDATA2_B, SEL_SSI2_1),
+
+ /* IPSR13 */
+ PINMUX_IPSR_GPSR(IP13_3_0, SD2_DAT3),
+ PINMUX_IPSR_MSEL(IP13_3_0, TX2_A, SEL_SCIF2_0),
+ PINMUX_IPSR_GPSR(IP13_3_0, DU1_DB1),
+ PINMUX_IPSR_MSEL(IP13_3_0, SSI_WS9_B, SEL_SSI9_1),
+ PINMUX_IPSR_GPSR(IP13_7_4, SD2_CD),
+ PINMUX_IPSR_MSEL(IP13_7_4, SCIF2_SCK_A, SEL_SCIF2_CLK_0),
+ PINMUX_IPSR_GPSR(IP13_7_4, DU1_DB2),
+ PINMUX_IPSR_MSEL(IP13_7_4, SSI_SCK9_B, SEL_SSI9_1),
+ PINMUX_IPSR_GPSR(IP13_11_8, SD2_WP),
+ PINMUX_IPSR_GPSR(IP13_11_8, SCIF3_SCK),
+ PINMUX_IPSR_GPSR(IP13_11_8, DU1_DB3),
+ PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA9_B, SEL_SSI9_1),
+ PINMUX_IPSR_MSEL(IP13_15_12, RX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_MSEL(IP13_15_12, SCL1_C, SEL_I2C01_2),
+ PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_RXD_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_GPSR(IP13_15_12, DU1_DB4),
+ PINMUX_IPSR_MSEL(IP13_15_12, AUDIO_CLKA_C, SEL_ADGA_2),
+ PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA4_B, SEL_SSI4_1),
+ PINMUX_IPSR_MSEL(IP13_19_16, TX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_MSEL(IP13_19_16, SDA1_C, SEL_I2C01_2),
+ PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_TXD_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_GPSR(IP13_19_16, DU1_DB5),
+ PINMUX_IPSR_MSEL(IP13_19_16, AUDIO_CLKB_C, SEL_ADGB_2),
+ PINMUX_IPSR_MSEL(IP13_19_16, SSI_WS4_B, SEL_SSI4_1),
+ PINMUX_IPSR_MSEL(IP13_23_20, SCL2_A, SEL_I2C02_0),
+ PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_SCK_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_GPSR(IP13_23_20, DU1_DB6),
+ PINMUX_IPSR_MSEL(IP13_23_20, AUDIO_CLKC_C, SEL_ADGC_2),
+ PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK4_B, SEL_SSI4_1),
+ PINMUX_IPSR_MSEL(IP13_27_24, SDA2_A, SEL_I2C02_0),
+ PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SYNC_B, SEL_MSIOF1_1),
+ PINMUX_IPSR_GPSR(IP13_27_24, DU1_DB7),
+ PINMUX_IPSR_GPSR(IP13_27_24, AUDIO_CLKOUT_C),
+ PINMUX_IPSR_MSEL(IP13_31_28, SSI_SCK5_A, SEL_SSI5_0),
+ PINMUX_IPSR_GPSR(IP13_31_28, DU1_DOTCLKOUT1),
+
+ /* IPSR14 */
+ PINMUX_IPSR_MSEL(IP14_3_0, SSI_WS5_A, SEL_SSI5_0),
+ PINMUX_IPSR_MSEL(IP14_3_0, SCL3_C, SEL_I2C03_2),
+ PINMUX_IPSR_GPSR(IP14_3_0, DU1_DOTCLKIN),
+ PINMUX_IPSR_MSEL(IP14_7_4, SSI_SDATA5_A, SEL_SSI5_0),
+ PINMUX_IPSR_MSEL(IP14_7_4, SDA3_C, SEL_I2C03_2),
+ PINMUX_IPSR_GPSR(IP14_7_4, DU1_DOTCLKOUT0),
+ PINMUX_IPSR_MSEL(IP14_11_8, SSI_SCK6_A, SEL_SSI6_0),
+ PINMUX_IPSR_GPSR(IP14_11_8, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ PINMUX_IPSR_MSEL(IP14_15_12, SSI_WS6_A, SEL_SSI6_0),
+ PINMUX_IPSR_MSEL(IP14_15_12, SCL4_C, SEL_I2C04_2),
+ PINMUX_IPSR_GPSR(IP14_15_12, DU1_EXHSYNC_DU1_HSYNC),
+ PINMUX_IPSR_MSEL(IP14_19_16, SSI_SDATA6_A, SEL_SSI6_0),
+ PINMUX_IPSR_MSEL(IP14_19_16, SDA4_C, SEL_I2C04_2),
+ PINMUX_IPSR_GPSR(IP14_19_16, DU1_EXVSYNC_DU1_VSYNC),
+ PINMUX_IPSR_MSEL(IP14_23_20, SSI_SCK78_A, SEL_SSI7_0),
+ PINMUX_IPSR_MSEL(IP14_23_20, SDA4_E, SEL_I2C04_4),
+ PINMUX_IPSR_GPSR(IP14_23_20, DU1_DISP),
+ PINMUX_IPSR_MSEL(IP14_27_24, SSI_WS78_A, SEL_SSI7_0),
+ PINMUX_IPSR_MSEL(IP14_27_24, SCL4_E, SEL_I2C04_4),
+ PINMUX_IPSR_GPSR(IP14_27_24, DU1_CDE),
+ PINMUX_IPSR_MSEL(IP14_31_28, SSI_SDATA7_A, SEL_SSI7_0),
+ PINMUX_IPSR_GPSR(IP14_31_28, IRQ8),
+ PINMUX_IPSR_MSEL(IP14_31_28, AUDIO_CLKA_D, SEL_ADGA_3),
+ PINMUX_IPSR_MSEL(IP14_31_28, CAN_CLK_D, SEL_CANCLK_3),
+ PINMUX_IPSR_GPSR(IP14_31_28, VI0_G5),
+
+ /* IPSR15 */
+ PINMUX_IPSR_MSEL(IP15_3_0, SSI_SCK0129_A, SEL_SSI0_0),
+ PINMUX_IPSR_MSEL(IP15_3_0, MSIOF1_RXD_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_MSEL(IP15_3_0, RX5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_GPSR(IP15_3_0, VI0_G6),
+ PINMUX_IPSR_MSEL(IP15_7_4, SSI_WS0129_A, SEL_SSI0_0),
+ PINMUX_IPSR_MSEL(IP15_7_4, MSIOF1_TXD_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_MSEL(IP15_7_4, TX5_D, SEL_SCIF5_3),
+ PINMUX_IPSR_GPSR(IP15_7_4, VI0_G7),
+ PINMUX_IPSR_MSEL(IP15_11_8, SSI_SDATA0_A, SEL_SSI0_0),
+ PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SYNC_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_GPSR(IP15_11_8, PWM0_C),
+ PINMUX_IPSR_GPSR(IP15_11_8, VI0_R0),
+ PINMUX_IPSR_GPSR(IP15_15_12, SSI_SCK34),
+ PINMUX_IPSR_MSEL(IP15_15_12, MSIOF1_SCK_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_GPSR(IP15_15_12, AVB_MDC),
+ PINMUX_IPSR_GPSR(IP15_15_12, DACK1),
+ PINMUX_IPSR_GPSR(IP15_15_12, VI0_R1),
+ PINMUX_IPSR_GPSR(IP15_19_16, SSI_WS34),
+ PINMUX_IPSR_MSEL(IP15_19_16, MSIOF1_SS1_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_GPSR(IP15_19_16, AVB_MDIO),
+ PINMUX_IPSR_MSEL(IP15_19_16, CAN1_RX_A, SEL_CAN1_0),
+ PINMUX_IPSR_GPSR(IP15_19_16, DREQ1_N),
+ PINMUX_IPSR_GPSR(IP15_19_16, VI0_R2),
+ PINMUX_IPSR_GPSR(IP15_23_20, SSI_SDATA3),
+ PINMUX_IPSR_MSEL(IP15_23_20, MSIOF1_SS2_A, SEL_MSIOF1_0),
+ PINMUX_IPSR_GPSR(IP15_23_20, AVB_LINK),
+ PINMUX_IPSR_MSEL(IP15_23_20, CAN1_TX_A, SEL_CAN1_0),
+ PINMUX_IPSR_GPSR(IP15_23_20, DREQ2_N),
+ PINMUX_IPSR_GPSR(IP15_23_20, VI0_R3),
+ PINMUX_IPSR_MSEL(IP15_27_24, SSI_SCK4_A, SEL_SSI4_0),
+ PINMUX_IPSR_GPSR(IP15_27_24, AVB_MAGIC),
+ PINMUX_IPSR_GPSR(IP15_27_24, VI0_R4),
+ PINMUX_IPSR_MSEL(IP15_31_28, SSI_WS4_A, SEL_SSI4_0),
+ PINMUX_IPSR_GPSR(IP15_31_28, AVB_PHY_INT),
+ PINMUX_IPSR_GPSR(IP15_31_28, VI0_R5),
+
+ /* IPSR16 */
+ PINMUX_IPSR_MSEL(IP16_3_0, SSI_SDATA4_A, SEL_SSI4_0),
+ PINMUX_IPSR_GPSR(IP16_3_0, AVB_CRS),
+ PINMUX_IPSR_GPSR(IP16_3_0, VI0_R6),
+ PINMUX_IPSR_MSEL(IP16_7_4, SSI_SCK1_A, SEL_SSI1_0),
+ PINMUX_IPSR_MSEL(IP16_7_4, SCIF1_SCK_B, SEL_SCIF1_1),
+ PINMUX_IPSR_GPSR(IP16_7_4, PWM1_D),
+ PINMUX_IPSR_GPSR(IP16_7_4, IRQ9),
+ PINMUX_IPSR_MSEL(IP16_7_4, REMOCON_A, SEL_RCN_0),
+ PINMUX_IPSR_GPSR(IP16_7_4, DACK2),
+ PINMUX_IPSR_GPSR(IP16_7_4, VI0_CLK),
+ PINMUX_IPSR_GPSR(IP16_7_4, AVB_COL),
+ PINMUX_IPSR_MSEL(IP16_11_8, SSI_SDATA8_A, SEL_SSI8_0),
+ PINMUX_IPSR_MSEL(IP16_11_8, RX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_MSEL(IP16_11_8, CAN0_RX_D, SEL_CAN0_3),
+ PINMUX_IPSR_MSEL(IP16_11_8, AVB_AVTP_CAPTURE_B, SEL_AVB_1),
+ PINMUX_IPSR_GPSR(IP16_11_8, VI0_R7),
+ PINMUX_IPSR_MSEL(IP16_15_12, SSI_WS1_A, SEL_SSI1_0),
+ PINMUX_IPSR_MSEL(IP16_15_12, TX1_B, SEL_SCIF1_1),
+ PINMUX_IPSR_MSEL(IP16_15_12, CAN0_TX_D, SEL_CAN0_3),
+ PINMUX_IPSR_MSEL(IP16_15_12, AVB_AVTP_MATCH_B, SEL_AVB_1),
+ PINMUX_IPSR_GPSR(IP16_15_12, VI0_DATA0_VI0_B0),
+ PINMUX_IPSR_MSEL(IP16_19_16, SSI_SDATA1_A, SEL_SSI1_0),
+ PINMUX_IPSR_MSEL(IP16_19_16, HRX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_GPSR(IP16_19_16, VI0_DATA1_VI0_B1),
+ PINMUX_IPSR_MSEL(IP16_23_20, SSI_SCK2_A, SEL_SSI2_0),
+ PINMUX_IPSR_MSEL(IP16_23_20, HTX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_GPSR(IP16_23_20, AVB_TXD7),
+ PINMUX_IPSR_GPSR(IP16_23_20, VI0_DATA2_VI0_B2),
+ PINMUX_IPSR_MSEL(IP16_27_24, SSI_WS2_A, SEL_SSI2_0),
+ PINMUX_IPSR_MSEL(IP16_27_24, HCTS1_N_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_GPSR(IP16_27_24, AVB_TX_ER),
+ PINMUX_IPSR_GPSR(IP16_27_24, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA2_A, SEL_SSI2_0),
+ PINMUX_IPSR_MSEL(IP16_31_28, HRTS1_N_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_GPSR(IP16_31_28, VI0_DATA4_VI0_B4),
+
+ /* IPSR17 */
+ PINMUX_IPSR_MSEL(IP17_3_0, SSI_SCK9_A, SEL_SSI9_0),
+ PINMUX_IPSR_MSEL(IP17_3_0, RX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MSEL(IP17_3_0, SCL3_E, SEL_I2C03_4),
+ PINMUX_IPSR_GPSR(IP17_3_0, EX_WAIT1),
+ PINMUX_IPSR_GPSR(IP17_3_0, VI0_DATA5_VI0_B5),
+ PINMUX_IPSR_MSEL(IP17_7_4, SSI_WS9_A, SEL_SSI9_0),
+ PINMUX_IPSR_MSEL(IP17_7_4, TX2_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MSEL(IP17_7_4, SDA3_E, SEL_I2C03_4),
+ PINMUX_IPSR_GPSR(IP17_7_4, VI0_DATA6_VI0_B6),
+ PINMUX_IPSR_MSEL(IP17_11_8, SSI_SDATA9_A, SEL_SSI9_0),
+ PINMUX_IPSR_GPSR(IP17_11_8, SCIF2_SCK_B),
+ PINMUX_IPSR_GPSR(IP17_11_8, PWM2_D),
+ PINMUX_IPSR_GPSR(IP17_11_8, VI0_DATA7_VI0_B7),
+ PINMUX_IPSR_MSEL(IP17_15_12, AUDIO_CLKA_A, SEL_ADGA_0),
+ PINMUX_IPSR_MSEL(IP17_15_12, SCL0_B, SEL_I2C00_1),
+ PINMUX_IPSR_GPSR(IP17_15_12, VI0_CLKENB),
+ PINMUX_IPSR_MSEL(IP17_19_16, AUDIO_CLKB_A, SEL_ADGB_0),
+ PINMUX_IPSR_MSEL(IP17_19_16, SDA0_B, SEL_I2C00_1),
+ PINMUX_IPSR_GPSR(IP17_19_16, VI0_FIELD),
+ PINMUX_IPSR_MSEL(IP17_23_20, AUDIO_CLKC_A, SEL_ADGC_0),
+ PINMUX_IPSR_MSEL(IP17_23_20, SCL4_B, SEL_I2C04_1),
+ PINMUX_IPSR_GPSR(IP17_23_20, VI0_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP17_27_24, AUDIO_CLKOUT_A),
+ PINMUX_IPSR_MSEL(IP17_27_24, SDA4_B, SEL_I2C04_1),
+ PINMUX_IPSR_GPSR(IP17_27_24, VI0_VSYNC_N),
+};
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+};
+
+/* - MMC -------------------------------------------------------------------- */
+static const unsigned int mmc_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int mmc_data1_mux[] = {
+ MMC0_D0_SDHI1_D0_MARK,
+};
+static const unsigned int mmc_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 16),
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 18),
+};
+static const unsigned int mmc_data4_mux[] = {
+ MMC0_D0_SDHI1_D0_MARK, MMC0_D1_SDHI1_D1_MARK,
+ MMC0_D2_SDHI1_D2_MARK, MMC0_D3_SDHI1_D3_MARK,
+};
+static const unsigned int mmc_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 16),
+ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 18),
+ RCAR_GP_PIN(0, 19), RCAR_GP_PIN(0, 20),
+ RCAR_GP_PIN(0, 21), RCAR_GP_PIN(0, 22),
+};
+static const unsigned int mmc_data8_mux[] = {
+ MMC0_D0_SDHI1_D0_MARK, MMC0_D1_SDHI1_D1_MARK,
+ MMC0_D2_SDHI1_D2_MARK, MMC0_D3_SDHI1_D3_MARK,
+ MMC0_D4_MARK, MMC0_D5_MARK,
+ MMC0_D6_MARK, MMC0_D7_MARK,
+};
+static const unsigned int mmc_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(0, 13), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int mmc_ctrl_mux[] = {
+ MMC0_CLK_SDHI1_CLK_MARK, MMC0_CMD_SDHI1_CMD_MARK,
+};
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 11),
+};
+static const unsigned int scif0_data_a_mux[] = {
+ RX0_A_MARK, TX0_A_MARK,
+};
+static const unsigned int scif0_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(3, 11), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int scif0_data_b_mux[] = {
+ RX0_B_MARK, TX0_B_MARK,
+};
+static const unsigned int scif0_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int scif0_data_c_mux[] = {
+ RX0_C_MARK, TX0_C_MARK,
+};
+static const unsigned int scif0_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int scif0_data_d_mux[] = {
+ RX0_D_MARK, TX0_D_MARK,
+};
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 16), RCAR_GP_PIN(4, 17),
+};
+static const unsigned int scif1_data_a_mux[] = {
+ RX1_A_MARK, TX1_A_MARK,
+};
+static const unsigned int scif1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int scif1_clk_a_mux[] = {
+ SCIF1_SCK_A_MARK,
+};
+static const unsigned int scif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 20),
+};
+static const unsigned int scif1_data_b_mux[] = {
+ RX1_B_MARK, TX1_B_MARK,
+};
+static const unsigned int scif1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int scif1_clk_b_mux[] = {
+ SCIF1_SCK_B_MARK,
+};
+static const unsigned int scif1_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int scif1_data_c_mux[] = {
+ RX1_C_MARK, TX1_C_MARK,
+};
+static const unsigned int scif1_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int scif1_clk_c_mux[] = {
+ SCIF1_SCK_C_MARK,
+};
+static const unsigned int scif1_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+};
+static const unsigned int scif1_data_d_mux[] = {
+ RX1_D_MARK, TX1_D_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 18), RCAR_GP_PIN(4, 19),
+};
+static const unsigned int scif2_data_a_mux[] = {
+ RX2_A_MARK, TX2_A_MARK,
+};
+static const unsigned int scif2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 20),
+};
+static const unsigned int scif2_clk_a_mux[] = {
+ SCIF2_SCK_A_MARK,
+};
+static const unsigned int scif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 25), RCAR_GP_PIN(5, 26),
+};
+static const unsigned int scif2_data_b_mux[] = {
+ RX2_B_MARK, TX2_B_MARK,
+};
+static const unsigned int scif2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 27),
+};
+static const unsigned int scif2_clk_b_mux[] = {
+ SCIF2_SCK_B_MARK,
+};
+static const unsigned int scif2_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
+};
+static const unsigned int scif2_data_c_mux[] = {
+ RX2_C_MARK, TX2_C_MARK,
+};
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 22), RCAR_GP_PIN(4, 23),
+};
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+static const unsigned int scif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 21),
+};
+static const unsigned int scif3_clk_mux[] = {
+ SCIF3_SCK_MARK,
+};
+static const unsigned int scif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9),
+};
+static const unsigned int scif3_data_b_mux[] = {
+ RX3_B_MARK, TX3_B_MARK,
+};
+static const unsigned int scif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6),
+};
+static const unsigned int scif3_data_c_mux[] = {
+ RX3_C_MARK, TX3_C_MARK,
+};
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+};
+static const unsigned int scif4_data_a_mux[] = {
+ RX4_A_MARK, TX4_A_MARK,
+};
+static const unsigned int scif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+};
+static const unsigned int scif4_data_b_mux[] = {
+ RX4_B_MARK, TX4_B_MARK,
+};
+static const unsigned int scif4_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 13), RCAR_GP_PIN(1, 14),
+};
+static const unsigned int scif4_data_c_mux[] = {
+ RX4_C_MARK, TX4_C_MARK,
+};
+static const unsigned int scif4_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11),
+};
+static const unsigned int scif4_data_d_mux[] = {
+ RX4_D_MARK, TX4_D_MARK,
+};
+static const unsigned int scif4_data_e_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 8),
+};
+static const unsigned int scif4_data_e_mux[] = {
+ RX4_E_MARK, TX4_E_MARK,
+};
+/* - SCIF5 ------------------------------------------------------------------ */
+static const unsigned int scif5_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+static const unsigned int scif5_data_a_mux[] = {
+ RX5_A_MARK, TX5_A_MARK,
+};
+static const unsigned int scif5_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+};
+static const unsigned int scif5_data_b_mux[] = {
+ RX5_B_MARK, TX5_B_MARK,
+};
+static const unsigned int scif5_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int scif5_data_c_mux[] = {
+ RX5_C_MARK, TX5_C_MARK,
+};
+static const unsigned int scif5_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int scif5_data_d_mux[] = {
+ RX5_D_MARK, TX5_D_MARK,
+};
+static const unsigned int scif5_data_e_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 10),
+};
+static const unsigned int scif5_data_e_mux[] = {
+ RX5_E_MARK, TX5_E_MARK,
+};
+static const unsigned int scif5_data_f_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(3, 27), RCAR_GP_PIN(3, 28),
+};
+static const unsigned int scif5_data_f_mux[] = {
+ RX5_F_MARK, TX5_F_MARK,
+};
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_a_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int scif_clk_a_mux[] = {
+ SCIF_CLK_A_MARK,
+};
+static const unsigned int scif_clk_b_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(3, 29),
+};
+static const unsigned int scif_clk_b_mux[] = {
+ SCIF_CLK_B_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(mmc_data1),
+ SH_PFC_PIN_GROUP(mmc_data4),
+ SH_PFC_PIN_GROUP(mmc_data8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(scif0_data_a),
+ SH_PFC_PIN_GROUP(scif0_data_b),
+ SH_PFC_PIN_GROUP(scif0_data_c),
+ SH_PFC_PIN_GROUP(scif0_data_d),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk_a),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_clk_b),
+ SH_PFC_PIN_GROUP(scif1_data_c),
+ SH_PFC_PIN_GROUP(scif1_clk_c),
+ SH_PFC_PIN_GROUP(scif1_data_d),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk_a),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif2_clk_b),
+ SH_PFC_PIN_GROUP(scif2_data_c),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_c),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_data_d),
+ SH_PFC_PIN_GROUP(scif4_data_e),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_data_c),
+ SH_PFC_PIN_GROUP(scif5_data_d),
+ SH_PFC_PIN_GROUP(scif5_data_e),
+ SH_PFC_PIN_GROUP(scif5_data_f),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+};
+
+static const char * const mmc_groups[] = {
+ "mmc_data1",
+ "mmc_data4",
+ "mmc_data8",
+ "mmc_ctrl",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data_a",
+ "scif0_data_b",
+ "scif0_data_c",
+ "scif0_data_d",
+};
+
+static const char * const scif1_groups[] = {
+ "scif1_data_a",
+ "scif1_clk_a",
+ "scif1_data_b",
+ "scif1_clk_b",
+ "scif1_data_c",
+ "scif1_clk_c",
+ "scif1_data_d",
+};
+
+static const char * const scif2_groups[] = {
+ "scif2_data_a",
+ "scif2_clk_a",
+ "scif2_data_b",
+ "scif2_clk_b",
+ "scif2_data_c",
+};
+
+static const char * const scif3_groups[] = {
+ "scif3_data_a",
+ "scif3_clk",
+ "scif3_data_b",
+ "scif3_data_c",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data_a",
+ "scif4_data_b",
+ "scif4_data_c",
+ "scif4_data_d",
+ "scif4_data_e",
+};
+
+static const char * const scif5_groups[] = {
+ "scif5_data_a",
+ "scif5_data_b",
+ "scif5_data_c",
+ "scif5_data_d",
+ "scif5_data_e",
+ "scif5_data_f",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk_a",
+ "scif_clk_b",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(mmc),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_0_22_FN, FN_MMC0_D7,
+ GP_0_21_FN, FN_MMC0_D6,
+ GP_0_20_FN, FN_IP1_7_4,
+ GP_0_19_FN, FN_IP1_3_0,
+ GP_0_18_FN, FN_MMC0_D3_SDHI1_D3,
+ GP_0_17_FN, FN_MMC0_D2_SDHI1_D2,
+ GP_0_16_FN, FN_MMC0_D1_SDHI1_D1,
+ GP_0_15_FN, FN_MMC0_D0_SDHI1_D0,
+ GP_0_14_FN, FN_MMC0_CMD_SDHI1_CMD,
+ GP_0_13_FN, FN_MMC0_CLK_SDHI1_CLK,
+ GP_0_12_FN, FN_IP0_31_28,
+ GP_0_11_FN, FN_IP0_27_24,
+ GP_0_10_FN, FN_IP0_23_20,
+ GP_0_9_FN, FN_IP0_19_16,
+ GP_0_8_FN, FN_IP0_15_12,
+ GP_0_7_FN, FN_IP0_11_8,
+ GP_0_6_FN, FN_IP0_7_4,
+ GP_0_5_FN, FN_IP0_3_0,
+ GP_0_4_FN, FN_CLKOUT,
+ GP_0_3_FN, FN_USB1_OVC,
+ GP_0_2_FN, FN_USB1_PWEN,
+ GP_0_1_FN, FN_USB0_OVC,
+ GP_0_0_FN, FN_USB0_PWEN, }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_22_FN, FN_IP4_3_0,
+ GP_1_21_FN, FN_IP3_31_28,
+ GP_1_20_FN, FN_IP3_27_24,
+ GP_1_19_FN, FN_IP3_23_20,
+ GP_1_18_FN, FN_IP3_19_16,
+ GP_1_17_FN, FN_IP3_15_12,
+ GP_1_16_FN, FN_IP3_11_8,
+ GP_1_15_FN, FN_IP3_7_4,
+ GP_1_14_FN, FN_IP3_3_0,
+ GP_1_13_FN, FN_IP2_31_28,
+ GP_1_12_FN, FN_IP2_27_24,
+ GP_1_11_FN, FN_IP2_23_20,
+ GP_1_10_FN, FN_IP2_19_16,
+ GP_1_9_FN, FN_IP2_15_12,
+ GP_1_8_FN, FN_IP2_11_8,
+ GP_1_7_FN, FN_IP2_7_4,
+ GP_1_6_FN, FN_IP2_3_0,
+ GP_1_5_FN, FN_IP1_31_28,
+ GP_1_4_FN, FN_IP1_27_24,
+ GP_1_3_FN, FN_IP1_23_20,
+ GP_1_2_FN, FN_IP1_19_16,
+ GP_1_1_FN, FN_IP1_15_12,
+ GP_1_0_FN, FN_IP1_11_8, }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xE606000C, 32, 1) {
+ GP_2_31_FN, FN_IP8_3_0,
+ GP_2_30_FN, FN_IP7_31_28,
+ GP_2_29_FN, FN_IP7_27_24,
+ GP_2_28_FN, FN_IP7_23_20,
+ GP_2_27_FN, FN_IP7_19_16,
+ GP_2_26_FN, FN_IP7_15_12,
+ GP_2_25_FN, FN_IP7_11_8,
+ GP_2_24_FN, FN_IP7_7_4,
+ GP_2_23_FN, FN_IP7_3_0,
+ GP_2_22_FN, FN_IP6_31_28,
+ GP_2_21_FN, FN_IP6_27_24,
+ GP_2_20_FN, FN_IP6_23_20,
+ GP_2_19_FN, FN_IP6_19_16,
+ GP_2_18_FN, FN_IP6_15_12,
+ GP_2_17_FN, FN_IP6_11_8,
+ GP_2_16_FN, FN_IP6_7_4,
+ GP_2_15_FN, FN_IP6_3_0,
+ GP_2_14_FN, FN_IP5_31_28,
+ GP_2_13_FN, FN_IP5_27_24,
+ GP_2_12_FN, FN_IP5_23_20,
+ GP_2_11_FN, FN_IP5_19_16,
+ GP_2_10_FN, FN_IP5_15_12,
+ GP_2_9_FN, FN_IP5_11_8,
+ GP_2_8_FN, FN_IP5_7_4,
+ GP_2_7_FN, FN_IP5_3_0,
+ GP_2_6_FN, FN_IP4_31_28,
+ GP_2_5_FN, FN_IP4_27_24,
+ GP_2_4_FN, FN_IP4_23_20,
+ GP_2_3_FN, FN_IP4_19_16,
+ GP_2_2_FN, FN_IP4_15_12,
+ GP_2_1_FN, FN_IP4_11_8,
+ GP_2_0_FN, FN_IP4_7_4, }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1) {
+ 0, 0,
+ 0, 0,
+ GP_3_29_FN, FN_IP10_19_16,
+ GP_3_28_FN, FN_IP10_15_12,
+ GP_3_27_FN, FN_IP10_11_8,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_3_16_FN, FN_IP10_7_4,
+ GP_3_15_FN, FN_IP10_3_0,
+ GP_3_14_FN, FN_IP9_31_28,
+ GP_3_13_FN, FN_IP9_27_24,
+ GP_3_12_FN, FN_IP9_23_20,
+ GP_3_11_FN, FN_IP9_19_16,
+ GP_3_10_FN, FN_IP9_15_12,
+ GP_3_9_FN, FN_IP9_11_8,
+ GP_3_8_FN, FN_IP9_7_4,
+ GP_3_7_FN, FN_IP9_3_0,
+ GP_3_6_FN, FN_IP8_31_28,
+ GP_3_5_FN, FN_IP8_27_24,
+ GP_3_4_FN, FN_IP8_23_20,
+ GP_3_3_FN, FN_IP8_19_16,
+ GP_3_2_FN, FN_IP8_15_12,
+ GP_3_1_FN, FN_IP8_11_8,
+ GP_3_0_FN, FN_IP8_7_4, }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_25_FN, FN_IP13_27_24,
+ GP_4_24_FN, FN_IP13_23_20,
+ GP_4_23_FN, FN_IP13_19_16,
+ GP_4_22_FN, FN_IP13_15_12,
+ GP_4_21_FN, FN_IP13_11_8,
+ GP_4_20_FN, FN_IP13_7_4,
+ GP_4_19_FN, FN_IP13_3_0,
+ GP_4_18_FN, FN_IP12_31_28,
+ GP_4_17_FN, FN_IP12_27_24,
+ GP_4_16_FN, FN_IP12_23_20,
+ GP_4_15_FN, FN_IP12_19_16,
+ GP_4_14_FN, FN_IP12_15_12,
+ GP_4_13_FN, FN_IP12_11_8,
+ GP_4_12_FN, FN_IP12_7_4,
+ GP_4_11_FN, FN_IP12_3_0,
+ GP_4_10_FN, FN_IP11_31_28,
+ GP_4_9_FN, FN_IP11_27_24,
+ GP_4_8_FN, FN_IP11_23_20,
+ GP_4_7_FN, FN_IP11_19_16,
+ GP_4_6_FN, FN_IP11_15_12,
+ GP_4_5_FN, FN_IP11_11_8,
+ GP_4_4_FN, FN_IP11_7_4,
+ GP_4_3_FN, FN_IP11_3_0,
+ GP_4_2_FN, FN_IP10_31_28,
+ GP_4_1_FN, FN_IP10_27_24,
+ GP_4_0_FN, FN_IP10_23_20, }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1) {
+ GP_5_31_FN, FN_IP17_27_24,
+ GP_5_30_FN, FN_IP17_23_20,
+ GP_5_29_FN, FN_IP17_19_16,
+ GP_5_28_FN, FN_IP17_15_12,
+ GP_5_27_FN, FN_IP17_11_8,
+ GP_5_26_FN, FN_IP17_7_4,
+ GP_5_25_FN, FN_IP17_3_0,
+ GP_5_24_FN, FN_IP16_31_28,
+ GP_5_23_FN, FN_IP16_27_24,
+ GP_5_22_FN, FN_IP16_23_20,
+ GP_5_21_FN, FN_IP16_19_16,
+ GP_5_20_FN, FN_IP16_15_12,
+ GP_5_19_FN, FN_IP16_11_8,
+ GP_5_18_FN, FN_IP16_7_4,
+ GP_5_17_FN, FN_IP16_3_0,
+ GP_5_16_FN, FN_IP15_31_28,
+ GP_5_15_FN, FN_IP15_27_24,
+ GP_5_14_FN, FN_IP15_23_20,
+ GP_5_13_FN, FN_IP15_19_16,
+ GP_5_12_FN, FN_IP15_15_12,
+ GP_5_11_FN, FN_IP15_11_8,
+ GP_5_10_FN, FN_IP15_7_4,
+ GP_5_9_FN, FN_IP15_3_0,
+ GP_5_8_FN, FN_IP14_31_28,
+ GP_5_7_FN, FN_IP14_27_24,
+ GP_5_6_FN, FN_IP14_23_20,
+ GP_5_5_FN, FN_IP14_19_16,
+ GP_5_4_FN, FN_IP14_15_12,
+ GP_5_3_FN, FN_IP14_11_8,
+ GP_5_2_FN, FN_IP14_7_4,
+ GP_5_1_FN, FN_IP14_3_0,
+ GP_5_0_FN, FN_IP13_31_28, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP0_31_28 [4] */
+ FN_SD0_WP, FN_IRQ7, FN_CAN0_TX_A, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_27_24 [4] */
+ FN_SD0_CD, 0, FN_CAN0_RX_A, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_23_20 [4] */
+ FN_SD0_DAT3, 0, 0, FN_SSI_SDATA0_B, FN_TX5_E, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_19_16 [4] */
+ FN_SD0_DAT2, 0, 0, FN_SSI_WS0129_B, FN_RX5_E, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_15_12 [4] */
+ FN_SD0_DAT1, 0, 0, FN_SSI_SCK0129_B, FN_TX4_E, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_11_8 [4] */
+ FN_SD0_DAT0, 0, 0, FN_SSI_SDATA1_C, FN_RX4_E, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_7_4 [4] */
+ FN_SD0_CMD, 0, 0, FN_SSI_WS1_C, FN_TX3_C, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP0_3_0 [4] */
+ FN_SD0_CLK, 0, 0, FN_SSI_SCK1_C, FN_RX3_C, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP1_31_28 [4] */
+ FN_D5, FN_HRX2, FN_SCL1_B, FN_PWM2_C, FN_TCLK2_B, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_27_24 [4] */
+ FN_D4, 0, FN_IRQ3, FN_TCLK1_A, FN_PWM6_C, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_23_20 [4] */
+ FN_D3, 0, FN_TX4_B, FN_SDA0_D, FN_PWM0_A,
+ FN_MSIOF2_SYNC_C, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_19_16 [4] */
+ FN_D2, 0, FN_RX4_B, FN_SCL0_D, FN_PWM1_C,
+ FN_MSIOF2_SCK_C, FN_SSI_SCK5_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_15_12 [4] */
+ FN_D1, 0, FN_SDA3_B, FN_TX5_B, 0, FN_MSIOF2_TXD_C,
+ FN_SSI_WS5_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_11_8 [4] */
+ FN_D0, 0, FN_SCL3_B, FN_RX5_B, FN_IRQ4,
+ FN_MSIOF2_RXD_C, FN_SSI_SDATA5_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_7_4 [4] */
+ FN_MMC0_D5, FN_SD1_WP, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP1_3_0 [4] */
+ FN_MMC0_D4, FN_SD1_CD, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP2_31_28 [4] */
+ FN_D13, FN_MSIOF2_SYNC_A, 0, FN_RX4_C, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_27_24 [4] */
+ FN_D12, FN_MSIOF2_SCK_A, FN_HSCK0, 0, FN_CAN_CLK_C,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_23_20 [4] */
+ FN_D11, FN_MSIOF2_TXD_A, FN_HTX0_B, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_19_16 [4] */
+ FN_D10, FN_MSIOF2_RXD_A, FN_HRX0_B, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_15_12 [4] */
+ FN_D9, FN_HRTS2_N, FN_TX1_C, FN_SDA1_D, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_11_8 [4] */
+ FN_D8, FN_HCTS2_N, FN_RX1_C, FN_SCL1_D, FN_PWM3_C, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_7_4 [4] */
+ FN_D7, FN_HSCK2, FN_SCIF1_SCK_C, FN_IRQ6, FN_PWM5_C,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP2_3_0 [4] */
+ FN_D6, FN_HTX2, FN_SDA1_B, FN_PWM4_C, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP3_31_28 [4] */
+ FN_QSPI0_SSL, FN_WE1_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP3_27_24 [4] */
+ FN_QSPI0_IO3, FN_RD_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP3_23_20 [4] */
+ FN_QSPI0_IO2, FN_CS0_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP3_19_16 [4] */
+ FN_QSPI0_MISO_QSPI0_IO1, FN_RD_WR_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP3_15_12 [4] */
+ FN_QSPI0_MOSI_QSPI0_IO0, FN_BS_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0,
+ /* IP3_11_8 [4] */
+ FN_QSPI0_SPCLK, FN_WE0_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP3_7_4 [4] */
+ FN_D15, FN_MSIOF2_SS2, FN_PWM4_A, 0, FN_CAN1_TX_B, FN_IRQ2,
+ FN_AVB_AVTP_MATCH_A, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP3_3_0 [4] */
+ FN_D14, FN_MSIOF2_SS1, 0, FN_TX4_C, FN_CAN1_RX_B,
+ 0, FN_AVB_AVTP_CAPTURE_A,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP4_31_28 [4] */
+ FN_DU0_DR6, 0, FN_RX2_C, 0, 0, 0, FN_A6, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_27_24 [4] */
+ FN_DU0_DR5, 0, FN_TX1_D, 0, FN_PWM1_B, 0, FN_A5, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_23_20 [4] */
+ FN_DU0_DR4, 0, FN_RX1_D, 0, 0, 0, FN_A4, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP4_19_16 [4] */
+ FN_DU0_DR3, 0, FN_TX0_D, FN_SDA0_E, FN_PWM0_B, 0,
+ FN_A3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_15_12 [4] */
+ FN_DU0_DR2, 0, FN_RX0_D, FN_SCL0_E, 0, 0, FN_A2, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_11_8 [4] */
+ FN_DU0_DR1, 0, FN_TX5_C, FN_SDA2_D, 0, 0, FN_A1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_7_4 [4] */
+ FN_DU0_DR0, 0, FN_RX5_C, FN_SCL2_D, 0, 0, FN_A0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP4_3_0 [4] */
+ FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK_A, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP5_31_28 [4] */
+ FN_DU0_DG6, 0, FN_HRX1_C, 0, 0, 0, FN_A14, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP5_27_24 [4] */
+ FN_DU0_DG5, 0, FN_HTX0_A, 0, FN_PWM5_B, 0, FN_A13,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_23_20 [4] */
+ FN_DU0_DG4, 0, FN_HRX0_A, 0, 0, 0, FN_A12, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP5_19_16 [4] */
+ FN_DU0_DG3, 0, FN_TX4_D, 0, FN_PWM4_B, 0, FN_A11, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_15_12 [4] */
+ FN_DU0_DG2, 0, FN_RX4_D, 0, 0, 0, FN_A10, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP5_11_8 [4] */
+ FN_DU0_DG1, 0, FN_TX3_B, FN_SDA3_D, FN_PWM3_B, 0,
+ FN_A9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_7_4 [4] */
+ FN_DU0_DG0, 0, FN_RX3_B, FN_SCL3_D, 0, 0, FN_A8, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP5_3_0 [4] */
+ FN_DU0_DR7, 0, FN_TX2_C, 0, FN_PWM2_B, 0, FN_A7, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP6_31_28 [4] */
+ FN_DU0_DB6, 0, 0, 0, 0, 0, FN_A22, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_27_24 [4] */
+ FN_DU0_DB5, 0, FN_HRTS1_N_C, 0, 0, 0,
+ FN_A21, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_23_20 [4] */
+ FN_DU0_DB4, 0, FN_HCTS1_N_C, 0, 0, 0,
+ FN_A20, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_19_16 [4] */
+ FN_DU0_DB3, 0, FN_HRTS0_N, 0, 0, 0, FN_A19, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP6_15_12 [4] */
+ FN_DU0_DB2, 0, FN_HCTS0_N, 0, 0, 0, FN_A18, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP6_11_8 [4] */
+ FN_DU0_DB1, 0, 0, FN_SDA4_D, FN_CAN0_TX_C, 0, FN_A17,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_7_4 [4] */
+ FN_DU0_DB0, 0, 0, FN_SCL4_D, FN_CAN0_RX_C, 0, FN_A16,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP6_3_0 [4] */
+ FN_DU0_DG7, 0, FN_HTX1_C, 0, FN_PWM6_B, 0, FN_A15,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP7_31_28 [4] */
+ FN_DU0_DISP, 0, 0, 0, FN_CAN1_RX_C, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP7_27_24 [4] */
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, 0, FN_MSIOF2_SCK_B,
+ 0, 0, 0, FN_DRACK0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_23_20 [4] */
+ FN_DU0_EXVSYNC_DU0_VSYNC, 0, FN_MSIOF2_SYNC_B, 0,
+ 0, 0, FN_DACK0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_19_16 [4] */
+ FN_DU0_EXHSYNC_DU0_HSYNC, 0, FN_MSIOF2_TXD_B, 0,
+ 0, 0, FN_DREQ0_N, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_15_12 [4] */
+ FN_DU0_DOTCLKOUT1, 0, FN_MSIOF2_RXD_B, 0, 0, 0,
+ FN_CS1_N_A26, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP7_11_8 [4] */
+ FN_DU0_DOTCLKOUT0, 0, 0, 0, 0, 0, FN_A25, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP7_7_4 [4] */
+ FN_DU0_DOTCLKIN, 0, 0, 0, 0, 0, FN_A24, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP7_3_0 [4] */
+ FN_DU0_DB7, 0, 0, 0, 0, 0, FN_A23, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR8", 0xE6060060, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP8_31_28 [4] */
+ FN_VI1_DATA5, 0, 0, 0, FN_AVB_RXD4, FN_ETH_LINK, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_27_24 [4] */
+ FN_VI1_DATA4, 0, 0, 0, FN_AVB_RXD3, FN_ETH_RX_ER, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_23_20 [4] */
+ FN_VI1_DATA3, 0, 0, 0, FN_AVB_RXD2, FN_ETH_MDIO, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_19_16 [4] */
+ FN_VI1_DATA2, 0, 0, 0, FN_AVB_RXD1, FN_ETH_RXD1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_15_12 [4] */
+ FN_VI1_DATA1, 0, 0, 0, FN_AVB_RXD0, FN_ETH_RXD0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP8_11_8 [4] */
+ FN_VI1_DATA0, 0, 0, 0, FN_AVB_RX_DV, FN_ETH_CRS_DV, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP8_7_4 [4] */
+ FN_VI1_CLK, 0, 0, 0, FN_AVB_RX_CLK, FN_ETH_REF_CLK, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP8_3_0 [4] */
+ FN_DU0_CDE, 0, 0, 0, FN_CAN1_TX_C, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR9", 0xE6060064, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP9_31_28 [4] */
+ FN_VI1_DATA9, 0, 0, FN_SDA2_B, FN_AVB_TXD0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP9_27_24 [4] */
+ FN_VI1_DATA8, 0, 0, FN_SCL2_B, FN_AVB_TX_EN, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP9_23_20 [4] */
+ FN_VI1_VSYNC_N, FN_TX0_B, FN_SDA0_C, FN_AUDIO_CLKOUT_B,
+ FN_AVB_TX_CLK, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP9_19_16 [4] */
+ FN_VI1_HSYNC_N, FN_RX0_B, FN_SCL0_C, 0, FN_AVB_GTXREFCLK,
+ FN_ETH_MDC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP9_15_12 [4] */
+ FN_VI1_FIELD, FN_SDA3_A, 0, 0, FN_AVB_RX_ER, FN_ETH_TXD0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP9_11_8 [4] */
+ FN_VI1_CLKENB, FN_SCL3_A, 0, 0, FN_AVB_RXD7, FN_ETH_MAGIC, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP9_7_4 [4] */
+ FN_VI1_DATA7, 0, 0, 0, FN_AVB_RXD6, FN_ETH_TX_EN, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP9_3_0 [4] */
+ FN_VI1_DATA6, 0, 0, 0, FN_AVB_RXD5, FN_ETH_TXD1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR10", 0xE6060068, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP10_31_28 [4] */
+ FN_SCL1_A, FN_RX4_A, FN_PWM5_D, FN_DU1_DR0, 0, 0,
+ FN_SSI_SCK6_B, FN_VI0_G0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_27_24 [4] */
+ FN_SDA0_A, FN_TX0_C, FN_IRQ5, FN_CAN_CLK_A, FN_AVB_GTX_CLK,
+ FN_CAN1_TX_D, FN_DVC_MUTE, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_23_20 [4] */
+ FN_SCL0_A, FN_RX0_C, FN_PWM5_A, FN_TCLK1_B, FN_AVB_TXD6,
+ FN_CAN1_RX_D, FN_MSIOF0_SYNC_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_19_16 [4] */
+ FN_AVB_TXD5, FN_SCIF_CLK_B, FN_AUDIO_CLKC_B, 0,
+ FN_SSI_SDATA1_D, 0, FN_MSIOF0_SCK_B, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP10_15_12 [4] */
+ FN_AVB_TXD4, 0, FN_AUDIO_CLKB_B, 0, FN_SSI_WS1_D, FN_TX5_F,
+ FN_MSIOF0_TXD_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_11_8 [4] */
+ FN_AVB_TXD3, 0, FN_AUDIO_CLKA_B, 0, FN_SSI_SCK1_D, FN_RX5_F,
+ FN_MSIOF0_RXD_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_7_4 [4] */
+ FN_VI1_DATA11, 0, 0, FN_CAN0_TX_B, FN_AVB_TXD2, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ /* IP10_3_0 [4] */
+ FN_VI1_DATA10, 0, 0, FN_CAN0_RX_B, FN_AVB_TXD1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR11", 0xE606006C, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP11_31_28 [4] */
+ FN_HRX1_A, FN_SCL4_A, FN_PWM6_A, FN_DU1_DG0, FN_RX0_A, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_27_24 [4] */
+ FN_MSIOF0_SS2_A, 0, 0, FN_DU1_DR7, 0,
+ FN_QSPI1_SSL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_23_20 [4] */
+ FN_MSIOF0_SS1_A, 0, 0, FN_DU1_DR6, 0,
+ FN_QSPI1_IO3, FN_SSI_SDATA8_B, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_19_16 [4] */
+ FN_MSIOF0_SYNC_A, FN_PWM1_A, 0, FN_DU1_DR5,
+ 0, FN_QSPI1_IO2, FN_SSI_SDATA7_B, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ /* IP11_15_12 [4] */
+ FN_MSIOF0_SCK_A, FN_IRQ0, 0, FN_DU1_DR4,
+ 0, FN_QSPI1_SPCLK, FN_SSI_SCK78_B, FN_VI0_G4,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_11_8 [4] */
+ FN_MSIOF0_TXD_A, FN_TX5_A, FN_SDA2_C, FN_DU1_DR3, 0,
+ FN_QSPI1_MISO_QSPI1_IO1, FN_SSI_WS78_B, FN_VI0_G3,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_7_4 [4] */
+ FN_MSIOF0_RXD_A, FN_RX5_A, FN_SCL2_C, FN_DU1_DR2, 0,
+ FN_QSPI1_MOSI_QSPI1_IO0, FN_SSI_SDATA6_B, FN_VI0_G2,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_3_0 [4] */
+ FN_SDA1_A, FN_TX4_A, 0, FN_DU1_DR1, 0, 0, FN_SSI_WS6_B,
+ FN_VI0_G1, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR12", 0xE6060070, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP12_31_28 [4] */
+ FN_SD2_DAT2, FN_RX2_A, 0, FN_DU1_DB0, FN_SSI_SDATA2_B, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_27_24 [4] */
+ FN_SD2_DAT1, FN_TX1_A, FN_SDA1_E, FN_DU1_DG7, FN_SSI_WS2_B,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_23_20 [4] */
+ FN_SD2_DAT0, FN_RX1_A, FN_SCL1_E, FN_DU1_DG6,
+ FN_SSI_SDATA1_B, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_19_16 [4] */
+ FN_SD2_CMD, FN_SCIF1_SCK_A, FN_TCLK2_A, FN_DU1_DG5,
+ FN_SSI_SCK2_B, FN_PWM3_A, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_15_12 [4] */
+ FN_SD2_CLK, FN_HSCK1, 0, FN_DU1_DG4, FN_SSI_SCK1_B, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_11_8 [4] */
+ FN_HRTS1_N_A, 0, 0, FN_DU1_DG3, FN_SSI_WS1_B, FN_IRQ1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_7_4 [4] */
+ FN_HCTS1_N_A, FN_PWM2_A, 0, FN_DU1_DG2, FN_REMOCON_B,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP12_3_0 [4] */
+ FN_HTX1_A, FN_SDA4_A, 0, FN_DU1_DG1, FN_TX0_A, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR13", 0xE6060074, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP13_31_28 [4] */
+ FN_SSI_SCK5_A, 0, 0, FN_DU1_DOTCLKOUT1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP13_27_24 [4] */
+ FN_SDA2_A, 0, FN_MSIOF1_SYNC_B, FN_DU1_DB7, FN_AUDIO_CLKOUT_C,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_23_20 [4] */
+ FN_SCL2_A, 0, FN_MSIOF1_SCK_B, FN_DU1_DB6, FN_AUDIO_CLKC_C,
+ FN_SSI_SCK4_B, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_19_16 [4] */
+ FN_TX3_A, FN_SDA1_C, FN_MSIOF1_TXD_B, FN_DU1_DB5,
+ FN_AUDIO_CLKB_C, FN_SSI_WS4_B, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_15_12 [4] */
+ FN_RX3_A, FN_SCL1_C, FN_MSIOF1_RXD_B, FN_DU1_DB4,
+ FN_AUDIO_CLKA_C, FN_SSI_SDATA4_B, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ /* IP13_11_8 [4] */
+ FN_SD2_WP, FN_SCIF3_SCK, 0, FN_DU1_DB3, FN_SSI_SDATA9_B, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_7_4 [4] */
+ FN_SD2_CD, FN_SCIF2_SCK_A, 0, FN_DU1_DB2, FN_SSI_SCK9_B, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP13_3_0 [4] */
+ FN_SD2_DAT3, FN_TX2_A, 0, FN_DU1_DB1, FN_SSI_WS9_B, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR14", 0xE6060078, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP14_31_28 [4] */
+ FN_SSI_SDATA7_A, 0, 0, FN_IRQ8, FN_AUDIO_CLKA_D, FN_CAN_CLK_D,
+ FN_VI0_G5, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_27_24 [4] */
+ FN_SSI_WS78_A, 0, FN_SCL4_E, FN_DU1_CDE, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ /* IP14_23_20 [4] */
+ FN_SSI_SCK78_A, 0, FN_SDA4_E, FN_DU1_DISP, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP14_19_16 [4] */
+ FN_SSI_SDATA6_A, 0, FN_SDA4_C, FN_DU1_EXVSYNC_DU1_VSYNC, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_15_12 [4] */
+ FN_SSI_WS6_A, 0, FN_SCL4_C, FN_DU1_EXHSYNC_DU1_HSYNC, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_11_8 [4] */
+ FN_SSI_SCK6_A, 0, 0, FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_7_4 [4] */
+ FN_SSI_SDATA5_A, 0, FN_SDA3_C, FN_DU1_DOTCLKOUT0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP14_3_0 [4] */
+ FN_SSI_WS5_A, 0, FN_SCL3_C, FN_DU1_DOTCLKIN, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR15", 0xE606007C, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP15_31_28 [4] */
+ FN_SSI_WS4_A, 0, FN_AVB_PHY_INT, 0, 0, 0, FN_VI0_R5, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP15_27_24 [4] */
+ FN_SSI_SCK4_A, 0, FN_AVB_MAGIC, 0, 0, 0, FN_VI0_R4, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ /* IP15_23_20 [4] */
+ FN_SSI_SDATA3, FN_MSIOF1_SS2_A, FN_AVB_LINK, 0, FN_CAN1_TX_A,
+ FN_DREQ2_N, FN_VI0_R3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_19_16 [4] */
+ FN_SSI_WS34, FN_MSIOF1_SS1_A, FN_AVB_MDIO, 0, FN_CAN1_RX_A,
+ FN_DREQ1_N, FN_VI0_R2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_15_12 [4] */
+ FN_SSI_SCK34, FN_MSIOF1_SCK_A, FN_AVB_MDC, 0, 0, FN_DACK1,
+ FN_VI0_R1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_11_8 [4] */
+ FN_SSI_SDATA0_A, FN_MSIOF1_SYNC_A, FN_PWM0_C, 0, 0, 0,
+ FN_VI0_R0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_7_4 [4] */
+ FN_SSI_WS0129_A, FN_MSIOF1_TXD_A, FN_TX5_D, 0, 0, 0,
+ FN_VI0_G7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP15_3_0 [4] */
+ FN_SSI_SCK0129_A, FN_MSIOF1_RXD_A, FN_RX5_D, 0, 0, 0,
+ FN_VI0_G6, 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR16", 0xE6060080, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP16_31_28 [4] */
+ FN_SSI_SDATA2_A, FN_HRTS1_N_B, 0, 0, 0, 0,
+ FN_VI0_DATA4_VI0_B4, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_27_24 [4] */
+ FN_SSI_WS2_A, FN_HCTS1_N_B, 0, 0, 0, FN_AVB_TX_ER,
+ FN_VI0_DATA3_VI0_B3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_23_20 [4] */
+ FN_SSI_SCK2_A, FN_HTX1_B, 0, 0, 0, FN_AVB_TXD7,
+ FN_VI0_DATA2_VI0_B2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_19_16 [4] */
+ FN_SSI_SDATA1_A, FN_HRX1_B, 0, 0, 0, 0, FN_VI0_DATA1_VI0_B1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_15_12 [4] */
+ FN_SSI_WS1_A, FN_TX1_B, 0, 0, FN_CAN0_TX_D,
+ FN_AVB_AVTP_MATCH_B, FN_VI0_DATA0_VI0_B0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0,
+ /* IP16_11_8 [4] */
+ FN_SSI_SDATA8_A, FN_RX1_B, 0, 0, FN_CAN0_RX_D,
+ FN_AVB_AVTP_CAPTURE_B, FN_VI0_R7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_7_4 [4] */
+ FN_SSI_SCK1_A, FN_SCIF1_SCK_B, FN_PWM1_D, FN_IRQ9, FN_REMOCON_A,
+ FN_DACK2, FN_VI0_CLK, FN_AVB_COL, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP16_3_0 [4] */
+ FN_SSI_SDATA4_A, 0, FN_AVB_CRS, 0, 0, 0, FN_VI0_R6, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR17", 0xE6060084, 32,
+ 4, 4, 4, 4, 4, 4, 4, 4) {
+ /* IP17_31_28 [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_27_24 [4] */
+ FN_AUDIO_CLKOUT_A, FN_SDA4_B, 0, 0, 0, 0,
+ FN_VI0_VSYNC_N, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_23_20 [4] */
+ FN_AUDIO_CLKC_A, FN_SCL4_B, 0, 0, 0, 0,
+ FN_VI0_HSYNC_N, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_19_16 [4] */
+ FN_AUDIO_CLKB_A, FN_SDA0_B, 0, 0, 0, 0,
+ FN_VI0_FIELD, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_15_12 [4] */
+ FN_AUDIO_CLKA_A, FN_SCL0_B, 0, 0, 0, 0,
+ FN_VI0_CLKENB, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_11_8 [4] */
+ FN_SSI_SDATA9_A, FN_SCIF2_SCK_B, FN_PWM2_D, 0, 0, 0,
+ FN_VI0_DATA7_VI0_B7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_7_4 [4] */
+ FN_SSI_WS9_A, FN_TX2_B, FN_SDA3_E, 0, 0, 0,
+ FN_VI0_DATA6_VI0_B6, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP17_3_0 [4] */
+ FN_SSI_SCK9_A, FN_RX2_B, FN_SCL3_E, 0, 0, FN_EX_WAIT1,
+ FN_VI0_DATA5_VI0_B5, 0, 0, 0, 0, 0, 0, 0, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xE60600C0, 32,
+ 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1, 3, 3,
+ 1, 2, 3, 3, 1) {
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_ADGA [2] */
+ FN_SEL_ADGA_0, FN_SEL_ADGA_1, FN_SEL_ADGA_2, FN_SEL_ADGA_3,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_CANCLK [2] */
+ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2,
+ FN_SEL_CANCLK_3,
+ /* SEL_CAN1 [2] */
+ FN_SEL_CAN1_0, FN_SEL_CAN1_1, FN_SEL_CAN1_2, FN_SEL_CAN1_3,
+ /* SEL_CAN0 [2] */
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1, FN_SEL_CAN0_2, FN_SEL_CAN0_3,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_I2C04 [3] */
+ FN_SEL_I2C04_0, FN_SEL_I2C04_1, FN_SEL_I2C04_2, FN_SEL_I2C04_3,
+ FN_SEL_I2C04_4, 0, 0, 0,
+ /* SEL_I2C03 [3] */
+ FN_SEL_I2C03_0, FN_SEL_I2C03_1, FN_SEL_I2C03_2, FN_SEL_I2C03_3,
+ FN_SEL_I2C03_4, 0, 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_I2C02 [2] */
+ FN_SEL_I2C02_0, FN_SEL_I2C02_1, FN_SEL_I2C02_2, FN_SEL_I2C02_3,
+ /* SEL_I2C01 [3] */
+ FN_SEL_I2C01_0, FN_SEL_I2C01_1, FN_SEL_I2C01_2, FN_SEL_I2C01_3,
+ FN_SEL_I2C01_4, 0, 0, 0,
+ /* SEL_I2C00 [3] */
+ FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2, FN_SEL_I2C00_3,
+ FN_SEL_I2C00_4, 0, 0, 0,
+ /* SEL_AVB [1] */
+ FN_SEL_AVB_0, FN_SEL_AVB_1, }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xE60600C4, 32,
+ 1, 3, 3, 2, 2, 1, 2, 2,
+ 2, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 1) {
+ /* SEL_SCIFCLK [1] */
+ FN_SEL_SCIFCLK_0, FN_SEL_SCIFCLK_1,
+ /* SEL_SCIF5 [3] */
+ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, FN_SEL_SCIF5_2, FN_SEL_SCIF5_3,
+ FN_SEL_SCIF5_4, FN_SEL_SCIF5_5, 0, 0,
+ /* SEL_SCIF4 [3] */
+ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, FN_SEL_SCIF4_3,
+ FN_SEL_SCIF4_4, 0, 0, 0,
+ /* SEL_SCIF3 [2] */
+ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, 0,
+ /* SEL_SCIF2 [2] */
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, FN_SEL_SCIF2_2, 0,
+ /* SEL_SCIF2_CLK [1] */
+ FN_SEL_SCIF2_CLK_0, FN_SEL_SCIF2_CLK_1,
+ /* SEL_SCIF1 [2] */
+ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3,
+ /* SEL_SCIF0 [2] */
+ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
+ /* SEL_MSIOF2 [2] */
+ FN_SEL_MSIOF2_0, FN_SEL_MSIOF2_1, FN_SEL_MSIOF2_2, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_MSIOF1 [1] */
+ FN_SEL_MSIOF1_0, FN_SEL_MSIOF1_1,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_MSIOF0 [1] */
+ FN_SEL_MSIOF0_0, FN_SEL_MSIOF0_1,
+ /* SEL_RCN [1] */
+ FN_SEL_RCN_0, FN_SEL_RCN_1,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* SEL_TMU2 [1] */
+ FN_SEL_TMU2_0, FN_SEL_TMU2_1,
+ /* SEL_TMU1 [1] */
+ FN_SEL_TMU1_0, FN_SEL_TMU1_1,
+ /* RESERVED [2] */
+ 0, 0, 0, 0,
+ /* SEL_HSCIF1 [2] */
+ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_HSCIF1_2, 0,
+ /* SEL_HSCIF0 [1] */
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1, }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE60600C8, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* RESERVED [1] */
+ 0, 0,
+ /* SEL_ADGB [2] */
+ FN_SEL_ADGB_0, FN_SEL_ADGB_1, FN_SEL_ADGB_2, 0,
+ /* SEL_ADGC [2] */
+ FN_SEL_ADGC_0, FN_SEL_ADGC_1, FN_SEL_ADGC_2, 0,
+ /* SEL_SSI9 [2] */
+ FN_SEL_SSI9_0, FN_SEL_SSI9_1, 0, 0,
+ /* SEL_SSI8 [2] */
+ FN_SEL_SSI8_0, FN_SEL_SSI8_1, 0, 0,
+ /* SEL_SSI7 [2] */
+ FN_SEL_SSI7_0, FN_SEL_SSI7_1, 0, 0,
+ /* SEL_SSI6 [2] */
+ FN_SEL_SSI6_0, FN_SEL_SSI6_1, 0, 0,
+ /* SEL_SSI5 [2] */
+ FN_SEL_SSI5_0, FN_SEL_SSI5_1, 0, 0,
+ /* SEL_SSI4 [2] */
+ FN_SEL_SSI4_0, FN_SEL_SSI4_1, 0, 0,
+ /* SEL_SSI2 [2] */
+ FN_SEL_SSI2_0, FN_SEL_SSI2_1, 0, 0,
+ /* SEL_SSI1 [2] */
+ FN_SEL_SSI1_0, FN_SEL_SSI1_1, FN_SEL_SSI1_2, FN_SEL_SSI1_3,
+ /* SEL_SSI0 [2] */
+ FN_SEL_SSI0_0, FN_SEL_SSI0_1, 0, 0, }
+ },
+ { },
+};
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77470
+const struct sh_pfc_soc_info r8a77470_pinmux_info = {
+ .name = "r8a77470_pfc",
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 82a1c411c952a..a6c5d50557e67 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -1432,10 +1432,10 @@ static const u16 pinmux_data[] = {
/*
* Static pins can not be muxed between different functions but
- * still needs a mark entry in the pinmux list. Add each static
+ * still need mark entries in the pinmux list. Add each static
* pin to the list without an associated function. The sh-pfc
- * core will do the right thing and skip trying to mux then pin
- * while still applying configuration to it
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
*/
#define FM(x) PINMUX_DATA(x##_MARK, 0),
PINMUX_STATIC
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 7100a2dd65f8c..4f55b1562ad48 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -1493,10 +1493,10 @@ static const u16 pinmux_data[] = {
/*
* Static pins can not be muxed between different functions but
- * still needs a mark entry in the pinmux list. Add each static
+ * still need mark entries in the pinmux list. Add each static
* pin to the list without an associated function. The sh-pfc
- * core will do the right thing and skip trying to mux then pin
- * while still applying configuration to it
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
*/
#define FM(x) PINMUX_DATA(x##_MARK, 0),
PINMUX_STATIC
@@ -3122,7 +3122,7 @@ static const unsigned int msiof3_ss1_e_mux[] = {
MSIOF3_SS1_E_MARK,
};
static const unsigned int msiof3_ss2_e_pins[] = {
- /* SS1 */
+ /* SS2 */
RCAR_GP_PIN(2, 0),
};
static const unsigned int msiof3_ss2_e_mux[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 4bc5b1f820c1f..3ea133cfb241a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -1499,10 +1499,10 @@ static const u16 pinmux_data[] = {
/*
* Static pins can not be muxed between different functions but
- * still needs a mark entry in the pinmux list. Add each static
+ * still need mark entries in the pinmux list. Add each static
* pin to the list without an associated function. The sh-pfc
- * core will do the right thing and skip trying to mux then pin
- * while still applying configuration to it
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
*/
#define FM(x) PINMUX_DATA(x##_MARK, 0),
PINMUX_STATIC
@@ -3122,7 +3122,7 @@ static const unsigned int msiof3_ss1_e_mux[] = {
MSIOF3_SS1_E_MARK,
};
static const unsigned int msiof3_ss2_e_pins[] = {
- /* SS1 */
+ /* SS2 */
RCAR_GP_PIN(2, 0),
};
static const unsigned int msiof3_ss2_e_mux[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index cea9d0599c128..d2bbee6563813 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A77965 processor support - PFC hardware block.
*
@@ -1501,10 +1501,10 @@ static const u16 pinmux_data[] = {
/*
* Static pins can not be muxed between different functions but
- * still needs a mark entry in the pinmux list. Add each static
+ * still need mark entries in the pinmux list. Add each static
* pin to the list without an associated function. The sh-pfc
- * core will do the right thing and skip trying to mux then pin
- * while still applying configuration to it
+ * core will do the right thing and skip trying to mux the pin
+ * while still applying configuration to it.
*/
#define FM(x) PINMUX_DATA(x##_MARK, 0),
PINMUX_STATIC
@@ -1662,6 +1662,153 @@ static const unsigned int avb_avtp_capture_b_mux[] = {
AVB_AVTP_CAPTURE_B_MARK,
};
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+};
+
+static const unsigned int du_rgb666_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK,
+};
+
+static const unsigned int du_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 8),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+
+static const unsigned int du_rgb888_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
+};
+
+static const unsigned int du_clk_out_0_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(1, 27),
+};
+
+static const unsigned int du_clk_out_0_mux[] = {
+ DU_DOTCLKOUT0_MARK
+};
+
+static const unsigned int du_clk_out_1_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(2, 3),
+};
+
+static const unsigned int du_clk_out_1_mux[] = {
+ DU_DOTCLKOUT1_MARK
+};
+
+static const unsigned int du_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
+};
+
+static const unsigned int du_sync_mux[] = {
+ DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK
+};
+
+static const unsigned int du_oddf_pins[] = {
+ /* EXDISP/EXODDF/EXCDE */
+ RCAR_GP_PIN(2, 2),
+};
+
+static const unsigned int du_oddf_mux[] = {
+ DU_EXODDF_DU_ODDF_DISP_CDE_MARK,
+};
+
+static const unsigned int du_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(2, 0),
+};
+
+static const unsigned int du_cde_mux[] = {
+ DU_CDE_MARK,
+};
+
+static const unsigned int du_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(2, 1),
+};
+
+static const unsigned int du_disp_mux[] = {
+ DU_DISP_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c1_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int i2c1_a_mux[] = {
+ SDA1_A_MARK, SCL1_A_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int i2c1_b_mux[] = {
+ SDA1_B_MARK, SCL1_B_MARK,
+};
+static const unsigned int i2c2_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int i2c2_a_mux[] = {
+ SDA2_A_MARK, SCL2_A_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int i2c2_b_mux[] = {
+ SDA2_B_MARK, SCL2_B_MARK,
+};
+static const unsigned int i2c6_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c6_a_mux[] = {
+ SDA6_A_MARK, SCL6_A_MARK,
+};
+static const unsigned int i2c6_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c6_b_mux[] = {
+ SDA6_B_MARK, SCL6_B_MARK,
+};
+static const unsigned int i2c6_c_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int i2c6_c_mux[] = {
+ SDA6_C_MARK, SCL6_C_MARK,
+};
+
/* - INTC-EX ---------------------------------------------------------------- */
static const unsigned int intc_ex_irq0_pins[] = {
/* IRQ0 */
@@ -1706,6 +1853,803 @@ static const unsigned int intc_ex_irq5_mux[] = {
IRQ5_MARK,
};
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 17),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_txd_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+static const unsigned int msiof0_rxd_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 22),
+};
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 8),
+};
+static const unsigned int msiof1_clk_a_mux[] = {
+ MSIOF1_SCK_A_MARK,
+};
+static const unsigned int msiof1_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(6, 9),
+};
+static const unsigned int msiof1_sync_a_mux[] = {
+ MSIOF1_SYNC_A_MARK,
+};
+static const unsigned int msiof1_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 5),
+};
+static const unsigned int msiof1_ss1_a_mux[] = {
+ MSIOF1_SS1_A_MARK,
+};
+static const unsigned int msiof1_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 6),
+};
+static const unsigned int msiof1_ss2_a_mux[] = {
+ MSIOF1_SS2_A_MARK,
+};
+static const unsigned int msiof1_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int msiof1_txd_a_mux[] = {
+ MSIOF1_TXD_A_MARK,
+};
+static const unsigned int msiof1_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int msiof1_rxd_a_mux[] = {
+ MSIOF1_RXD_A_MARK,
+};
+static const unsigned int msiof1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 9),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+ MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int msiof1_sync_b_mux[] = {
+ MSIOF1_SYNC_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+ MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+ MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 8),
+};
+static const unsigned int msiof1_txd_b_mux[] = {
+ MSIOF1_TXD_B_MARK,
+};
+static const unsigned int msiof1_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int msiof1_rxd_b_mux[] = {
+ MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 17),
+};
+static const unsigned int msiof1_clk_c_mux[] = {
+ MSIOF1_SCK_C_MARK,
+};
+static const unsigned int msiof1_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(6, 18),
+};
+static const unsigned int msiof1_sync_c_mux[] = {
+ MSIOF1_SYNC_C_MARK,
+};
+static const unsigned int msiof1_ss1_c_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int msiof1_ss1_c_mux[] = {
+ MSIOF1_SS1_C_MARK,
+};
+static const unsigned int msiof1_ss2_c_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 27),
+};
+static const unsigned int msiof1_ss2_c_mux[] = {
+ MSIOF1_SS2_C_MARK,
+};
+static const unsigned int msiof1_txd_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int msiof1_txd_c_mux[] = {
+ MSIOF1_TXD_C_MARK,
+};
+static const unsigned int msiof1_rxd_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int msiof1_rxd_c_mux[] = {
+ MSIOF1_RXD_C_MARK,
+};
+static const unsigned int msiof1_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int msiof1_clk_d_mux[] = {
+ MSIOF1_SCK_D_MARK,
+};
+static const unsigned int msiof1_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int msiof1_sync_d_mux[] = {
+ MSIOF1_SYNC_D_MARK,
+};
+static const unsigned int msiof1_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int msiof1_ss1_d_mux[] = {
+ MSIOF1_SS1_D_MARK,
+};
+static const unsigned int msiof1_ss2_d_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int msiof1_ss2_d_mux[] = {
+ MSIOF1_SS2_D_MARK,
+};
+static const unsigned int msiof1_txd_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int msiof1_txd_d_mux[] = {
+ MSIOF1_TXD_D_MARK,
+};
+static const unsigned int msiof1_rxd_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int msiof1_rxd_d_mux[] = {
+ MSIOF1_RXD_D_MARK,
+};
+static const unsigned int msiof1_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 0),
+};
+static const unsigned int msiof1_clk_e_mux[] = {
+ MSIOF1_SCK_E_MARK,
+};
+static const unsigned int msiof1_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(3, 1),
+};
+static const unsigned int msiof1_sync_e_mux[] = {
+ MSIOF1_SYNC_E_MARK,
+};
+static const unsigned int msiof1_ss1_e_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(3, 4),
+};
+static const unsigned int msiof1_ss1_e_mux[] = {
+ MSIOF1_SS1_E_MARK,
+};
+static const unsigned int msiof1_ss2_e_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(3, 5),
+};
+static const unsigned int msiof1_ss2_e_mux[] = {
+ MSIOF1_SS2_E_MARK,
+};
+static const unsigned int msiof1_txd_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(3, 3),
+};
+static const unsigned int msiof1_txd_e_mux[] = {
+ MSIOF1_TXD_E_MARK,
+};
+static const unsigned int msiof1_rxd_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(3, 2),
+};
+static const unsigned int msiof1_rxd_e_mux[] = {
+ MSIOF1_RXD_E_MARK,
+};
+static const unsigned int msiof1_clk_f_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 23),
+};
+static const unsigned int msiof1_clk_f_mux[] = {
+ MSIOF1_SCK_F_MARK,
+};
+static const unsigned int msiof1_sync_f_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 24),
+};
+static const unsigned int msiof1_sync_f_mux[] = {
+ MSIOF1_SYNC_F_MARK,
+};
+static const unsigned int msiof1_ss1_f_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(6, 1),
+};
+static const unsigned int msiof1_ss1_f_mux[] = {
+ MSIOF1_SS1_F_MARK,
+};
+static const unsigned int msiof1_ss2_f_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(6, 2),
+};
+static const unsigned int msiof1_ss2_f_mux[] = {
+ MSIOF1_SS2_F_MARK,
+};
+static const unsigned int msiof1_txd_f_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(6, 0),
+};
+static const unsigned int msiof1_txd_f_mux[] = {
+ MSIOF1_TXD_F_MARK,
+};
+static const unsigned int msiof1_rxd_f_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 25),
+};
+static const unsigned int msiof1_rxd_f_mux[] = {
+ MSIOF1_RXD_F_MARK,
+};
+static const unsigned int msiof1_clk_g_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 6),
+};
+static const unsigned int msiof1_clk_g_mux[] = {
+ MSIOF1_SCK_G_MARK,
+};
+static const unsigned int msiof1_sync_g_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(3, 7),
+};
+static const unsigned int msiof1_sync_g_mux[] = {
+ MSIOF1_SYNC_G_MARK,
+};
+static const unsigned int msiof1_ss1_g_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(3, 10),
+};
+static const unsigned int msiof1_ss1_g_mux[] = {
+ MSIOF1_SS1_G_MARK,
+};
+static const unsigned int msiof1_ss2_g_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int msiof1_ss2_g_mux[] = {
+ MSIOF1_SS2_G_MARK,
+};
+static const unsigned int msiof1_txd_g_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(3, 9),
+};
+static const unsigned int msiof1_txd_g_mux[] = {
+ MSIOF1_TXD_G_MARK,
+};
+static const unsigned int msiof1_rxd_g_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int msiof1_rxd_g_mux[] = {
+ MSIOF1_RXD_G_MARK,
+};
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int msiof2_clk_a_mux[] = {
+ MSIOF2_SCK_A_MARK,
+};
+static const unsigned int msiof2_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int msiof2_sync_a_mux[] = {
+ MSIOF2_SYNC_A_MARK,
+};
+static const unsigned int msiof2_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 6),
+};
+static const unsigned int msiof2_ss1_a_mux[] = {
+ MSIOF2_SS1_A_MARK,
+};
+static const unsigned int msiof2_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 7),
+};
+static const unsigned int msiof2_ss2_a_mux[] = {
+ MSIOF2_SS2_A_MARK,
+};
+static const unsigned int msiof2_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int msiof2_txd_a_mux[] = {
+ MSIOF2_TXD_A_MARK,
+};
+static const unsigned int msiof2_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof2_rxd_a_mux[] = {
+ MSIOF2_RXD_A_MARK,
+};
+static const unsigned int msiof2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 4),
+};
+static const unsigned int msiof2_clk_b_mux[] = {
+ MSIOF2_SCK_B_MARK,
+};
+static const unsigned int msiof2_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 5),
+};
+static const unsigned int msiof2_sync_b_mux[] = {
+ MSIOF2_SYNC_B_MARK,
+};
+static const unsigned int msiof2_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof2_ss1_b_mux[] = {
+ MSIOF2_SS1_B_MARK,
+};
+static const unsigned int msiof2_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof2_ss2_b_mux[] = {
+ MSIOF2_SS2_B_MARK,
+};
+static const unsigned int msiof2_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int msiof2_txd_b_mux[] = {
+ MSIOF2_TXD_B_MARK,
+};
+static const unsigned int msiof2_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 6),
+};
+static const unsigned int msiof2_rxd_b_mux[] = {
+ MSIOF2_RXD_B_MARK,
+};
+static const unsigned int msiof2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int msiof2_clk_c_mux[] = {
+ MSIOF2_SCK_C_MARK,
+};
+static const unsigned int msiof2_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 11),
+};
+static const unsigned int msiof2_sync_c_mux[] = {
+ MSIOF2_SYNC_C_MARK,
+};
+static const unsigned int msiof2_ss1_c_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 10),
+};
+static const unsigned int msiof2_ss1_c_mux[] = {
+ MSIOF2_SS1_C_MARK,
+};
+static const unsigned int msiof2_ss2_c_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int msiof2_ss2_c_mux[] = {
+ MSIOF2_SS2_C_MARK,
+};
+static const unsigned int msiof2_txd_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int msiof2_txd_c_mux[] = {
+ MSIOF2_TXD_C_MARK,
+};
+static const unsigned int msiof2_rxd_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int msiof2_rxd_c_mux[] = {
+ MSIOF2_RXD_C_MARK,
+};
+static const unsigned int msiof2_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 8),
+};
+static const unsigned int msiof2_clk_d_mux[] = {
+ MSIOF2_SCK_D_MARK,
+};
+static const unsigned int msiof2_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+static const unsigned int msiof2_sync_d_mux[] = {
+ MSIOF2_SYNC_D_MARK,
+};
+static const unsigned int msiof2_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int msiof2_ss1_d_mux[] = {
+ MSIOF2_SS1_D_MARK,
+};
+static const unsigned int msiof2_ss2_d_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int msiof2_ss2_d_mux[] = {
+ MSIOF2_SS2_D_MARK,
+};
+static const unsigned int msiof2_txd_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 11),
+};
+static const unsigned int msiof2_txd_d_mux[] = {
+ MSIOF2_TXD_D_MARK,
+};
+static const unsigned int msiof2_rxd_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 10),
+};
+static const unsigned int msiof2_rxd_d_mux[] = {
+ MSIOF2_RXD_D_MARK,
+};
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof3_clk_a_mux[] = {
+ MSIOF3_SCK_A_MARK,
+};
+static const unsigned int msiof3_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_sync_a_mux[] = {
+ MSIOF3_SYNC_A_MARK,
+};
+static const unsigned int msiof3_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int msiof3_ss1_a_mux[] = {
+ MSIOF3_SS1_A_MARK,
+};
+static const unsigned int msiof3_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int msiof3_ss2_a_mux[] = {
+ MSIOF3_SS2_A_MARK,
+};
+static const unsigned int msiof3_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_txd_a_mux[] = {
+ MSIOF3_TXD_A_MARK,
+};
+static const unsigned int msiof3_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_rxd_a_mux[] = {
+ MSIOF3_RXD_A_MARK,
+};
+static const unsigned int msiof3_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int msiof3_clk_b_mux[] = {
+ MSIOF3_SCK_B_MARK,
+};
+static const unsigned int msiof3_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int msiof3_sync_b_mux[] = {
+ MSIOF3_SYNC_B_MARK,
+};
+static const unsigned int msiof3_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 4),
+};
+static const unsigned int msiof3_ss1_b_mux[] = {
+ MSIOF3_SS1_B_MARK,
+};
+static const unsigned int msiof3_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 5),
+};
+static const unsigned int msiof3_ss2_b_mux[] = {
+ MSIOF3_SS2_B_MARK,
+};
+static const unsigned int msiof3_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int msiof3_txd_b_mux[] = {
+ MSIOF3_TXD_B_MARK,
+};
+static const unsigned int msiof3_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int msiof3_rxd_b_mux[] = {
+ MSIOF3_RXD_B_MARK,
+};
+static const unsigned int msiof3_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 12),
+};
+static const unsigned int msiof3_clk_c_mux[] = {
+ MSIOF3_SCK_C_MARK,
+};
+static const unsigned int msiof3_sync_c_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 13),
+};
+static const unsigned int msiof3_sync_c_mux[] = {
+ MSIOF3_SYNC_C_MARK,
+};
+static const unsigned int msiof3_txd_c_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int msiof3_txd_c_mux[] = {
+ MSIOF3_TXD_C_MARK,
+};
+static const unsigned int msiof3_rxd_c_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int msiof3_rxd_c_mux[] = {
+ MSIOF3_RXD_C_MARK,
+};
+static const unsigned int msiof3_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int msiof3_clk_d_mux[] = {
+ MSIOF3_SCK_D_MARK,
+};
+static const unsigned int msiof3_sync_d_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof3_sync_d_mux[] = {
+ MSIOF3_SYNC_D_MARK,
+};
+static const unsigned int msiof3_ss1_d_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 26),
+};
+static const unsigned int msiof3_ss1_d_mux[] = {
+ MSIOF3_SS1_D_MARK,
+};
+static const unsigned int msiof3_txd_d_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int msiof3_txd_d_mux[] = {
+ MSIOF3_TXD_D_MARK,
+};
+static const unsigned int msiof3_rxd_d_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 24),
+};
+static const unsigned int msiof3_rxd_d_mux[] = {
+ MSIOF3_RXD_D_MARK,
+};
+static const unsigned int msiof3_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int msiof3_clk_e_mux[] = {
+ MSIOF3_SCK_E_MARK,
+};
+static const unsigned int msiof3_sync_e_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int msiof3_sync_e_mux[] = {
+ MSIOF3_SYNC_E_MARK,
+};
+static const unsigned int msiof3_ss1_e_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int msiof3_ss1_e_mux[] = {
+ MSIOF3_SS1_E_MARK,
+};
+static const unsigned int msiof3_ss2_e_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int msiof3_ss2_e_mux[] = {
+ MSIOF3_SS2_E_MARK,
+};
+static const unsigned int msiof3_txd_e_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int msiof3_txd_e_mux[] = {
+ MSIOF3_TXD_E_MARK,
+};
+static const unsigned int msiof3_rxd_e_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int msiof3_rxd_e_mux[] = {
+ MSIOF3_RXD_E_MARK,
+};
+
+/* - PWM0 --------------------------------------------------------------------*/
+static const unsigned int pwm0_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 6),
+};
+static const unsigned int pwm0_mux[] = {
+ PWM0_MARK,
+};
+/* - PWM1 --------------------------------------------------------------------*/
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+/* - PWM2 --------------------------------------------------------------------*/
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+/* - PWM3 --------------------------------------------------------------------*/
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+/* - PWM4 --------------------------------------------------------------------*/
+static const unsigned int pwm4_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int pwm4_a_mux[] = {
+ PWM4_A_MARK,
+};
+static const unsigned int pwm4_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int pwm4_b_mux[] = {
+ PWM4_B_MARK,
+};
+/* - PWM5 --------------------------------------------------------------------*/
+static const unsigned int pwm5_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int pwm5_a_mux[] = {
+ PWM5_A_MARK,
+};
+static const unsigned int pwm5_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int pwm5_b_mux[] = {
+ PWM5_B_MARK,
+};
+/* - PWM6 --------------------------------------------------------------------*/
+static const unsigned int pwm6_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int pwm6_a_mux[] = {
+ PWM6_A_MARK,
+};
+static const unsigned int pwm6_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int pwm6_b_mux[] = {
+ PWM6_B_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -1917,6 +2861,264 @@ static const unsigned int scif_clk_b_mux[] = {
SCIF_CLK_B_MARK,
};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 2),
+};
+
+static const unsigned int sdhi0_data1_mux[] = {
+ SD0_DAT0_MARK,
+};
+
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+};
+
+static const unsigned int sdhi0_data4_mux[] = {
+ SD0_DAT0_MARK, SD0_DAT1_MARK,
+ SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+};
+
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SD0_CLK_MARK, SD0_CMD_MARK,
+};
+
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 12),
+};
+
+static const unsigned int sdhi0_cd_mux[] = {
+ SD0_CD_MARK,
+};
+
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 13),
+};
+
+static const unsigned int sdhi0_wp_mux[] = {
+ SD0_WP_MARK,
+};
+
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 8),
+};
+
+static const unsigned int sdhi1_data1_mux[] = {
+ SD1_DAT0_MARK,
+};
+
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+
+static const unsigned int sdhi1_data4_mux[] = {
+ SD1_DAT0_MARK, SD1_DAT1_MARK,
+ SD1_DAT2_MARK, SD1_DAT3_MARK,
+};
+
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SD1_CLK_MARK, SD1_CMD_MARK,
+};
+
+static const unsigned int sdhi1_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 14),
+};
+
+static const unsigned int sdhi1_cd_mux[] = {
+ SD1_CD_MARK,
+};
+
+static const unsigned int sdhi1_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 15),
+};
+
+static const unsigned int sdhi1_wp_mux[] = {
+ SD1_WP_MARK,
+};
+
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 2),
+};
+
+static const unsigned int sdhi2_data1_mux[] = {
+ SD2_DAT0_MARK,
+};
+
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+
+static const unsigned int sdhi2_data4_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+
+static const unsigned int sdhi2_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+
+static const unsigned int sdhi2_data8_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+ SD2_DAT4_MARK, SD2_DAT5_MARK,
+ SD2_DAT6_MARK, SD2_DAT7_MARK,
+};
+
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SD2_CLK_MARK, SD2_CMD_MARK,
+};
+
+static const unsigned int sdhi2_cd_a_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 13),
+};
+
+static const unsigned int sdhi2_cd_a_mux[] = {
+ SD2_CD_A_MARK,
+};
+
+static const unsigned int sdhi2_cd_b_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(5, 10),
+};
+
+static const unsigned int sdhi2_cd_b_mux[] = {
+ SD2_CD_B_MARK,
+};
+
+static const unsigned int sdhi2_wp_a_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 14),
+};
+
+static const unsigned int sdhi2_wp_a_mux[] = {
+ SD2_WP_A_MARK,
+};
+
+static const unsigned int sdhi2_wp_b_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(5, 11),
+};
+
+static const unsigned int sdhi2_wp_b_mux[] = {
+ SD2_WP_B_MARK,
+};
+
+static const unsigned int sdhi2_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 6),
+};
+
+static const unsigned int sdhi2_ds_mux[] = {
+ SD2_DS_MARK,
+};
+
+/* - SDHI3 ------------------------------------------------------------------ */
+static const unsigned int sdhi3_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 9),
+};
+
+static const unsigned int sdhi3_data1_mux[] = {
+ SD3_DAT0_MARK,
+};
+
+static const unsigned int sdhi3_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+
+static const unsigned int sdhi3_data4_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+};
+
+static const unsigned int sdhi3_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+};
+
+static const unsigned int sdhi3_data8_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+ SD3_DAT4_MARK, SD3_DAT5_MARK,
+ SD3_DAT6_MARK, SD3_DAT7_MARK,
+};
+
+static const unsigned int sdhi3_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+};
+
+static const unsigned int sdhi3_ctrl_mux[] = {
+ SD3_CLK_MARK, SD3_CMD_MARK,
+};
+
+static const unsigned int sdhi3_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 15),
+};
+
+static const unsigned int sdhi3_cd_mux[] = {
+ SD3_CD_MARK,
+};
+
+static const unsigned int sdhi3_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 16),
+};
+
+static const unsigned int sdhi3_wp_mux[] = {
+ SD3_WP_MARK,
+};
+
+static const unsigned int sdhi3_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 17),
+};
+
+static const unsigned int sdhi3_ds_mux[] = {
+ SD3_DS_MARK,
+};
+
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
/* PWEN, OVC */
@@ -1959,12 +3161,139 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_avtp_capture_a),
SH_PFC_PIN_GROUP(avb_avtp_match_b),
SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
SH_PFC_PIN_GROUP(intc_ex_irq0),
SH_PFC_PIN_GROUP(intc_ex_irq1),
SH_PFC_PIN_GROUP(intc_ex_irq2),
SH_PFC_PIN_GROUP(intc_ex_irq3),
SH_PFC_PIN_GROUP(intc_ex_irq4),
SH_PFC_PIN_GROUP(intc_ex_irq5),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk_a),
+ SH_PFC_PIN_GROUP(msiof1_sync_a),
+ SH_PFC_PIN_GROUP(msiof1_ss1_a),
+ SH_PFC_PIN_GROUP(msiof1_ss2_a),
+ SH_PFC_PIN_GROUP(msiof1_txd_a),
+ SH_PFC_PIN_GROUP(msiof1_rxd_a),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_txd_b),
+ SH_PFC_PIN_GROUP(msiof1_rxd_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_ss1_c),
+ SH_PFC_PIN_GROUP(msiof1_ss2_c),
+ SH_PFC_PIN_GROUP(msiof1_txd_c),
+ SH_PFC_PIN_GROUP(msiof1_rxd_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_ss2_d),
+ SH_PFC_PIN_GROUP(msiof1_txd_d),
+ SH_PFC_PIN_GROUP(msiof1_rxd_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_ss1_e),
+ SH_PFC_PIN_GROUP(msiof1_ss2_e),
+ SH_PFC_PIN_GROUP(msiof1_txd_e),
+ SH_PFC_PIN_GROUP(msiof1_rxd_e),
+ SH_PFC_PIN_GROUP(msiof1_clk_f),
+ SH_PFC_PIN_GROUP(msiof1_sync_f),
+ SH_PFC_PIN_GROUP(msiof1_ss1_f),
+ SH_PFC_PIN_GROUP(msiof1_ss2_f),
+ SH_PFC_PIN_GROUP(msiof1_txd_f),
+ SH_PFC_PIN_GROUP(msiof1_rxd_f),
+ SH_PFC_PIN_GROUP(msiof1_clk_g),
+ SH_PFC_PIN_GROUP(msiof1_sync_g),
+ SH_PFC_PIN_GROUP(msiof1_ss1_g),
+ SH_PFC_PIN_GROUP(msiof1_ss2_g),
+ SH_PFC_PIN_GROUP(msiof1_txd_g),
+ SH_PFC_PIN_GROUP(msiof1_rxd_g),
+ SH_PFC_PIN_GROUP(msiof2_clk_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_ss1_a),
+ SH_PFC_PIN_GROUP(msiof2_ss2_a),
+ SH_PFC_PIN_GROUP(msiof2_txd_a),
+ SH_PFC_PIN_GROUP(msiof2_rxd_a),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_txd_b),
+ SH_PFC_PIN_GROUP(msiof2_rxd_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_ss1_c),
+ SH_PFC_PIN_GROUP(msiof2_ss2_c),
+ SH_PFC_PIN_GROUP(msiof2_txd_c),
+ SH_PFC_PIN_GROUP(msiof2_rxd_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_txd_d),
+ SH_PFC_PIN_GROUP(msiof2_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_ss2_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
+ SH_PFC_PIN_GROUP(msiof3_clk_c),
+ SH_PFC_PIN_GROUP(msiof3_sync_c),
+ SH_PFC_PIN_GROUP(msiof3_txd_c),
+ SH_PFC_PIN_GROUP(msiof3_rxd_c),
+ SH_PFC_PIN_GROUP(msiof3_clk_d),
+ SH_PFC_PIN_GROUP(msiof3_sync_d),
+ SH_PFC_PIN_GROUP(msiof3_ss1_d),
+ SH_PFC_PIN_GROUP(msiof3_txd_d),
+ SH_PFC_PIN_GROUP(msiof3_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_e),
+ SH_PFC_PIN_GROUP(msiof3_sync_e),
+ SH_PFC_PIN_GROUP(msiof3_ss1_e),
+ SH_PFC_PIN_GROUP(msiof3_ss2_e),
+ SH_PFC_PIN_GROUP(msiof3_txd_e),
+ SH_PFC_PIN_GROUP(msiof3_rxd_e),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -1994,6 +3323,32 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_clk_b),
SH_PFC_PIN_GROUP(scif_clk_a),
SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb30),
@@ -2013,6 +3368,33 @@ static const char * const avb_groups[] = {
"avb_avtp_capture_b",
};
+static const char * const du_groups[] = {
+ "du_rgb666",
+ "du_rgb888",
+ "du_clk_out_0",
+ "du_clk_out_1",
+ "du_sync",
+ "du_oddf",
+ "du_cde",
+ "du_disp",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_a",
+ "i2c1_b",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_a",
+ "i2c2_b",
+};
+
+static const char * const i2c6_groups[] = {
+ "i2c6_a",
+ "i2c6_b",
+ "i2c6_c",
+};
+
static const char * const intc_ex_groups[] = {
"intc_ex_irq0",
"intc_ex_irq1",
@@ -2022,6 +3404,151 @@ static const char * const intc_ex_groups[] = {
"intc_ex_irq5",
};
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk_a",
+ "msiof1_sync_a",
+ "msiof1_ss1_a",
+ "msiof1_ss2_a",
+ "msiof1_txd_a",
+ "msiof1_rxd_a",
+ "msiof1_clk_b",
+ "msiof1_sync_b",
+ "msiof1_ss1_b",
+ "msiof1_ss2_b",
+ "msiof1_txd_b",
+ "msiof1_rxd_b",
+ "msiof1_clk_c",
+ "msiof1_sync_c",
+ "msiof1_ss1_c",
+ "msiof1_ss2_c",
+ "msiof1_txd_c",
+ "msiof1_rxd_c",
+ "msiof1_clk_d",
+ "msiof1_sync_d",
+ "msiof1_ss1_d",
+ "msiof1_ss2_d",
+ "msiof1_txd_d",
+ "msiof1_rxd_d",
+ "msiof1_clk_e",
+ "msiof1_sync_e",
+ "msiof1_ss1_e",
+ "msiof1_ss2_e",
+ "msiof1_txd_e",
+ "msiof1_rxd_e",
+ "msiof1_clk_f",
+ "msiof1_sync_f",
+ "msiof1_ss1_f",
+ "msiof1_ss2_f",
+ "msiof1_txd_f",
+ "msiof1_rxd_f",
+ "msiof1_clk_g",
+ "msiof1_sync_g",
+ "msiof1_ss1_g",
+ "msiof1_ss2_g",
+ "msiof1_txd_g",
+ "msiof1_rxd_g",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk_a",
+ "msiof2_sync_a",
+ "msiof2_ss1_a",
+ "msiof2_ss2_a",
+ "msiof2_txd_a",
+ "msiof2_rxd_a",
+ "msiof2_clk_b",
+ "msiof2_sync_b",
+ "msiof2_ss1_b",
+ "msiof2_ss2_b",
+ "msiof2_txd_b",
+ "msiof2_rxd_b",
+ "msiof2_clk_c",
+ "msiof2_sync_c",
+ "msiof2_ss1_c",
+ "msiof2_ss2_c",
+ "msiof2_txd_c",
+ "msiof2_rxd_c",
+ "msiof2_clk_d",
+ "msiof2_sync_d",
+ "msiof2_ss1_d",
+ "msiof2_ss2_d",
+ "msiof2_txd_d",
+ "msiof2_rxd_d",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk_a",
+ "msiof3_sync_a",
+ "msiof3_ss1_a",
+ "msiof3_ss2_a",
+ "msiof3_txd_a",
+ "msiof3_rxd_a",
+ "msiof3_clk_b",
+ "msiof3_sync_b",
+ "msiof3_ss1_b",
+ "msiof3_ss2_b",
+ "msiof3_txd_b",
+ "msiof3_rxd_b",
+ "msiof3_clk_c",
+ "msiof3_sync_c",
+ "msiof3_txd_c",
+ "msiof3_rxd_c",
+ "msiof3_clk_d",
+ "msiof3_sync_d",
+ "msiof3_ss1_d",
+ "msiof3_txd_d",
+ "msiof3_rxd_d",
+ "msiof3_clk_e",
+ "msiof3_sync_e",
+ "msiof3_ss1_e",
+ "msiof3_ss2_e",
+ "msiof3_txd_e",
+ "msiof3_rxd_e",
+};
+
+static const char * const pwm0_groups[] = {
+ "pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4_a",
+ "pwm4_b",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5_a",
+ "pwm5_b",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6_a",
+ "pwm6_b",
+};
+
static const char * const scif0_groups[] = {
"scif0_data",
"scif0_clk",
@@ -2071,6 +3598,44 @@ static const char * const scif_clk_groups[] = {
"scif_clk_b",
};
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+ "sdhi1_cd",
+ "sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_data8",
+ "sdhi2_ctrl",
+ "sdhi2_cd_a",
+ "sdhi2_wp_a",
+ "sdhi2_cd_b",
+ "sdhi2_wp_b",
+ "sdhi2_ds",
+};
+
+static const char * const sdhi3_groups[] = {
+ "sdhi3_data1",
+ "sdhi3_data4",
+ "sdhi3_data8",
+ "sdhi3_ctrl",
+ "sdhi3_cd",
+ "sdhi3_wp",
+ "sdhi3_ds",
+};
+
static const char * const usb0_groups[] = {
"usb0",
};
@@ -2085,7 +3650,22 @@ static const char * const usb30_groups[] = {
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c6),
SH_PFC_FUNCTION(intc_ex),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -2093,6 +3673,10 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb30),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
index b1bb7263532b3..b02caf3167118 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
@@ -21,13 +21,15 @@
#include "core.h"
#include "sh_pfc.h"
+#define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH
+
#define CPU_ALL_PORT(fn, sfx) \
- PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_28(1, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_6(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_15(5, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH)
+ PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_6(4, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS)
/*
* F_() : just information
* FM() : macro for FN_xxx / xxx_MARK
@@ -2382,18 +2384,31 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
+enum ioctrl_regs {
+ IOCTRL30,
+ IOCTRL31,
+ IOCTRL32,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [IOCTRL30] = { 0xe6060380 },
+ [IOCTRL31] = { 0xe6060384 },
+ [IOCTRL32] = { 0xe6060388 },
+ { /* sentinel */ },
+};
+
static int r8a77970_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
u32 *pocctrl)
{
int bit = pin & 0x1f;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL30].reg;
if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 21))
return bit;
if (pin >= RCAR_GP_PIN(2, 0) && pin <= RCAR_GP_PIN(2, 9))
return bit + 22;
- *pocctrl += 4;
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL31].reg;
if (pin >= RCAR_GP_PIN(2, 10) && pin <= RCAR_GP_PIN(2, 16))
return bit - 10;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 16))
@@ -2421,6 +2436,7 @@ const struct sh_pfc_soc_info r8a77970_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
index 84c8f1c2f1d16..3f6967331f646 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
@@ -19,10 +19,10 @@
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, sfx) \
- PORT_GP_22(0, fn, sfx), \
+ PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
PORT_GP_28(1, fn, sfx), \
- PORT_GP_30(2, fn, sfx), \
- PORT_GP_17(3, fn, sfx), \
+ PORT_GP_CFG_30(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
PORT_GP_25(4, fn, sfx), \
PORT_GP_15(5, fn, sfx)
@@ -2779,8 +2779,53 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
+enum ioctrl_regs {
+ IOCTRL30,
+ IOCTRL31,
+ IOCTRL32,
+ IOCTRL33,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [IOCTRL30] = { 0xe6060380, },
+ [IOCTRL31] = { 0xe6060384, },
+ [IOCTRL32] = { 0xe6060388, },
+ [IOCTRL33] = { 0xe606038c, },
+ { /* sentinel */ },
+};
+
+static int r8a77980_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
+ u32 *pocctrl)
+{
+ int bit = pin & 0x1f;
+
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL30].reg;
+ if (pin >= RCAR_GP_PIN(0, 0) && pin <= RCAR_GP_PIN(0, 21))
+ return bit;
+ else if (pin >= RCAR_GP_PIN(2, 0) && pin <= RCAR_GP_PIN(2, 9))
+ return bit + 22;
+
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL31].reg;
+ if (pin >= RCAR_GP_PIN(2, 10) && pin <= RCAR_GP_PIN(2, 16))
+ return bit - 10;
+ if ((pin >= RCAR_GP_PIN(2, 17) && pin <= RCAR_GP_PIN(2, 24)) ||
+ (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 16)))
+ return bit + 7;
+
+ *pocctrl = pinmux_ioctrl_regs[IOCTRL32].reg;
+ if (pin >= RCAR_GP_PIN(2, 25) && pin <= RCAR_GP_PIN(2, 29))
+ return pin - 25;
+
+ return -EINVAL;
+}
+
+static const struct sh_pfc_soc_operations pinmux_ops = {
+ .pin_to_pocctrl = r8a77980_pin_to_pocctrl,
+};
+
const struct sh_pfc_soc_info r8a77980_pinmux_info = {
.name = "r8a77980_pfc",
+ .ops = &pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
@@ -2793,6 +2838,7 @@ const struct sh_pfc_soc_info r8a77980_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
new file mode 100644
index 0000000000000..a68fd658aada7
--- /dev/null
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -0,0 +1,2695 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R8A77990 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ *
+ * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+ *
+ * R8A7796 processor support - PFC hardware block.
+ *
+ * Copyright (C) 2016-2017 Renesas Electronics Corp.
+ */
+
+#include <linux/kernel.h>
+
+#include "core.h"
+#include "sh_pfc.h"
+
+#define CFG_FLAGS (SH_PFC_PIN_CFG_PULL_UP | \
+ SH_PFC_PIN_CFG_PULL_DOWN)
+
+#define CPU_ALL_PORT(fn, sfx) \
+ PORT_GP_CFG_18(0, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_23(1, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_26(2, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_16(3, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_11(4, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_20(5, fn, sfx, CFG_FLAGS), \
+ PORT_GP_CFG_18(6, fn, sfx, CFG_FLAGS)
+/*
+ * F_() : just information
+ * FM() : macro for FN_xxx / xxx_MARK
+ */
+
+/* GPSR0 */
+#define GPSR0_17 F_(SDA4, IP7_27_24)
+#define GPSR0_16 F_(SCL4, IP7_23_20)
+#define GPSR0_15 F_(D15, IP7_19_16)
+#define GPSR0_14 F_(D14, IP7_15_12)
+#define GPSR0_13 F_(D13, IP7_11_8)
+#define GPSR0_12 F_(D12, IP7_7_4)
+#define GPSR0_11 F_(D11, IP7_3_0)
+#define GPSR0_10 F_(D10, IP6_31_28)
+#define GPSR0_9 F_(D9, IP6_27_24)
+#define GPSR0_8 F_(D8, IP6_23_20)
+#define GPSR0_7 F_(D7, IP6_19_16)
+#define GPSR0_6 F_(D6, IP6_15_12)
+#define GPSR0_5 F_(D5, IP6_11_8)
+#define GPSR0_4 F_(D4, IP6_7_4)
+#define GPSR0_3 F_(D3, IP6_3_0)
+#define GPSR0_2 F_(D2, IP5_31_28)
+#define GPSR0_1 F_(D1, IP5_27_24)
+#define GPSR0_0 F_(D0, IP5_23_20)
+
+/* GPSR1 */
+#define GPSR1_22 F_(WE0_N, IP5_19_16)
+#define GPSR1_21 F_(CS0_N, IP5_15_12)
+#define GPSR1_20 FM(CLKOUT)
+#define GPSR1_19 F_(A19, IP5_11_8)
+#define GPSR1_18 F_(A18, IP5_7_4)
+#define GPSR1_17 F_(A17, IP5_3_0)
+#define GPSR1_16 F_(A16, IP4_31_28)
+#define GPSR1_15 F_(A15, IP4_27_24)
+#define GPSR1_14 F_(A14, IP4_23_20)
+#define GPSR1_13 F_(A13, IP4_19_16)
+#define GPSR1_12 F_(A12, IP4_15_12)
+#define GPSR1_11 F_(A11, IP4_11_8)
+#define GPSR1_10 F_(A10, IP4_7_4)
+#define GPSR1_9 F_(A9, IP4_3_0)
+#define GPSR1_8 F_(A8, IP3_31_28)
+#define GPSR1_7 F_(A7, IP3_27_24)
+#define GPSR1_6 F_(A6, IP3_23_20)
+#define GPSR1_5 F_(A5, IP3_19_16)
+#define GPSR1_4 F_(A4, IP3_15_12)
+#define GPSR1_3 F_(A3, IP3_11_8)
+#define GPSR1_2 F_(A2, IP3_7_4)
+#define GPSR1_1 F_(A1, IP3_3_0)
+#define GPSR1_0 F_(A0, IP2_31_28)
+
+/* GPSR2 */
+#define GPSR2_25 F_(EX_WAIT0, IP2_27_24)
+#define GPSR2_24 F_(RD_WR_N, IP2_23_20)
+#define GPSR2_23 F_(RD_N, IP2_19_16)
+#define GPSR2_22 F_(BS_N, IP2_15_12)
+#define GPSR2_21 FM(AVB_PHY_INT)
+#define GPSR2_20 F_(AVB_TXCREFCLK, IP2_3_0)
+#define GPSR2_19 FM(AVB_RD3)
+#define GPSR2_18 F_(AVB_RD2, IP1_31_28)
+#define GPSR2_17 F_(AVB_RD1, IP1_27_24)
+#define GPSR2_16 F_(AVB_RD0, IP1_23_20)
+#define GPSR2_15 FM(AVB_RXC)
+#define GPSR2_14 FM(AVB_RX_CTL)
+#define GPSR2_13 F_(RPC_RESET_N, IP1_19_16)
+#define GPSR2_12 F_(RPC_INT_N, IP1_15_12)
+#define GPSR2_11 F_(QSPI1_SSL, IP1_11_8)
+#define GPSR2_10 F_(QSPI1_IO3, IP1_7_4)
+#define GPSR2_9 F_(QSPI1_IO2, IP1_3_0)
+#define GPSR2_8 F_(QSPI1_MISO_IO1, IP0_31_28)
+#define GPSR2_7 F_(QSPI1_MOSI_IO0, IP0_27_24)
+#define GPSR2_6 F_(QSPI1_SPCLK, IP0_23_20)
+#define GPSR2_5 FM(QSPI0_SSL)
+#define GPSR2_4 F_(QSPI0_IO3, IP0_19_16)
+#define GPSR2_3 F_(QSPI0_IO2, IP0_15_12)
+#define GPSR2_2 F_(QSPI0_MISO_IO1, IP0_11_8)
+#define GPSR2_1 F_(QSPI0_MOSI_IO0, IP0_7_4)
+#define GPSR2_0 F_(QSPI0_SPCLK, IP0_3_0)
+
+/* GPSR3 */
+#define GPSR3_15 F_(SD1_WP, IP11_7_4)
+#define GPSR3_14 F_(SD1_CD, IP11_3_0)
+#define GPSR3_13 F_(SD0_WP, IP10_31_28)
+#define GPSR3_12 F_(SD0_CD, IP10_27_24)
+#define GPSR3_11 F_(SD1_DAT3, IP9_11_8)
+#define GPSR3_10 F_(SD1_DAT2, IP9_7_4)
+#define GPSR3_9 F_(SD1_DAT1, IP9_3_0)
+#define GPSR3_8 F_(SD1_DAT0, IP8_31_28)
+#define GPSR3_7 F_(SD1_CMD, IP8_27_24)
+#define GPSR3_6 F_(SD1_CLK, IP8_23_20)
+#define GPSR3_5 F_(SD0_DAT3, IP8_19_16)
+#define GPSR3_4 F_(SD0_DAT2, IP8_15_12)
+#define GPSR3_3 F_(SD0_DAT1, IP8_11_8)
+#define GPSR3_2 F_(SD0_DAT0, IP8_7_4)
+#define GPSR3_1 F_(SD0_CMD, IP8_3_0)
+#define GPSR3_0 F_(SD0_CLK, IP7_31_28)
+
+/* GPSR4 */
+#define GPSR4_10 F_(SD3_DS, IP10_23_20)
+#define GPSR4_9 F_(SD3_DAT7, IP10_19_16)
+#define GPSR4_8 F_(SD3_DAT6, IP10_15_12)
+#define GPSR4_7 F_(SD3_DAT5, IP10_11_8)
+#define GPSR4_6 F_(SD3_DAT4, IP10_7_4)
+#define GPSR4_5 F_(SD3_DAT3, IP10_3_0)
+#define GPSR4_4 F_(SD3_DAT2, IP9_31_28)
+#define GPSR4_3 F_(SD3_DAT1, IP9_27_24)
+#define GPSR4_2 F_(SD3_DAT0, IP9_23_20)
+#define GPSR4_1 F_(SD3_CMD, IP9_19_16)
+#define GPSR4_0 F_(SD3_CLK, IP9_15_12)
+
+/* GPSR5 */
+#define GPSR5_19 F_(MLB_DAT, IP13_23_20)
+#define GPSR5_18 F_(MLB_SIG, IP13_19_16)
+#define GPSR5_17 F_(MLB_CLK, IP13_15_12)
+#define GPSR5_16 F_(SSI_SDATA9, IP13_11_8)
+#define GPSR5_15 F_(MSIOF0_SS2, IP13_7_4)
+#define GPSR5_14 F_(MSIOF0_SS1, IP13_3_0)
+#define GPSR5_13 F_(MSIOF0_SYNC, IP12_31_28)
+#define GPSR5_12 F_(MSIOF0_TXD, IP12_27_24)
+#define GPSR5_11 F_(MSIOF0_RXD, IP12_23_20)
+#define GPSR5_10 F_(MSIOF0_SCK, IP12_19_16)
+#define GPSR5_9 F_(RX2_A, IP12_15_12)
+#define GPSR5_8 F_(TX2_A, IP12_11_8)
+#define GPSR5_7 F_(SCK2_A, IP12_7_4)
+#define GPSR5_6 F_(TX1, IP12_3_0)
+#define GPSR5_5 F_(RX1, IP11_31_28)
+#define GPSR5_4 F_(RTS0_N_TANS_A, IP11_23_20)
+#define GPSR5_3 F_(CTS0_N_A, IP11_19_16)
+#define GPSR5_2 F_(TX0_A, IP11_15_12)
+#define GPSR5_1 F_(RX0_A, IP11_11_8)
+#define GPSR5_0 F_(SCK0_A, IP11_27_24)
+
+/* GPSR6 */
+#define GPSR6_17 F_(USB30_PWEN, IP15_27_24)
+#define GPSR6_16 F_(SSI_SDATA6, IP15_19_16)
+#define GPSR6_15 F_(SSI_WS6, IP15_15_12)
+#define GPSR6_14 F_(SSI_SCK6, IP15_11_8)
+#define GPSR6_13 F_(SSI_SDATA5, IP15_7_4)
+#define GPSR6_12 F_(SSI_WS5, IP15_3_0)
+#define GPSR6_11 F_(SSI_SCK5, IP14_31_28)
+#define GPSR6_10 F_(SSI_SDATA4, IP14_27_24)
+#define GPSR6_9 F_(USB30_OVC, IP15_31_28)
+#define GPSR6_8 F_(AUDIO_CLKA, IP15_23_20)
+#define GPSR6_7 F_(SSI_SDATA3, IP14_23_20)
+#define GPSR6_6 F_(SSI_WS349, IP14_19_16)
+#define GPSR6_5 F_(SSI_SCK349, IP14_15_12)
+#define GPSR6_4 F_(SSI_SDATA2, IP14_11_8)
+#define GPSR6_3 F_(SSI_SDATA1, IP14_7_4)
+#define GPSR6_2 F_(SSI_SDATA0, IP14_3_0)
+#define GPSR6_1 F_(SSI_WS01239, IP13_31_28)
+#define GPSR6_0 F_(SSI_SCK01239, IP13_27_24)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
+#define IP0_3_0 FM(QSPI0_SPCLK) FM(HSCK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_7_4 FM(QSPI0_MOSI_IO0) FM(HCTS4_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_11_8 FM(QSPI0_MISO_IO1) FM(HRTS4_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_15_12 FM(QSPI0_IO2) FM(HTX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_19_16 FM(QSPI0_IO3) FM(HRX4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_23_20 FM(QSPI1_SPCLK) FM(RIF2_CLK_A) FM(HSCK4_B) FM(VI4_DATA0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_27_24 FM(QSPI1_MOSI_IO0) FM(RIF2_SYNC_A) FM(HTX4_B) FM(VI4_DATA1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_31_28 FM(QSPI1_MISO_IO1) FM(RIF2_D0_A) FM(HRX4_B) FM(VI4_DATA2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_3_0 FM(QSPI1_IO2) FM(RIF2_D1_A) FM(HTX3_C) FM(VI4_DATA3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_7_4 FM(QSPI1_IO3) FM(RIF3_CLK_A) FM(HRX3_C) FM(VI4_DATA4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_11_8 FM(QSPI1_SSL) FM(RIF3_SYNC_A) FM(HSCK3_C) FM(VI4_DATA5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_15_12 FM(RPC_INT_N) FM(RIF3_D0_A) FM(HCTS3_N_C) FM(VI4_DATA6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_19_16 FM(RPC_RESET_N) FM(RIF3_D1_A) FM(HRTS3_N_C) FM(VI4_DATA7_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_23_20 FM(AVB_RD0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_27_24 FM(AVB_RD1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP1_31_28 FM(AVB_RD2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_3_0 FM(AVB_TXCREFCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_7_4 FM(AVB_MDIO) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_11_8 FM(AVB_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_15_12 FM(BS_N) FM(PWM0_A) FM(AVB_MAGIC) FM(VI4_CLK) F_(0, 0) FM(TX3_C) F_(0, 0) FM(VI5_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_19_16 FM(RD_N) FM(PWM1_A) FM(AVB_LINK) FM(VI4_FIELD) F_(0, 0) FM(RX3_C) FM(FSCLKST2_N_A) FM(VI5_DATA0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH_A) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE_A) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_31_28 FM(A0) FM(IRQ0) FM(PWM2_A) FM(MSIOF3_SS1_B) FM(VI5_CLK_A) FM(DU_CDE) FM(HRX3_D) FM(IERX) FM(QSTB_QHE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_3_0 FM(A1) FM(IRQ1) FM(PWM3_A) FM(DU_DOTCLKIN1) FM(VI5_DATA0_A) FM(DU_DISP_CDE) FM(SDA6_B) FM(IETX) FM(QCPV_QDE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_7_4 FM(A2) FM(IRQ2) FM(AVB_AVTP_PPS) FM(VI4_CLKENB) FM(VI5_DATA1_A) FM(DU_DISP) FM(SCL6_B) F_(0, 0) FM(QSTVB_QVE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_11_8 FM(A3) FM(CTS4_N_A) FM(PWM4_A) FM(VI4_DATA12) F_(0, 0) FM(DU_DOTCLKOUT0) FM(HTX3_D) FM(IECLK) FM(LCDOUT12) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_15_12 FM(A4) FM(RTS4_N_TANS_A) FM(MSIOF3_SYNC_B) FM(VI4_DATA8) FM(PWM2_B) FM(DU_DG4) FM(RIF2_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_19_16 FM(A5) FM(SCK4_A) FM(MSIOF3_SCK_B) FM(VI4_DATA9) FM(PWM3_B) F_(0, 0) FM(RIF2_SYNC_B) F_(0, 0) FM(QPOLA) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_23_20 FM(A6) FM(RX4_A) FM(MSIOF3_RXD_B) FM(VI4_DATA10) F_(0, 0) F_(0, 0) FM(RIF2_D0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_27_24 FM(A7) FM(TX4_A) FM(MSIOF3_TXD_B) FM(VI4_DATA11) F_(0, 0) F_(0, 0) FM(RIF2_D1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP3_31_28 FM(A8) FM(SDA6_A) FM(RX3_B) FM(HRX4_C) FM(VI5_HSYNC_N_A) FM(DU_HSYNC) FM(VI4_DATA0_B) F_(0, 0) FM(QSTH_QHS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
+#define IP4_3_0 FM(A9) FM(TX5_A) FM(IRQ3) FM(VI4_DATA16) FM(VI5_VSYNC_N_A) FM(DU_DG7) F_(0, 0) F_(0, 0) FM(LCDOUT15) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_7_4 FM(A10) FM(IRQ4) FM(MSIOF2_SYNC_B) FM(VI4_DATA13) FM(VI5_FIELD_A) FM(DU_DG5) FM(FSCLKST2_N_B) F_(0, 0) FM(LCDOUT13) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_11_8 FM(A11) FM(SCL6_A) FM(TX3_B) FM(HTX4_C) F_(0, 0) FM(DU_VSYNC) FM(VI4_DATA1_B) F_(0, 0) FM(QSTVA_QVS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_15_12 FM(A12) FM(RX5_A) FM(MSIOF2_SS2_B) FM(VI4_DATA17) FM(VI5_DATA3_A) FM(DU_DG6) F_(0, 0) F_(0, 0) FM(LCDOUT14) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_19_16 FM(A13) FM(SCK5_A) FM(MSIOF2_SCK_B) FM(VI4_DATA14) FM(HRX4_D) FM(DU_DB2) F_(0, 0) F_(0, 0) FM(LCDOUT2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_23_20 FM(A14) FM(MSIOF1_SS1) FM(MSIOF2_RXD_B) FM(VI4_DATA15) FM(HTX4_D) FM(DU_DB3) F_(0, 0) F_(0, 0) FM(LCDOUT3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_27_24 FM(A15) FM(MSIOF1_SS2) FM(MSIOF2_TXD_B) FM(VI4_DATA18) FM(VI5_DATA4_A) FM(DU_DB4) F_(0, 0) F_(0, 0) FM(LCDOUT4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP4_31_28 FM(A16) FM(MSIOF1_SYNC) FM(MSIOF2_SS1_B) FM(VI4_DATA19) FM(VI5_DATA5_A) FM(DU_DB5) F_(0, 0) F_(0, 0) FM(LCDOUT5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_3_0 FM(A17) FM(MSIOF1_RXD) F_(0, 0) FM(VI4_DATA20) FM(VI5_DATA6_A) FM(DU_DB6) F_(0, 0) F_(0, 0) FM(LCDOUT6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_7_4 FM(A18) FM(MSIOF1_TXD) F_(0, 0) FM(VI4_DATA21) FM(VI5_DATA7_A) FM(DU_DB0) F_(0, 0) FM(HRX4_E) FM(LCDOUT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_11_8 FM(A19) FM(MSIOF1_SCK) F_(0, 0) FM(VI4_DATA22) FM(VI5_DATA2_A) FM(DU_DB1) F_(0, 0) FM(HTX4_E) FM(LCDOUT1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_15_12 FM(CS0_N) FM(SCL5) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR0) FM(VI4_DATA2_B) F_(0, 0) FM(LCDOUT16) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_19_16 FM(WE0_N) FM(SDA5) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR1) FM(VI4_DATA3_B) F_(0, 0) FM(LCDOUT17) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_23_20 FM(D0) FM(MSIOF3_SCK_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DR2) FM(CTS4_N_C) F_(0, 0) FM(LCDOUT18) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_27_24 FM(D1) FM(MSIOF3_SYNC_A) FM(SCK3_A) FM(VI4_DATA23) FM(VI5_CLKENB_A) FM(DU_DB7) FM(RTS4_N_TANS_C) F_(0, 0) FM(LCDOUT7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP5_31_28 FM(D2) FM(MSIOF3_RXD_A) FM(RX5_C) F_(0, 0) FM(VI5_DATA14_A) FM(DU_DR3) FM(RX4_C) F_(0, 0) FM(LCDOUT19) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_3_0 FM(D3) FM(MSIOF3_TXD_A) FM(TX5_C) F_(0, 0) FM(VI5_DATA15_A) FM(DU_DR4) FM(TX4_C) F_(0, 0) FM(LCDOUT20) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_7_4 FM(D4) FM(CANFD1_TX) FM(HSCK3_B) FM(CAN1_TX) FM(RTS3_N_TANS_A) FM(MSIOF3_SS2_A) F_(0, 0) FM(VI5_DATA1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_11_8 FM(D5) FM(RX3_A) FM(HRX3_B) F_(0, 0) F_(0, 0) FM(DU_DR5) FM(VI4_DATA4_B) F_(0, 0) FM(LCDOUT21) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_15_12 FM(D6) FM(TX3_A) FM(HTX3_B) F_(0, 0) F_(0, 0) FM(DU_DR6) FM(VI4_DATA5_B) F_(0, 0) FM(LCDOUT22) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_19_16 FM(D7) FM(CANFD1_RX) FM(IRQ5) FM(CAN1_RX) FM(CTS3_N_A) F_(0, 0) F_(0, 0) FM(VI5_DATA2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_23_20 FM(D8) FM(MSIOF2_SCK_A) FM(SCK4_B) F_(0, 0) FM(VI5_DATA12_A) FM(DU_DR7) FM(RIF3_CLK_B) FM(HCTS3_N_E) FM(LCDOUT23) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_27_24 FM(D9) FM(MSIOF2_SYNC_A) F_(0, 0) F_(0, 0) FM(VI5_DATA10_A) FM(DU_DG0) FM(RIF3_SYNC_B) FM(HRX3_E) FM(LCDOUT8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP6_31_28 FM(D10) FM(MSIOF2_RXD_A) F_(0, 0) F_(0, 0) FM(VI5_DATA13_A) FM(DU_DG1) FM(RIF3_D0_B) FM(HTX3_E) FM(LCDOUT9) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_3_0 FM(D11) FM(MSIOF2_TXD_A) F_(0, 0) F_(0, 0) FM(VI5_DATA11_A) FM(DU_DG2) FM(RIF3_D1_B) FM(HRTS3_N_E) FM(LCDOUT10) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_7_4 FM(D12) FM(CANFD0_TX) FM(TX4_B) FM(CAN0_TX) FM(VI5_DATA8_A) F_(0, 0) F_(0, 0) FM(VI5_DATA3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_11_8 FM(D13) FM(CANFD0_RX) FM(RX4_B) FM(CAN0_RX) FM(VI5_DATA9_A) FM(SCL7_B) F_(0, 0) FM(VI5_DATA4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_15_12 FM(D14) FM(CAN_CLK) FM(HRX3_A) FM(MSIOF2_SS2_A) F_(0, 0) FM(SDA7_B) F_(0, 0) FM(VI5_DATA5_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_19_16 FM(D15) FM(MSIOF2_SS1_A) FM(HTX3_A) FM(MSIOF3_SS1_A) F_(0, 0) FM(DU_DG3) F_(0, 0) F_(0, 0) FM(LCDOUT11) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_23_20 FM(SCL4) FM(CS1_N_A26) F_(0, 0) F_(0, 0) F_(0, 0) FM(DU_DOTCLKIN0) FM(VI4_DATA6_B) FM(VI5_DATA6_B) FM(QCLK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_27_24 FM(SDA4) FM(WE1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(VI4_DATA7_B) FM(VI5_DATA7_B) FM(QPOLB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP7_31_28 FM(SD0_CLK) FM(NFDATA8) FM(SCL1_C) FM(HSCK1_B) FM(SDA2_E) FM(FMCLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
+#define IP8_3_0 FM(SD0_CMD) FM(NFDATA9) F_(0, 0) FM(HRX1_B) F_(0, 0) FM(SPEEDIN_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_7_4 FM(SD0_DAT0) FM(NFDATA10) F_(0, 0) FM(HTX1_B) F_(0, 0) FM(REMOCON_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_11_8 FM(SD0_DAT1) FM(NFDATA11) FM(SDA2_C) FM(HCTS1_N_B) F_(0, 0) FM(FMIN_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_15_12 FM(SD0_DAT2) FM(NFDATA12) FM(SCL2_C) FM(HRTS1_N_B) F_(0, 0) FM(BPFCLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_19_16 FM(SD0_DAT3) FM(NFDATA13) FM(SDA1_C) FM(SCL2_E) FM(SPEEDIN_C) FM(REMOCON_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_23_20 FM(SD1_CLK) FM(NFDATA14_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_27_24 FM(SD1_CMD) FM(NFDATA15_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP8_31_28 FM(SD1_DAT0) FM(NFWP_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_3_0 FM(SD1_DAT1) FM(NFCE_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_7_4 FM(SD1_DAT2) FM(NFALE_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_11_8 FM(SD1_DAT3) FM(NFRB_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_15_12 FM(SD3_CLK) FM(NFWE_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_19_16 FM(SD3_CMD) FM(NFRE_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_23_20 FM(SD3_DAT0) FM(NFDATA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_27_24 FM(SD3_DAT1) FM(NFDATA1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP9_31_28 FM(SD3_DAT2) FM(NFDATA2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_3_0 FM(SD3_DAT3) FM(NFDATA3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_7_4 FM(SD3_DAT4) FM(NFDATA4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_11_8 FM(SD3_DAT5) FM(NFDATA5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_15_12 FM(SD3_DAT6) FM(NFDATA6) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_19_16 FM(SD3_DAT7) FM(NFDATA7) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_23_20 FM(SD3_DS) FM(NFCLE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_27_24 FM(SD0_CD) FM(NFALE_A) FM(SD3_CD) FM(RIF0_CLK_B) FM(SCL2_B) FM(TCLK1_A) FM(SSI_SCK2_B) FM(TS_SCK0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP10_31_28 FM(SD0_WP) FM(NFRB_N_A) FM(SD3_WP) FM(RIF0_D0_B) FM(SDA2_B) FM(TCLK2_A) FM(SSI_WS2_B) FM(TS_SDAT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_3_0 FM(SD1_CD) FM(NFCE_N_A) FM(SSI_SCK1) FM(RIF0_D1_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDEN0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_7_4 FM(SD1_WP) FM(NFWP_N_A) FM(SSI_WS1) FM(RIF0_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SPSYNC0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_11_8 FM(RX0_A) FM(HRX1_A) FM(SSI_SCK2_A) FM(RIF1_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_15_12 FM(TX0_A) FM(HTX1_A) FM(SSI_WS2_A) FM(RIF1_D0) F_(0, 0) F_(0, 0) F_(0, 0) FM(TS_SDAT1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_19_16 FM(CTS0_N_A) FM(NFDATA14_A) FM(AUDIO_CLKOUT_A) FM(RIF1_D1) FM(SCIF_CLK_A) FM(FMCLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_23_20 FM(RTS0_N_TANS_A) FM(NFDATA15_A) FM(AUDIO_CLKOUT1_A) FM(RIF1_CLK) FM(SCL2_A) FM(FMIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_27_24 FM(SCK0_A) FM(HSCK1_A) FM(USB3HS0_ID) FM(RTS1_N_TANS) FM(SDA2_A) FM(FMCLK_C) F_(0, 0) F_(0, 0) FM(USB1_ID) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP11_31_28 FM(RX1) FM(HRX2_B) FM(SSI_SCK9_B) FM(AUDIO_CLKOUT1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
+/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 - F */
+#define IP12_3_0 FM(TX1) FM(HTX2_B) FM(SSI_WS9_B) FM(AUDIO_CLKOUT3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_7_4 FM(SCK2_A) FM(HSCK0_A) FM(AUDIO_CLKB_A) FM(CTS1_N) FM(RIF0_CLK_A) FM(REMOCON_A) FM(SCIF_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_11_8 FM(TX2_A) FM(HRX0_A) FM(AUDIO_CLKOUT2_A) F_(0, 0) FM(SCL1_A) F_(0, 0) FM(FSO_CFE_0_N_A) FM(TS_SDEN1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_15_12 FM(RX2_A) FM(HTX0_A) FM(AUDIO_CLKOUT3_A) F_(0, 0) FM(SDA1_A) F_(0, 0) FM(FSO_CFE_1_N_A) FM(TS_SPSYNC1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_19_16 FM(MSIOF0_SCK) F_(0, 0) FM(SSI_SCK78) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_23_20 FM(MSIOF0_RXD) F_(0, 0) FM(SSI_WS78) F_(0, 0) F_(0, 0) FM(TX2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_27_24 FM(MSIOF0_TXD) F_(0, 0) FM(SSI_SDATA7) F_(0, 0) F_(0, 0) FM(RX2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP12_31_28 FM(MSIOF0_SYNC) FM(AUDIO_CLKOUT_B) FM(SSI_SDATA8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_3_0 FM(MSIOF0_SS1) FM(HRX2_A) FM(SSI_SCK4) FM(HCTS0_N_A) FM(BPFCLK_C) FM(SPEEDIN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_7_4 FM(MSIOF0_SS2) FM(HTX2_A) FM(SSI_WS4) FM(HRTS0_N_A) FM(FMIN_C) FM(BPFCLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_11_8 FM(SSI_SDATA9) F_(0, 0) FM(AUDIO_CLKC_A) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_15_12 FM(MLB_CLK) FM(RX0_B) F_(0, 0) FM(RIF0_D0_A) FM(SCL1_B) FM(TCLK1_B) F_(0, 0) F_(0, 0) FM(SIM0_RST_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_19_16 FM(MLB_SIG) FM(SCK0_B) F_(0, 0) FM(RIF0_D1_A) FM(SDA1_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) FM(SIM0_D_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_23_20 FM(MLB_DAT) FM(TX0_B) F_(0, 0) FM(RIF0_SYNC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(SIM0_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_27_24 FM(SSI_SCK01239) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_31_28 FM(SSI_WS01239) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_3_0 FM(SSI_SDATA0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_7_4 FM(SSI_SDATA1) FM(AUDIO_CLKC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_11_8 FM(SSI_SDATA2) FM(AUDIO_CLKOUT2_B) FM(SSI_SCK9_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_15_12 FM(SSI_SCK349) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_19_16 FM(SSI_WS349) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM3_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_23_20 FM(SSI_SDATA3) FM(AUDIO_CLKOUT1_C) FM(AUDIO_CLKB_B) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_27_24 FM(SSI_SDATA4) F_(0, 0) FM(SSI_WS9_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(PWM5_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_31_28 FM(SSI_SCK5) FM(HRX0_B) F_(0, 0) FM(USB0_PWEN_B) FM(SCL2_D) F_(0, 0) FM(PWM6_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_3_0 FM(SSI_WS5) FM(HTX0_B) F_(0, 0) FM(USB0_OVC_B) FM(SDA2_D) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_7_4 FM(SSI_SDATA5) FM(HSCK0_B) FM(AUDIO_CLKB_C) FM(TPU0TO0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_11_8 FM(SSI_SCK6) FM(HSCK2_A) FM(AUDIO_CLKC_C) FM(TPU0TO1) F_(0, 0) F_(0, 0) FM(FSO_CFE_0_N_B) F_(0, 0) FM(SIM0_RST_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_15_12 FM(SSI_WS6) FM(HCTS2_N_A) FM(AUDIO_CLKOUT2_C) FM(TPU0TO2) FM(SDA1_D) F_(0, 0) FM(FSO_CFE_1_N_B) F_(0, 0) FM(SIM0_D_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_19_16 FM(SSI_SDATA6) FM(HRTS2_N_A) FM(AUDIO_CLKOUT3_C) FM(TPU0TO3) FM(SCL1_D) F_(0, 0) FM(FSO_TOE_N_B) F_(0, 0) FM(SIM0_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_23_20 FM(AUDIO_CLKA) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_27_24 FM(USB30_PWEN) FM(USB0_PWEN_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_31_28 FM(USB30_OVC) FM(USB0_OVC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(FSO_TOE_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+
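+/*
+ * Orientation note (added for readability, describing only what the
+ * table below already encodes): PINMUX_GPSR collects one token per
+ * GPIO-capable pin. Each column corresponds to one GPIO bank
+ * (GPSR0 .. GPSR6); the ragged top of the table simply reflects the
+ * banks having different numbers of pins.
+ */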
+#define PINMUX_GPSR \
+\
+ \
+ \
+ \
+ \
+ \
+ \
+ GPSR2_25 \
+ GPSR2_24 \
+ GPSR2_23 \
+ GPSR1_22 GPSR2_22 \
+ GPSR1_21 GPSR2_21 \
+ GPSR1_20 GPSR2_20 \
+ GPSR1_19 GPSR2_19 GPSR5_19 \
+ GPSR1_18 GPSR2_18 GPSR5_18 \
+GPSR0_17 GPSR1_17 GPSR2_17 GPSR5_17 GPSR6_17 \
+GPSR0_16 GPSR1_16 GPSR2_16 GPSR5_16 GPSR6_16 \
+GPSR0_15 GPSR1_15 GPSR2_15 GPSR3_15 GPSR5_15 GPSR6_15 \
+GPSR0_14 GPSR1_14 GPSR2_14 GPSR3_14 GPSR5_14 GPSR6_14 \
+GPSR0_13 GPSR1_13 GPSR2_13 GPSR3_13 GPSR5_13 GPSR6_13 \
+GPSR0_12 GPSR1_12 GPSR2_12 GPSR3_12 GPSR5_12 GPSR6_12 \
+GPSR0_11 GPSR1_11 GPSR2_11 GPSR3_11 GPSR5_11 GPSR6_11 \
+GPSR0_10 GPSR1_10 GPSR2_10 GPSR3_10 GPSR4_10 GPSR5_10 GPSR6_10 \
+GPSR0_9 GPSR1_9 GPSR2_9 GPSR3_9 GPSR4_9 GPSR5_9 GPSR6_9 \
+GPSR0_8 GPSR1_8 GPSR2_8 GPSR3_8 GPSR4_8 GPSR5_8 GPSR6_8 \
+GPSR0_7 GPSR1_7 GPSR2_7 GPSR3_7 GPSR4_7 GPSR5_7 GPSR6_7 \
+GPSR0_6 GPSR1_6 GPSR2_6 GPSR3_6 GPSR4_6 GPSR5_6 GPSR6_6 \
+GPSR0_5 GPSR1_5 GPSR2_5 GPSR3_5 GPSR4_5 GPSR5_5 GPSR6_5 \
+GPSR0_4 GPSR1_4 GPSR2_4 GPSR3_4 GPSR4_4 GPSR5_4 GPSR6_4 \
+GPSR0_3 GPSR1_3 GPSR2_3 GPSR3_3 GPSR4_3 GPSR5_3 GPSR6_3 \
+GPSR0_2 GPSR1_2 GPSR2_2 GPSR3_2 GPSR4_2 GPSR5_2 GPSR6_2 \
+GPSR0_1 GPSR1_1 GPSR2_1 GPSR3_1 GPSR4_1 GPSR5_1 GPSR6_1 \
+GPSR0_0 GPSR1_0 GPSR2_0 GPSR3_0 GPSR4_0 GPSR5_0 GPSR6_0
+
+#define PINMUX_IPSR \
+\
+FM(IP0_3_0) IP0_3_0 FM(IP1_3_0) IP1_3_0 FM(IP2_3_0) IP2_3_0 FM(IP3_3_0) IP3_3_0 \
+FM(IP0_7_4) IP0_7_4 FM(IP1_7_4) IP1_7_4 FM(IP2_7_4) IP2_7_4 FM(IP3_7_4) IP3_7_4 \
+FM(IP0_11_8) IP0_11_8 FM(IP1_11_8) IP1_11_8 FM(IP2_11_8) IP2_11_8 FM(IP3_11_8) IP3_11_8 \
+FM(IP0_15_12) IP0_15_12 FM(IP1_15_12) IP1_15_12 FM(IP2_15_12) IP2_15_12 FM(IP3_15_12) IP3_15_12 \
+FM(IP0_19_16) IP0_19_16 FM(IP1_19_16) IP1_19_16 FM(IP2_19_16) IP2_19_16 FM(IP3_19_16) IP3_19_16 \
+FM(IP0_23_20) IP0_23_20 FM(IP1_23_20) IP1_23_20 FM(IP2_23_20) IP2_23_20 FM(IP3_23_20) IP3_23_20 \
+FM(IP0_27_24) IP0_27_24 FM(IP1_27_24) IP1_27_24 FM(IP2_27_24) IP2_27_24 FM(IP3_27_24) IP3_27_24 \
+FM(IP0_31_28) IP0_31_28 FM(IP1_31_28) IP1_31_28 FM(IP2_31_28) IP2_31_28 FM(IP3_31_28) IP3_31_28 \
+\
+FM(IP4_3_0) IP4_3_0 FM(IP5_3_0) IP5_3_0 FM(IP6_3_0) IP6_3_0 FM(IP7_3_0) IP7_3_0 \
+FM(IP4_7_4) IP4_7_4 FM(IP5_7_4) IP5_7_4 FM(IP6_7_4) IP6_7_4 FM(IP7_7_4) IP7_7_4 \
+FM(IP4_11_8) IP4_11_8 FM(IP5_11_8) IP5_11_8 FM(IP6_11_8) IP6_11_8 FM(IP7_11_8) IP7_11_8 \
+FM(IP4_15_12) IP4_15_12 FM(IP5_15_12) IP5_15_12 FM(IP6_15_12) IP6_15_12 FM(IP7_15_12) IP7_15_12 \
+FM(IP4_19_16) IP4_19_16 FM(IP5_19_16) IP5_19_16 FM(IP6_19_16) IP6_19_16 FM(IP7_19_16) IP7_19_16 \
+FM(IP4_23_20) IP4_23_20 FM(IP5_23_20) IP5_23_20 FM(IP6_23_20) IP6_23_20 FM(IP7_23_20) IP7_23_20 \
+FM(IP4_27_24) IP4_27_24 FM(IP5_27_24) IP5_27_24 FM(IP6_27_24) IP6_27_24 FM(IP7_27_24) IP7_27_24 \
+FM(IP4_31_28) IP4_31_28 FM(IP5_31_28) IP5_31_28 FM(IP6_31_28) IP6_31_28 FM(IP7_31_28) IP7_31_28 \
+\
+FM(IP8_3_0) IP8_3_0 FM(IP9_3_0) IP9_3_0 FM(IP10_3_0) IP10_3_0 FM(IP11_3_0) IP11_3_0 \
+FM(IP8_7_4) IP8_7_4 FM(IP9_7_4) IP9_7_4 FM(IP10_7_4) IP10_7_4 FM(IP11_7_4) IP11_7_4 \
+FM(IP8_11_8) IP8_11_8 FM(IP9_11_8) IP9_11_8 FM(IP10_11_8) IP10_11_8 FM(IP11_11_8) IP11_11_8 \
+FM(IP8_15_12) IP8_15_12 FM(IP9_15_12) IP9_15_12 FM(IP10_15_12) IP10_15_12 FM(IP11_15_12) IP11_15_12 \
+FM(IP8_19_16) IP8_19_16 FM(IP9_19_16) IP9_19_16 FM(IP10_19_16) IP10_19_16 FM(IP11_19_16) IP11_19_16 \
+FM(IP8_23_20) IP8_23_20 FM(IP9_23_20) IP9_23_20 FM(IP10_23_20) IP10_23_20 FM(IP11_23_20) IP11_23_20 \
+FM(IP8_27_24) IP8_27_24 FM(IP9_27_24) IP9_27_24 FM(IP10_27_24) IP10_27_24 FM(IP11_27_24) IP11_27_24 \
+FM(IP8_31_28) IP8_31_28 FM(IP9_31_28) IP9_31_28 FM(IP10_31_28) IP10_31_28 FM(IP11_31_28) IP11_31_28 \
+\
+FM(IP12_3_0) IP12_3_0 FM(IP13_3_0) IP13_3_0 FM(IP14_3_0) IP14_3_0 FM(IP15_3_0) IP15_3_0 \
+FM(IP12_7_4) IP12_7_4 FM(IP13_7_4) IP13_7_4 FM(IP14_7_4) IP14_7_4 FM(IP15_7_4) IP15_7_4 \
+FM(IP12_11_8) IP12_11_8 FM(IP13_11_8) IP13_11_8 FM(IP14_11_8) IP14_11_8 FM(IP15_11_8) IP15_11_8 \
+FM(IP12_15_12) IP12_15_12 FM(IP13_15_12) IP13_15_12 FM(IP14_15_12) IP14_15_12 FM(IP15_15_12) IP15_15_12 \
+FM(IP12_19_16) IP12_19_16 FM(IP13_19_16) IP13_19_16 FM(IP14_19_16) IP14_19_16 FM(IP15_19_16) IP15_19_16 \
+FM(IP12_23_20) IP12_23_20 FM(IP13_23_20) IP13_23_20 FM(IP14_23_20) IP14_23_20 FM(IP15_23_20) IP15_23_20 \
+FM(IP12_27_24) IP12_27_24 FM(IP13_27_24) IP13_27_24 FM(IP14_27_24) IP14_27_24 FM(IP15_27_24) IP15_27_24 \
+FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM(IP15_31_28) IP15_31_28
+
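+/*
+ * Orientation note: the MOD_SEL0/MOD_SEL1 tables below list, for each
+ * bit field of the module select registers, the selector values that
+ * the PINMUX_IPSR_MSEL() entries further down refer to. For example,
+ * SEL_HSCIF4_0 and SEL_HSCIF4_1 choose between the _A and _B pin
+ * variants of the HSCIF4 signals.
+ */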
+/* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL0_30_29 FM(SEL_ADGB_0) FM(SEL_ADGB_1) FM(SEL_ADGB_2) F_(0, 0)
+#define MOD_SEL0_28 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1)
+#define MOD_SEL0_27_26 FM(SEL_FM_0) FM(SEL_FM_1) FM(SEL_FM_2) F_(0, 0)
+#define MOD_SEL0_25 FM(SEL_FSO_0) FM(SEL_FSO_1)
+#define MOD_SEL0_24 FM(SEL_HSCIF0_0) FM(SEL_HSCIF0_1)
+#define MOD_SEL0_23 FM(SEL_HSCIF1_0) FM(SEL_HSCIF1_1)
+#define MOD_SEL0_22 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1)
+#define MOD_SEL0_21_20 FM(SEL_I2C1_0) FM(SEL_I2C1_1) FM(SEL_I2C1_2) FM(SEL_I2C1_3) FM(SEL_I2C1_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL0_19_18_17 FM(SEL_I2C2_0) FM(SEL_I2C2_1) FM(SEL_I2C2_2) FM(SEL_I2C2_3) FM(SEL_I2C2_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL0_16 FM(SEL_NDFC_0) FM(SEL_NDFC_1)
+#define MOD_SEL0_15 FM(SEL_PWM0_0) FM(SEL_PWM0_1)
+#define MOD_SEL0_14 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
+#define MOD_SEL0_13_12 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0)
+#define MOD_SEL0_11_10 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0)
+#define MOD_SEL0_9 FM(SEL_PWM4_0) FM(SEL_PWM4_1)
+#define MOD_SEL0_8 FM(SEL_PWM5_0) FM(SEL_PWM5_1)
+#define MOD_SEL0_7 FM(SEL_PWM6_0) FM(SEL_PWM6_1)
+#define MOD_SEL0_6_5 FM(SEL_REMOCON_0) FM(SEL_REMOCON_1) FM(SEL_REMOCON_2) F_(0, 0)
+#define MOD_SEL0_4 FM(SEL_SCIF_0) FM(SEL_SCIF_1)
+#define MOD_SEL0_3 FM(SEL_SCIF0_0) FM(SEL_SCIF0_1)
+#define MOD_SEL0_2 FM(SEL_SCIF2_0) FM(SEL_SCIF2_1)
+#define MOD_SEL0_1_0 FM(SEL_SPEED_PULSE_IF_0) FM(SEL_SPEED_PULSE_IF_1) FM(SEL_SPEED_PULSE_IF_2) F_(0, 0)
+
+/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
+#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
+#define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
+#define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
+#define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
+#define MOD_SEL1_25 FM(SEL_DRIF3_0) FM(SEL_DRIF3_1)
+#define MOD_SEL1_24_23_22 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1) FM(SEL_HSCIF3_2) FM(SEL_HSCIF3_3) FM(SEL_HSCIF3_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL1_21_20_19 FM(SEL_HSCIF4_0) FM(SEL_HSCIF4_1) FM(SEL_HSCIF4_2) FM(SEL_HSCIF4_3) FM(SEL_HSCIF4_4) F_(0, 0) F_(0, 0) F_(0, 0)
+#define MOD_SEL1_18 FM(SEL_I2C6_0) FM(SEL_I2C6_1)
+#define MOD_SEL1_17 FM(SEL_I2C7_0) FM(SEL_I2C7_1)
+#define MOD_SEL1_16 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1)
+#define MOD_SEL1_15 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1)
+#define MOD_SEL1_14_13 FM(SEL_SCIF3_0) FM(SEL_SCIF3_1) FM(SEL_SCIF3_2) F_(0, 0)
+#define MOD_SEL1_12_11 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1) FM(SEL_SCIF4_2) F_(0, 0)
+#define MOD_SEL1_10_9 FM(SEL_SCIF5_0) FM(SEL_SCIF5_1) FM(SEL_SCIF5_2) F_(0, 0)
+#define MOD_SEL1_8 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
+#define MOD_SEL1_7 FM(SEL_VIN5_0) FM(SEL_VIN5_1)
+#define MOD_SEL1_6_5 FM(SEL_ADGC_0) FM(SEL_ADGC_1) FM(SEL_ADGC_2) F_(0, 0)
+#define MOD_SEL1_4 FM(SEL_SSI9_0) FM(SEL_SSI9_1)
+
+#define PINMUX_MOD_SELS \
+\
+ MOD_SEL1_31 \
+MOD_SEL0_30_29 MOD_SEL1_30 \
+ MOD_SEL1_29 \
+MOD_SEL0_28 MOD_SEL1_28 \
+MOD_SEL0_27_26 \
+ MOD_SEL1_26 \
+MOD_SEL0_25 MOD_SEL1_25 \
+MOD_SEL0_24 MOD_SEL1_24_23_22 \
+MOD_SEL0_23 \
+MOD_SEL0_22 \
+MOD_SEL0_21_20 MOD_SEL1_21_20_19 \
+MOD_SEL0_19_18_17 MOD_SEL1_18 \
+ MOD_SEL1_17 \
+MOD_SEL0_16 MOD_SEL1_16 \
+MOD_SEL0_15 MOD_SEL1_15 \
+MOD_SEL0_14 MOD_SEL1_14_13 \
+MOD_SEL0_13_12 \
+ MOD_SEL1_12_11 \
+MOD_SEL0_11_10 \
+ MOD_SEL1_10_9 \
+MOD_SEL0_9 \
+MOD_SEL0_8 MOD_SEL1_8 \
+MOD_SEL0_7 MOD_SEL1_7 \
+MOD_SEL0_6_5 MOD_SEL1_6_5 \
+MOD_SEL0_4 MOD_SEL1_4 \
+MOD_SEL0_3 \
+MOD_SEL0_2 \
+MOD_SEL0_1_0
+
+/*
+ * These pins cannot be muxed, but they have other properties that
+ * can be set, such as pull-up/pull-down enable.
+ */
+#define PINMUX_STATIC \
+ FM(AVB_TX_CTL) FM(AVB_TXC) FM(AVB_TD0) FM(AVB_TD1) FM(AVB_TD2) \
+ FM(AVB_TD3) \
+ FM(PRESETOUT_N) FM(FSCLKST_N) FM(TRST_N) FM(TCK) FM(TMS) FM(TDI) \
+ FM(ASEBRK) \
+ FM(MLB_REF)
+
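+/*
+ * The F_()/FM() helpers are redefined twice below so that the same
+ * PINMUX_GPSR/PINMUX_IPSR/PINMUX_MOD_SELS lists expand first into
+ * FN_* function values and then into *_MARK values; for example,
+ * FM(SD3_DAT5) yields FN_SD3_DAT5 on the first pass and SD3_DAT5_MARK
+ * on the second.
+ */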
+enum {
+ PINMUX_RESERVED = 0,
+
+ PINMUX_DATA_BEGIN,
+ GP_ALL(DATA),
+ PINMUX_DATA_END,
+
+#define F_(x, y)
+#define FM(x) FN_##x,
+ PINMUX_FUNCTION_BEGIN,
+ GP_ALL(FN),
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_FUNCTION_END,
+#undef F_
+#undef FM
+
+#define F_(x, y)
+#define FM(x) x##_MARK,
+ PINMUX_MARK_BEGIN,
+ PINMUX_GPSR
+ PINMUX_IPSR
+ PINMUX_MOD_SELS
+ PINMUX_STATIC
+ PINMUX_MARK_END,
+#undef F_
+#undef FM
+};
+
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_GP_ALL(),
+
+ PINMUX_SINGLE(CLKOUT),
+ PINMUX_SINGLE(AVB_PHY_INT),
+ PINMUX_SINGLE(AVB_RD3),
+ PINMUX_SINGLE(AVB_RXC),
+ PINMUX_SINGLE(AVB_RX_CTL),
+ PINMUX_SINGLE(QSPI0_SSL),
+
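+	/*
+	 * Reading note, summarising the sh-pfc conventions used below:
+	 * PINMUX_IPSR_GPSR() selects a function purely through the named
+	 * IPSR field, while PINMUX_IPSR_MSEL() additionally requires the
+	 * given MOD_SEL selector (e.g. SEL_HSCIF4_0) to be chosen.
+	 */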
+ /* IPSR0 */
+ PINMUX_IPSR_GPSR(IP0_3_0, QSPI0_SPCLK),
+ PINMUX_IPSR_MSEL(IP0_3_0, HSCK4_A, SEL_HSCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_7_4, QSPI0_MOSI_IO0),
+ PINMUX_IPSR_MSEL(IP0_7_4, HCTS4_N_A, SEL_HSCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_11_8, QSPI0_MISO_IO1),
+ PINMUX_IPSR_MSEL(IP0_11_8, HRTS4_N_A, SEL_HSCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_15_12, QSPI0_IO2),
+ PINMUX_IPSR_GPSR(IP0_15_12, HTX4_A),
+
+ PINMUX_IPSR_GPSR(IP0_19_16, QSPI0_IO3),
+ PINMUX_IPSR_MSEL(IP0_19_16, HRX4_A, SEL_HSCIF4_0),
+
+ PINMUX_IPSR_GPSR(IP0_23_20, QSPI1_SPCLK),
+ PINMUX_IPSR_MSEL(IP0_23_20, RIF2_CLK_A, SEL_DRIF2_0),
+ PINMUX_IPSR_MSEL(IP0_23_20, HSCK4_B, SEL_HSCIF4_1),
+ PINMUX_IPSR_MSEL(IP0_23_20, VI4_DATA0_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP0_27_24, QSPI1_MOSI_IO0),
+ PINMUX_IPSR_MSEL(IP0_27_24, RIF2_SYNC_A, SEL_DRIF2_0),
+ PINMUX_IPSR_GPSR(IP0_27_24, HTX4_B),
+ PINMUX_IPSR_MSEL(IP0_27_24, VI4_DATA1_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP0_31_28, QSPI1_MISO_IO1),
+ PINMUX_IPSR_MSEL(IP0_31_28, RIF2_D0_A, SEL_DRIF2_0),
+ PINMUX_IPSR_MSEL(IP0_31_28, HRX4_B, SEL_HSCIF4_1),
+ PINMUX_IPSR_MSEL(IP0_31_28, VI4_DATA2_A, SEL_VIN4_0),
+
+ /* IPSR1 */
+ PINMUX_IPSR_GPSR(IP1_3_0, QSPI1_IO2),
+ PINMUX_IPSR_MSEL(IP1_3_0, RIF2_D1_A, SEL_DRIF2_0),
+ PINMUX_IPSR_GPSR(IP1_3_0, HTX3_C),
+ PINMUX_IPSR_MSEL(IP1_3_0, VI4_DATA3_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_7_4, QSPI1_IO3),
+ PINMUX_IPSR_MSEL(IP1_7_4, RIF3_CLK_A, SEL_DRIF3_0),
+ PINMUX_IPSR_MSEL(IP1_7_4, HRX3_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP1_7_4, VI4_DATA4_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_11_8, QSPI1_SSL),
+ PINMUX_IPSR_MSEL(IP1_11_8, RIF3_SYNC_A, SEL_DRIF3_0),
+ PINMUX_IPSR_MSEL(IP1_11_8, HSCK3_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP1_11_8, VI4_DATA5_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_15_12, RPC_INT_N),
+ PINMUX_IPSR_MSEL(IP1_15_12, RIF3_D0_A, SEL_DRIF3_0),
+ PINMUX_IPSR_MSEL(IP1_15_12, HCTS3_N_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP1_15_12, VI4_DATA6_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_19_16, RPC_RESET_N),
+ PINMUX_IPSR_MSEL(IP1_19_16, RIF3_D1_A, SEL_DRIF3_0),
+ PINMUX_IPSR_MSEL(IP1_19_16, HRTS3_N_C, SEL_HSCIF3_2),
+ PINMUX_IPSR_MSEL(IP1_19_16, VI4_DATA7_A, SEL_VIN4_0),
+
+ PINMUX_IPSR_GPSR(IP1_23_20, AVB_RD0),
+
+ PINMUX_IPSR_GPSR(IP1_27_24, AVB_RD1),
+
+ PINMUX_IPSR_GPSR(IP1_31_28, AVB_RD2),
+
+ /* IPSR2 */
+ PINMUX_IPSR_GPSR(IP2_3_0, AVB_TXCREFCLK),
+
+ PINMUX_IPSR_GPSR(IP2_7_4, AVB_MDIO),
+
+ PINMUX_IPSR_GPSR(IP2_11_8, AVB_MDC),
+
+ PINMUX_IPSR_GPSR(IP2_15_12, BS_N),
+ PINMUX_IPSR_MSEL(IP2_15_12, PWM0_A, SEL_PWM0_0),
+ PINMUX_IPSR_GPSR(IP2_15_12, AVB_MAGIC),
+ PINMUX_IPSR_GPSR(IP2_15_12, VI4_CLK),
+ PINMUX_IPSR_GPSR(IP2_15_12, TX3_C),
+ PINMUX_IPSR_MSEL(IP2_15_12, VI5_CLK_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP2_19_16, RD_N),
+ PINMUX_IPSR_MSEL(IP2_19_16, PWM1_A, SEL_PWM1_0),
+ PINMUX_IPSR_GPSR(IP2_19_16, AVB_LINK),
+ PINMUX_IPSR_GPSR(IP2_19_16, VI4_FIELD),
+ PINMUX_IPSR_MSEL(IP2_19_16, RX3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_GPSR(IP2_19_16, FSCLKST2_N_A),
+ PINMUX_IPSR_MSEL(IP2_19_16, VI5_DATA0_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP2_23_20, RD_WR_N),
+ PINMUX_IPSR_MSEL(IP2_23_20, SCL7_A, SEL_I2C7_0),
+ PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH_A),
+ PINMUX_IPSR_GPSR(IP2_23_20, VI4_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP2_23_20, TX5_B),
+ PINMUX_IPSR_MSEL(IP2_23_20, SCK3_C, SEL_SCIF3_2),
+ PINMUX_IPSR_MSEL(IP2_23_20, PWM5_A, SEL_PWM5_0),
+
+ PINMUX_IPSR_GPSR(IP2_27_24, EX_WAIT0),
+ PINMUX_IPSR_MSEL(IP2_27_24, SDA7_A, SEL_I2C7_0),
+ PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE_A),
+ PINMUX_IPSR_GPSR(IP2_27_24, VI4_HSYNC_N),
+ PINMUX_IPSR_MSEL(IP2_27_24, RX5_B, SEL_SCIF5_1),
+ PINMUX_IPSR_MSEL(IP2_27_24, PWM6_A, SEL_PWM6_0),
+
+ PINMUX_IPSR_GPSR(IP2_31_28, A0),
+ PINMUX_IPSR_GPSR(IP2_31_28, IRQ0),
+ PINMUX_IPSR_MSEL(IP2_31_28, PWM2_A, SEL_PWM2_0),
+ PINMUX_IPSR_MSEL(IP2_31_28, MSIOF3_SS1_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_MSEL(IP2_31_28, VI5_CLK_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP2_31_28, DU_CDE),
+ PINMUX_IPSR_MSEL(IP2_31_28, HRX3_D, SEL_HSCIF3_3),
+ PINMUX_IPSR_GPSR(IP2_31_28, IERX),
+ PINMUX_IPSR_GPSR(IP2_31_28, QSTB_QHE),
+
+ /* IPSR3 */
+ PINMUX_IPSR_GPSR(IP3_3_0, A1),
+ PINMUX_IPSR_GPSR(IP3_3_0, IRQ1),
+ PINMUX_IPSR_MSEL(IP3_3_0, PWM3_A, SEL_PWM3_0),
+ PINMUX_IPSR_GPSR(IP3_3_0, DU_DOTCLKIN1),
+ PINMUX_IPSR_MSEL(IP3_3_0, VI5_DATA0_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP3_3_0, DU_DISP_CDE),
+ PINMUX_IPSR_MSEL(IP3_3_0, SDA6_B, SEL_I2C6_1),
+ PINMUX_IPSR_GPSR(IP3_3_0, IETX),
+ PINMUX_IPSR_GPSR(IP3_3_0, QCPV_QDE),
+
+ PINMUX_IPSR_GPSR(IP3_7_4, A2),
+ PINMUX_IPSR_GPSR(IP3_7_4, IRQ2),
+ PINMUX_IPSR_GPSR(IP3_7_4, AVB_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP3_7_4, VI4_CLKENB),
+ PINMUX_IPSR_MSEL(IP3_7_4, VI5_DATA1_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP3_7_4, DU_DISP),
+ PINMUX_IPSR_MSEL(IP3_7_4, SCL6_B, SEL_I2C6_1),
+ PINMUX_IPSR_GPSR(IP3_7_4, QSTVB_QVE),
+
+ PINMUX_IPSR_GPSR(IP3_11_8, A3),
+ PINMUX_IPSR_MSEL(IP3_11_8, CTS4_N_A, SEL_SCIF4_0),
+ PINMUX_IPSR_MSEL(IP3_11_8, PWM4_A, SEL_PWM4_0),
+ PINMUX_IPSR_GPSR(IP3_11_8, VI4_DATA12),
+ PINMUX_IPSR_GPSR(IP3_11_8, DU_DOTCLKOUT0),
+ PINMUX_IPSR_GPSR(IP3_11_8, HTX3_D),
+ PINMUX_IPSR_GPSR(IP3_11_8, IECLK),
+ PINMUX_IPSR_GPSR(IP3_11_8, LCDOUT12),
+
+ PINMUX_IPSR_GPSR(IP3_15_12, A4),
+ PINMUX_IPSR_MSEL(IP3_15_12, RTS4_N_TANS_A, SEL_SCIF4_0),
+ PINMUX_IPSR_MSEL(IP3_15_12, MSIOF3_SYNC_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP3_15_12, VI4_DATA8),
+ PINMUX_IPSR_MSEL(IP3_15_12, PWM2_B, SEL_PWM2_1),
+ PINMUX_IPSR_GPSR(IP3_15_12, DU_DG4),
+ PINMUX_IPSR_MSEL(IP3_15_12, RIF2_CLK_B, SEL_DRIF2_1),
+
+ PINMUX_IPSR_GPSR(IP3_19_16, A5),
+ PINMUX_IPSR_MSEL(IP3_19_16, SCK4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_MSEL(IP3_19_16, MSIOF3_SCK_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP3_19_16, VI4_DATA9),
+ PINMUX_IPSR_MSEL(IP3_19_16, PWM3_B, SEL_PWM3_1),
+ PINMUX_IPSR_MSEL(IP3_19_16, RIF2_SYNC_B, SEL_DRIF2_1),
+ PINMUX_IPSR_GPSR(IP3_19_16, QPOLA),
+
+ PINMUX_IPSR_GPSR(IP3_23_20, A6),
+ PINMUX_IPSR_MSEL(IP3_23_20, RX4_A, SEL_SCIF4_0),
+ PINMUX_IPSR_MSEL(IP3_23_20, MSIOF3_RXD_B, SEL_MSIOF3_1),
+ PINMUX_IPSR_GPSR(IP3_23_20, VI4_DATA10),
+ PINMUX_IPSR_MSEL(IP3_23_20, RIF2_D0_B, SEL_DRIF2_1),
+
+ PINMUX_IPSR_GPSR(IP3_27_24, A7),
+ PINMUX_IPSR_GPSR(IP3_27_24, TX4_A),
+ PINMUX_IPSR_GPSR(IP3_27_24, MSIOF3_TXD_B),
+ PINMUX_IPSR_GPSR(IP3_27_24, VI4_DATA11),
+ PINMUX_IPSR_MSEL(IP3_27_24, RIF2_D1_B, SEL_DRIF2_1),
+
+ PINMUX_IPSR_GPSR(IP3_31_28, A8),
+ PINMUX_IPSR_MSEL(IP3_31_28, SDA6_A, SEL_I2C6_0),
+ PINMUX_IPSR_MSEL(IP3_31_28, RX3_B, SEL_SCIF3_1),
+ PINMUX_IPSR_MSEL(IP3_31_28, HRX4_C, SEL_HSCIF4_2),
+ PINMUX_IPSR_MSEL(IP3_31_28, VI5_HSYNC_N_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP3_31_28, DU_HSYNC),
+ PINMUX_IPSR_MSEL(IP3_31_28, VI4_DATA0_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP3_31_28, QSTH_QHS),
+
+ /* IPSR4 */
+ PINMUX_IPSR_GPSR(IP4_3_0, A9),
+ PINMUX_IPSR_GPSR(IP4_3_0, TX5_A),
+ PINMUX_IPSR_GPSR(IP4_3_0, IRQ3),
+ PINMUX_IPSR_GPSR(IP4_3_0, VI4_DATA16),
+ PINMUX_IPSR_MSEL(IP4_3_0, VI5_VSYNC_N_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_3_0, DU_DG7),
+ PINMUX_IPSR_GPSR(IP4_3_0, LCDOUT15),
+
+ PINMUX_IPSR_GPSR(IP4_7_4, A10),
+ PINMUX_IPSR_GPSR(IP4_7_4, IRQ4),
+ PINMUX_IPSR_MSEL(IP4_7_4, MSIOF2_SYNC_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP4_7_4, VI4_DATA13),
+ PINMUX_IPSR_MSEL(IP4_7_4, VI5_FIELD_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_7_4, DU_DG5),
+ PINMUX_IPSR_GPSR(IP4_7_4, FSCLKST2_N_B),
+ PINMUX_IPSR_GPSR(IP4_7_4, LCDOUT13),
+
+ PINMUX_IPSR_GPSR(IP4_11_8, A11),
+ PINMUX_IPSR_MSEL(IP4_11_8, SCL6_A, SEL_I2C6_0),
+ PINMUX_IPSR_GPSR(IP4_11_8, TX3_B),
+ PINMUX_IPSR_GPSR(IP4_11_8, HTX4_C),
+ PINMUX_IPSR_GPSR(IP4_11_8, DU_VSYNC),
+ PINMUX_IPSR_MSEL(IP4_11_8, VI4_DATA1_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP4_11_8, QSTVA_QVS),
+
+ PINMUX_IPSR_GPSR(IP4_15_12, A12),
+ PINMUX_IPSR_MSEL(IP4_15_12, RX5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_GPSR(IP4_15_12, MSIOF2_SS2_B),
+ PINMUX_IPSR_GPSR(IP4_15_12, VI4_DATA17),
+ PINMUX_IPSR_MSEL(IP4_15_12, VI5_DATA3_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_15_12, DU_DG6),
+ PINMUX_IPSR_GPSR(IP4_15_12, LCDOUT14),
+
+ PINMUX_IPSR_GPSR(IP4_19_16, A13),
+ PINMUX_IPSR_MSEL(IP4_19_16, SCK5_A, SEL_SCIF5_0),
+ PINMUX_IPSR_MSEL(IP4_19_16, MSIOF2_SCK_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP4_19_16, VI4_DATA14),
+ PINMUX_IPSR_MSEL(IP4_19_16, HRX4_D, SEL_HSCIF4_3),
+ PINMUX_IPSR_GPSR(IP4_19_16, DU_DB2),
+ PINMUX_IPSR_GPSR(IP4_19_16, LCDOUT2),
+
+ PINMUX_IPSR_GPSR(IP4_23_20, A14),
+ PINMUX_IPSR_GPSR(IP4_23_20, MSIOF1_SS1),
+ PINMUX_IPSR_MSEL(IP4_23_20, MSIOF2_RXD_B, SEL_MSIOF2_1),
+ PINMUX_IPSR_GPSR(IP4_23_20, VI4_DATA15),
+ PINMUX_IPSR_GPSR(IP4_23_20, HTX4_D),
+ PINMUX_IPSR_GPSR(IP4_23_20, DU_DB3),
+ PINMUX_IPSR_GPSR(IP4_23_20, LCDOUT3),
+
+ PINMUX_IPSR_GPSR(IP4_27_24, A15),
+ PINMUX_IPSR_GPSR(IP4_27_24, MSIOF1_SS2),
+ PINMUX_IPSR_GPSR(IP4_27_24, MSIOF2_TXD_B),
+ PINMUX_IPSR_GPSR(IP4_27_24, VI4_DATA18),
+ PINMUX_IPSR_MSEL(IP4_27_24, VI5_DATA4_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_27_24, DU_DB4),
+ PINMUX_IPSR_GPSR(IP4_27_24, LCDOUT4),
+
+ PINMUX_IPSR_GPSR(IP4_31_28, A16),
+ PINMUX_IPSR_GPSR(IP4_31_28, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP4_31_28, MSIOF2_SS1_B),
+ PINMUX_IPSR_GPSR(IP4_31_28, VI4_DATA19),
+ PINMUX_IPSR_MSEL(IP4_31_28, VI5_DATA5_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP4_31_28, DU_DB5),
+ PINMUX_IPSR_GPSR(IP4_31_28, LCDOUT5),
+
+ /* IPSR5 */
+ PINMUX_IPSR_GPSR(IP5_3_0, A17),
+ PINMUX_IPSR_GPSR(IP5_3_0, MSIOF1_RXD),
+ PINMUX_IPSR_GPSR(IP5_3_0, VI4_DATA20),
+ PINMUX_IPSR_MSEL(IP5_3_0, VI5_DATA6_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_3_0, DU_DB6),
+ PINMUX_IPSR_GPSR(IP5_3_0, LCDOUT6),
+
+ PINMUX_IPSR_GPSR(IP5_7_4, A18),
+ PINMUX_IPSR_GPSR(IP5_7_4, MSIOF1_TXD),
+ PINMUX_IPSR_GPSR(IP5_7_4, VI4_DATA21),
+ PINMUX_IPSR_MSEL(IP5_7_4, VI5_DATA7_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_7_4, DU_DB0),
+ PINMUX_IPSR_MSEL(IP5_7_4, HRX4_E, SEL_HSCIF4_4),
+ PINMUX_IPSR_GPSR(IP5_7_4, LCDOUT0),
+
+ PINMUX_IPSR_GPSR(IP5_11_8, A19),
+ PINMUX_IPSR_GPSR(IP5_11_8, MSIOF1_SCK),
+ PINMUX_IPSR_GPSR(IP5_11_8, VI4_DATA22),
+ PINMUX_IPSR_MSEL(IP5_11_8, VI5_DATA2_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_11_8, DU_DB1),
+ PINMUX_IPSR_GPSR(IP5_11_8, HTX4_E),
+ PINMUX_IPSR_GPSR(IP5_11_8, LCDOUT1),
+
+ PINMUX_IPSR_GPSR(IP5_15_12, CS0_N),
+ PINMUX_IPSR_GPSR(IP5_15_12, SCL5),
+ PINMUX_IPSR_GPSR(IP5_15_12, DU_DR0),
+ PINMUX_IPSR_MSEL(IP5_15_12, VI4_DATA2_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP5_15_12, LCDOUT16),
+
+ PINMUX_IPSR_GPSR(IP5_19_16, WE0_N),
+ PINMUX_IPSR_GPSR(IP5_19_16, SDA5),
+ PINMUX_IPSR_GPSR(IP5_19_16, DU_DR1),
+ PINMUX_IPSR_MSEL(IP5_19_16, VI4_DATA3_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP5_19_16, LCDOUT17),
+
+ PINMUX_IPSR_GPSR(IP5_23_20, D0),
+ PINMUX_IPSR_MSEL(IP5_23_20, MSIOF3_SCK_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_GPSR(IP5_23_20, DU_DR2),
+ PINMUX_IPSR_MSEL(IP5_23_20, CTS4_N_C, SEL_SCIF4_2),
+ PINMUX_IPSR_GPSR(IP5_23_20, LCDOUT18),
+
+ PINMUX_IPSR_GPSR(IP5_27_24, D1),
+ PINMUX_IPSR_MSEL(IP5_27_24, MSIOF3_SYNC_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_MSEL(IP5_27_24, SCK3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_GPSR(IP5_27_24, VI4_DATA23),
+ PINMUX_IPSR_MSEL(IP5_27_24, VI5_CLKENB_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_27_24, DU_DB7),
+ PINMUX_IPSR_MSEL(IP5_27_24, RTS4_N_TANS_C, SEL_SCIF4_2),
+ PINMUX_IPSR_GPSR(IP5_27_24, LCDOUT7),
+
+ PINMUX_IPSR_GPSR(IP5_31_28, D2),
+ PINMUX_IPSR_MSEL(IP5_31_28, MSIOF3_RXD_A, SEL_MSIOF3_0),
+ PINMUX_IPSR_MSEL(IP5_31_28, RX5_C, SEL_SCIF5_2),
+ PINMUX_IPSR_MSEL(IP5_31_28, VI5_DATA14_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP5_31_28, DU_DR3),
+ PINMUX_IPSR_MSEL(IP5_31_28, RX4_C, SEL_SCIF4_2),
+ PINMUX_IPSR_GPSR(IP5_31_28, LCDOUT19),
+
+ /* IPSR6 */
+ PINMUX_IPSR_GPSR(IP6_3_0, D3),
+ PINMUX_IPSR_GPSR(IP6_3_0, MSIOF3_TXD_A),
+ PINMUX_IPSR_GPSR(IP6_3_0, TX5_C),
+ PINMUX_IPSR_MSEL(IP6_3_0, VI5_DATA15_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP6_3_0, DU_DR4),
+ PINMUX_IPSR_GPSR(IP6_3_0, TX4_C),
+ PINMUX_IPSR_GPSR(IP6_3_0, LCDOUT20),
+
+ PINMUX_IPSR_GPSR(IP6_7_4, D4),
+ PINMUX_IPSR_GPSR(IP6_7_4, CANFD1_TX),
+ PINMUX_IPSR_MSEL(IP6_7_4, HSCK3_B, SEL_HSCIF3_1),
+ PINMUX_IPSR_GPSR(IP6_7_4, CAN1_TX),
+ PINMUX_IPSR_MSEL(IP6_7_4, RTS3_N_TANS_A, SEL_SCIF3_0),
+ PINMUX_IPSR_GPSR(IP6_7_4, MSIOF3_SS2_A),
+ PINMUX_IPSR_MSEL(IP6_7_4, VI5_DATA1_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP6_11_8, D5),
+ PINMUX_IPSR_MSEL(IP6_11_8, RX3_A, SEL_SCIF3_0),
+ PINMUX_IPSR_MSEL(IP6_11_8, HRX3_B, SEL_HSCIF3_1),
+ PINMUX_IPSR_GPSR(IP6_11_8, DU_DR5),
+ PINMUX_IPSR_MSEL(IP6_11_8, VI4_DATA4_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP6_11_8, LCDOUT21),
+
+ PINMUX_IPSR_GPSR(IP6_15_12, D6),
+ PINMUX_IPSR_GPSR(IP6_15_12, TX3_A),
+ PINMUX_IPSR_GPSR(IP6_15_12, HTX3_B),
+ PINMUX_IPSR_GPSR(IP6_15_12, DU_DR6),
+ PINMUX_IPSR_MSEL(IP6_15_12, VI4_DATA5_B, SEL_VIN4_1),
+ PINMUX_IPSR_GPSR(IP6_15_12, LCDOUT22),
+
+ PINMUX_IPSR_GPSR(IP6_19_16, D7),
+ PINMUX_IPSR_GPSR(IP6_19_16, CANFD1_RX),
+ PINMUX_IPSR_GPSR(IP6_19_16, IRQ5),
+ PINMUX_IPSR_GPSR(IP6_19_16, CAN1_RX),
+ PINMUX_IPSR_MSEL(IP6_19_16, CTS3_N_A, SEL_SCIF3_0),
+ PINMUX_IPSR_MSEL(IP6_19_16, VI5_DATA2_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP6_23_20, D8),
+ PINMUX_IPSR_MSEL(IP6_23_20, MSIOF2_SCK_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP6_23_20, SCK4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_MSEL(IP6_23_20, VI5_DATA12_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP6_23_20, DU_DR7),
+ PINMUX_IPSR_MSEL(IP6_23_20, RIF3_CLK_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP6_23_20, HCTS3_N_E, SEL_HSCIF3_4),
+ PINMUX_IPSR_GPSR(IP6_23_20, LCDOUT23),
+
+ PINMUX_IPSR_GPSR(IP6_27_24, D9),
+ PINMUX_IPSR_MSEL(IP6_27_24, MSIOF2_SYNC_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP6_27_24, VI5_DATA10_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP6_27_24, DU_DG0),
+ PINMUX_IPSR_MSEL(IP6_27_24, RIF3_SYNC_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP6_27_24, HRX3_E, SEL_HSCIF3_4),
+ PINMUX_IPSR_GPSR(IP6_27_24, LCDOUT8),
+
+ PINMUX_IPSR_GPSR(IP6_31_28, D10),
+ PINMUX_IPSR_MSEL(IP6_31_28, MSIOF2_RXD_A, SEL_MSIOF2_0),
+ PINMUX_IPSR_MSEL(IP6_31_28, VI5_DATA13_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP6_31_28, DU_DG1),
+ PINMUX_IPSR_MSEL(IP6_31_28, RIF3_D0_B, SEL_DRIF3_1),
+ PINMUX_IPSR_GPSR(IP6_31_28, HTX3_E),
+ PINMUX_IPSR_GPSR(IP6_31_28, LCDOUT9),
+
+ /* IPSR7 */
+ PINMUX_IPSR_GPSR(IP7_3_0, D11),
+ PINMUX_IPSR_GPSR(IP7_3_0, MSIOF2_TXD_A),
+ PINMUX_IPSR_MSEL(IP7_3_0, VI5_DATA11_A, SEL_VIN5_0),
+ PINMUX_IPSR_GPSR(IP7_3_0, DU_DG2),
+ PINMUX_IPSR_MSEL(IP7_3_0, RIF3_D1_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP7_3_0, HRTS3_N_E, SEL_HSCIF3_4),
+ PINMUX_IPSR_GPSR(IP7_3_0, LCDOUT10),
+
+ PINMUX_IPSR_GPSR(IP7_7_4, D12),
+ PINMUX_IPSR_GPSR(IP7_7_4, CANFD0_TX),
+ PINMUX_IPSR_GPSR(IP7_7_4, TX4_B),
+ PINMUX_IPSR_GPSR(IP7_7_4, CAN0_TX),
+ PINMUX_IPSR_MSEL(IP7_7_4, VI5_DATA8_A, SEL_VIN5_0),
+ PINMUX_IPSR_MSEL(IP7_7_4, VI5_DATA3_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP7_11_8, D13),
+ PINMUX_IPSR_GPSR(IP7_11_8, CANFD0_RX),
+ PINMUX_IPSR_MSEL(IP7_11_8, RX4_B, SEL_SCIF4_1),
+ PINMUX_IPSR_GPSR(IP7_11_8, CAN0_RX),
+ PINMUX_IPSR_MSEL(IP7_11_8, VI5_DATA9_A, SEL_VIN5_0),
+ PINMUX_IPSR_MSEL(IP7_11_8, SCL7_B, SEL_I2C7_1),
+ PINMUX_IPSR_MSEL(IP7_11_8, VI5_DATA4_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP7_15_12, D14),
+ PINMUX_IPSR_GPSR(IP7_15_12, CAN_CLK),
+ PINMUX_IPSR_MSEL(IP7_15_12, HRX3_A, SEL_HSCIF3_0),
+ PINMUX_IPSR_GPSR(IP7_15_12, MSIOF2_SS2_A),
+ PINMUX_IPSR_MSEL(IP7_15_12, SDA7_B, SEL_I2C7_1),
+ PINMUX_IPSR_MSEL(IP7_15_12, VI5_DATA5_B, SEL_VIN5_1),
+
+ PINMUX_IPSR_GPSR(IP7_19_16, D15),
+ PINMUX_IPSR_GPSR(IP7_19_16, MSIOF2_SS1_A),
+ PINMUX_IPSR_GPSR(IP7_19_16, HTX3_A),
+ PINMUX_IPSR_GPSR(IP7_19_16, MSIOF3_SS1_A),
+ PINMUX_IPSR_GPSR(IP7_19_16, DU_DG3),
+ PINMUX_IPSR_GPSR(IP7_19_16, LCDOUT11),
+
+ PINMUX_IPSR_GPSR(IP7_23_20, SCL4),
+ PINMUX_IPSR_GPSR(IP7_23_20, CS1_N_A26),
+ PINMUX_IPSR_GPSR(IP7_23_20, DU_DOTCLKIN0),
+ PINMUX_IPSR_MSEL(IP7_23_20, VI4_DATA6_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP7_23_20, VI5_DATA6_B, SEL_VIN5_1),
+ PINMUX_IPSR_GPSR(IP7_23_20, QCLK),
+
+ PINMUX_IPSR_GPSR(IP7_27_24, SDA4),
+ PINMUX_IPSR_GPSR(IP7_27_24, WE1_N),
+ PINMUX_IPSR_MSEL(IP7_27_24, VI4_DATA7_B, SEL_VIN4_1),
+ PINMUX_IPSR_MSEL(IP7_27_24, VI5_DATA7_B, SEL_VIN5_1),
+ PINMUX_IPSR_GPSR(IP7_27_24, QPOLB),
+
+ PINMUX_IPSR_GPSR(IP7_31_28, SD0_CLK),
+ PINMUX_IPSR_GPSR(IP7_31_28, NFDATA8),
+ PINMUX_IPSR_MSEL(IP7_31_28, SCL1_C, SEL_I2C1_2),
+ PINMUX_IPSR_MSEL(IP7_31_28, HSCK1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP7_31_28, SDA2_E, SEL_I2C2_4),
+ PINMUX_IPSR_MSEL(IP7_31_28, FMCLK_B, SEL_FM_1),
+
+ /* IPSR8 */
+ PINMUX_IPSR_GPSR(IP8_3_0, SD0_CMD),
+ PINMUX_IPSR_GPSR(IP8_3_0, NFDATA9),
+ PINMUX_IPSR_MSEL(IP8_3_0, HRX1_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP8_3_0, SPEEDIN_B, SEL_SPEED_PULSE_IF_1),
+
+ PINMUX_IPSR_GPSR(IP8_7_4, SD0_DAT0),
+ PINMUX_IPSR_GPSR(IP8_7_4, NFDATA10),
+ PINMUX_IPSR_GPSR(IP8_7_4, HTX1_B),
+ PINMUX_IPSR_MSEL(IP8_7_4, REMOCON_B, SEL_REMOCON_1),
+
+ PINMUX_IPSR_GPSR(IP8_11_8, SD0_DAT1),
+ PINMUX_IPSR_GPSR(IP8_11_8, NFDATA11),
+ PINMUX_IPSR_MSEL(IP8_11_8, SDA2_C, SEL_I2C2_2),
+ PINMUX_IPSR_MSEL(IP8_11_8, HCTS1_N_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_MSEL(IP8_11_8, FMIN_B, SEL_FM_1),
+
+ PINMUX_IPSR_GPSR(IP8_15_12, SD0_DAT2),
+ PINMUX_IPSR_GPSR(IP8_15_12, NFDATA12),
+ PINMUX_IPSR_MSEL(IP8_15_12, SCL2_C, SEL_I2C2_2),
+ PINMUX_IPSR_MSEL(IP8_15_12, HRTS1_N_B, SEL_HSCIF1_1),
+ PINMUX_IPSR_GPSR(IP8_15_12, BPFCLK_B),
+
+ PINMUX_IPSR_GPSR(IP8_19_16, SD0_DAT3),
+ PINMUX_IPSR_GPSR(IP8_19_16, NFDATA13),
+ PINMUX_IPSR_MSEL(IP8_19_16, SDA1_C, SEL_I2C1_2),
+ PINMUX_IPSR_MSEL(IP8_19_16, SCL2_E, SEL_I2C2_4),
+ PINMUX_IPSR_MSEL(IP8_19_16, SPEEDIN_C, SEL_SPEED_PULSE_IF_2),
+ PINMUX_IPSR_MSEL(IP8_19_16, REMOCON_C, SEL_REMOCON_2),
+
+ PINMUX_IPSR_GPSR(IP8_23_20, SD1_CLK),
+ PINMUX_IPSR_MSEL(IP8_23_20, NFDATA14_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP8_27_24, SD1_CMD),
+ PINMUX_IPSR_MSEL(IP8_27_24, NFDATA15_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP8_31_28, SD1_DAT0),
+ PINMUX_IPSR_MSEL(IP8_31_28, NFWP_N_B, SEL_NDFC_1),
+
+ /* IPSR9 */
+ PINMUX_IPSR_GPSR(IP9_3_0, SD1_DAT1),
+ PINMUX_IPSR_MSEL(IP9_3_0, NFCE_N_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP9_7_4, SD1_DAT2),
+ PINMUX_IPSR_MSEL(IP9_7_4, NFALE_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP9_11_8, SD1_DAT3),
+ PINMUX_IPSR_MSEL(IP9_11_8, NFRB_N_B, SEL_NDFC_1),
+
+ PINMUX_IPSR_GPSR(IP9_15_12, SD3_CLK),
+ PINMUX_IPSR_GPSR(IP9_15_12, NFWE_N),
+
+ PINMUX_IPSR_GPSR(IP9_19_16, SD3_CMD),
+ PINMUX_IPSR_GPSR(IP9_19_16, NFRE_N),
+
+ PINMUX_IPSR_GPSR(IP9_23_20, SD3_DAT0),
+ PINMUX_IPSR_GPSR(IP9_23_20, NFDATA0),
+
+ PINMUX_IPSR_GPSR(IP9_27_24, SD3_DAT1),
+ PINMUX_IPSR_GPSR(IP9_27_24, NFDATA1),
+
+ PINMUX_IPSR_GPSR(IP9_31_28, SD3_DAT2),
+ PINMUX_IPSR_GPSR(IP9_31_28, NFDATA2),
+
+ /* IPSR10 */
+ PINMUX_IPSR_GPSR(IP10_3_0, SD3_DAT3),
+ PINMUX_IPSR_GPSR(IP10_3_0, NFDATA3),
+
+ PINMUX_IPSR_GPSR(IP10_7_4, SD3_DAT4),
+ PINMUX_IPSR_GPSR(IP10_7_4, NFDATA4),
+
+ PINMUX_IPSR_GPSR(IP10_11_8, SD3_DAT5),
+ PINMUX_IPSR_GPSR(IP10_11_8, NFDATA5),
+
+ PINMUX_IPSR_GPSR(IP10_15_12, SD3_DAT6),
+ PINMUX_IPSR_GPSR(IP10_15_12, NFDATA6),
+
+ PINMUX_IPSR_GPSR(IP10_19_16, SD3_DAT7),
+ PINMUX_IPSR_GPSR(IP10_19_16, NFDATA7),
+
+ PINMUX_IPSR_GPSR(IP10_23_20, SD3_DS),
+ PINMUX_IPSR_GPSR(IP10_23_20, NFCLE),
+
+ PINMUX_IPSR_GPSR(IP10_27_24, SD0_CD),
+ PINMUX_IPSR_GPSR(IP10_27_24, NFALE_A),
+ PINMUX_IPSR_GPSR(IP10_27_24, SD3_CD),
+ PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
+ PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1),
+ PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0),
+ PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0),
+
+ PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP),
+ PINMUX_IPSR_GPSR(IP10_31_28, NFRB_N_A),
+ PINMUX_IPSR_GPSR(IP10_31_28, SD3_WP),
+ PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
+ PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1),
+ PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, SEL_TIMER_TMU_0),
+ PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0),
+
+ /* IPSR11 */
+ PINMUX_IPSR_GPSR(IP11_3_0, SD1_CD),
+ PINMUX_IPSR_MSEL(IP11_3_0, NFCE_N_A, SEL_NDFC_0),
+ PINMUX_IPSR_GPSR(IP11_3_0, SSI_SCK1),
+ PINMUX_IPSR_MSEL(IP11_3_0, RIF0_D1_B, SEL_DRIF0_1),
+ PINMUX_IPSR_GPSR(IP11_3_0, TS_SDEN0),
+
+ PINMUX_IPSR_GPSR(IP11_7_4, SD1_WP),
+ PINMUX_IPSR_MSEL(IP11_7_4, NFWP_N_A, SEL_NDFC_0),
+ PINMUX_IPSR_GPSR(IP11_7_4, SSI_WS1),
+ PINMUX_IPSR_MSEL(IP11_7_4, RIF0_SYNC_B, SEL_DRIF0_1),
+ PINMUX_IPSR_GPSR(IP11_7_4, TS_SPSYNC0),
+
+ PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0),
+ PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC),
+ PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1),
+
+ PINMUX_IPSR_GPSR(IP11_15_12, TX0_A),
+ PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A),
+ PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0),
+ PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0),
+ PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1),
+
+ PINMUX_IPSR_MSEL(IP11_19_16, CTS0_N_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_19_16, NFDATA14_A, SEL_NDFC_0),
+ PINMUX_IPSR_GPSR(IP11_19_16, AUDIO_CLKOUT_A),
+ PINMUX_IPSR_GPSR(IP11_19_16, RIF1_D1),
+ PINMUX_IPSR_MSEL(IP11_19_16, SCIF_CLK_A, SEL_SCIF_0),
+ PINMUX_IPSR_MSEL(IP11_19_16, FMCLK_A, SEL_FM_0),
+
+ PINMUX_IPSR_MSEL(IP11_23_20, RTS0_N_TANS_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_23_20, NFDATA15_A, SEL_NDFC_0),
+ PINMUX_IPSR_GPSR(IP11_23_20, AUDIO_CLKOUT1_A),
+ PINMUX_IPSR_GPSR(IP11_23_20, RIF1_CLK),
+ PINMUX_IPSR_MSEL(IP11_23_20, SCL2_A, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP11_23_20, FMIN_A, SEL_FM_0),
+
+ PINMUX_IPSR_MSEL(IP11_27_24, SCK0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_27_24, HSCK1_A, SEL_HSCIF1_0),
+ PINMUX_IPSR_GPSR(IP11_27_24, USB3HS0_ID),
+ PINMUX_IPSR_GPSR(IP11_27_24, RTS1_N_TANS),
+ PINMUX_IPSR_MSEL(IP11_27_24, SDA2_A, SEL_I2C2_0),
+ PINMUX_IPSR_MSEL(IP11_27_24, FMCLK_C, SEL_FM_2),
+ PINMUX_IPSR_GPSR(IP11_27_24, USB1_ID),
+
+ PINMUX_IPSR_GPSR(IP11_31_28, RX1),
+ PINMUX_IPSR_MSEL(IP11_31_28, HRX2_B, SEL_HSCIF2_1),
+ PINMUX_IPSR_MSEL(IP11_31_28, SSI_SCK9_B, SEL_SSI9_1),
+ PINMUX_IPSR_GPSR(IP11_31_28, AUDIO_CLKOUT1_B),
+
+ /* IPSR12 */
+ PINMUX_IPSR_GPSR(IP12_3_0, TX1),
+ PINMUX_IPSR_GPSR(IP12_3_0, HTX2_B),
+ PINMUX_IPSR_MSEL(IP12_3_0, SSI_WS9_B, SEL_SSI9_1),
+ PINMUX_IPSR_GPSR(IP12_3_0, AUDIO_CLKOUT3_B),
+
+ PINMUX_IPSR_GPSR(IP12_7_4, SCK2_A),
+ PINMUX_IPSR_MSEL(IP12_7_4, HSCK0_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_MSEL(IP12_7_4, AUDIO_CLKB_A, SEL_ADGB_0),
+ PINMUX_IPSR_GPSR(IP12_7_4, CTS1_N),
+ PINMUX_IPSR_MSEL(IP12_7_4, RIF0_CLK_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP12_7_4, REMOCON_A, SEL_REMOCON_0),
+ PINMUX_IPSR_MSEL(IP12_7_4, SCIF_CLK_B, SEL_SCIF_1),
+
+ PINMUX_IPSR_GPSR(IP12_11_8, TX2_A),
+ PINMUX_IPSR_MSEL(IP12_11_8, HRX0_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_GPSR(IP12_11_8, AUDIO_CLKOUT2_A),
+ PINMUX_IPSR_MSEL(IP12_11_8, SCL1_A, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP12_11_8, FSO_CFE_0_N_A, SEL_FSO_0),
+ PINMUX_IPSR_GPSR(IP12_11_8, TS_SDEN1),
+
+ PINMUX_IPSR_GPSR(IP12_15_12, RX2_A),
+ PINMUX_IPSR_GPSR(IP12_15_12, HTX0_A),
+ PINMUX_IPSR_GPSR(IP12_15_12, AUDIO_CLKOUT3_A),
+ PINMUX_IPSR_MSEL(IP12_15_12, SDA1_A, SEL_I2C1_0),
+ PINMUX_IPSR_MSEL(IP12_15_12, FSO_CFE_1_N_A, SEL_FSO_0),
+ PINMUX_IPSR_GPSR(IP12_15_12, TS_SPSYNC1),
+
+ PINMUX_IPSR_GPSR(IP12_19_16, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP12_19_16, SSI_SCK78),
+
+ PINMUX_IPSR_GPSR(IP12_23_20, MSIOF0_RXD),
+ PINMUX_IPSR_GPSR(IP12_23_20, SSI_WS78),
+ PINMUX_IPSR_GPSR(IP12_23_20, TX2_B),
+
+ PINMUX_IPSR_GPSR(IP12_27_24, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP12_27_24, SSI_SDATA7),
+ PINMUX_IPSR_GPSR(IP12_27_24, RX2_B),
+
+ PINMUX_IPSR_GPSR(IP12_31_28, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP12_31_28, AUDIO_CLKOUT_B),
+ PINMUX_IPSR_GPSR(IP12_31_28, SSI_SDATA8),
+
+ /* IPSR13 */
+ PINMUX_IPSR_GPSR(IP13_3_0, MSIOF0_SS1),
+ PINMUX_IPSR_MSEL(IP13_3_0, HRX2_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_GPSR(IP13_3_0, SSI_SCK4),
+ PINMUX_IPSR_MSEL(IP13_3_0, HCTS0_N_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_GPSR(IP13_3_0, BPFCLK_C),
+ PINMUX_IPSR_MSEL(IP13_3_0, SPEEDIN_A, SEL_SPEED_PULSE_IF_0),
+
+ PINMUX_IPSR_GPSR(IP13_7_4, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP13_7_4, HTX2_A),
+ PINMUX_IPSR_GPSR(IP13_7_4, SSI_WS4),
+ PINMUX_IPSR_MSEL(IP13_7_4, HRTS0_N_A, SEL_HSCIF0_0),
+ PINMUX_IPSR_MSEL(IP13_7_4, FMIN_C, SEL_FM_2),
+ PINMUX_IPSR_GPSR(IP13_7_4, BPFCLK_A),
+
+ PINMUX_IPSR_GPSR(IP13_11_8, SSI_SDATA9),
+ PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKC_A, SEL_ADGC_0),
+ PINMUX_IPSR_GPSR(IP13_11_8, SCK1),
+
+ PINMUX_IPSR_GPSR(IP13_15_12, MLB_CLK),
+ PINMUX_IPSR_MSEL(IP13_15_12, RX0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MSEL(IP13_15_12, RIF0_D0_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP13_15_12, SCL1_B, SEL_I2C1_1),
+ PINMUX_IPSR_MSEL(IP13_15_12, TCLK1_B, SEL_TIMER_TMU_1),
+ PINMUX_IPSR_GPSR(IP13_15_12, SIM0_RST_A),
+
+ PINMUX_IPSR_GPSR(IP13_19_16, MLB_SIG),
+ PINMUX_IPSR_MSEL(IP13_19_16, SCK0_B, SEL_SCIF0_1),
+ PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1),
+ PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1),
+ PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0),
+
+ PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT),
+ PINMUX_IPSR_GPSR(IP13_23_20, TX0_B),
+ PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_A, SEL_DRIF0_0),
+ PINMUX_IPSR_GPSR(IP13_23_20, SIM0_CLK_A),
+
+ PINMUX_IPSR_GPSR(IP13_27_24, SSI_SCK01239),
+
+ PINMUX_IPSR_GPSR(IP13_31_28, SSI_WS01239),
+
+ /* IPSR14 */
+ PINMUX_IPSR_GPSR(IP14_3_0, SSI_SDATA0),
+
+ PINMUX_IPSR_GPSR(IP14_7_4, SSI_SDATA1),
+ PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_B, SEL_ADGC_1),
+ PINMUX_IPSR_MSEL(IP14_7_4, PWM0_B, SEL_PWM0_1),
+
+ PINMUX_IPSR_GPSR(IP14_11_8, SSI_SDATA2),
+ PINMUX_IPSR_GPSR(IP14_11_8, AUDIO_CLKOUT2_B),
+ PINMUX_IPSR_MSEL(IP14_11_8, SSI_SCK9_A, SEL_SSI9_0),
+ PINMUX_IPSR_MSEL(IP14_11_8, PWM1_B, SEL_PWM1_1),
+
+ PINMUX_IPSR_GPSR(IP14_15_12, SSI_SCK349),
+ PINMUX_IPSR_MSEL(IP14_15_12, PWM2_C, SEL_PWM2_2),
+
+ PINMUX_IPSR_GPSR(IP14_19_16, SSI_WS349),
+ PINMUX_IPSR_MSEL(IP14_19_16, PWM3_C, SEL_PWM3_2),
+
+ PINMUX_IPSR_GPSR(IP14_23_20, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP14_23_20, AUDIO_CLKOUT1_C),
+ PINMUX_IPSR_MSEL(IP14_23_20, AUDIO_CLKB_B, SEL_ADGB_1),
+ PINMUX_IPSR_MSEL(IP14_23_20, PWM4_B, SEL_PWM4_1),
+
+ PINMUX_IPSR_GPSR(IP14_27_24, SSI_SDATA4),
+ PINMUX_IPSR_MSEL(IP14_27_24, SSI_WS9_A, SEL_SSI9_0),
+ PINMUX_IPSR_MSEL(IP14_27_24, PWM5_B, SEL_PWM5_1),
+
+ PINMUX_IPSR_GPSR(IP14_31_28, SSI_SCK5),
+ PINMUX_IPSR_MSEL(IP14_31_28, HRX0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_GPSR(IP14_31_28, USB0_PWEN_B),
+ PINMUX_IPSR_MSEL(IP14_31_28, SCL2_D, SEL_I2C2_3),
+ PINMUX_IPSR_MSEL(IP14_31_28, PWM6_B, SEL_PWM6_1),
+
+ /* IPSR15 */
+ PINMUX_IPSR_GPSR(IP15_3_0, SSI_WS5),
+ PINMUX_IPSR_GPSR(IP15_3_0, HTX0_B),
+ PINMUX_IPSR_MSEL(IP15_3_0, USB0_OVC_B, SEL_USB_20_CH0_1),
+ PINMUX_IPSR_MSEL(IP15_3_0, SDA2_D, SEL_I2C2_3),
+
+ PINMUX_IPSR_GPSR(IP15_7_4, SSI_SDATA5),
+ PINMUX_IPSR_MSEL(IP15_7_4, HSCK0_B, SEL_HSCIF0_1),
+ PINMUX_IPSR_MSEL(IP15_7_4, AUDIO_CLKB_C, SEL_ADGB_2),
+ PINMUX_IPSR_GPSR(IP15_7_4, TPU0TO0),
+
+ PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK6),
+ PINMUX_IPSR_MSEL(IP15_11_8, HSCK2_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_MSEL(IP15_11_8, AUDIO_CLKC_C, SEL_ADGC_2),
+ PINMUX_IPSR_GPSR(IP15_11_8, TPU0TO1),
+ PINMUX_IPSR_MSEL(IP15_11_8, FSO_CFE_0_N_B, SEL_FSO_1),
+ PINMUX_IPSR_GPSR(IP15_11_8, SIM0_RST_B),
+
+ PINMUX_IPSR_GPSR(IP15_15_12, SSI_WS6),
+ PINMUX_IPSR_MSEL(IP15_15_12, HCTS2_N_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_GPSR(IP15_15_12, AUDIO_CLKOUT2_C),
+ PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2),
+ PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3),
+ PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1),
+ PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1),
+
+ PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6),
+ PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0),
+ PINMUX_IPSR_GPSR(IP15_19_16, AUDIO_CLKOUT3_C),
+ PINMUX_IPSR_GPSR(IP15_19_16, TPU0TO3),
+ PINMUX_IPSR_MSEL(IP15_19_16, SCL1_D, SEL_I2C1_3),
+ PINMUX_IPSR_MSEL(IP15_19_16, FSO_TOE_N_B, SEL_FSO_1),
+ PINMUX_IPSR_GPSR(IP15_19_16, SIM0_CLK_B),
+
+ PINMUX_IPSR_GPSR(IP15_23_20, AUDIO_CLKA),
+
+ PINMUX_IPSR_GPSR(IP15_27_24, USB30_PWEN),
+ PINMUX_IPSR_GPSR(IP15_27_24, USB0_PWEN_A),
+
+ PINMUX_IPSR_GPSR(IP15_31_28, USB30_OVC),
+ PINMUX_IPSR_MSEL(IP15_31_28, USB0_OVC_A, SEL_USB_20_CH0_0),
+
+/*
+ * Static pins cannot be muxed between different functions, but they
+ * still need mark entries in the pinmux list. Add each static pin to
+ * the list without an associated function. The sh-pfc core will do
+ * the right thing and skip trying to mux the pin while still applying
+ * configuration to it.
+ */
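+/*
+ * Illustrative expansion (no extra entries are added here): with the
+ * FM() definition below, FM(TRST_N) from PINMUX_STATIC becomes
+ * PINMUX_DATA(TRST_N_MARK, 0).
+ */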
+#define FM(x) PINMUX_DATA(x##_MARK, 0),
+ PINMUX_STATIC
+#undef FM
+};
+
+/*
+ * R8A77990 has 7 GPIO banks, so the GPIO index space spans
+ * 7 x 32 = 224 entries; the non-GPIO pins below are numbered above
+ * that range from their physical position.
+ * Physical layout rows: A - AE, cols: 1 - 25.
+ */
+#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
+#define PIN_NUMBER(r, c) (((r) - 'A') * 25 + (c) + 300)
+#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
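+/*
+ * Worked example of the numbering above (purely illustrative):
+ * PIN_NUMBER('F', 1) = ('F' - 'A') * 25 + 1 + 300 = 426, while pins in
+ * rows "AA" .. "AE" go through ROW_GROUP_A() first, e.g.
+ * PIN_A_NUMBER('D', 3) for row "AD", column 3.
+ */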
+
+static const struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+
+	/*
+	 * Pins not associated with a GPIO port.
+	 *
+	 * The pin positions differ between R8A77990 packages; all the
+	 * pfc driver needs is a unique number for each pin. To this end,
+	 * use the R8A77990 pin layout to calculate a unique number for
+	 * each pin.
+	 */
+ SH_PFC_PIN_NAMED_CFG('F', 1, TRST_N, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('F', 3, TMS, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('F', 4, TCK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('G', 2, TDI, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('G', 3, FSCLKST_N, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('H', 1, ASEBRK, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 1, AVB_TXC, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 2, AVB_TD0, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 3, AVB_TD1, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 5, AVB_TD2, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('N', 6, AVB_TD3, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('P', 3, AVB_TX_CTL, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('P', 4, AVB_MDIO, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('P', 5, AVB_MDC, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG('T', 21, MLB_REF, CFG_FLAGS),
+ SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('D'), 3, PRESETOUT_N, CFG_FLAGS),
+};
+
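+/*
+ * For each group below, the *_pins[] array lists the GPIO port/bit
+ * positions and the matching *_mux[] array lists the corresponding
+ * *_MARK values in the same order; SH_PFC_PIN_GROUP() in
+ * pinmux_groups[] ties each pair together by name.
+ */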
+/* - EtherAVB --------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
+ /* AVB_LINK */
+ RCAR_GP_PIN(2, 23),
+};
+
+static const unsigned int avb_link_mux[] = {
+ AVB_LINK_MARK,
+};
+
+static const unsigned int avb_magic_pins[] = {
+ /* AVB_MAGIC */
+ RCAR_GP_PIN(2, 22),
+};
+
+static const unsigned int avb_magic_mux[] = {
+ AVB_MAGIC_MARK,
+};
+
+static const unsigned int avb_phy_int_pins[] = {
+ /* AVB_PHY_INT */
+ RCAR_GP_PIN(2, 21),
+};
+
+static const unsigned int avb_phy_int_mux[] = {
+ AVB_PHY_INT_MARK,
+};
+
+static const unsigned int avb_mii_pins[] = {
+ /*
+ * AVB_RX_CTL, AVB_RXC, AVB_RD0,
+ * AVB_RD1, AVB_RD2, AVB_RD3,
+ * AVB_TXCREFCLK
+ */
+ RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 16),
+ RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 18), RCAR_GP_PIN(2, 19),
+ RCAR_GP_PIN(2, 20),
+};
+
+static const unsigned int avb_mii_mux[] = {
+ AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK,
+ AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK,
+ AVB_TXCREFCLK_MARK,
+};
+
+static const unsigned int avb_avtp_pps_pins[] = {
+ /* AVB_AVTP_PPS */
+ RCAR_GP_PIN(1, 2),
+};
+
+static const unsigned int avb_avtp_pps_mux[] = {
+ AVB_AVTP_PPS_MARK,
+};
+
+static const unsigned int avb_avtp_match_a_pins[] = {
+ /* AVB_AVTP_MATCH_A */
+ RCAR_GP_PIN(2, 24),
+};
+
+static const unsigned int avb_avtp_match_a_mux[] = {
+ AVB_AVTP_MATCH_A_MARK,
+};
+
+static const unsigned int avb_avtp_capture_a_pins[] = {
+ /* AVB_AVTP_CAPTURE_A */
+ RCAR_GP_PIN(2, 25),
+};
+
+static const unsigned int avb_avtp_capture_a_mux[] = {
+ AVB_AVTP_CAPTURE_A_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c1_a_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 9),
+};
+
+static const unsigned int i2c1_a_mux[] = {
+ SCL1_A_MARK, SDA1_A_MARK,
+};
+
+static const unsigned int i2c1_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 18),
+};
+
+static const unsigned int i2c1_b_mux[] = {
+ SCL1_B_MARK, SDA1_B_MARK,
+};
+
+static const unsigned int i2c1_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 5),
+};
+
+static const unsigned int i2c1_c_mux[] = {
+ SCL1_C_MARK, SDA1_C_MARK,
+};
+
+static const unsigned int i2c1_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(6, 16), RCAR_GP_PIN(6, 15),
+};
+
+static const unsigned int i2c1_d_mux[] = {
+ SCL1_D_MARK, SDA1_D_MARK,
+};
+
+static const unsigned int i2c2_a_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 0),
+};
+
+static const unsigned int i2c2_a_mux[] = {
+ SCL2_A_MARK, SDA2_A_MARK,
+};
+
+static const unsigned int i2c2_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
+};
+
+static const unsigned int i2c2_b_mux[] = {
+ SCL2_B_MARK, SDA2_B_MARK,
+};
+
+static const unsigned int i2c2_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 3),
+};
+
+static const unsigned int i2c2_c_mux[] = {
+ SCL2_C_MARK, SDA2_C_MARK,
+};
+
+static const unsigned int i2c2_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+
+static const unsigned int i2c2_d_mux[] = {
+ SCL2_D_MARK, SDA2_D_MARK,
+};
+
+static const unsigned int i2c2_e_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 0),
+};
+
+static const unsigned int i2c2_e_mux[] = {
+ SCL2_E_MARK, SDA2_E_MARK,
+};
+
+static const unsigned int i2c4_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(0, 16), RCAR_GP_PIN(0, 17),
+};
+
+static const unsigned int i2c4_mux[] = {
+ SCL4_MARK, SDA4_MARK,
+};
+
+static const unsigned int i2c5_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(1, 21), RCAR_GP_PIN(1, 22),
+};
+
+static const unsigned int i2c5_mux[] = {
+ SCL5_MARK, SDA5_MARK,
+};
+
+static const unsigned int i2c6_a_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(1, 11), RCAR_GP_PIN(1, 8),
+};
+
+static const unsigned int i2c6_a_mux[] = {
+ SCL6_A_MARK, SDA6_A_MARK,
+};
+
+static const unsigned int i2c6_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1),
+};
+
+static const unsigned int i2c6_b_mux[] = {
+ SCL6_B_MARK, SDA6_B_MARK,
+};
+
+static const unsigned int i2c7_a_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 24), RCAR_GP_PIN(2, 25),
+};
+
+static const unsigned int i2c7_a_mux[] = {
+ SCL7_A_MARK, SDA7_A_MARK,
+};
+
+static const unsigned int i2c7_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(0, 13), RCAR_GP_PIN(0, 14),
+};
+
+static const unsigned int i2c7_b_mux[] = {
+ SCL7_B_MARK, SDA7_B_MARK,
+};
+
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+
+static const unsigned int scif0_data_a_mux[] = {
+ RX0_A_MARK, TX0_A_MARK,
+};
+
+static const unsigned int scif0_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+
+static const unsigned int scif0_clk_a_mux[] = {
+ SCK0_A_MARK,
+};
+
+static const unsigned int scif0_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+
+static const unsigned int scif0_ctrl_a_mux[] = {
+ RTS0_N_TANS_A_MARK, CTS0_N_A_MARK,
+};
+
+static const unsigned int scif0_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 19),
+};
+
+static const unsigned int scif0_data_b_mux[] = {
+ RX0_B_MARK, TX0_B_MARK,
+};
+
+static const unsigned int scif0_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 18),
+};
+
+static const unsigned int scif0_clk_b_mux[] = {
+ SCK0_B_MARK,
+};
+
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 16),
+};
+
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 7),
+};
+
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_TANS_MARK, CTS1_N_MARK,
+};
+
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 8),
+};
+
+static const unsigned int scif2_data_a_mux[] = {
+ RX2_A_MARK, TX2_A_MARK,
+};
+
+static const unsigned int scif2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 7),
+};
+
+static const unsigned int scif2_clk_a_mux[] = {
+ SCK2_A_MARK,
+};
+
+static const unsigned int scif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 11),
+};
+
+static const unsigned int scif2_data_b_mux[] = {
+ RX2_B_MARK, TX2_B_MARK,
+};
+
+/* - SCIF3 ------------------------------------------------------------------ */
+static const unsigned int scif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6),
+};
+
+static const unsigned int scif3_data_a_mux[] = {
+ RX3_A_MARK, TX3_A_MARK,
+};
+
+static const unsigned int scif3_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 1),
+};
+
+static const unsigned int scif3_clk_a_mux[] = {
+ SCK3_A_MARK,
+};
+
+static const unsigned int scif3_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 7),
+};
+
+static const unsigned int scif3_ctrl_a_mux[] = {
+ RTS3_N_TANS_A_MARK, CTS3_N_A_MARK,
+};
+
+static const unsigned int scif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+
+static const unsigned int scif3_data_b_mux[] = {
+ RX3_B_MARK, TX3_B_MARK,
+};
+
+static const unsigned int scif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 22),
+};
+
+static const unsigned int scif3_data_c_mux[] = {
+ RX3_C_MARK, TX3_C_MARK,
+};
+
+static const unsigned int scif3_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 24),
+};
+
+static const unsigned int scif3_clk_c_mux[] = {
+ SCK3_C_MARK,
+};
+
+/* - SCIF4 ------------------------------------------------------------------ */
+static const unsigned int scif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+
+static const unsigned int scif4_data_a_mux[] = {
+ RX4_A_MARK, TX4_A_MARK,
+};
+
+static const unsigned int scif4_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 5),
+};
+
+static const unsigned int scif4_clk_a_mux[] = {
+ SCK4_A_MARK,
+};
+
+static const unsigned int scif4_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3),
+};
+
+static const unsigned int scif4_ctrl_a_mux[] = {
+ RTS4_N_TANS_A_MARK, CTS4_N_A_MARK,
+};
+
+static const unsigned int scif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 13), RCAR_GP_PIN(0, 12),
+};
+
+static const unsigned int scif4_data_b_mux[] = {
+ RX4_B_MARK, TX4_B_MARK,
+};
+
+static const unsigned int scif4_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 8),
+};
+
+static const unsigned int scif4_clk_b_mux[] = {
+ SCK4_B_MARK,
+};
+
+static const unsigned int scif4_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+};
+
+static const unsigned int scif4_data_c_mux[] = {
+ RX4_C_MARK, TX4_C_MARK,
+};
+
+static const unsigned int scif4_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(0, 1), RCAR_GP_PIN(0, 0),
+};
+
+static const unsigned int scif4_ctrl_c_mux[] = {
+ RTS4_N_TANS_C_MARK, CTS4_N_C_MARK,
+};
+
+/* - SCIF5 ------------------------------------------------------------------ */
+static const unsigned int scif5_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 9),
+};
+
+static const unsigned int scif5_data_a_mux[] = {
+ RX5_A_MARK, TX5_A_MARK,
+};
+
+static const unsigned int scif5_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 13),
+};
+
+static const unsigned int scif5_clk_a_mux[] = {
+ SCK5_A_MARK,
+};
+
+static const unsigned int scif5_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 25), RCAR_GP_PIN(2, 24),
+};
+
+static const unsigned int scif5_data_b_mux[] = {
+ RX5_B_MARK, TX5_B_MARK,
+};
+
+static const unsigned int scif5_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+};
+
+static const unsigned int scif5_data_c_mux[] = {
+ RX5_C_MARK, TX5_C_MARK,
+};
+
+/* - SCIF Clock ------------------------------------------------------------- */
+static const unsigned int scif_clk_a_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(5, 3),
+};
+
+static const unsigned int scif_clk_a_mux[] = {
+ SCIF_CLK_A_MARK,
+};
+
+static const unsigned int scif_clk_b_pins[] = {
+ /* SCIF_CLK */
+ RCAR_GP_PIN(5, 7),
+};
+
+static const unsigned int scif_clk_b_mux[] = {
+ SCIF_CLK_B_MARK,
+};
+
+static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c1_c),
+ SH_PFC_PIN_GROUP(i2c1_d),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c2_c),
+ SH_PFC_PIN_GROUP(i2c2_d),
+ SH_PFC_PIN_GROUP(i2c2_e),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c5),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c7_a),
+ SH_PFC_PIN_GROUP(i2c7_b),
+ SH_PFC_PIN_GROUP(scif0_data_a),
+ SH_PFC_PIN_GROUP(scif0_clk_a),
+ SH_PFC_PIN_GROUP(scif0_ctrl_a),
+ SH_PFC_PIN_GROUP(scif0_data_b),
+ SH_PFC_PIN_GROUP(scif0_clk_b),
+ SH_PFC_PIN_GROUP(scif1_data),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk_a),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk_a),
+ SH_PFC_PIN_GROUP(scif3_ctrl_a),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_c),
+ SH_PFC_PIN_GROUP(scif3_clk_c),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_clk_a),
+ SH_PFC_PIN_GROUP(scif4_ctrl_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_ctrl_c),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_clk_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_data_c),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+};
+
+static const char * const avb_groups[] = {
+ "avb_link",
+ "avb_magic",
+ "avb_phy_int",
+ "avb_mii",
+ "avb_avtp_pps",
+ "avb_avtp_match_a",
+ "avb_avtp_capture_a",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_a",
+ "i2c1_b",
+ "i2c1_c",
+ "i2c1_d",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_a",
+ "i2c2_b",
+ "i2c2_c",
+ "i2c2_d",
+ "i2c2_e",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4",
+};
+
+static const char * const i2c5_groups[] = {
+ "i2c5",
+};
+
+static const char * const i2c6_groups[] = {
+ "i2c6_a",
+ "i2c6_b",
+};
+
+static const char * const i2c7_groups[] = {
+ "i2c7_a",
+ "i2c7_b",
+};
+
+static const char * const scif0_groups[] = {
+ "scif0_data_a",
+ "scif0_clk_a",
+ "scif0_ctrl_a",
+ "scif0_data_b",
+ "scif0_clk_b",
+};
+
+static const char * const scif1_groups[] = {
+ "scif1_data",
+ "scif1_clk",
+ "scif1_ctrl",
+};
+
+static const char * const scif2_groups[] = {
+ "scif2_data_a",
+ "scif2_clk_a",
+ "scif2_data_b",
+};
+
+static const char * const scif3_groups[] = {
+ "scif3_data_a",
+ "scif3_clk_a",
+ "scif3_ctrl_a",
+ "scif3_data_b",
+ "scif3_data_c",
+ "scif3_clk_c",
+};
+
+static const char * const scif4_groups[] = {
+ "scif4_data_a",
+ "scif4_clk_a",
+ "scif4_ctrl_a",
+ "scif4_data_b",
+ "scif4_clk_b",
+ "scif4_data_c",
+ "scif4_ctrl_c",
+};
+
+static const char * const scif5_groups[] = {
+ "scif5_data_a",
+ "scif5_clk_a",
+ "scif5_data_b",
+ "scif5_data_c",
+};
+
+static const char * const scif_clk_groups[] = {
+ "scif_clk_a",
+ "scif_clk_b",
+};
+
+static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c5),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(i2c7),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+};
+
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+#define F_(x, y) FN_##y
+#define FM(x) FN_##x
+ { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_0_17_FN, GPSR0_17,
+ GP_0_16_FN, GPSR0_16,
+ GP_0_15_FN, GPSR0_15,
+ GP_0_14_FN, GPSR0_14,
+ GP_0_13_FN, GPSR0_13,
+ GP_0_12_FN, GPSR0_12,
+ GP_0_11_FN, GPSR0_11,
+ GP_0_10_FN, GPSR0_10,
+ GP_0_9_FN, GPSR0_9,
+ GP_0_8_FN, GPSR0_8,
+ GP_0_7_FN, GPSR0_7,
+ GP_0_6_FN, GPSR0_6,
+ GP_0_5_FN, GPSR0_5,
+ GP_0_4_FN, GPSR0_4,
+ GP_0_3_FN, GPSR0_3,
+ GP_0_2_FN, GPSR0_2,
+ GP_0_1_FN, GPSR0_1,
+ GP_0_0_FN, GPSR0_0, }
+ },
+ { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_1_22_FN, GPSR1_22,
+ GP_1_21_FN, GPSR1_21,
+ GP_1_20_FN, GPSR1_20,
+ GP_1_19_FN, GPSR1_19,
+ GP_1_18_FN, GPSR1_18,
+ GP_1_17_FN, GPSR1_17,
+ GP_1_16_FN, GPSR1_16,
+ GP_1_15_FN, GPSR1_15,
+ GP_1_14_FN, GPSR1_14,
+ GP_1_13_FN, GPSR1_13,
+ GP_1_12_FN, GPSR1_12,
+ GP_1_11_FN, GPSR1_11,
+ GP_1_10_FN, GPSR1_10,
+ GP_1_9_FN, GPSR1_9,
+ GP_1_8_FN, GPSR1_8,
+ GP_1_7_FN, GPSR1_7,
+ GP_1_6_FN, GPSR1_6,
+ GP_1_5_FN, GPSR1_5,
+ GP_1_4_FN, GPSR1_4,
+ GP_1_3_FN, GPSR1_3,
+ GP_1_2_FN, GPSR1_2,
+ GP_1_1_FN, GPSR1_1,
+ GP_1_0_FN, GPSR1_0, }
+ },
+ { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_2_25_FN, GPSR2_25,
+ GP_2_24_FN, GPSR2_24,
+ GP_2_23_FN, GPSR2_23,
+ GP_2_22_FN, GPSR2_22,
+ GP_2_21_FN, GPSR2_21,
+ GP_2_20_FN, GPSR2_20,
+ GP_2_19_FN, GPSR2_19,
+ GP_2_18_FN, GPSR2_18,
+ GP_2_17_FN, GPSR2_17,
+ GP_2_16_FN, GPSR2_16,
+ GP_2_15_FN, GPSR2_15,
+ GP_2_14_FN, GPSR2_14,
+ GP_2_13_FN, GPSR2_13,
+ GP_2_12_FN, GPSR2_12,
+ GP_2_11_FN, GPSR2_11,
+ GP_2_10_FN, GPSR2_10,
+ GP_2_9_FN, GPSR2_9,
+ GP_2_8_FN, GPSR2_8,
+ GP_2_7_FN, GPSR2_7,
+ GP_2_6_FN, GPSR2_6,
+ GP_2_5_FN, GPSR2_5,
+ GP_2_4_FN, GPSR2_4,
+ GP_2_3_FN, GPSR2_3,
+ GP_2_2_FN, GPSR2_2,
+ GP_2_1_FN, GPSR2_1,
+ GP_2_0_FN, GPSR2_0, }
+ },
+ { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_3_15_FN, GPSR3_15,
+ GP_3_14_FN, GPSR3_14,
+ GP_3_13_FN, GPSR3_13,
+ GP_3_12_FN, GPSR3_12,
+ GP_3_11_FN, GPSR3_11,
+ GP_3_10_FN, GPSR3_10,
+ GP_3_9_FN, GPSR3_9,
+ GP_3_8_FN, GPSR3_8,
+ GP_3_7_FN, GPSR3_7,
+ GP_3_6_FN, GPSR3_6,
+ GP_3_5_FN, GPSR3_5,
+ GP_3_4_FN, GPSR3_4,
+ GP_3_3_FN, GPSR3_3,
+ GP_3_2_FN, GPSR3_2,
+ GP_3_1_FN, GPSR3_1,
+ GP_3_0_FN, GPSR3_0, }
+ },
+ { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_4_10_FN, GPSR4_10,
+ GP_4_9_FN, GPSR4_9,
+ GP_4_8_FN, GPSR4_8,
+ GP_4_7_FN, GPSR4_7,
+ GP_4_6_FN, GPSR4_6,
+ GP_4_5_FN, GPSR4_5,
+ GP_4_4_FN, GPSR4_4,
+ GP_4_3_FN, GPSR4_3,
+ GP_4_2_FN, GPSR4_2,
+ GP_4_1_FN, GPSR4_1,
+ GP_4_0_FN, GPSR4_0, }
+ },
+ { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_5_19_FN, GPSR5_19,
+ GP_5_18_FN, GPSR5_18,
+ GP_5_17_FN, GPSR5_17,
+ GP_5_16_FN, GPSR5_16,
+ GP_5_15_FN, GPSR5_15,
+ GP_5_14_FN, GPSR5_14,
+ GP_5_13_FN, GPSR5_13,
+ GP_5_12_FN, GPSR5_12,
+ GP_5_11_FN, GPSR5_11,
+ GP_5_10_FN, GPSR5_10,
+ GP_5_9_FN, GPSR5_9,
+ GP_5_8_FN, GPSR5_8,
+ GP_5_7_FN, GPSR5_7,
+ GP_5_6_FN, GPSR5_6,
+ GP_5_5_FN, GPSR5_5,
+ GP_5_4_FN, GPSR5_4,
+ GP_5_3_FN, GPSR5_3,
+ GP_5_2_FN, GPSR5_2,
+ GP_5_1_FN, GPSR5_1,
+ GP_5_0_FN, GPSR5_0, }
+ },
+ { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1) {
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ 0, 0,
+ GP_6_17_FN, GPSR6_17,
+ GP_6_16_FN, GPSR6_16,
+ GP_6_15_FN, GPSR6_15,
+ GP_6_14_FN, GPSR6_14,
+ GP_6_13_FN, GPSR6_13,
+ GP_6_12_FN, GPSR6_12,
+ GP_6_11_FN, GPSR6_11,
+ GP_6_10_FN, GPSR6_10,
+ GP_6_9_FN, GPSR6_9,
+ GP_6_8_FN, GPSR6_8,
+ GP_6_7_FN, GPSR6_7,
+ GP_6_6_FN, GPSR6_6,
+ GP_6_5_FN, GPSR6_5,
+ GP_6_4_FN, GPSR6_4,
+ GP_6_3_FN, GPSR6_3,
+ GP_6_2_FN, GPSR6_2,
+ GP_6_1_FN, GPSR6_1,
+ GP_6_0_FN, GPSR6_0, }
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG("IPSR0", 0xe6060200, 32, 4) {
+ IP0_31_28
+ IP0_27_24
+ IP0_23_20
+ IP0_19_16
+ IP0_15_12
+ IP0_11_8
+ IP0_7_4
+ IP0_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR1", 0xe6060204, 32, 4) {
+ IP1_31_28
+ IP1_27_24
+ IP1_23_20
+ IP1_19_16
+ IP1_15_12
+ IP1_11_8
+ IP1_7_4
+ IP1_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR2", 0xe6060208, 32, 4) {
+ IP2_31_28
+ IP2_27_24
+ IP2_23_20
+ IP2_19_16
+ IP2_15_12
+ IP2_11_8
+ IP2_7_4
+ IP2_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR3", 0xe606020c, 32, 4) {
+ IP3_31_28
+ IP3_27_24
+ IP3_23_20
+ IP3_19_16
+ IP3_15_12
+ IP3_11_8
+ IP3_7_4
+ IP3_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR4", 0xe6060210, 32, 4) {
+ IP4_31_28
+ IP4_27_24
+ IP4_23_20
+ IP4_19_16
+ IP4_15_12
+ IP4_11_8
+ IP4_7_4
+ IP4_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR5", 0xe6060214, 32, 4) {
+ IP5_31_28
+ IP5_27_24
+ IP5_23_20
+ IP5_19_16
+ IP5_15_12
+ IP5_11_8
+ IP5_7_4
+ IP5_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR6", 0xe6060218, 32, 4) {
+ IP6_31_28
+ IP6_27_24
+ IP6_23_20
+ IP6_19_16
+ IP6_15_12
+ IP6_11_8
+ IP6_7_4
+ IP6_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4) {
+ IP7_31_28
+ IP7_27_24
+ IP7_23_20
+ IP7_19_16
+ IP7_15_12
+ IP7_11_8
+ IP7_7_4
+ IP7_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4) {
+ IP8_31_28
+ IP8_27_24
+ IP8_23_20
+ IP8_19_16
+ IP8_15_12
+ IP8_11_8
+ IP8_7_4
+ IP8_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR9", 0xe6060224, 32, 4) {
+ IP9_31_28
+ IP9_27_24
+ IP9_23_20
+ IP9_19_16
+ IP9_15_12
+ IP9_11_8
+ IP9_7_4
+ IP9_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4) {
+ IP10_31_28
+ IP10_27_24
+ IP10_23_20
+ IP10_19_16
+ IP10_15_12
+ IP10_11_8
+ IP10_7_4
+ IP10_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR11", 0xe606022c, 32, 4) {
+ IP11_31_28
+ IP11_27_24
+ IP11_23_20
+ IP11_19_16
+ IP11_15_12
+ IP11_11_8
+ IP11_7_4
+ IP11_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR12", 0xe6060230, 32, 4) {
+ IP12_31_28
+ IP12_27_24
+ IP12_23_20
+ IP12_19_16
+ IP12_15_12
+ IP12_11_8
+ IP12_7_4
+ IP12_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4) {
+ IP13_31_28
+ IP13_27_24
+ IP13_23_20
+ IP13_19_16
+ IP13_15_12
+ IP13_11_8
+ IP13_7_4
+ IP13_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR14", 0xe6060238, 32, 4) {
+ IP14_31_28
+ IP14_27_24
+ IP14_23_20
+ IP14_19_16
+ IP14_15_12
+ IP14_11_8
+ IP14_7_4
+ IP14_3_0 }
+ },
+ { PINMUX_CFG_REG("IPSR15", 0xe606023c, 32, 4) {
+ IP15_31_28
+ IP15_27_24
+ IP15_23_20
+ IP15_19_16
+ IP15_15_12
+ IP15_11_8
+ IP15_7_4
+ IP15_3_0 }
+ },
+#undef F_
+#undef FM
+
+#define F_(x, y) x,
+#define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
+ 1, 2, 1, 2, 1, 1, 1, 1, 2, 3, 1,
+ 1, 1, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2) {
+ /* RESERVED 31 */
+ 0, 0,
+ MOD_SEL0_30_29
+ MOD_SEL0_28
+ MOD_SEL0_27_26
+ MOD_SEL0_25
+ MOD_SEL0_24
+ MOD_SEL0_23
+ MOD_SEL0_22
+ MOD_SEL0_21_20
+ MOD_SEL0_19_18_17
+ MOD_SEL0_16
+ MOD_SEL0_15
+ MOD_SEL0_14
+ MOD_SEL0_13_12
+ MOD_SEL0_11_10
+ MOD_SEL0_9
+ MOD_SEL0_8
+ MOD_SEL0_7
+ MOD_SEL0_6_5
+ MOD_SEL0_4
+ MOD_SEL0_3
+ MOD_SEL0_2
+ MOD_SEL0_1_0 }
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
+ 1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
+ 1, 2, 2, 2, 1, 1, 2, 1, 4) {
+ MOD_SEL1_31
+ MOD_SEL1_30
+ MOD_SEL1_29
+ MOD_SEL1_28
+ /* RESERVED 27 */
+ 0, 0,
+ MOD_SEL1_26
+ MOD_SEL1_25
+ MOD_SEL1_24_23_22
+ MOD_SEL1_21_20_19
+ MOD_SEL1_18
+ MOD_SEL1_17
+ MOD_SEL1_16
+ MOD_SEL1_15
+ MOD_SEL1_14_13
+ MOD_SEL1_12_11
+ MOD_SEL1_10_9
+ MOD_SEL1_8
+ MOD_SEL1_7
+ MOD_SEL1_6_5
+ MOD_SEL1_4
+ /* RESERVED 3, 2, 1, 0 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ },
+ { },
+};
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [0] = RCAR_GP_PIN(2, 23), /* RD# */
+ [1] = RCAR_GP_PIN(2, 22), /* BS# */
+ [2] = RCAR_GP_PIN(2, 21), /* AVB_PHY_INT */
+ [3] = PIN_NUMBER('P', 5), /* AVB_MDC */
+ [4] = PIN_NUMBER('P', 4), /* AVB_MDIO */
+ [5] = RCAR_GP_PIN(2, 20), /* AVB_TXCREFCLK */
+ [6] = PIN_NUMBER('N', 6), /* AVB_TD3 */
+ [7] = PIN_NUMBER('N', 5), /* AVB_TD2 */
+ [8] = PIN_NUMBER('N', 3), /* AVB_TD1 */
+ [9] = PIN_NUMBER('N', 2), /* AVB_TD0 */
+ [10] = PIN_NUMBER('N', 1), /* AVB_TXC */
+ [11] = PIN_NUMBER('P', 3), /* AVB_TX_CTL */
+ [12] = RCAR_GP_PIN(2, 19), /* AVB_RD3 */
+ [13] = RCAR_GP_PIN(2, 18), /* AVB_RD2 */
+ [14] = RCAR_GP_PIN(2, 17), /* AVB_RD1 */
+ [15] = RCAR_GP_PIN(2, 16), /* AVB_RD0 */
+ [16] = RCAR_GP_PIN(2, 15), /* AVB_RXC */
+ [17] = RCAR_GP_PIN(2, 14), /* AVB_RX_CTL */
+ [18] = RCAR_GP_PIN(2, 13), /* RPC_RESET# */
+ [19] = RCAR_GP_PIN(2, 12), /* RPC_INT# */
+ [20] = RCAR_GP_PIN(2, 11), /* QSPI1_SSL */
+ [21] = RCAR_GP_PIN(2, 10), /* QSPI1_IO3 */
+ [22] = RCAR_GP_PIN(2, 9), /* QSPI1_IO2 */
+ [23] = RCAR_GP_PIN(2, 8), /* QSPI1_MISO/IO1 */
+ [24] = RCAR_GP_PIN(2, 7), /* QSPI1_MOSI/IO0 */
+ [25] = RCAR_GP_PIN(2, 6), /* QSPI1_SPCLK */
+ [26] = RCAR_GP_PIN(2, 5), /* QSPI0_SSL */
+ [27] = RCAR_GP_PIN(2, 4), /* QSPI0_IO3 */
+ [28] = RCAR_GP_PIN(2, 3), /* QSPI0_IO2 */
+ [29] = RCAR_GP_PIN(2, 2), /* QSPI0_MISO/IO1 */
+ [30] = RCAR_GP_PIN(2, 1), /* QSPI0_MOSI/IO0 */
+ [31] = RCAR_GP_PIN(2, 0), /* QSPI0_SPCLK */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [0] = RCAR_GP_PIN(0, 4), /* D4 */
+ [1] = RCAR_GP_PIN(0, 3), /* D3 */
+ [2] = RCAR_GP_PIN(0, 2), /* D2 */
+ [3] = RCAR_GP_PIN(0, 1), /* D1 */
+ [4] = RCAR_GP_PIN(0, 0), /* D0 */
+ [5] = RCAR_GP_PIN(1, 22), /* WE0# */
+ [6] = RCAR_GP_PIN(1, 21), /* CS0# */
+ [7] = RCAR_GP_PIN(1, 20), /* CLKOUT */
+ [8] = RCAR_GP_PIN(1, 19), /* A19 */
+ [9] = RCAR_GP_PIN(1, 18), /* A18 */
+ [10] = RCAR_GP_PIN(1, 17), /* A17 */
+ [11] = RCAR_GP_PIN(1, 16), /* A16 */
+ [12] = RCAR_GP_PIN(1, 15), /* A15 */
+ [13] = RCAR_GP_PIN(1, 14), /* A14 */
+ [14] = RCAR_GP_PIN(1, 13), /* A13 */
+ [15] = RCAR_GP_PIN(1, 12), /* A12 */
+ [16] = RCAR_GP_PIN(1, 11), /* A11 */
+ [17] = RCAR_GP_PIN(1, 10), /* A10 */
+ [18] = RCAR_GP_PIN(1, 9), /* A9 */
+ [19] = RCAR_GP_PIN(1, 8), /* A8 */
+ [20] = RCAR_GP_PIN(1, 7), /* A7 */
+ [21] = RCAR_GP_PIN(1, 6), /* A6 */
+ [22] = RCAR_GP_PIN(1, 5), /* A5 */
+ [23] = RCAR_GP_PIN(1, 4), /* A4 */
+ [24] = RCAR_GP_PIN(1, 3), /* A3 */
+ [25] = RCAR_GP_PIN(1, 2), /* A2 */
+ [26] = RCAR_GP_PIN(1, 1), /* A1 */
+ [27] = RCAR_GP_PIN(1, 0), /* A0 */
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = RCAR_GP_PIN(2, 25), /* PUEN_EX_WAIT0 */
+ [31] = RCAR_GP_PIN(2, 24), /* PUEN_RD/WR# */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [0] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [1] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [2] = PIN_NUMBER('H', 1), /* ASEBRK */
+ [3] = PIN_NONE,
+ [4] = PIN_NUMBER('G', 2), /* TDI */
+ [5] = PIN_NUMBER('F', 3), /* TMS */
+ [6] = PIN_NUMBER('F', 4), /* TCK */
+ [7] = PIN_NUMBER('F', 1), /* TRST# */
+ [8] = PIN_NONE,
+ [9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NUMBER('G', 3), /* FSCLKST# */
+ [16] = RCAR_GP_PIN(0, 17), /* SDA4 */
+ [17] = RCAR_GP_PIN(0, 16), /* SCL4 */
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_A_NUMBER('D', 3), /* PRESETOUT# */
+ [21] = RCAR_GP_PIN(0, 15), /* D15 */
+ [22] = RCAR_GP_PIN(0, 14), /* D14 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 12), /* D12 */
+ [25] = RCAR_GP_PIN(0, 11), /* D11 */
+ [26] = RCAR_GP_PIN(0, 10), /* D10 */
+ [27] = RCAR_GP_PIN(0, 9), /* D9 */
+ [28] = RCAR_GP_PIN(0, 8), /* D8 */
+ [29] = RCAR_GP_PIN(0, 7), /* D7 */
+ [30] = RCAR_GP_PIN(0, 6), /* D6 */
+ [31] = RCAR_GP_PIN(0, 5), /* D5 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [0] = RCAR_GP_PIN(5, 0), /* SCK0_A */
+ [1] = RCAR_GP_PIN(5, 4), /* RTS0#/TANS_A */
+ [2] = RCAR_GP_PIN(5, 3), /* CTS0#_A */
+ [3] = RCAR_GP_PIN(5, 2), /* TX0_A */
+ [4] = RCAR_GP_PIN(5, 1), /* RX0_A */
+ [5] = PIN_NONE,
+ [6] = PIN_NONE,
+ [7] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [8] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [11] = RCAR_GP_PIN(4, 10), /* SD3_DS */
+ [12] = RCAR_GP_PIN(4, 9), /* SD3_DAT7 */
+ [13] = RCAR_GP_PIN(4, 8), /* SD3_DAT6 */
+ [14] = RCAR_GP_PIN(4, 7), /* SD3_DAT5 */
+ [15] = RCAR_GP_PIN(4, 6), /* SD3_DAT4 */
+ [16] = RCAR_GP_PIN(4, 5), /* SD3_DAT3 */
+ [17] = RCAR_GP_PIN(4, 4), /* SD3_DAT2 */
+ [18] = RCAR_GP_PIN(4, 3), /* SD3_DAT1 */
+ [19] = RCAR_GP_PIN(4, 2), /* SD3_DAT0 */
+ [20] = RCAR_GP_PIN(4, 1), /* SD3_CMD */
+ [21] = RCAR_GP_PIN(4, 0), /* SD3_CLK */
+ [22] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [23] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [24] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [25] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [26] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [27] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [28] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [29] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [30] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [31] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [0] = RCAR_GP_PIN(6, 8), /* AUDIO_CLKA */
+ [1] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [2] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [3] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [4] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [5] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [6] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [7] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [8] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [9] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [10] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2 */
+ [12] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1 */
+ [13] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [14] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [15] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [16] = PIN_NUMBER('T', 21), /* MLB_REF */
+ [17] = RCAR_GP_PIN(5, 19), /* MLB_DAT */
+ [18] = RCAR_GP_PIN(5, 18), /* MLB_SIG */
+ [19] = RCAR_GP_PIN(5, 17), /* MLB_CLK */
+ [20] = RCAR_GP_PIN(5, 16), /* SSI_SDATA9 */
+ [21] = RCAR_GP_PIN(5, 15), /* MSIOF0_SS2 */
+ [22] = RCAR_GP_PIN(5, 14), /* MSIOF0_SS1 */
+ [23] = RCAR_GP_PIN(5, 13), /* MSIOF0_SYNC */
+ [24] = RCAR_GP_PIN(5, 12), /* MSIOF0_TXD */
+ [25] = RCAR_GP_PIN(5, 11), /* MSIOF0_RXD */
+ [26] = RCAR_GP_PIN(5, 10), /* MSIOF0_SCK */
+ [27] = RCAR_GP_PIN(5, 9), /* RX2_A */
+ [28] = RCAR_GP_PIN(5, 8), /* TX2_A */
+ [29] = RCAR_GP_PIN(5, 7), /* SCK2_A */
+ [30] = RCAR_GP_PIN(5, 6), /* TX1 */
+ [31] = RCAR_GP_PIN(5, 5), /* RX1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [0] = PIN_NONE,
+ [1] = PIN_NONE,
+ [2] = PIN_NONE,
+ [3] = PIN_NONE,
+ [4] = PIN_NONE,
+ [5] = PIN_NONE,
+ [6] = PIN_NONE,
+ [7] = PIN_NONE,
+ [8] = PIN_NONE,
+ [9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = RCAR_GP_PIN(6, 9), /* PUEN_USB30_OVC */
+ [31] = RCAR_GP_PIN(6, 17), /* PUEN_USB30_PWEN */
+ } },
+ { /* sentinel */ },
+};
+
+static unsigned int r8a77990_pinmux_get_bias(struct sh_pfc *pfc,
+ unsigned int pin)
+{
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
+
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
+ return PIN_CONFIG_BIAS_DISABLE;
+
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
+ return PIN_CONFIG_BIAS_DISABLE;
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
+ return PIN_CONFIG_BIAS_PULL_UP;
+ else
+ return PIN_CONFIG_BIAS_PULL_DOWN;
+}
+
+static void r8a77990_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
+ unsigned int bias)
+{
+ const struct pinmux_bias_reg *reg;
+ u32 enable, updown;
+ unsigned int bit;
+
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
+ return;
+
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
+ if (bias != PIN_CONFIG_BIAS_DISABLE)
+ enable |= BIT(bit);
+
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
+ if (bias == PIN_CONFIG_BIAS_PULL_UP)
+ updown |= BIT(bit);
+
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
+}
+
+static const struct sh_pfc_soc_operations r8a77990_pinmux_ops = {
+ .get_bias = r8a77990_pinmux_get_bias,
+ .set_bias = r8a77990_pinmux_set_bias,
+};
+
+const struct sh_pfc_soc_info r8a77990_pinmux_info = {
+ .name = "r8a77990_pfc",
+ .ops = &r8a77990_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
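(Sketch, not part of the patch above: the bias callbacks split pull handling across two registers — PUENn gates whether any pull is applied to a pin, PUDn selects pull-up versus pull-down — and set_bias writes PUD before PUEN so the direction is valid before the pull is switched on. A minimal standalone illustration of the same decode, with illustrative names only:)

	#include <stdint.h>

	enum pull_state { PULL_NONE, PULL_UP, PULL_DOWN };

	/*
	 * Decode one pin's bias from shadow copies of the PUENn/PUDn
	 * registers, mirroring r8a77990_pinmux_get_bias() above.
	 */
	static enum pull_state decode_bias(uint32_t puen, uint32_t pud,
					   unsigned int bit)
	{
		if (!(puen & (1u << bit)))
			return PULL_NONE;	/* pull network disabled */

		return (pud & (1u << bit)) ? PULL_UP : PULL_DOWN;
	}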
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 7fad897cd9f55..3d0b31636d6d8 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -274,6 +274,7 @@ extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
extern const struct sh_pfc_soc_info r8a7743_pinmux_info;
extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77470_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
@@ -287,6 +288,7 @@ extern const struct sh_pfc_soc_info r8a7796_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
extern const struct sh_pfc_soc_info r8a77970_pinmux_info;
extern const struct sh_pfc_soc_info r8a77980_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77990_pinmux_info;
extern const struct sh_pfc_soc_info r8a77995_pinmux_info;
extern const struct sh_pfc_soc_info sh7203_pinmux_info;
extern const struct sh_pfc_soc_info sh7264_pinmux_info;
@@ -415,9 +417,13 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
PORT_GP_CFG_1(bank, 9, fn, sfx, cfg)
#define PORT_GP_10(bank, fn, sfx) PORT_GP_CFG_10(bank, fn, sfx, 0)
-#define PORT_GP_CFG_12(bank, fn, sfx, cfg) \
+#define PORT_GP_CFG_11(bank, fn, sfx, cfg) \
PORT_GP_CFG_10(bank, fn, sfx, cfg), \
- PORT_GP_CFG_1(bank, 10, fn, sfx, cfg), \
+ PORT_GP_CFG_1(bank, 10, fn, sfx, cfg)
+#define PORT_GP_11(bank, fn, sfx) PORT_GP_CFG_11(bank, fn, sfx, 0)
+
+#define PORT_GP_CFG_12(bank, fn, sfx, cfg) \
+ PORT_GP_CFG_11(bank, fn, sfx, cfg), \
PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
#define PORT_GP_12(bank, fn, sfx) PORT_GP_CFG_12(bank, fn, sfx, 0)
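(The PORT_GP_CFG_11 helper added here slots into the existing recursive pattern, where each N-pin macro expands the (N-1)-pin macro plus one more PORT_GP_CFG_1() entry; an expansion trace of the new chain, illustration only:)

	/*
	 * PORT_GP_CFG_12(bank, fn, sfx, cfg)
	 *  -> PORT_GP_CFG_11(bank, fn, sfx, cfg),
	 *     PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
	 *  -> PORT_GP_CFG_10(bank, fn, sfx, cfg),
	 *     PORT_GP_CFG_1(bank, 10, fn, sfx, cfg),
	 *     PORT_GP_CFG_1(bank, 11, fn, sfx, cfg)
	 */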
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 5de1f63b07bb8..95282cda6ceea 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -81,4 +81,8 @@ config PINCTRL_SUN50I_H6
def_bool ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI
+config PINCTRL_SUN50I_H6_R
+ def_bool ARM64 && ARCH_SUNXI
+ select PINCTRL_SUNXI
+
endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index 3c4aec6611e98..adb8443aa55c2 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -19,5 +19,6 @@ obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
obj-$(CONFIG_PINCTRL_SUN50I_H5) += pinctrl-sun50i-h5.o
obj-$(CONFIG_PINCTRL_SUN50I_H6) += pinctrl-sun50i-h6.o
+obj-$(CONFIG_PINCTRL_SUN50I_H6_R) += pinctrl-sun50i-h6-r.o
obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
obj-$(CONFIG_PINCTRL_SUN9I_A80_R) += pinctrl-sun9i-a80-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
new file mode 100644
index 0000000000000..4557e18d59899
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner H6 R_PIO pin controller driver
+ *
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on pinctrl-sun6i-a31-r.c, which is:
+ * Copyright (C) 2014 Boris Brezillon
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/reset.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun50i_h6_r_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "s_i2c"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PL_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "s_i2c"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PL_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PL_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PL_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PL_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PL_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PL_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PL_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_pwm"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PL_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_cir_rx"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PL_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_w1"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PL_EINT10 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PM_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PM_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2), /* PM_EINT2 */
+ SUNXI_FUNCTION(0x3, "1wire")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PM_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(M, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PM_EINT4 */
+};
+
+static const struct sunxi_pinctrl_desc sun50i_h6_r_pinctrl_data = {
+ .pins = sun50i_h6_r_pins,
+ .npins = ARRAY_SIZE(sun50i_h6_r_pins),
+ .pin_base = PL_BASE,
+ .irq_banks = 2,
+};
+
+static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun50i_h6_r_pinctrl_data);
+}
+
+static const struct of_device_id sun50i_h6_r_pinctrl_match[] = {
+ { .compatible = "allwinner,sun50i-h6-r-pinctrl", },
+ {}
+};
+
+static struct platform_driver sun50i_h6_r_pinctrl_driver = {
+ .probe = sun50i_h6_r_pinctrl_probe,
+ .driver = {
+ .name = "sun50i-h6-r-pinctrl",
+ .of_match_table = sun50i_h6_r_pinctrl_match,
+ },
+};
+builtin_platform_driver(sun50i_h6_r_pinctrl_driver);
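(From the consumer side — a generic sketch using the standard pinctrl consumer API, not something this commit adds — a client driver behind the H6 R_PIO controller would normally just select its named state and let the board DT map it onto one of the functions above, e.g. "s_uart" on PL2/PL3. The driver core applies the default state automatically at probe, so explicit code like this is only needed for non-default states:)

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/pinctrl/consumer.h>

	static int example_client_apply_pins(struct device *dev)
	{
		struct pinctrl *p;
		struct pinctrl_state *s;

		p = devm_pinctrl_get(dev);	/* mappings come from the DT */
		if (IS_ERR(p))
			return PTR_ERR(p);

		s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
		if (IS_ERR(s))
			return PTR_ERR(s);

		return pinctrl_select_state(p, s);
	}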
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 72c718e66ebb1..49c7c1499bc3c 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -33,17 +33,6 @@
#include "../pinctrl-utils.h"
#include "pinctrl-tegra.h"
-struct tegra_pmx {
- struct device *dev;
- struct pinctrl_dev *pctl;
-
- const struct tegra_pinctrl_soc_data *soc;
- const char **group_pins;
-
- int nbanks;
- void __iomem **regs;
-};
-
static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
{
return readl(pmx->regs[bank] + reg);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index 33b17cb1471e5..aa33c20766c4a 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -16,6 +16,17 @@
#ifndef __PINMUX_TEGRA_H__
#define __PINMUX_TEGRA_H__
+struct tegra_pmx {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+
+ const struct tegra_pinctrl_soc_data *soc;
+ const char **group_pins;
+
+ int nbanks;
+ void __iomem **regs;
+};
+
enum tegra_pinconf_param {
/* argument: tegra_pinconf_pull */
TEGRA_PINCONF_PARAM_PULL,
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
index 7e38ee9bae785..b6dd939d32cc4 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra20.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
@@ -19,6 +19,7 @@
* more details.
*/
+#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -2231,9 +2232,36 @@ static const struct tegra_pinctrl_soc_data tegra20_pinctrl = {
.drvtype_in_mux = false,
};
+static const char *cdev1_parents[] = {
+ "dev1_osc_div", "pll_a_out0", "pll_m_out1", "audio",
+};
+
+static const char *cdev2_parents[] = {
+ "dev2_osc_div", "hclk", "pclk", "pll_p_out4",
+};
+
+static void tegra20_pinctrl_register_clock_muxes(struct platform_device *pdev)
+{
+ struct tegra_pmx *pmx = platform_get_drvdata(pdev);
+
+ clk_register_mux(NULL, "cdev1_mux", cdev1_parents, 4, 0,
+ pmx->regs[1] + 0x8, 2, 2, CLK_MUX_READ_ONLY, NULL);
+
+ clk_register_mux(NULL, "cdev2_mux", cdev2_parents, 4, 0,
+ pmx->regs[1] + 0x8, 4, 2, CLK_MUX_READ_ONLY, NULL);
+}
+
static int tegra20_pinctrl_probe(struct platform_device *pdev)
{
- return tegra_pinctrl_probe(pdev, &tegra20_pinctrl);
+ int err;
+
+ err = tegra_pinctrl_probe(pdev, &tegra20_pinctrl);
+ if (err)
+ return err;
+
+ tegra20_pinctrl_register_clock_muxes(pdev);
+
+ return 0;
}
static const struct of_device_id tegra20_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 0976fbfecd503..58825f68b58ba 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -481,6 +481,31 @@ static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned ether_rmii_pins[] = {6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17};
static const int ether_rmii_muxvals[] = {4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4};
+static const unsigned hscin0_ci_pins[] = {102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112};
+static const int hscin0_ci_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscin0_p_pins[] = {102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112};
+static const int hscin0_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscin0_s_pins[] = {116, 117, 118, 119};
+static const int hscin0_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin1_p_pins[] = {124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134};
+static const int hscin1_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscin1_s_pins[] = {120, 121, 122, 123};
+static const int hscin1_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin2_s_pins[] = {124, 125, 126, 127};
+static const int hscin2_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscout0_ci_pins[] = {113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123};
+static const int hscout0_ci_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscout0_p_pins[] = {113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123};
+static const int hscout0_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscout0_s_pins[] = {116, 117, 118, 119};
+static const int hscout0_s_muxvals[] = {4, 4, 4, 4};
+static const unsigned hscout1_s_pins[] = {120, 121, 122, 123};
+static const int hscout1_s_muxvals[] = {4, 4, 4, 4};
static const unsigned i2c0_pins[] = {63, 64};
static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {65, 66};
@@ -556,6 +581,16 @@ static const struct uniphier_pinctrl_group uniphier_ld11_groups[] = {
UNIPHIER_PINCTRL_GROUP(emmc),
UNIPHIER_PINCTRL_GROUP(emmc_dat8),
UNIPHIER_PINCTRL_GROUP(ether_rmii),
+ UNIPHIER_PINCTRL_GROUP(hscin0_ci),
+ UNIPHIER_PINCTRL_GROUP(hscin0_p),
+ UNIPHIER_PINCTRL_GROUP(hscin0_s),
+ UNIPHIER_PINCTRL_GROUP(hscin1_p),
+ UNIPHIER_PINCTRL_GROUP(hscin1_s),
+ UNIPHIER_PINCTRL_GROUP(hscin2_s),
+ UNIPHIER_PINCTRL_GROUP(hscout0_ci),
+ UNIPHIER_PINCTRL_GROUP(hscout0_p),
+ UNIPHIER_PINCTRL_GROUP(hscout0_s),
+ UNIPHIER_PINCTRL_GROUP(hscout1_s),
UNIPHIER_PINCTRL_GROUP(i2c0),
UNIPHIER_PINCTRL_GROUP(i2c1),
UNIPHIER_PINCTRL_GROUP(i2c3),
@@ -583,6 +618,15 @@ static const char * const aout1_groups[] = {"aout1"};
static const char * const aoutiec1_groups[] = {"aoutiec1"};
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
static const char * const ether_rmii_groups[] = {"ether_rmii"};
+static const char * const hscin0_groups[] = {"hscin0_ci",
+ "hscin0_p",
+ "hscin0_s"};
+static const char * const hscin1_groups[] = {"hscin1_p", "hscin1_s"};
+static const char * const hscin2_groups[] = {"hscin2_s"};
+static const char * const hscout0_groups[] = {"hscout0_ci",
+ "hscout0_p",
+ "hscout0_s"};
+static const char * const hscout1_groups[] = {"hscout1_s"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c3_groups[] = {"i2c3"};
@@ -603,6 +647,11 @@ static const struct uniphier_pinmux_function uniphier_ld11_functions[] = {
UNIPHIER_PINMUX_FUNCTION(aoutiec1),
UNIPHIER_PINMUX_FUNCTION(emmc),
UNIPHIER_PINMUX_FUNCTION(ether_rmii),
+ UNIPHIER_PINMUX_FUNCTION(hscin0),
+ UNIPHIER_PINMUX_FUNCTION(hscin1),
+ UNIPHIER_PINMUX_FUNCTION(hscin2),
+ UNIPHIER_PINMUX_FUNCTION(hscout0),
+ UNIPHIER_PINMUX_FUNCTION(hscout1),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
UNIPHIER_PINMUX_FUNCTION(i2c3),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index bf8f0c3bea5e2..9f449b35e300a 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -566,6 +566,33 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
41, 42, 45};
static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscin0_ci_pins[] = {102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112};
+static const int hscin0_ci_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscin0_p_pins[] = {102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112};
+static const int hscin0_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscin0_s_pins[] = {116, 117, 118, 119};
+static const int hscin0_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin1_p_pins[] = {124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134};
+static const int hscin1_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscin1_s_pins[] = {120, 121, 122, 123};
+static const int hscin1_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin2_s_pins[] = {124, 125, 126, 127};
+static const int hscin2_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscin3_s_pins[] = {129, 130, 131, 132};
+static const int hscin3_s_muxvals[] = {3, 3, 3, 3};
+static const unsigned hscout0_ci_pins[] = {113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123};
+static const int hscout0_ci_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned hscout0_p_pins[] = {113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123};
+static const int hscout0_p_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned hscout0_s_pins[] = {116, 117, 118, 119};
+static const int hscout0_s_muxvals[] = {4, 4, 4, 4};
+static const unsigned hscout1_s_pins[] = {120, 121, 122, 123};
+static const int hscout1_s_muxvals[] = {4, 4, 4, 4};
static const unsigned i2c0_pins[] = {63, 64};
static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {65, 66};
@@ -641,6 +668,17 @@ static const struct uniphier_pinctrl_group uniphier_ld20_groups[] = {
UNIPHIER_PINCTRL_GROUP(emmc_dat8),
UNIPHIER_PINCTRL_GROUP(ether_rgmii),
UNIPHIER_PINCTRL_GROUP(ether_rmii),
+ UNIPHIER_PINCTRL_GROUP(hscin0_ci),
+ UNIPHIER_PINCTRL_GROUP(hscin0_p),
+ UNIPHIER_PINCTRL_GROUP(hscin0_s),
+ UNIPHIER_PINCTRL_GROUP(hscin1_p),
+ UNIPHIER_PINCTRL_GROUP(hscin1_s),
+ UNIPHIER_PINCTRL_GROUP(hscin2_s),
+ UNIPHIER_PINCTRL_GROUP(hscin3_s),
+ UNIPHIER_PINCTRL_GROUP(hscout0_ci),
+ UNIPHIER_PINCTRL_GROUP(hscout0_p),
+ UNIPHIER_PINCTRL_GROUP(hscout0_s),
+ UNIPHIER_PINCTRL_GROUP(hscout1_s),
UNIPHIER_PINCTRL_GROUP(i2c0),
UNIPHIER_PINCTRL_GROUP(i2c1),
UNIPHIER_PINCTRL_GROUP(i2c3),
@@ -668,6 +706,16 @@ static const char * const aoutiec1_groups[] = {"aoutiec1"};
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
static const char * const ether_rgmii_groups[] = {"ether_rgmii"};
static const char * const ether_rmii_groups[] = {"ether_rmii"};
+static const char * const hscin0_groups[] = {"hscin0_ci",
+ "hscin0_p",
+ "hscin0_s"};
+static const char * const hscin1_groups[] = {"hscin1_p", "hscin1_s"};
+static const char * const hscin2_groups[] = {"hscin2_s"};
+static const char * const hscin3_groups[] = {"hscin3_s"};
+static const char * const hscout0_groups[] = {"hscout0_ci",
+ "hscout0_p",
+ "hscout0_s"};
+static const char * const hscout1_groups[] = {"hscout1_s"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c3_groups[] = {"i2c3"};
@@ -691,6 +739,12 @@ static const struct uniphier_pinmux_function uniphier_ld20_functions[] = {
UNIPHIER_PINMUX_FUNCTION(emmc),
UNIPHIER_PINMUX_FUNCTION(ether_rgmii),
UNIPHIER_PINMUX_FUNCTION(ether_rmii),
+ UNIPHIER_PINMUX_FUNCTION(hscin0),
+ UNIPHIER_PINMUX_FUNCTION(hscin1),
+ UNIPHIER_PINMUX_FUNCTION(hscin2),
+ UNIPHIER_PINMUX_FUNCTION(hscin3),
+ UNIPHIER_PINMUX_FUNCTION(hscout0),
+ UNIPHIER_PINMUX_FUNCTION(hscout1),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
UNIPHIER_PINMUX_FUNCTION(i2c3),
diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
index 4878a67a844c3..604fe781c4658 100644
--- a/include/dt-bindings/pinctrl/mt7623-pinfunc.h
+++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
@@ -23,20 +23,26 @@
#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_PWRAP_SPICK2_I (MTK_PIN_NO(5) | 1)
+#define MT7623_PIN_5_PWRAP_SPI0_CK2_FUNC_ANT_SEL1 (MTK_PIN_NO(5) | 5)
#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_PWRAP_SPICS2_B_I (MTK_PIN_NO(6) | 1)
+#define MT7623_PIN_6_PWRAP_SPI0_CSN2_FUNC_ANT_SEL0 (MTK_PIN_NO(6) | 5)
#define MT7623_PIN_7_SPI1_CSN_FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
#define MT7623_PIN_7_SPI1_CSN_FUNC_SPI1_CS (MTK_PIN_NO(7) | 1)
+#define MT7623_PIN_7_SPI1_CSN_FUNC_KCOL0 (MTK_PIN_NO(7) | 4)
#define MT7623_PIN_8_SPI1_MI_FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MI (MTK_PIN_NO(8) | 1)
#define MT7623_PIN_8_SPI1_MI_FUNC_SPI1_MO (MTK_PIN_NO(8) | 2)
+#define MT7623_PIN_8_SPI1_MI_FUNC_KCOL1 (MTK_PIN_NO(8) | 4)
#define MT7623_PIN_9_SPI1_MO_FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MO (MTK_PIN_NO(9) | 1)
#define MT7623_PIN_9_SPI1_MO_FUNC_SPI1_MI (MTK_PIN_NO(9) | 2)
+#define MT7623_PIN_9_SPI1_MO_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(9) | 3)
+#define MT7623_PIN_9_SPI1_MO_FUNC_KCOL2 (MTK_PIN_NO(9) | 4)
#define MT7623_PIN_10_RTC32K_CK_FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
#define MT7623_PIN_10_RTC32K_CK_FUNC_RTC32K_CK (MTK_PIN_NO(10) | 1)
@@ -53,6 +59,7 @@
#define MT7623_PIN_14_GPIO14_FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
#define MT7623_PIN_14_GPIO14_FUNC_URXD2 (MTK_PIN_NO(14) | 1)
#define MT7623_PIN_14_GPIO14_FUNC_UTXD2 (MTK_PIN_NO(14) | 2)
+#define MT7623_PIN_14_GPIO14_FUNC_SRCCLKENAI2 (MTK_PIN_NO(14) | 5)
#define MT7623_PIN_15_GPIO15_FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
#define MT7623_PIN_15_GPIO15_FUNC_UTXD2 (MTK_PIN_NO(15) | 1)
@@ -60,88 +67,139 @@
#define MT7623_PIN_18_PCM_CLK_FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
#define MT7623_PIN_18_PCM_CLK_FUNC_PCM_CLK0 (MTK_PIN_NO(18) | 1)
+#define MT7623_PIN_18_PCM_CLK_FUNC_MRG_CLK (MTK_PIN_NO(18) | 2)
+#define MT7623_PIN_18_PCM_CLK_FUNC_MM_TEST_CK (MTK_PIN_NO(18) | 4)
+#define MT7623_PIN_18_PCM_CLK_FUNC_CONN_DSP_JCK (MTK_PIN_NO(18) | 5)
#define MT7623_PIN_18_PCM_CLK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(18) | 6)
#define MT7623_PIN_19_PCM_SYNC_FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
#define MT7623_PIN_19_PCM_SYNC_FUNC_PCM_SYNC (MTK_PIN_NO(19) | 1)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_MRG_SYNC (MTK_PIN_NO(19) | 2)
+#define MT7623_PIN_19_PCM_SYNC_FUNC_CONN_DSP_JINTP (MTK_PIN_NO(19) | 5)
#define MT7623_PIN_19_PCM_SYNC_FUNC_AP_PCM_SYNC (MTK_PIN_NO(19) | 6)
#define MT7623_PIN_20_PCM_RX_FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
#define MT7623_PIN_20_PCM_RX_FUNC_PCM_RX (MTK_PIN_NO(20) | 1)
+#define MT7623_PIN_20_PCM_RX_FUNC_MRG_RX (MTK_PIN_NO(20) | 2)
+#define MT7623_PIN_20_PCM_RX_FUNC_MRG_TX (MTK_PIN_NO(20) | 3)
#define MT7623_PIN_20_PCM_RX_FUNC_PCM_TX (MTK_PIN_NO(20) | 4)
+#define MT7623_PIN_20_PCM_RX_FUNC_CONN_DSP_JDI (MTK_PIN_NO(20) | 5)
#define MT7623_PIN_20_PCM_RX_FUNC_AP_PCM_RX (MTK_PIN_NO(20) | 6)
#define MT7623_PIN_21_PCM_TX_FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
#define MT7623_PIN_21_PCM_TX_FUNC_PCM_TX (MTK_PIN_NO(21) | 1)
+#define MT7623_PIN_21_PCM_TX_FUNC_MRG_TX (MTK_PIN_NO(21) | 2)
+#define MT7623_PIN_21_PCM_TX_FUNC_MRG_RX (MTK_PIN_NO(21) | 3)
#define MT7623_PIN_21_PCM_TX_FUNC_PCM_RX (MTK_PIN_NO(21) | 4)
+#define MT7623_PIN_21_PCM_TX_FUNC_CONN_DSP_JMS (MTK_PIN_NO(21) | 5)
#define MT7623_PIN_21_PCM_TX_FUNC_AP_PCM_TX (MTK_PIN_NO(21) | 6)
#define MT7623_PIN_22_EINT0_FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
#define MT7623_PIN_22_EINT0_FUNC_UCTS0 (MTK_PIN_NO(22) | 1)
#define MT7623_PIN_22_EINT0_FUNC_PCIE0_PERST_N (MTK_PIN_NO(22) | 2)
+#define MT7623_PIN_22_EINT0_FUNC_KCOL3 (MTK_PIN_NO(22) | 3)
+#define MT7623_PIN_22_EINT0_FUNC_CONN_DSP_JDO (MTK_PIN_NO(22) | 4)
+#define MT7623_PIN_22_EINT0_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(22) | 5)
#define MT7623_PIN_23_EINT1_FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
#define MT7623_PIN_23_EINT1_FUNC_URTS0 (MTK_PIN_NO(23) | 1)
#define MT7623_PIN_23_EINT1_FUNC_PCIE1_PERST_N (MTK_PIN_NO(23) | 2)
+#define MT7623_PIN_23_EINT1_FUNC_KCOL2 (MTK_PIN_NO(23) | 3)
+#define MT7623_PIN_23_EINT1_FUNC_CONN_MCU_TDO (MTK_PIN_NO(23) | 4)
+#define MT7623_PIN_23_EINT1_FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5)
#define MT7623_PIN_24_EINT2_FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
#define MT7623_PIN_24_EINT2_FUNC_UCTS1 (MTK_PIN_NO(24) | 1)
#define MT7623_PIN_24_EINT2_FUNC_PCIE2_PERST_N (MTK_PIN_NO(24) | 2)
+#define MT7623_PIN_24_EINT2_FUNC_KCOL1 (MTK_PIN_NO(24) | 3)
+#define MT7623_PIN_24_EINT2_FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(24) | 4)
#define MT7623_PIN_25_EINT3_FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
#define MT7623_PIN_25_EINT3_FUNC_URTS1 (MTK_PIN_NO(25) | 1)
+#define MT7623_PIN_25_EINT3_FUNC_KCOL0 (MTK_PIN_NO(25) | 3)
+#define MT7623_PIN_25_EINT3_FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(25) | 4)
#define MT7623_PIN_26_EINT4_FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
#define MT7623_PIN_26_EINT4_FUNC_UCTS3 (MTK_PIN_NO(26) | 1)
+#define MT7623_PIN_26_EINT4_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(26) | 2)
+#define MT7623_PIN_26_EINT4_FUNC_KROW3 (MTK_PIN_NO(26) | 3)
+#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_TCK0 (MTK_PIN_NO(26) | 4)
+#define MT7623_PIN_26_EINT4_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(26) | 5)
#define MT7623_PIN_26_EINT4_FUNC_PCIE2_WAKE_N (MTK_PIN_NO(26) | 6)
#define MT7623_PIN_27_EINT5_FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
#define MT7623_PIN_27_EINT5_FUNC_URTS3 (MTK_PIN_NO(27) | 1)
+#define MT7623_PIN_27_EINT5_FUNC_IDDIG_P1 (MTK_PIN_NO(27) | 2)
+#define MT7623_PIN_27_EINT5_FUNC_KROW2 (MTK_PIN_NO(27) | 3)
+#define MT7623_PIN_27_EINT5_FUNC_CONN_MCU_TDI (MTK_PIN_NO(27) | 4)
#define MT7623_PIN_27_EINT5_FUNC_PCIE1_WAKE_N (MTK_PIN_NO(27) | 6)
#define MT7623_PIN_28_EINT6_FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
#define MT7623_PIN_28_EINT6_FUNC_DRV_VBUS (MTK_PIN_NO(28) | 1)
+#define MT7623_PIN_28_EINT6_FUNC_KROW1 (MTK_PIN_NO(28) | 3)
+#define MT7623_PIN_28_EINT6_FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(28) | 4)
#define MT7623_PIN_28_EINT6_FUNC_PCIE0_WAKE_N (MTK_PIN_NO(28) | 6)
#define MT7623_PIN_29_EINT7_FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
#define MT7623_PIN_29_EINT7_FUNC_IDDIG (MTK_PIN_NO(29) | 1)
#define MT7623_PIN_29_EINT7_FUNC_MSDC1_WP (MTK_PIN_NO(29) | 2)
+#define MT7623_PIN_29_EINT7_FUNC_KROW0 (MTK_PIN_NO(29) | 3)
+#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 4)
+#define MT7623_PIN_29_EINT7_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(29) | 5)
#define MT7623_PIN_29_EINT7_FUNC_PCIE2_PERST_N (MTK_PIN_NO(29) | 6)
#define MT7623_PIN_33_I2S1_DATA_FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA (MTK_PIN_NO(33) | 1)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_I2S1_DATA_BYPS (MTK_PIN_NO(33) | 2)
#define MT7623_PIN_33_I2S1_DATA_FUNC_PCM_TX (MTK_PIN_NO(33) | 3)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_IMG_TEST_CK (MTK_PIN_NO(33) | 4)
+#define MT7623_PIN_33_I2S1_DATA_FUNC_G1_RXD0 (MTK_PIN_NO(33) | 5)
#define MT7623_PIN_33_I2S1_DATA_FUNC_AP_PCM_TX (MTK_PIN_NO(33) | 6)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_I2S1_DATA_IN (MTK_PIN_NO(34) | 1)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_PCM_RX (MTK_PIN_NO(34) | 3)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_VDEC_TEST_CK (MTK_PIN_NO(34) | 4)
+#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_G1_RXD1 (MTK_PIN_NO(34) | 5)
#define MT7623_PIN_34_I2S1_DATA_IN_FUNC_AP_PCM_RX (MTK_PIN_NO(34) | 6)
#define MT7623_PIN_35_I2S1_BCK_FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
#define MT7623_PIN_35_I2S1_BCK_FUNC_I2S1_BCK (MTK_PIN_NO(35) | 1)
#define MT7623_PIN_35_I2S1_BCK_FUNC_PCM_CLK0 (MTK_PIN_NO(35) | 3)
+#define MT7623_PIN_35_I2S1_BCK_FUNC_G1_RXD2 (MTK_PIN_NO(35) | 5)
#define MT7623_PIN_35_I2S1_BCK_FUNC_AP_PCM_CLKO (MTK_PIN_NO(35) | 6)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_I2S1_LRCK (MTK_PIN_NO(36) | 1)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_PCM_SYNC (MTK_PIN_NO(36) | 3)
+#define MT7623_PIN_36_I2S1_LRCK_FUNC_G1_RXD3 (MTK_PIN_NO(36) | 5)
#define MT7623_PIN_36_I2S1_LRCK_FUNC_AP_PCM_SYNC (MTK_PIN_NO(36) | 6)
#define MT7623_PIN_37_I2S1_MCLK_FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
#define MT7623_PIN_37_I2S1_MCLK_FUNC_I2S1_MCLK (MTK_PIN_NO(37) | 1)
+#define MT7623_PIN_37_I2S1_MCLK_FUNC_G1_RXDV (MTK_PIN_NO(37) | 5)
#define MT7623_PIN_39_JTMS_FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
#define MT7623_PIN_39_JTMS_FUNC_JTMS (MTK_PIN_NO(39) | 1)
+#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_TMS (MTK_PIN_NO(39) | 2)
+#define MT7623_PIN_39_JTMS_FUNC_CONN_MCU_AICE_JMSC (MTK_PIN_NO(39) | 3)
+#define MT7623_PIN_39_JTMS_FUNC_DFD_TMS_XI (MTK_PIN_NO(39) | 4)
#define MT7623_PIN_40_JTCK_FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
#define MT7623_PIN_40_JTCK_FUNC_JTCK (MTK_PIN_NO(40) | 1)
+#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_TCK1 (MTK_PIN_NO(40) | 2)
+#define MT7623_PIN_40_JTCK_FUNC_CONN_MCU_AICE_JCKC (MTK_PIN_NO(40) | 3)
+#define MT7623_PIN_40_JTCK_FUNC_DFD_TCK_XI (MTK_PIN_NO(40) | 4)
#define MT7623_PIN_41_JTDI_FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
#define MT7623_PIN_41_JTDI_FUNC_JTDI (MTK_PIN_NO(41) | 1)
+#define MT7623_PIN_41_JTDI_FUNC_CONN_MCU_TDI (MTK_PIN_NO(41) | 2)
+#define MT7623_PIN_41_JTDI_FUNC_DFD_TDI_XI (MTK_PIN_NO(41) | 4)
#define MT7623_PIN_42_JTDO_FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
#define MT7623_PIN_42_JTDO_FUNC_JTDO (MTK_PIN_NO(42) | 1)
+#define MT7623_PIN_42_JTDO_FUNC_CONN_MCU_TDO (MTK_PIN_NO(42) | 2)
+#define MT7623_PIN_42_JTDO_FUNC_DFD_TDO (MTK_PIN_NO(42) | 4)
#define MT7623_PIN_43_NCLE_FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
#define MT7623_PIN_43_NCLE_FUNC_NCLE (MTK_PIN_NO(43) | 1)
@@ -160,31 +218,40 @@
#define MT7623_PIN_47_NREB_FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
#define MT7623_PIN_47_NREB_FUNC_NREB (MTK_PIN_NO(47) | 1)
+#define MT7623_PIN_47_NREB_FUNC_IDDIG_P1 (MTK_PIN_NO(47) | 2)
#define MT7623_PIN_48_NRNB_FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
#define MT7623_PIN_48_NRNB_FUNC_NRNB (MTK_PIN_NO(48) | 1)
+#define MT7623_PIN_48_NRNB_FUNC_DRV_VBUS_P1 (MTK_PIN_NO(48) | 2)
#define MT7623_PIN_49_I2S0_DATA_FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA (MTK_PIN_NO(49) | 1)
+#define MT7623_PIN_49_I2S0_DATA_FUNC_I2S0_DATA_BYPS (MTK_PIN_NO(49) | 2)
#define MT7623_PIN_49_I2S0_DATA_FUNC_PCM_TX (MTK_PIN_NO(49) | 3)
#define MT7623_PIN_49_I2S0_DATA_FUNC_AP_I2S_DO (MTK_PIN_NO(49) | 6)
#define MT7623_PIN_53_SPI0_CSN_FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
#define MT7623_PIN_53_SPI0_CSN_FUNC_SPI0_CS (MTK_PIN_NO(53) | 1)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_SPDIF (MTK_PIN_NO(53) | 3)
+#define MT7623_PIN_53_SPI0_CSN_FUNC_ADC_CK (MTK_PIN_NO(53) | 4)
#define MT7623_PIN_53_SPI0_CSN_FUNC_PWM1 (MTK_PIN_NO(53) | 5)
#define MT7623_PIN_54_SPI0_CK_FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
#define MT7623_PIN_54_SPI0_CK_FUNC_SPI0_CK (MTK_PIN_NO(54) | 1)
+#define MT7623_PIN_54_SPI0_CK_FUNC_SPDIF_IN1 (MTK_PIN_NO(54) | 3)
+#define MT7623_PIN_54_SPI0_CK_FUNC_ADC_DAT_IN (MTK_PIN_NO(54) | 4)
#define MT7623_PIN_55_SPI0_MI_FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MI (MTK_PIN_NO(55) | 1)
#define MT7623_PIN_55_SPI0_MI_FUNC_SPI0_MO (MTK_PIN_NO(55) | 2)
#define MT7623_PIN_55_SPI0_MI_FUNC_MSDC1_WP (MTK_PIN_NO(55) | 3)
+#define MT7623_PIN_55_SPI0_MI_FUNC_ADC_WS (MTK_PIN_NO(55) | 4)
#define MT7623_PIN_55_SPI0_MI_FUNC_PWM2 (MTK_PIN_NO(55) | 5)
#define MT7623_PIN_56_SPI0_MO_FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1)
#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2)
+#define MT7623_PIN_56_SPI0_MO_FUNC_SPDIF_IN0 (MTK_PIN_NO(56) | 3)
#define MT7623_PIN_57_SDA1_FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
#define MT7623_PIN_57_SDA1_FUNC_SDA1 (MTK_PIN_NO(57) | 1)
@@ -275,10 +342,23 @@
#define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
#define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1)
+#define MT7623_PIN_83_LCM_RST_FUNC_VDAC_CK_XI (MTK_PIN_NO(83) | 2)
#define MT7623_PIN_84_DSI_TE_FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
#define MT7623_PIN_84_DSI_TE_FUNC_DSI_TE (MTK_PIN_NO(84) | 1)
+#define MT7623_PIN_91_MIPI_TDN3_FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT7623_PIN_91_MIPI_TDN3_FUNC_TDN3 (MTK_PIN_NO(91) | 1)
+
+#define MT7623_PIN_92_MIPI_TDP3_FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT7623_PIN_92_MIPI_TDP3_FUNC_TDP3 (MTK_PIN_NO(92) | 1)
+
+#define MT7623_PIN_93_MIPI_TDN2_FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT7623_PIN_93_MIPI_TDN2_FUNC_TDN2 (MTK_PIN_NO(93) | 1)
+
+#define MT7623_PIN_94_MIPI_TDP2_FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT7623_PIN_94_MIPI_TDP2_FUNC_TDP2 (MTK_PIN_NO(94) | 1)
+
#define MT7623_PIN_95_MIPI_TCN_FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
#define MT7623_PIN_95_MIPI_TCN_FUNC_TCN (MTK_PIN_NO(95) | 1)
@@ -300,20 +380,24 @@
#define MT7623_PIN_101_SPI2_CSN_FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
#define MT7623_PIN_101_SPI2_CSN_FUNC_SPI2_CS (MTK_PIN_NO(101) | 1)
#define MT7623_PIN_101_SPI2_CSN_FUNC_SCL3 (MTK_PIN_NO(101) | 3)
+#define MT7623_PIN_101_SPI2_CSN_FUNC_KROW0 (MTK_PIN_NO(101) | 4)
#define MT7623_PIN_102_SPI2_MI_FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MI (MTK_PIN_NO(102) | 1)
#define MT7623_PIN_102_SPI2_MI_FUNC_SPI2_MO (MTK_PIN_NO(102) | 2)
#define MT7623_PIN_102_SPI2_MI_FUNC_SDA3 (MTK_PIN_NO(102) | 3)
+#define MT7623_PIN_102_SPI2_MI_FUNC_KROW1 (MTK_PIN_NO(102) | 4)
#define MT7623_PIN_103_SPI2_MO_FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MO (MTK_PIN_NO(103) | 1)
#define MT7623_PIN_103_SPI2_MO_FUNC_SPI2_MI (MTK_PIN_NO(103) | 2)
#define MT7623_PIN_103_SPI2_MO_FUNC_SCL3 (MTK_PIN_NO(103) | 3)
+#define MT7623_PIN_103_SPI2_MO_FUNC_KROW2 (MTK_PIN_NO(103) | 4)
#define MT7623_PIN_104_SPI2_CK_FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
#define MT7623_PIN_104_SPI2_CK_FUNC_SPI2_CK (MTK_PIN_NO(104) | 1)
#define MT7623_PIN_104_SPI2_CK_FUNC_SDA3 (MTK_PIN_NO(104) | 3)
+#define MT7623_PIN_104_SPI2_CK_FUNC_KROW3 (MTK_PIN_NO(104) | 4)
#define MT7623_PIN_105_MSDC1_CMD_FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
#define MT7623_PIN_105_MSDC1_CMD_FUNC_MSDC1_CMD (MTK_PIN_NO(105) | 1)
@@ -394,7 +478,7 @@
#define MT7623_PIN_121_MSDC0_DAT0_FUNC_WATCHDOG (MTK_PIN_NO(121) | 5)
#define MT7623_PIN_122_GPIO122_FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
-#define MT7623_PIN_122_GPIO122_FUNC_TEST (MTK_PIN_NO(122) | 1)
+#define MT7623_PIN_122_GPIO122_FUNC_CEC (MTK_PIN_NO(122) | 1)
#define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4)
#define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5)
@@ -404,12 +488,12 @@
#define MT7623_PIN_123_HTPLG_FUNC_UTXD0 (MTK_PIN_NO(123) | 5)
#define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
-#define MT7623_PIN_124_GPIO124_FUNC_TEST (MTK_PIN_NO(124) | 1)
+#define MT7623_PIN_124_GPIO124_FUNC_HDMISCK (MTK_PIN_NO(124) | 1)
#define MT7623_PIN_124_GPIO124_FUNC_SDA1 (MTK_PIN_NO(124) | 4)
#define MT7623_PIN_124_GPIO124_FUNC_PWM3 (MTK_PIN_NO(124) | 5)
#define MT7623_PIN_125_GPIO125_FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
-#define MT7623_PIN_125_GPIO125_FUNC_TEST (MTK_PIN_NO(125) | 1)
+#define MT7623_PIN_125_GPIO125_FUNC_HDMISD (MTK_PIN_NO(125) | 1)
#define MT7623_PIN_125_GPIO125_FUNC_SCL1 (MTK_PIN_NO(125) | 4)
#define MT7623_PIN_125_GPIO125_FUNC_PWM4 (MTK_PIN_NO(125) | 5)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fd0ea6af9e36c..8758a2a9e6c11 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -506,7 +506,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
-#define OSC_PCI_CONTROL_MASKS 0x0000001f
+#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
+#define OSC_PCI_CONTROL_MASKS 0x0000003f
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 8f87bbeceef41..514bffa11dbbd 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -14,6 +14,7 @@
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
+#define DPC_FATAL 3
struct pci_dev;
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index b454dfce60d90..4060004968c81 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: cfag12864b.h
* Version: 0.1.0
* Description: cfag12864b LCD driver header
- * License: GPLv2
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-12
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _CFAG12864B_H_
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h
index cb311798e0bc6..0738389b42b60 100644
--- a/include/linux/ks0108.h
+++ b/include/linux/ks0108.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: ks0108.h
* Version: 0.1.0
* Description: ks0108 LCD Controller driver header
- * License: GPLv2
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _KS0108_H_
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0f006cf8343df..02f72ebf31a78 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1002,6 +1002,18 @@ enum mlx5_wol_mode {
MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};
+enum mlx5_mpls_supported_fields {
+ MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
+ MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1,
+ MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
+ MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3
+};
+
+enum mlx5_flex_parser_protos {
+ MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
+ MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
+};
+
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 9f4d32e41c069..757b4a30281e6 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -160,6 +160,7 @@ struct mlx5_flow_act {
u32 modify_id;
uintptr_t esp_id;
struct mlx5_fs_vlan vlan;
+ struct ib_counters *counters;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
@@ -186,6 +187,9 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
+int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
+ u64 *packets, u64 *bytes);
+
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 8e0b8865f91e1..27134c4fcb76e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -299,9 +299,15 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_dport[0x1];
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
- u8 reserved_at_40[0x17];
+
+ u8 reserved_at_40[0x5];
+ u8 outer_first_mpls_over_udp[0x4];
+ u8 outer_first_mpls_over_gre[0x4];
+ u8 inner_first_mpls[0x4];
+ u8 outer_first_mpls[0x4];
+ u8 reserved_at_55[0x2];
u8 outer_esp_spi[0x1];
- u8 reserved_at_58[0x2];
+ u8 reserved_at_58[0x2];
u8 bth_dst_qp[0x1];
u8 reserved_at_5b[0x25];
@@ -435,6 +441,29 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_1a0[0x60];
};
+struct mlx5_ifc_fte_match_mpls_bits {
+ u8 mpls_label[0x14];
+ u8 mpls_exp[0x3];
+ u8 mpls_s_bos[0x1];
+ u8 mpls_ttl[0x8];
+};
+
+struct mlx5_ifc_fte_match_set_misc2_bits {
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
+
+ u8 reserved_at_80[0x100];
+
+ u8 metadata_reg_a[0x20];
+
+ u8 reserved_at_1a0[0x60];
+};
+
struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20];
@@ -1097,9 +1126,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_500[0x20];
u8 num_of_uars_per_page[0x20];
- u8 reserved_at_540[0x40];
- u8 reserved_at_580[0x3d];
+ u8 flex_parser_protocols[0x20];
+ u8 reserved_at_560[0x20];
+
+ u8 reserved_at_580[0x3c];
+ u8 mini_cqe_resp_stride_index[0x1];
u8 cqe_128_always[0x1];
u8 cqe_compression_128[0x1];
u8 cqe_compression[0x1];
@@ -1159,7 +1191,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
- u8 reserved_at_600[0xa00];
+ struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
+
+ u8 reserved_at_800[0x800];
};
enum {
@@ -4568,6 +4602,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -6958,9 +6993,10 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_create_flow_group_in_bits {
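
The mlx5_ifc_fte_match_mpls_bits layout mirrors a single MPLS label stack entry: a 20-bit label, 3-bit EXP/traffic class, bottom-of-stack bit, and 8-bit TTL. A small helper sketch showing how those four fields pack into the 32-bit label stack entry seen on the wire (pure arithmetic, no driver APIs assumed):

#include <linux/types.h>

/* Sketch: pack the fields matched by mlx5_ifc_fte_match_mpls_bits into
 * one MPLS label stack entry, in host byte order. */
static u32 my_mpls_lse_pack(u32 label, u8 exp, bool bos, u8 ttl)
{
	return ((label & 0xfffff) << 12) |	/* mpls_label[0x14] */
	       ((exp & 0x7) << 9) |		/* mpls_exp[0x3]    */
	       ((u32)bos << 8) |		/* mpls_s_bos[0x1]  */
	       ttl;				/* mpls_ttl[0x8]    */
}
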
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f0f3905f8bb8c..4c3881b44ef12 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -386,7 +386,7 @@ enum page_entry_size {
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
- * to the functions called when a no-page or a wp-page exception occurs.
+ * to the functions called when a no-page or a wp-page exception occurs.
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
@@ -1276,10 +1276,10 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
-int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
+void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
+ unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long start, unsigned long end);
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 091033a6b836f..e83d87fc5673d 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -13,9 +13,6 @@ struct device_node;
struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_pci_get_devfn(struct device_node *np);
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
-int of_get_pci_domain_nr(struct device_node *node);
-int of_pci_get_max_link_speed(struct device_node *node);
void of_pci_check_probe_only(void);
int of_pci_map_rid(struct device_node *np, u32 rid,
const char *map_name, const char *map_mask_name,
@@ -32,18 +29,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
return -EINVAL;
}
-static inline int
-of_pci_parse_bus_range(struct device_node *node, struct resource *res)
-{
- return -EINVAL;
-}
-
-static inline int
-of_get_pci_domain_nr(struct device_node *node)
-{
- return -1;
-}
-
static inline int of_pci_map_rid(struct device_node *np, u32 rid,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
@@ -51,12 +36,6 @@ static inline int of_pci_map_rid(struct device_node *np, u32 rid,
return -EINVAL;
}
-static inline int
-of_pci_get_max_link_speed(struct device_node *node)
-{
- return -EINVAL;
-}
-
static inline void of_pci_check_probe_only(void) { }
#endif
@@ -70,17 +49,4 @@ of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
}
#endif
-#if defined(CONFIG_OF_ADDRESS)
-int of_pci_get_host_bridge_resources(struct device_node *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base);
-#else
-static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
-{
- return -EINVAL;
-}
-#endif
-
#endif
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index baadad1aabbcc..29efa09d686b2 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -62,5 +62,6 @@ extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops);
+int pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
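
With pci_host_common_remove() exported, an ECAM-based host controller can be written as a removable platform driver. A minimal sketch; the compatible string and the pci_ecam_ops instance are hypothetical placeholders:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

/* Hypothetical controller-specific ECAM ops, declared elsewhere. */
extern struct pci_ecam_ops my_pcie_ecam_ops;

static int my_pcie_probe(struct platform_device *pdev)
{
	return pci_host_common_probe(pdev, &my_pcie_ecam_ops);
}

static const struct of_device_id my_pcie_of_match[] = {
	{ .compatible = "vendor,my-pcie" },	/* placeholder */
	{ },
};

static struct platform_driver my_pcie_driver = {
	.driver = {
		.name = "my-pcie",
		.of_match_table = my_pcie_of_match,
	},
	.probe = my_pcie_probe,
	.remove = pci_host_common_remove,
};
module_platform_driver(my_pcie_driver);
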
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index af657ca58b703..243eaa5a66ff3 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -90,8 +90,16 @@ struct pci_epc {
struct config_group *group;
/* spinlock to protect against concurrent access of EP controller */
spinlock_t lock;
+ unsigned int features;
};
+#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
+#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
+#define EPC_FEATURE_SET_BAR(features, bar) \
+ (features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
+#define EPC_FEATURE_GET_BAR(features) \
+ ((features & EPC_FEATURE_BAR_MASK) >> 1)
+
#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
#define pci_epc_create(dev, ops) \
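
The new features word lets an endpoint controller advertise quirks to function drivers. A sketch of a controller driver encoding the BAR it reserves for its own use and a function driver decoding it, assuming the usual enum pci_barno from pci-epf.h:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Sketch: the controller reports that it has no link-up notifier and
 * that BAR 2 is reserved; the function driver reads the BAR back. */
static void my_epc_advertise_features(struct pci_epc *epc)
{
	epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
	EPC_FEATURE_SET_BAR(epc->features, BAR_2);
}

static enum pci_barno my_epf_reserved_bar(struct pci_epc *epc)
{
	return EPC_FEATURE_GET_BAR(epc->features);	/* BAR_2 here */
}
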
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index f7d6f4883f8b2..4e7764935fa85 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -72,7 +72,7 @@ struct pci_epf_ops {
* @driver: PCI EPF driver
* @ops: set of function pointers for performing EPF operations
* @owner: the owner of the module that registers the PCI EPF driver
- * @group: configfs group corresponding to the PCI EPF driver
+ * @epf_group: list of configfs groups corresponding to the PCI EPF driver
* @id_table: identifies EPF devices for probing
*/
struct pci_epf_driver {
@@ -82,7 +82,7 @@ struct pci_epf_driver {
struct device_driver driver;
struct pci_epf_ops *ops;
struct module *owner;
- struct config_group *group;
+ struct list_head epf_group;
const struct pci_epf_device_id *id_table;
};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 55371cb827adc..340029b2fb382 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -217,6 +217,7 @@ enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
+ PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
};
/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
@@ -406,6 +407,9 @@ struct pci_dev {
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+ unsigned int broken_cmd_compl:1; /* No compl for some cmds */
+#endif
#ifdef CONFIG_PCIE_PTM
unsigned int ptm_root:1;
unsigned int ptm_enabled:1;
@@ -471,8 +475,10 @@ struct pci_host_bridge {
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
unsigned int native_aer:1; /* OS may use PCIe AER */
- unsigned int native_hotplug:1; /* OS may use PCIe hotplug */
+ unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
+ unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
unsigned int native_pme:1; /* OS may use PCIe PME */
+ unsigned int native_ltr:1; /* OS may use PCIe LTR */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
const struct resource *res,
@@ -1079,8 +1085,6 @@ int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
-int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
@@ -1451,8 +1455,10 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
+extern bool pcie_ports_native;
#else
#define pcie_ports_disabled true
+#define pcie_ports_native false
#endif
#ifdef CONFIG_PCIEASPM
@@ -1479,6 +1485,8 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
+bool pci_ats_disabled(void);
+
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
void pci_ats_init(struct pci_dev *dev);
@@ -1510,12 +1518,10 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
*/
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
-int pci_get_new_domain_nr(void);
#else
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
-static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#endif /* CONFIG_PCI_DOMAINS */
/*
@@ -1670,7 +1676,6 @@ static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
-static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
@@ -1954,6 +1959,7 @@ int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
+int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
@@ -1986,6 +1992,7 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
+#define pci_sriov_configure_simple NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
@@ -2284,7 +2291,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
return false;
}
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
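
pci_sriov_configure_simple() gives PF drivers that need no special VF setup a ready-made sriov_configure implementation, and it compiles away to NULL when CONFIG_PCI_IOV is off. A sketch of wiring it into a driver; the device IDs are placeholders:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id my_pf_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical PF device */
	{ }
};
MODULE_DEVICE_TABLE(pci, my_pf_ids);

static int my_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pcim_enable_device(pdev);
}

static struct pci_driver my_pf_driver = {
	.name			= "my-pf",
	.id_table		= my_pf_ids,
	.probe			= my_pf_probe,
	/* No per-driver VF setup needed, so point at the core helper. */
	.sriov_configure	= pci_sriov_configure_simple,
};
module_pci_driver(my_pf_driver);
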
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 26213024e81b8..cf5e22103f684 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -162,8 +162,9 @@ struct hotplug_params {
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
-bool pciehp_is_native(struct pci_dev *pdev);
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
+bool pciehp_is_native(struct pci_dev *bridge);
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
+bool shpchp_is_native(struct pci_dev *bridge);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(acpi_handle handle);
#else
@@ -172,6 +173,17 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
{
return -ENODEV;
}
-static inline bool pciehp_is_native(struct pci_dev *pdev) { return true; }
+
+static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge)
+{
+ return 0;
+}
+static inline bool pciehp_is_native(struct pci_dev *bridge) { return true; }
+static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; }
#endif
+
+static inline bool hotplug_is_native(struct pci_dev *bridge)
+{
+ return pciehp_is_native(bridge) || shpchp_is_native(bridge);
+}
#endif
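
hotplug_is_native() collapses the PCIe-native and SHPC-native checks into one question: does the OS own hotplug on this bridge? A sketch of the typical use when deciding whether firmware still controls a slot:

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

/* Sketch: only treat a hotplug bridge as OS-managed when the OS owns
 * hotplug on it through either pciehp or shpchp. */
static bool my_bridge_hotplug_is_os_managed(struct pci_dev *bridge)
{
	if (bridge->is_hotplug_bridge && !hotplug_is_native(bridge))
		return false;	/* firmware retains control */
	return true;
}
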
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cc608fc553347..29502238e5107 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -561,6 +561,7 @@
#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
+#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
@@ -2119,6 +2120,8 @@
#define PCI_VENDOR_ID_MYRICOM 0x14c1
+#define PCI_VENDOR_ID_MEDIATEK 0x14c3
+
#define PCI_VENDOR_ID_TITAN 0x14D2
#define PCI_DEVICE_ID_TITAN_010L 0x8001
#define PCI_DEVICE_ID_TITAN_100L 0x8010
@@ -2387,6 +2390,8 @@
#define PCI_VENDOR_ID_LENOVO 0x17aa
+#define PCI_VENDOR_ID_QCOM 0x17cb
+
#define PCI_VENDOR_ID_CDNS 0x17cd
#define PCI_VENDOR_ID_ARECA 0x17d3
@@ -2552,6 +2557,8 @@
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001
+#define PCI_VENDOR_ID_AMAZON 0x1d0f
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
@@ -2672,6 +2679,7 @@
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
+#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
@@ -2776,6 +2784,7 @@
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
+#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0
#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 9c689868eb4d9..a0794632fd01a 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -298,30 +298,44 @@ TRACE_EVENT(non_standard_event,
TRACE_EVENT(aer_event,
TP_PROTO(const char *dev_name,
const u32 status,
- const u8 severity),
+ const u8 severity,
+ const u8 tlp_header_valid,
+ struct aer_header_log_regs *tlp),
- TP_ARGS(dev_name, status, severity),
+ TP_ARGS(dev_name, status, severity, tlp_header_valid, tlp),
TP_STRUCT__entry(
__string( dev_name, dev_name )
__field( u32, status )
__field( u8, severity )
+ __field( u8, tlp_header_valid)
+ __array( u32, tlp_header, 4 )
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
__entry->status = status;
__entry->severity = severity;
+ __entry->tlp_header_valid = tlp_header_valid;
+ if (tlp_header_valid) {
+ __entry->tlp_header[0] = tlp->dw0;
+ __entry->tlp_header[1] = tlp->dw1;
+ __entry->tlp_header[2] = tlp->dw2;
+ __entry->tlp_header[3] = tlp->dw3;
+ }
),
- TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
+ TP_printk("%s PCIe Bus Error: severity=%s, %s, TLP Header=%s\n",
__get_str(dev_name),
__entry->severity == AER_CORRECTABLE ? "Corrected" :
__entry->severity == AER_FATAL ?
"Fatal" : "Uncorrected, non-fatal",
__entry->severity == AER_CORRECTABLE ?
__print_flags(__entry->status, "|", aer_correctable_errors) :
- __print_flags(__entry->status, "|", aer_uncorrectable_errors))
+ __print_flags(__entry->status, "|", aer_uncorrectable_errors),
+ __entry->tlp_header_valid ?
+ __print_array(__entry->tlp_header, 4, 4) :
+ "Not available")
);
/*
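
The aer_event tracepoint now carries the TLP header log when the hardware captured one. A sketch of the call a reporting path might make, assuming the tracepoint is created by the AER core as usual and that status, severity, and the header log come from the device's AER capability:

#include <linux/aer.h>
#include <linux/pci.h>
#include <ras/ras_event.h>

/* Sketch: emit the extended trace event, forwarding the TLP header log
 * only when the error source flagged it as valid. */
static void my_report_aer(struct pci_dev *dev, u32 status, u8 severity,
			  u8 tlp_header_valid, struct aer_header_log_regs *tlp)
{
	trace_aer_event(dev_name(&dev->dev), status, severity,
			tlp_header_valid, tlp);
}
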
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index a08cc72789802..c2c8b1fdeeadb 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -49,22 +49,6 @@
#include <net/ipv6.h>
#include <net/net_namespace.h>
-struct rdma_addr_client {
- atomic_t refcount;
- struct completion comp;
-};
-
-/**
- * rdma_addr_register_client - Register an address client.
- */
-void rdma_addr_register_client(struct rdma_addr_client *client);
-
-/**
- * rdma_addr_unregister_client - Deregister an address client.
- * @client: Client object to deregister.
- */
-void rdma_addr_unregister_client(struct rdma_addr_client *client);
-
/**
* struct rdma_dev_addr - Contains resolved RDMA hardware addresses
* @src_dev_addr: Source MAC address.
@@ -99,7 +83,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
/**
* rdma_resolve_ip - Resolve source and destination IP addresses to
* RDMA hardware addresses.
- * @client: Address client associated with request.
* @src_addr: An optional source address to use in the resolution. If a
* source address is not provided, a usable address will be returned via
* the callback.
@@ -112,8 +95,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
* or been canceled. A status of 0 indicates success.
* @context: User-specified context associated with the call.
*/
-int rdma_resolve_ip(struct rdma_addr_client *client,
- struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index eb49cc8d1f95b..a5f2498281153 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -149,4 +149,5 @@ int ib_get_cached_port_state(struct ib_device *device,
u8 port_num,
enum ib_port_state *port_active);
+bool rdma_is_zero_gid(const union ib_gid *gid);
#endif /* _IB_CACHE_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9fc8a825aa28c..2043e1a8f851a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -861,6 +861,19 @@ enum ib_sig_err_type {
};
/**
+ * Signature check masks (8 bytes in total) according to the T10-PI standard:
+ * -------- -------- ------------
+ * | GUARD | APPTAG | REFTAG |
+ * | 2B | 2B | 4B |
+ * -------- -------- ------------
+ */
+enum {
+ IB_SIG_CHECK_GUARD = 0xc0,
+ IB_SIG_CHECK_APPTAG = 0x30,
+ IB_SIG_CHECK_REFTAG = 0x0f,
+};
+
+/**
* struct ib_sig_err - signature error descriptor
*/
struct ib_sig_err {
@@ -1852,14 +1865,17 @@ enum ib_flow_spec_type {
IB_FLOW_SPEC_TCP = 0x40,
IB_FLOW_SPEC_UDP = 0x41,
IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
+ IB_FLOW_SPEC_GRE = 0x51,
+ IB_FLOW_SPEC_MPLS = 0x60,
IB_FLOW_SPEC_INNER = 0x100,
/* Actions */
IB_FLOW_SPEC_ACTION_TAG = 0x1000,
IB_FLOW_SPEC_ACTION_DROP = 0x1001,
IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
+ IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
-#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
+#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
/* Flow steering rule priority is set according to it's domain.
* Lower domain value means higher priority.
@@ -1994,6 +2010,34 @@ struct ib_flow_spec_esp {
struct ib_flow_esp_filter mask;
};
+struct ib_flow_gre_filter {
+ __be16 c_ks_res0_ver;
+ __be16 protocol;
+ __be32 key;
+ /* Must be last */
+ u8 real_sz[0];
+};
+
+struct ib_flow_spec_gre {
+ u32 type;
+ u16 size;
+ struct ib_flow_gre_filter val;
+ struct ib_flow_gre_filter mask;
+};
+
+struct ib_flow_mpls_filter {
+ __be32 tag;
+ /* Must be last */
+ u8 real_sz[0];
+};
+
+struct ib_flow_spec_mpls {
+ u32 type;
+ u16 size;
+ struct ib_flow_mpls_filter val;
+ struct ib_flow_mpls_filter mask;
+};
+
struct ib_flow_spec_action_tag {
enum ib_flow_spec_type type;
u16 size;
@@ -2011,6 +2055,17 @@ struct ib_flow_spec_action_handle {
struct ib_flow_action *act;
};
+enum ib_counters_description {
+ IB_COUNTER_PACKETS,
+ IB_COUNTER_BYTES,
+};
+
+struct ib_flow_spec_action_count {
+ enum ib_flow_spec_type type;
+ u16 size;
+ struct ib_counters *counters;
+};
+
union ib_flow_spec {
struct {
u32 type;
@@ -2023,9 +2078,12 @@ union ib_flow_spec {
struct ib_flow_spec_ipv6 ipv6;
struct ib_flow_spec_tunnel tunnel;
struct ib_flow_spec_esp esp;
+ struct ib_flow_spec_gre gre;
+ struct ib_flow_spec_mpls mpls;
struct ib_flow_spec_action_tag flow_tag;
struct ib_flow_spec_action_drop drop;
struct ib_flow_spec_action_handle action;
+ struct ib_flow_spec_action_count flow_count;
};
struct ib_flow_attr {
@@ -2180,6 +2238,24 @@ struct ib_port_pkey_list {
struct list_head pkey_list;
};
+struct ib_counters {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ /* num of objects attached */
+ atomic_t usecnt;
+};
+
+enum ib_read_counters_flags {
+ /* prefer read values from driver cache */
+ IB_READ_COUNTERS_ATTR_PREFER_CACHED = 1 << 0,
+};
+
+struct ib_counters_read_attr {
+ u64 *counters_buff;
+ u32 ncounters;
+ u32 flags; /* use enum ib_read_counters_flags */
+};
+
struct uverbs_attr_bundle;
struct ib_device {
@@ -2409,7 +2485,8 @@ struct ib_device {
struct ib_flow * (*create_flow)(struct ib_qp *qp,
struct ib_flow_attr
*flow_attr,
- int domain);
+ int domain,
+ struct ib_udata *udata);
int (*destroy_flow)(struct ib_flow *flow_id);
int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status);
@@ -2451,6 +2528,13 @@ struct ib_device {
struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
+ struct ib_counters * (*create_counters)(struct ib_device *device,
+ struct uverbs_attr_bundle *attrs);
+ int (*destroy_counters)(struct ib_counters *counters);
+ int (*read_counters)(struct ib_counters *counters,
+ struct ib_counters_read_attr *counters_read_attr,
+ struct uverbs_attr_bundle *attrs);
+
/**
* rdma netdev operation
*
@@ -3734,6 +3818,20 @@ static inline int ib_check_mr_access(int flags)
return 0;
}
+static inline bool ib_access_writable(int access_flags)
+{
+ /*
+ * We have writable memory backing the MR if any of the following
+ * access flags are set. "Local write" and "remote write" obviously
+ * require write access. "Remote atomic" can do things like fetch and
+ * add, which will modify memory, and "MW bind" can change permissions
+ * by binding a window.
+ */
+ return access_flags &
+ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
+}
+
/**
* ib_check_mr_status: lightweight check of MR status.
* This routine may provide status checks on a selected
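
The new IB_SIG_CHECK_* masks name the byte ranges of the 8-byte T10-PI tuple that signature-enabled MRs can be asked to verify. A sketch of selecting guard and reference-tag checking while skipping the application tag, assuming the usual check_mask field in struct ib_sig_attrs:

#include <rdma/ib_verbs.h>

/* Sketch: verify the CRC guard and the 4-byte reference tag, but not
 * the application tag, when configuring a signature handover. */
static void my_set_pi_checks(struct ib_sig_attrs *sig_attrs)
{
	sig_attrs->check_mask = IB_SIG_CHECK_GUARD | IB_SIG_CHECK_REFTAG;
}
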
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 690934733ba79..c5c1435c129ab 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -420,4 +420,7 @@ const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
union ib_gid *dgid);
+struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *cm_id);
+struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res);
+
#endif /* RDMA_CM_H */
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 3f4c187e435d6..e79229a0cf014 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -2,7 +2,7 @@
#define DEF_RDMA_VT_H
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -167,7 +167,6 @@ struct rvt_driver_params {
int qpn_res_end;
int nports;
int npkeys;
- char cq_name[RVT_CQN_MAX];
int node;
int psn_mask;
int psn_shift;
@@ -347,6 +346,9 @@ struct rvt_driver_provided {
/* Notify driver to restart rc */
void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);
+
+ /* Get and return CPU to pin CQ processing thread */
+ int (*comp_vect_cpu_lookup)(struct rvt_dev_info *rdi, int comp_vect);
};
struct rvt_dev_info {
@@ -402,7 +404,6 @@ struct rvt_dev_info {
spinlock_t pending_lock; /* protect pending mmap list */
/* CQ */
- struct kthread_worker *worker; /* per device cq worker */
u32 n_cqs_allocated; /* number of CQs allocated for device */
spinlock_t n_cqs_lock; /* protect count of in use cqs */
diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h
index 51fd00b243d01..75dc65c0bfb83 100644
--- a/include/rdma/rdmavt_cq.h
+++ b/include/rdma/rdmavt_cq.h
@@ -8,7 +8,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -80,10 +80,11 @@ struct rvt_cq_wc {
*/
struct rvt_cq {
struct ib_cq ibcq;
- struct kthread_work comptask;
+ struct work_struct comptask;
spinlock_t lock; /* protect changes in this struct */
u8 notify;
u8 triggered;
+ int comp_vector_cpu;
struct rvt_dev_info *rdi;
struct rvt_cq_wc *queue;
struct rvt_mmap_info *ip;
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 89ab88c342b60..1145a4c154b24 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -663,6 +663,7 @@ static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
+int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index f3b3e3576f6a9..9654d33edd989 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
*/
@@ -12,6 +12,7 @@
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/sched/task.h>
+#include <uapi/rdma/rdma_netlink.h>
/**
* enum rdma_restrack_type - HW objects to track
@@ -44,6 +45,8 @@ enum rdma_restrack_type {
};
#define RDMA_RESTRACK_HASH_BITS 8
+struct rdma_restrack_entry;
+
/**
* struct rdma_restrack_root - main resource tracking management
* entity, per-device
@@ -57,6 +60,13 @@ struct rdma_restrack_root {
* @hash: global database for all resources per-device
*/
DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
+ /**
+ * @fill_res_entry: driver-specific fill function
+ *
+ * Allows rdma drivers to add their own restrack attributes.
+ */
+ int (*fill_res_entry)(struct sk_buff *msg,
+ struct rdma_restrack_entry *entry);
};
/**
@@ -174,4 +184,14 @@ static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res,
res->task = task;
}
+/*
+ * Helper functions for rdma drivers when filling out
+ * nldev driver attributes.
+ */
+int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value);
+int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
+ u32 value);
+int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value);
+int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name,
+ u64 value);
#endif /* _RDMA_RESTRACK_H_ */
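
Together with the new fill_res_entry hook, the rdma_nl_put_driver_*() helpers let a driver append its own name/value pairs to nldev resource dumps. A minimal sketch of such a callback; the attribute names and values are illustrative only:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <rdma/restrack.h>

/* Sketch: a driver-specific restrack fill callback exporting one
 * decimal and one hex attribute. */
static int my_fill_res_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *entry)
{
	if (rdma_nl_put_driver_u32(msg, "sq_depth", 128))
		return -EMSGSIZE;
	if (rdma_nl_put_driver_u32_hex(msg, "state_flags", 0x3))
		return -EMSGSIZE;
	return 0;
}
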
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 095383a4bd1a6..bd6bba3a6e04a 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -420,6 +420,17 @@ static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_b
return attr->obj_attr.uobject->object;
}
+static inline struct ib_uobject *uverbs_attr_get_uobject(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+
+ if (IS_ERR(attr))
+ return ERR_CAST(attr);
+
+ return attr->obj_attr.uobject;
+}
+
static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, const void *from, size_t size)
{
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 04f9bd249094e..c35aee9ad4a6f 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -147,6 +147,7 @@
#define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */
#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
+#define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 83ade9b5cf950..4da87e2ef8a84 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -657,6 +657,11 @@
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+#define PCI_EXP_LNKCTL2_TLS 0x000f
+#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
+#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
@@ -983,6 +988,7 @@
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
#define PCI_EXP_DPC_CTL 6 /* DPC control */
+#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
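
The new PCI_EXP_LNKCTL2_TLS_* values name the Target Link Speed field in Link Control 2. A sketch of clamping a port to 2.5 GT/s using the standard capability accessors:

#include <linux/pci.h>

/* Sketch: force the port's Target Link Speed to Gen1 (2.5 GT/s).
 * Retraining and error handling are omitted for brevity. */
static int my_limit_to_gen1(struct pci_dev *port)
{
	return pcie_capability_clear_and_set_word(port, PCI_EXP_LNKCTL2,
						  PCI_EXP_LNKCTL2_TLS,
						  PCI_EXP_LNKCTL2_TLS_2_5GT);
}
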
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 83e3890eef20e..888ac5975a6c2 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -55,6 +55,7 @@ enum uverbs_default_objects {
UVERBS_OBJECT_WQ,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_OBJECT_DM,
+ UVERBS_OBJECT_COUNTERS,
};
enum {
@@ -131,4 +132,24 @@ enum uverbs_methods_mr {
UVERBS_METHOD_DM_MR_REG,
};
+enum uverbs_attrs_create_counters_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
+};
+
+enum uverbs_attrs_destroy_counters_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_COUNTERS_HANDLE,
+};
+
+enum uverbs_attrs_read_counters_cmd_attr_ids {
+ UVERBS_ATTR_READ_COUNTERS_HANDLE,
+ UVERBS_ATTR_READ_COUNTERS_BUFF,
+ UVERBS_ATTR_READ_COUNTERS_FLAGS,
+};
+
+enum uverbs_methods_actions_counters_ops {
+ UVERBS_METHOD_COUNTERS_CREATE,
+ UVERBS_METHOD_COUNTERS_DESTROY,
+ UVERBS_METHOD_COUNTERS_READ,
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 6aeb03315b0bd..4f9991de8e3ad 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -998,6 +998,19 @@ struct ib_uverbs_flow_spec_action_handle {
__u32 reserved1;
};
+struct ib_uverbs_flow_spec_action_count {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ __u32 handle;
+ __u32 reserved1;
+};
+
struct ib_uverbs_flow_tunnel_filter {
__be32 tunnel_id;
};
@@ -1033,6 +1046,56 @@ struct ib_uverbs_flow_spec_esp {
struct ib_uverbs_flow_spec_esp_filter mask;
};
+struct ib_uverbs_flow_gre_filter {
+ /* c_ks_res0_ver field is bits 0-15 in offset 0 of a standard GRE header:
+ * bit 0 - C - checksum bit.
+ * bit 1 - reserved. set to 0.
+ * bit 2 - key bit.
+ * bit 3 - sequence number bit.
+ * bits 4:12 - reserved. set to 0.
+ * bits 13:15 - GRE version.
+ */
+ __be16 c_ks_res0_ver;
+ __be16 protocol;
+ __be32 key;
+};
+
+struct ib_uverbs_flow_spec_gre {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_gre_filter val;
+ struct ib_uverbs_flow_gre_filter mask;
+};
+
+struct ib_uverbs_flow_mpls_filter {
+ /* The field includes the entire MPLS label:
+ * bits 0:19 - label field.
+ * bits 20:22 - traffic class field.
+ * bit 23 - bottom of stack bit.
+ * bits 24:31 - ttl field.
+ */
+ __be32 label;
+};
+
+struct ib_uverbs_flow_spec_mpls {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_mpls_filter val;
+ struct ib_uverbs_flow_mpls_filter mask;
+};
+
struct ib_uverbs_flow_attr {
__u32 type;
__u16 size;
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index fdaf00e206498..8daec1fa49cf6 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <linux/if_ether.h> /* For ETH_ALEN. */
+#include <rdma/ib_user_ioctl_verbs.h>
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -163,7 +164,7 @@ struct mlx5_ib_rss_caps {
enum mlx5_ib_cqe_comp_res_format {
MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
- MLX5_IB_CQE_RES_RESERVED = 1 << 2,
+ MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};
struct mlx5_ib_cqe_comp_caps {
@@ -233,7 +234,9 @@ enum mlx5_ib_query_dev_resp_flags {
enum mlx5_ib_tunnel_offloads {
MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0,
MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1,
- MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
struct mlx5_ib_query_device_resp {
@@ -441,4 +444,27 @@ enum {
enum {
MLX5_IB_CLOCK_INFO_V1 = 0,
};
+
+struct mlx5_ib_flow_counters_desc {
+ __u32 description;
+ __u32 index;
+};
+
+struct mlx5_ib_flow_counters_data {
+ RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
+ __u32 ncounters;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_flow {
+ __u32 ncounters_data;
+ __u32 reserved;
+ /*
+ * Following are counters data based on ncounters_data, each
+ * entry in the data[] should match a corresponding counter object
+ * that was pointed by a counters spec upon the flow creation
+ */
+ struct mlx5_ib_flow_counters_data data[];
+};
+
#endif /* MLX5_ABI_USER_H */
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 0ce0943fc8087..edba6351ac136 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -249,10 +249,22 @@ enum rdma_nldev_command {
RDMA_NLDEV_NUM_OPS
};
+enum {
+ RDMA_NLDEV_ATTR_ENTRY_STRLEN = 16,
+};
+
+enum rdma_nldev_print_type {
+ RDMA_NLDEV_PRINT_TYPE_UNSPEC,
+ RDMA_NLDEV_PRINT_TYPE_HEX,
+};
+
enum rdma_nldev_attr {
/* don't change the order or add anything between, this is ABI! */
RDMA_NLDEV_ATTR_UNSPEC,
+ /* Pad attribute for 64b alignment */
+ RDMA_NLDEV_ATTR_PAD = RDMA_NLDEV_ATTR_UNSPEC,
+
/* Identifier for ib_device */
RDMA_NLDEV_ATTR_DEV_INDEX, /* u32 */
@@ -387,7 +399,6 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_RES_PD_ENTRY, /* nested table */
RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, /* u32 */
RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, /* u32 */
-
/*
* Provides logical name and index of netdevice which is
* connected to physical port. This information is relevant
@@ -400,7 +411,24 @@ enum rdma_nldev_attr {
*/
RDMA_NLDEV_ATTR_NDEV_INDEX, /* u32 */
RDMA_NLDEV_ATTR_NDEV_NAME, /* string */
+ /*
+ * driver-specific attributes.
+ */
+ RDMA_NLDEV_ATTR_DRIVER, /* nested table */
+ RDMA_NLDEV_ATTR_DRIVER_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_DRIVER_STRING, /* string */
+ /*
+ * u8 values from enum rdma_nldev_print_type
+ */
+ RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_DRIVER_S32, /* s32 */
+ RDMA_NLDEV_ATTR_DRIVER_U32, /* u32 */
+ RDMA_NLDEV_ATTR_DRIVER_S64, /* s64 */
+ RDMA_NLDEV_ATTR_DRIVER_U64, /* u64 */
+ /*
+ * Always the end
+ */
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/mm/memory.c b/mm/memory.c
index bc381486d527e..7206a634270be 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1656,16 +1656,15 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
*
* The entire address range must be fully contained within the vma.
*
- * Returns 0 if successful.
*/
-int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size)
{
if (address < vma->vm_start || address + size > vma->vm_end ||
!(vma->vm_flags & VM_PFNMAP))
- return -1;
+ return;
+
zap_page_range_single(vma, address, size, NULL);
- return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
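
With the return value gone, callers of zap_vma_ptes() no longer check for failure; a bad range or a non-VM_PFNMAP mapping is now silently ignored. A sketch of the updated calling convention for a driver tearing down a PFN-mapped user range:

#include <linux/mm.h>

/* Sketch: drop the PTEs covering a previously remapped PFN range. */
static void my_unmap_user_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long size)
{
	zap_vma_ptes(vma, addr, size);
}
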
diff --git a/samples/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c
index e7823ffb1ca0f..85571e90191fc 100644
--- a/samples/auxdisplay/cfag12864b-example.c
+++ b/samples/auxdisplay/cfag12864b-example.c
@@ -1,25 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Filename: cfag12864b-example.c
* Version: 0.1.0
* Description: cfag12864b LCD userspace example program
- * License: GPLv2
*
* Author: Copyright (C) Miguel Ojeda Sandonis
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
/*
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 5af34a2b0cd9a..1bb594fcfe12c 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -251,6 +251,9 @@ DTC_FLAGS += -Wno-unit_address_vs_reg \
-Wno-unit_address_format \
-Wno-avoid_unnecessary_addr_size \
-Wno-alias_paths \
+ -Wno-graph_child_address \
+ -Wno-graph_port \
+ -Wno-unique_unit_address \
-Wno-pci_device_reg
endif
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 815eaf140ab5c..a2cc1036c9155 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -255,7 +255,7 @@ static void check_duplicate_node_names(struct check *c, struct dt_info *dti,
child2;
child2 = child2->next_sibling)
if (streq(child->name, child2->name))
- FAIL(c, dti, node, "Duplicate node name");
+ FAIL(c, dti, child2, "Duplicate node name");
}
ERROR(duplicate_node_names, check_duplicate_node_names, NULL);
@@ -317,6 +317,11 @@ static void check_unit_address_vs_reg(struct check *c, struct dt_info *dti,
const char *unitname = get_unitname(node);
struct property *prop = get_property(node, "reg");
+ if (get_subnode(node, "__overlay__")) {
+ /* HACK: Overlay fragments are a special case */
+ return;
+ }
+
if (!prop) {
prop = get_property(node, "ranges");
if (prop && !prop->val.len)
@@ -579,6 +584,8 @@ static void fixup_phandle_references(struct check *c, struct dt_info *dti,
phandle = get_node_phandle(dt, refnode);
*((fdt32_t *)(prop->val.val + m->offset)) = cpu_to_fdt32(phandle);
+
+ reference_node(refnode);
}
}
}
@@ -609,11 +616,21 @@ static void fixup_path_references(struct check *c, struct dt_info *dti,
path = refnode->fullpath;
prop->val = data_insert_at_marker(prop->val, m, path,
strlen(path) + 1);
+
+ reference_node(refnode);
}
}
}
ERROR(path_references, fixup_path_references, NULL, &duplicate_node_names);
+static void fixup_omit_unused_nodes(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ if (node->omit_if_unused && !node->is_referenced)
+ delete_node(node);
+}
+ERROR(omit_unused_nodes, fixup_omit_unused_nodes, NULL, &phandle_references, &path_references);
+
/*
* Semantic checks
*/
@@ -1017,6 +1034,36 @@ static void check_avoid_unnecessary_addr_size(struct check *c, struct dt_info *d
}
WARNING(avoid_unnecessary_addr_size, check_avoid_unnecessary_addr_size, NULL, &avoid_default_addr_size);
+static void check_unique_unit_address(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ struct node *childa;
+
+ if (node->addr_cells < 0 || node->size_cells < 0)
+ return;
+
+ if (!node->children)
+ return;
+
+ for_each_child(node, childa) {
+ struct node *childb;
+ const char *addr_a = get_unitname(childa);
+
+ if (!strlen(addr_a))
+ continue;
+
+ for_each_child(node, childb) {
+ const char *addr_b = get_unitname(childb);
+ if (childa == childb)
+ break;
+
+ if (streq(addr_a, addr_b))
+ FAIL(c, dti, childb, "duplicate unit-address (also used in node %s)", childa->fullpath);
+ }
+ }
+}
+WARNING(unique_unit_address, check_unique_unit_address, NULL, &avoid_default_addr_size);
+
static void check_obsolete_chosen_interrupt_controller(struct check *c,
struct dt_info *dti,
struct node *node)
@@ -1357,6 +1404,152 @@ static void check_interrupts_property(struct check *c,
}
WARNING(interrupts_property, check_interrupts_property, &phandle_references);
+static const struct bus_type graph_port_bus = {
+ .name = "graph-port",
+};
+
+static const struct bus_type graph_ports_bus = {
+ .name = "graph-ports",
+};
+
+static void check_graph_nodes(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ struct node *child;
+
+ for_each_child(node, child) {
+ if (!(strprefixeq(child->name, child->basenamelen, "endpoint") ||
+ get_property(child, "remote-endpoint")))
+ continue;
+
+ node->bus = &graph_port_bus;
+
+ /* The parent of 'port' nodes can be either 'ports' or a device */
+ if (!node->parent->bus &&
+ (streq(node->parent->name, "ports") || get_property(node, "reg")))
+ node->parent->bus = &graph_ports_bus;
+
+ break;
+ }
+
+}
+WARNING(graph_nodes, check_graph_nodes, NULL);
+
+static void check_graph_child_address(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ int cnt = 0;
+ struct node *child;
+
+ if (node->bus != &graph_ports_bus && node->bus != &graph_port_bus)
+ return;
+
+ for_each_child(node, child) {
+ struct property *prop = get_property(child, "reg");
+
+ /* No error if we have any non-zero unit address */
+ if (prop && propval_cell(prop) != 0)
+ return;
+
+ cnt++;
+ }
+
+ if (cnt == 1 && node->addr_cells != -1)
+ FAIL(c, dti, node, "graph node has single child node '%s', #address-cells/#size-cells are not necessary",
+ node->children->name);
+}
+WARNING(graph_child_address, check_graph_child_address, NULL, &graph_nodes);
+
+static void check_graph_reg(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ char unit_addr[9];
+ const char *unitname = get_unitname(node);
+ struct property *prop;
+
+ prop = get_property(node, "reg");
+ if (!prop || !unitname)
+ return;
+
+ if (!(prop->val.val && prop->val.len == sizeof(cell_t))) {
+ FAIL(c, dti, node, "graph node malformed 'reg' property");
+ return;
+ }
+
+ snprintf(unit_addr, sizeof(unit_addr), "%x", propval_cell(prop));
+ if (!streq(unitname, unit_addr))
+ FAIL(c, dti, node, "graph node unit address error, expected \"%s\"",
+ unit_addr);
+
+ if (node->parent->addr_cells != 1)
+ FAIL_PROP(c, dti, node, get_property(node, "#address-cells"),
+ "graph node '#address-cells' is %d, must be 1",
+ node->parent->addr_cells);
+ if (node->parent->size_cells != 0)
+ FAIL_PROP(c, dti, node, get_property(node, "#size-cells"),
+ "graph node '#size-cells' is %d, must be 0",
+ node->parent->size_cells);
+}
+
+static void check_graph_port(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ if (node->bus != &graph_port_bus)
+ return;
+
+ if (!strprefixeq(node->name, node->basenamelen, "port"))
+ FAIL(c, dti, node, "graph port node name should be 'port'");
+
+ check_graph_reg(c, dti, node);
+}
+WARNING(graph_port, check_graph_port, NULL, &graph_nodes);
+
+static struct node *get_remote_endpoint(struct check *c, struct dt_info *dti,
+ struct node *endpoint)
+{
+ int phandle;
+ struct node *node;
+ struct property *prop;
+
+ prop = get_property(endpoint, "remote-endpoint");
+ if (!prop)
+ return NULL;
+
+ phandle = propval_cell(prop);
+ /* Give up if this is an overlay with external references */
+ if (phandle == 0 || phandle == -1)
+ return NULL;
+
+ node = get_node_by_phandle(dti->dt, phandle);
+ if (!node)
+ FAIL_PROP(c, dti, endpoint, prop, "graph phandle is not valid");
+
+ return node;
+}
+
+static void check_graph_endpoint(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ struct node *remote_node;
+
+ if (!node->parent || node->parent->bus != &graph_port_bus)
+ return;
+
+ if (!strprefixeq(node->name, node->basenamelen, "endpoint"))
+ FAIL(c, dti, node, "graph endpont node name should be 'endpoint'");
+
+ check_graph_reg(c, dti, node);
+
+ remote_node = get_remote_endpoint(c, dti, node);
+ if (!remote_node)
+ return;
+
+ if (get_remote_endpoint(c, dti, remote_node) != node)
+ FAIL(c, dti, node, "graph connection to node '%s' is not bidirectional",
+ remote_node->fullpath);
+}
+WARNING(graph_endpoint, check_graph_endpoint, NULL, &graph_nodes);
+
static struct check *check_table[] = {
&duplicate_node_names, &duplicate_property_names,
&node_name_chars, &node_name_format, &property_name_chars,
@@ -1366,6 +1559,7 @@ static struct check *check_table[] = {
&explicit_phandles,
&phandle_references, &path_references,
+ &omit_unused_nodes,
&address_cells_is_cell, &size_cells_is_cell, &interrupt_cells_is_cell,
&device_type_is_string, &model_is_string, &status_is_string,
@@ -1390,6 +1584,7 @@ static struct check *check_table[] = {
&avoid_default_addr_size,
&avoid_unnecessary_addr_size,
+ &unique_unit_address,
&obsolete_chosen_interrupt_controller,
&chosen_node_is_root, &chosen_node_bootargs, &chosen_node_stdout_path,
@@ -1416,6 +1611,8 @@ static struct check *check_table[] = {
&alias_paths,
+ &graph_nodes, &graph_child_address, &graph_port, &graph_endpoint,
+
&always_fail,
};
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index fd825ebba69cb..615b7ec6588f1 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -153,6 +153,13 @@ static void PRINTF(1, 2) lexical_error(const char *fmt, ...);
return DT_DEL_NODE;
}
+<*>"/omit-if-no-ref/" {
+ DPRINT("Keyword: /omit-if-no-ref/\n");
+ DPRINT("<PROPNODENAME>\n");
+ BEGIN(PROPNODENAME);
+ return DT_OMIT_NO_REF;
+ }
+
<*>{LABEL}: {
DPRINT("Label: %s\n", yytext);
yylval.labelref = xstrdup(yytext);
diff --git a/scripts/dtc/dtc-parser.y b/scripts/dtc/dtc-parser.y
index 44af170abfeac..011a5b25539a1 100644
--- a/scripts/dtc/dtc-parser.y
+++ b/scripts/dtc/dtc-parser.y
@@ -63,6 +63,7 @@ extern bool treesource_error;
%token DT_BITS
%token DT_DEL_PROP
%token DT_DEL_NODE
+%token DT_OMIT_NO_REF
%token <propnodename> DT_PROPNODENAME
%token <integer> DT_LITERAL
%token <integer> DT_CHAR_LITERAL
@@ -190,18 +191,18 @@ devicetree:
}
| devicetree DT_REF nodedef
{
- struct node *target = get_node_by_ref($1, $2);
-
- if (target) {
- merge_nodes(target, $3);
+ /*
+ * We rely on the rule being always:
+ * versioninfo plugindecl memreserves devicetree
+ * so $-1 is what we want (plugindecl)
+ */
+ if ($<flags>-1 & DTSF_PLUGIN) {
+ add_orphan_node($1, $3, $2);
} else {
- /*
- * We rely on the rule being always:
- * versioninfo plugindecl memreserves devicetree
- * so $-1 is what we want (plugindecl)
- */
- if ($<flags>-1 & DTSF_PLUGIN)
- add_orphan_node($1, $3, $2);
+ struct node *target = get_node_by_ref($1, $2);
+
+ if (target)
+ merge_nodes(target, $3);
else
ERROR(&@2, "Label or path %s not found", $2);
}
@@ -219,6 +220,18 @@ devicetree:
$$ = $1;
}
+ | devicetree DT_OMIT_NO_REF DT_REF ';'
+ {
+ struct node *target = get_node_by_ref($1, $3);
+
+ if (target)
+ omit_node_if_unused(target);
+ else
+ ERROR(&@3, "Label or path %s not found", $3);
+
+
+ $$ = $1;
+ }
;
nodedef:
@@ -523,6 +536,10 @@ subnode:
{
$$ = name_node(build_node_delete(), $2);
}
+ | DT_OMIT_NO_REF subnode
+ {
+ $$ = omit_node_if_unused($2);
+ }
| DT_LABEL subnode
{
add_label(&$2->labels, $1);
diff --git a/scripts/dtc/dtc.h b/scripts/dtc/dtc.h
index 3b18a42b866e7..6d667701ab6aa 100644
--- a/scripts/dtc/dtc.h
+++ b/scripts/dtc/dtc.h
@@ -168,6 +168,8 @@ struct node {
struct label *labels;
const struct bus_type *bus;
+
+ bool omit_if_unused, is_referenced;
};
#define for_each_label_withdel(l0, l) \
@@ -202,6 +204,8 @@ struct property *reverse_properties(struct property *first);
struct node *build_node(struct property *proplist, struct node *children);
struct node *build_node_delete(void);
struct node *name_node(struct node *node, char *name);
+struct node *omit_node_if_unused(struct node *node);
+struct node *reference_node(struct node *node);
struct node *chain_node(struct node *first, struct node *list);
struct node *merge_nodes(struct node *old_node, struct node *new_node);
struct node *add_orphan_node(struct node *old_node, struct node *new_node, char *ref);
diff --git a/scripts/dtc/livetree.c b/scripts/dtc/livetree.c
index 57b7db2ed1534..6e4c367f54b3a 100644
--- a/scripts/dtc/livetree.c
+++ b/scripts/dtc/livetree.c
@@ -134,6 +134,20 @@ struct node *name_node(struct node *node, char *name)
return node;
}
+struct node *omit_node_if_unused(struct node *node)
+{
+ node->omit_if_unused = 1;
+
+ return node;
+}
+
+struct node *reference_node(struct node *node)
+{
+ node->is_referenced = 1;
+
+ return node;
+}
+
struct node *merge_nodes(struct node *old_node, struct node *new_node)
{
struct property *new_prop, *old_prop;
@@ -224,10 +238,16 @@ struct node * add_orphan_node(struct node *dt, struct node *new_node, char *ref)
struct data d = empty_data;
char *name;
- d = data_add_marker(d, REF_PHANDLE, ref);
- d = data_append_integer(d, 0xffffffff, 32);
+ if (ref[0] == '/') {
+ d = data_append_data(d, ref, strlen(ref) + 1);
- p = build_property("target", d);
+ p = build_property("target-path", d);
+ } else {
+ d = data_add_marker(d, REF_PHANDLE, ref);
+ d = data_append_integer(d, 0xffffffff, 32);
+
+ p = build_property("target", d);
+ }
xasprintf(&name, "fragment@%u",
next_orphan_fragment++);
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index ad87849e333bc..b00f14ff7a17c 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.4.6-gaadd0b65"
+#define DTC_VERSION "DTC 1.4.6-g84e414b0"
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
index e825e0ae78e72..d593346d0bba4 100644
--- a/security/integrity/evm/Kconfig
+++ b/security/integrity/evm/Kconfig
@@ -42,6 +42,17 @@ config EVM_EXTRA_SMACK_XATTRS
additional info to the calculation, requires existing EVM
labeled file systems to be relabeled.
+config EVM_ADD_XATTRS
+ bool "Add additional EVM extended attributes at runtime"
+ depends on EVM
+ default n
+ help
+ Allow userland to provide additional xattrs for HMAC calculation.
+
+ When this option is enabled, root can add additional xattrs to the
+ list used by EVM by writing them into
+ /sys/kernel/security/integrity/evm/evm_xattrs.
+
config EVM_LOAD_X509
bool "Load an X509 certificate onto the '.evm' trusted keyring"
depends on EVM && INTEGRITY_TRUSTED_KEYRING
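
With CONFIG_EVM_ADD_XATTRS enabled, root extends the HMAC'd xattr list at runtime through securityfs, as implemented in evm_secfs.c below. A userspace sketch; "security.example" is a placeholder name, and writing "." instead would lock the list against further additions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Sketch: append one xattr name to the runtime EVM xattr list. */
int main(void)
{
	const char *name = "security.example\n";
	int fd = open("/sys/kernel/security/integrity/evm/evm_xattrs",
		      O_WRONLY);

	if (fd < 0) {
		perror("open evm_xattrs");
		return 1;
	}
	if (write(fd, name, strlen(name)) < 0)
		perror("write evm_xattrs");
	close(fd);
	return 0;
}
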
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index 45c4a89c02ffd..1257c3c247236 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -30,6 +30,11 @@
#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP_COMPLETE | \
EVM_ALLOW_METADATA_WRITES)
+struct xattr_list {
+ struct list_head list;
+ char *name;
+};
+
extern int evm_initialized;
#define EVM_ATTR_FSUUID 0x0001
@@ -40,7 +45,7 @@ extern struct crypto_shash *hmac_tfm;
extern struct crypto_shash *hash_tfm;
/* List of EVM protected security xattrs */
-extern char *evm_config_xattrnames[];
+extern struct list_head evm_config_xattrnames;
int evm_init_key(void);
int evm_update_evmxattr(struct dentry *dentry,
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index facf9cdd577d9..b605243108556 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -192,8 +192,8 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
char type, char *digest)
{
struct inode *inode = d_backing_inode(dentry);
+ struct xattr_list *xattr;
struct shash_desc *desc;
- char **xattrname;
size_t xattr_size = 0;
char *xattr_value = NULL;
int error;
@@ -209,14 +209,14 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
return PTR_ERR(desc);
error = -ENODATA;
- for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
bool is_ima = false;
- if (strcmp(*xattrname, XATTR_NAME_IMA) == 0)
+ if (strcmp(xattr->name, XATTR_NAME_IMA) == 0)
is_ima = true;
if ((req_xattr_name && req_xattr_value)
- && !strcmp(*xattrname, req_xattr_name)) {
+ && !strcmp(xattr->name, req_xattr_name)) {
error = 0;
crypto_shash_update(desc, (const u8 *)req_xattr_value,
req_xattr_value_len);
@@ -224,7 +224,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
ima_present = true;
continue;
}
- size = vfs_getxattr_alloc(dentry, *xattrname,
+ size = vfs_getxattr_alloc(dentry, xattr->name,
&xattr_value, xattr_size, GFP_NOFS);
if (size == -ENOMEM) {
error = -ENOMEM;
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 9ea9c19a545c3..f9eff5041e4ca 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -35,28 +35,29 @@ static const char * const integrity_status_msg[] = {
};
int evm_hmac_attrs;
-char *evm_config_xattrnames[] = {
+static struct xattr_list evm_config_default_xattrnames[] = {
#ifdef CONFIG_SECURITY_SELINUX
- XATTR_NAME_SELINUX,
+ {.name = XATTR_NAME_SELINUX},
#endif
#ifdef CONFIG_SECURITY_SMACK
- XATTR_NAME_SMACK,
+ {.name = XATTR_NAME_SMACK},
#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
- XATTR_NAME_SMACKEXEC,
- XATTR_NAME_SMACKTRANSMUTE,
- XATTR_NAME_SMACKMMAP,
+ {.name = XATTR_NAME_SMACKEXEC},
+ {.name = XATTR_NAME_SMACKTRANSMUTE},
+ {.name = XATTR_NAME_SMACKMMAP},
#endif
#endif
#ifdef CONFIG_SECURITY_APPARMOR
- XATTR_NAME_APPARMOR,
+ {.name = XATTR_NAME_APPARMOR},
#endif
#ifdef CONFIG_IMA_APPRAISE
- XATTR_NAME_IMA,
+ {.name = XATTR_NAME_IMA},
#endif
- XATTR_NAME_CAPS,
- NULL
+ {.name = XATTR_NAME_CAPS},
};
+LIST_HEAD(evm_config_xattrnames);
+
static int evm_fixmode;
static int __init evm_set_fixmode(char *str)
{
@@ -68,6 +69,17 @@ __setup("evm=", evm_set_fixmode);
static void __init evm_init_config(void)
{
+ int i, xattrs;
+
+ xattrs = ARRAY_SIZE(evm_config_default_xattrnames);
+
+ pr_info("Initialising EVM extended attributes:\n");
+ for (i = 0; i < xattrs; i++) {
+ pr_info("%s\n", evm_config_default_xattrnames[i].name);
+ list_add_tail(&evm_config_default_xattrnames[i].list,
+ &evm_config_xattrnames);
+ }
+
#ifdef CONFIG_EVM_ATTR_FSUUID
evm_hmac_attrs |= EVM_ATTR_FSUUID;
#endif
@@ -82,15 +94,15 @@ static bool evm_key_loaded(void)
static int evm_find_protected_xattrs(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
- char **xattr;
+ struct xattr_list *xattr;
int error;
int count = 0;
if (!(inode->i_opflags & IOP_XATTR))
return -EOPNOTSUPP;
- for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
- error = __vfs_getxattr(dentry, inode, *xattr, NULL, 0);
+ list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+ error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0);
if (error < 0) {
if (error == -ENODATA)
continue;
@@ -211,24 +223,25 @@ out:
static int evm_protected_xattr(const char *req_xattr_name)
{
- char **xattrname;
int namelen;
int found = 0;
+ struct xattr_list *xattr;
namelen = strlen(req_xattr_name);
- for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
- if ((strlen(*xattrname) == namelen)
- && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) {
+ list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+ if ((strlen(xattr->name) == namelen)
+ && (strncmp(req_xattr_name, xattr->name, namelen) == 0)) {
found = 1;
break;
}
if (strncmp(req_xattr_name,
- *xattrname + XATTR_SECURITY_PREFIX_LEN,
+ xattr->name + XATTR_SECURITY_PREFIX_LEN,
strlen(req_xattr_name)) == 0) {
found = 1;
break;
}
}
+
return found;
}
@@ -544,35 +557,35 @@ void __init evm_load_x509(void)
static int __init init_evm(void)
{
int error;
+ struct list_head *pos, *q;
+ struct xattr_list *xattr;
evm_init_config();
error = integrity_init_keyring(INTEGRITY_KEYRING_EVM);
if (error)
- return error;
+ goto error;
error = evm_init_secfs();
if (error < 0) {
pr_info("Error registering secfs\n");
- return error;
+ goto error;
}
- return 0;
-}
-
-/*
- * evm_display_config - list the EVM protected security extended attributes
- */
-static int __init evm_display_config(void)
-{
- char **xattrname;
+error:
+ if (error != 0) {
+ if (!list_empty(&evm_config_xattrnames)) {
+ list_for_each_safe(pos, q, &evm_config_xattrnames) {
+ xattr = list_entry(pos, struct xattr_list,
+ list);
+ list_del(pos);
+ }
+ }
+ }
- for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++)
- pr_info("%s\n", *xattrname);
- return 0;
+ return error;
}
-pure_initcall(evm_display_config);
late_initcall(init_evm);
MODULE_DESCRIPTION("Extended Verification Module");
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index feba03bbedae2..637eb999e3406 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -15,11 +15,21 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include "evm.h"
+static struct dentry *evm_dir;
static struct dentry *evm_init_tpm;
+static struct dentry *evm_symlink;
+
+#ifdef CONFIG_EVM_ADD_XATTRS
+static struct dentry *evm_xattrs;
+static DEFINE_MUTEX(xattr_list_mutex);
+static int evm_xattrs_locked;
+#endif
/**
* evm_read_key - read() for <securityfs>/evm
@@ -107,13 +117,203 @@ static const struct file_operations evm_key_ops = {
.write = evm_write_key,
};
+#ifdef CONFIG_EVM_ADD_XATTRS
+/**
+ * evm_read_xattrs - read() for <securityfs>/evm_xattrs
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_xattrs(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *temp;
+ int offset = 0;
+ ssize_t rc, size = 0;
+ struct xattr_list *xattr;
+
+ if (*ppos != 0)
+ return 0;
+
+ rc = mutex_lock_interruptible(&xattr_list_mutex);
+ if (rc)
+ return -ERESTARTSYS;
+
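+	/* First pass: add up the space needed for every xattr name. */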
+ list_for_each_entry(xattr, &evm_config_xattrnames, list)
+ size += strlen(xattr->name) + 1;
+
+ temp = kmalloc(size + 1, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&xattr_list_mutex);
+ return -ENOMEM;
+ }
+
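+	/* Second pass: copy each name, newline-terminated, into the buffer. */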
+ list_for_each_entry(xattr, &evm_config_xattrnames, list) {
+ sprintf(temp + offset, "%s\n", xattr->name);
+ offset += strlen(xattr->name) + 1;
+ }
+
+ mutex_unlock(&xattr_list_mutex);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ kfree(temp);
+
+ return rc;
+}
+
+/**
+ * evm_write_xattrs - write() for <securityfs>/evm_xattrs
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct xattr_list *xattr, *tmp;
+ struct audit_buffer *ab;
+ struct iattr newattrs;
+ struct inode *inode;
+
+ if (!capable(CAP_SYS_ADMIN) || evm_xattrs_locked)
+ return -EPERM;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (count > XATTR_NAME_MAX)
+ return -E2BIG;
+
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_EVM_XATTR);
+ if (IS_ERR(ab))
+ return PTR_ERR(ab);
+
+ xattr = kmalloc(sizeof(struct xattr_list), GFP_KERNEL);
+ if (!xattr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ xattr->name = memdup_user_nul(buf, count);
+ if (IS_ERR(xattr->name)) {
+ err = PTR_ERR(xattr->name);
+ xattr->name = NULL;
+ goto out;
+ }
+
+ /* Remove any trailing newline */
+ len = strlen(xattr->name);
+ if (len && xattr->name[len-1] == '\n')
+ xattr->name[len-1] = '\0';
+
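+	/* Writing "." locks the list: no further xattr names may be added. */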
+ if (strcmp(xattr->name, ".") == 0) {
+ evm_xattrs_locked = 1;
+ newattrs.ia_mode = S_IFREG | 0440;
+ newattrs.ia_valid = ATTR_MODE;
+ inode = evm_xattrs->d_inode;
+ inode_lock(inode);
+ err = simple_setattr(evm_xattrs, &newattrs);
+ inode_unlock(inode);
+ audit_log_format(ab, "locked");
+ if (!err)
+ err = count;
+ goto out;
+ }
+
+ audit_log_format(ab, "xattr=");
+ audit_log_untrustedstring(ab, xattr->name);
+
+ if (strncmp(xattr->name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN) != 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Guard against races in evm_read_xattrs */
+ mutex_lock(&xattr_list_mutex);
+ list_for_each_entry(tmp, &evm_config_xattrnames, list) {
+ if (strcmp(xattr->name, tmp->name) == 0) {
+ err = -EEXIST;
+ mutex_unlock(&xattr_list_mutex);
+ goto out;
+ }
+ }
+ list_add_tail_rcu(&xattr->list, &evm_config_xattrnames);
+ mutex_unlock(&xattr_list_mutex);
+
+ audit_log_format(ab, " res=0");
+ audit_log_end(ab);
+ return count;
+out:
+ audit_log_format(ab, " res=%d", err);
+ audit_log_end(ab);
+ if (xattr) {
+ kfree(xattr->name);
+ kfree(xattr);
+ }
+ return err;
+}
+
+static const struct file_operations evm_xattr_ops = {
+ .read = evm_read_xattrs,
+ .write = evm_write_xattrs,
+};
+
+static int evm_init_xattrs(void)
+{
+ evm_xattrs = securityfs_create_file("evm_xattrs", 0660, evm_dir, NULL,
+ &evm_xattr_ops);
+ if (!evm_xattrs || IS_ERR(evm_xattrs))
+ return -EFAULT;
+
+ return 0;
+}
+#else
+static int evm_init_xattrs(void)
+{
+ return 0;
+}
+#endif
+
int __init evm_init_secfs(void)
{
int error = 0;
- evm_init_tpm = securityfs_create_file("evm", S_IRUSR | S_IRGRP,
- NULL, NULL, &evm_key_ops);
- if (!evm_init_tpm || IS_ERR(evm_init_tpm))
+ evm_dir = securityfs_create_dir("evm", integrity_dir);
+ if (!evm_dir || IS_ERR(evm_dir))
+ return -EFAULT;
+
+ evm_init_tpm = securityfs_create_file("evm", 0660,
+ evm_dir, NULL, &evm_key_ops);
+ if (!evm_init_tpm || IS_ERR(evm_init_tpm)) {
error = -EFAULT;
+ goto out;
+ }
+
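+	/* Keep the legacy <securityfs>/evm path working via a symlink. */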
+ evm_symlink = securityfs_create_symlink("evm", NULL,
+ "integrity/evm/evm", NULL);
+ if (!evm_symlink || IS_ERR(evm_symlink)) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ if (evm_init_xattrs() != 0) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+out:
+ securityfs_remove(evm_symlink);
+ securityfs_remove(evm_init_tpm);
+ securityfs_remove(evm_dir);
return error;
}
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index f266e4b3b7d46..149faa81f6f05 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -21,12 +21,15 @@
#include <linux/rbtree.h>
#include <linux/file.h>
#include <linux/uaccess.h>
+#include <linux/security.h>
#include "integrity.h"
static struct rb_root integrity_iint_tree = RB_ROOT;
static DEFINE_RWLOCK(integrity_iint_lock);
static struct kmem_cache *iint_cache __read_mostly;
+struct dentry *integrity_dir;
+
/*
* __integrity_iint_find - return the iint associated with an inode
*/
@@ -211,3 +214,18 @@ void __init integrity_load_keys(void)
ima_load_x509();
evm_load_x509();
}
+
+static int __init integrity_fs_init(void)
+{
+ integrity_dir = securityfs_create_dir("integrity", NULL);
+ if (IS_ERR(integrity_dir)) {
+		int err = PTR_ERR(integrity_dir);
+
+		pr_err("Unable to create integrity sysfs dir: %d\n", err);
+		integrity_dir = NULL;
+		return err;
+ }
+
+ return 0;
+}
+
+late_initcall(integrity_fs_init)
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 35fe91aa1fc95..354bb5716ce32 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -53,7 +53,6 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
extern int ima_policy_flag;
/* set during initialization */
-extern int ima_initialized;
extern int ima_used_chip;
extern int ima_hash_algo;
extern int ima_appraise;
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index fa540c0469dac..ae9d5c766a3ce 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -15,6 +15,9 @@
 * implements security file system for reporting
* current measurement list and IMA statistics
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -336,7 +339,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf,
if (data[0] == '/') {
result = ima_read_policy(data);
} else if (ima_appraise & IMA_APPRAISE_POLICY) {
- pr_err("IMA: signed policy file (specified as an absolute pathname) required\n");
+ pr_err("signed policy file (specified as an absolute pathname) required\n");
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
"policy_update", "signed policy required",
1, 0);
@@ -356,6 +359,7 @@ out:
}
static struct dentry *ima_dir;
+static struct dentry *ima_symlink;
static struct dentry *binary_runtime_measurements;
static struct dentry *ascii_runtime_measurements;
static struct dentry *runtime_measurements_count;
@@ -417,7 +421,7 @@ static int ima_release_policy(struct inode *inode, struct file *file)
valid_policy = 0;
}
- pr_info("IMA: policy update %s\n", cause);
+ pr_info("policy update %s\n", cause);
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
"policy_update", cause, !valid_policy, 0);
@@ -434,6 +438,8 @@ static int ima_release_policy(struct inode *inode, struct file *file)
ima_policy = NULL;
#elif defined(CONFIG_IMA_WRITE_POLICY)
clear_bit(IMA_FS_BUSY, &ima_fs_flags);
+#elif defined(CONFIG_IMA_READ_POLICY)
+ inode->i_mode &= ~S_IWUSR;
#endif
return 0;
}
@@ -448,10 +454,15 @@ static const struct file_operations ima_measure_policy_ops = {
int __init ima_fs_init(void)
{
- ima_dir = securityfs_create_dir("ima", NULL);
+ ima_dir = securityfs_create_dir("ima", integrity_dir);
if (IS_ERR(ima_dir))
return -1;
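+	/* Keep the legacy <securityfs>/ima path working via a symlink. */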
+ ima_symlink = securityfs_create_symlink("ima", NULL, "integrity/ima",
+ NULL);
+ if (IS_ERR(ima_symlink))
+ goto out;
+
binary_runtime_measurements =
securityfs_create_file("binary_runtime_measurements",
S_IRUSR | S_IRGRP, ima_dir, NULL,
@@ -491,6 +502,7 @@ out:
securityfs_remove(runtime_measurements_count);
securityfs_remove(ascii_runtime_measurements);
securityfs_remove(binary_runtime_measurements);
+ securityfs_remove(ima_symlink);
securityfs_remove(ima_dir);
securityfs_remove(ima_policy);
return -1;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index e473eee913cba..16bd18747cfa0 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -10,6 +10,8 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kexec.h>
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 74d0bd7e76d71..dca44cf7838ea 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -32,8 +32,6 @@
#include "ima.h"
-int ima_initialized;
-
#ifdef CONFIG_IMA_APPRAISE
int ima_appraise = IMA_APPRAISE_ENFORCE;
#else
@@ -61,14 +59,11 @@ static int __init hash_setup(char *str)
goto out;
}
- for (i = 0; i < HASH_ALGO__LAST; i++) {
- if (strcmp(str, hash_algo_name[i]) == 0) {
- ima_hash_algo = i;
- break;
- }
- }
- if (i == HASH_ALGO__LAST)
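+	/* match_string() returns the matching index or a negative errno. */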
+ i = match_string(hash_algo_name, HASH_ALGO__LAST, str);
+ if (i < 0)
return 1;
+
+ ima_hash_algo = i;
out:
hash_setup_done = 1;
return 1;
@@ -449,6 +444,7 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
static int read_idmap[READING_MAX_ID] = {
[READING_FIRMWARE] = FIRMWARE_CHECK,
+ [READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
[READING_MODULE] = MODULE_CHECK,
[READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK,
[READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK,
@@ -517,10 +513,9 @@ static int __init init_ima(void)
error = ima_init();
}
- if (!error) {
- ima_initialized = 1;
+ if (!error)
ima_update_policy_flag();
- }
+
return error;
}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index d89bebf85421e..cdcc9a7b4e248 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -33,6 +33,7 @@
#define IMA_INMASK 0x0040
#define IMA_EUID 0x0080
#define IMA_PCR 0x0100
+#define IMA_FSNAME 0x0200
#define UNKNOWN 0
#define MEASURE 0x0001 /* same as IMA_MEASURE */
@@ -74,6 +75,7 @@ struct ima_rule_entry {
void *args_p; /* audit value */
int type; /* audit type */
} lsm[MAX_LSM_RULES];
+ char *fsname;
};
/*
@@ -273,6 +275,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
if ((rule->flags & IMA_FSMAGIC)
&& rule->fsmagic != inode->i_sb->s_magic)
return false;
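+	/* fsname= rules match on the filesystem type name, e.g. "tmpfs". */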
+ if ((rule->flags & IMA_FSNAME)
+ && strcmp(rule->fsname, inode->i_sb->s_type->name))
+ return false;
if ((rule->flags & IMA_FSUUID) &&
!uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
return false;
@@ -435,6 +440,17 @@ void ima_update_policy_flag(void)
ima_policy_flag &= ~IMA_APPRAISE;
}
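+/* Map a policy hook to its corresponding IMA_APPRAISE_* flag, if any. */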
+static int ima_appraise_flag(enum ima_hooks func)
+{
+ if (func == MODULE_CHECK)
+ return IMA_APPRAISE_MODULES;
+ else if (func == FIRMWARE_CHECK)
+ return IMA_APPRAISE_FIRMWARE;
+ else if (func == POLICY_CHECK)
+ return IMA_APPRAISE_POLICY;
+ return 0;
+}
+
/**
* ima_init_policy - initialize the default measure rules.
*
@@ -473,9 +489,11 @@ void __init ima_init_policy(void)
* Insert the appraise rules requiring file signatures, prior to
* any other appraise rules.
*/
- for (i = 0; i < secure_boot_entries; i++)
- list_add_tail(&secure_boot_rules[i].list,
- &ima_default_rules);
+ for (i = 0; i < secure_boot_entries; i++) {
+ list_add_tail(&secure_boot_rules[i].list, &ima_default_rules);
+ temp_ima_appraise |=
+ ima_appraise_flag(secure_boot_rules[i].func);
+ }
for (i = 0; i < appraise_entries; i++) {
list_add_tail(&default_appraise_rules[i].list,
@@ -509,22 +527,9 @@ int ima_check_policy(void)
*/
void ima_update_policy(void)
{
- struct list_head *first, *last, *policy;
-
- /* append current policy with the new rules */
- first = (&ima_temp_rules)->next;
- last = (&ima_temp_rules)->prev;
- policy = &ima_policy_rules;
-
- synchronize_rcu();
+ struct list_head *policy = &ima_policy_rules;
- last->next = policy;
- rcu_assign_pointer(list_next_rcu(policy->prev), first);
- first->prev = policy->prev;
- policy->prev = last;
-
- /* prepare for the next policy rules addition */
- INIT_LIST_HEAD(&ima_temp_rules);
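+	/* Splice the temporary rules onto the active policy under RCU. */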
+ list_splice_tail_init_rcu(&ima_temp_rules, policy, synchronize_rcu);
if (ima_rules != policy) {
ima_policy_flag = 0;
@@ -540,7 +545,7 @@ enum {
Opt_audit, Opt_hash, Opt_dont_hash,
Opt_obj_user, Opt_obj_role, Opt_obj_type,
Opt_subj_user, Opt_subj_role, Opt_subj_type,
- Opt_func, Opt_mask, Opt_fsmagic,
+ Opt_func, Opt_mask, Opt_fsmagic, Opt_fsname,
Opt_fsuuid, Opt_uid_eq, Opt_euid_eq, Opt_fowner_eq,
Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
@@ -565,6 +570,7 @@ static match_table_t policy_tokens = {
{Opt_func, "func=%s"},
{Opt_mask, "mask=%s"},
{Opt_fsmagic, "fsmagic=%s"},
+ {Opt_fsname, "fsname=%s"},
{Opt_fsuuid, "fsuuid=%s"},
{Opt_uid_eq, "uid=%s"},
{Opt_euid_eq, "euid=%s"},
@@ -776,6 +782,17 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
if (!result)
entry->flags |= IMA_FSMAGIC;
break;
+ case Opt_fsname:
+ ima_log_string(ab, "fsname", args[0].from);
+
+ entry->fsname = kstrdup(args[0].from, GFP_KERNEL);
+ if (!entry->fsname) {
+ result = -ENOMEM;
+ break;
+ }
+ result = 0;
+ entry->flags |= IMA_FSNAME;
+ break;
case Opt_fsuuid:
ima_log_string(ab, "fsuuid", args[0].from);
@@ -917,12 +934,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
}
if (!result && (entry->action == UNKNOWN))
result = -EINVAL;
- else if (entry->func == MODULE_CHECK)
- temp_ima_appraise |= IMA_APPRAISE_MODULES;
- else if (entry->func == FIRMWARE_CHECK)
- temp_ima_appraise |= IMA_APPRAISE_FIRMWARE;
- else if (entry->func == POLICY_CHECK)
- temp_ima_appraise |= IMA_APPRAISE_POLICY;
+ else if (entry->action == APPRAISE)
+ temp_ima_appraise |= ima_appraise_flag(entry->func);
+
audit_log_format(ab, "res=%d", !result);
audit_log_end(ab);
return result;
@@ -1104,6 +1118,12 @@ int ima_policy_show(struct seq_file *m, void *v)
seq_puts(m, " ");
}
+ if (entry->flags & IMA_FSNAME) {
+ snprintf(tbuf, sizeof(tbuf), "%s", entry->fsname);
+ seq_printf(m, pt(Opt_fsname), tbuf);
+ seq_puts(m, " ");
+ }
+
if (entry->flags & IMA_PCR) {
snprintf(tbuf, sizeof(tbuf), "%d", entry->pcr);
seq_printf(m, pt(Opt_pcr), tbuf);
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 5afaa53decc50..43752002c2223 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -13,6 +13,8 @@
* Library of supported template fields.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ima_template_lib.h"
static bool ima_template_hash_algo_allowed(u8 algo)
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 5e58e02ba8dc9..0bb372eed62ae 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -143,6 +143,8 @@ int integrity_kernel_read(struct file *file, loff_t offset,
#define INTEGRITY_KEYRING_MODULE 2
#define INTEGRITY_KEYRING_MAX 3
+extern struct dentry *integrity_dir;
+
#ifdef CONFIG_INTEGRITY_SIGNATURE
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index dcb976f98df28..7ad226018f516 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1545,9 +1545,9 @@ static int smack_inode_listsecurity(struct inode *inode, char *buffer,
*/
static void smack_inode_getsecid(struct inode *inode, u32 *secid)
{
- struct inode_smack *isp = inode->i_security;
+ struct smack_known *skp = smk_of_inode(inode);
- *secid = isp->smk_inode->smk_secid;
+ *secid = skp->smk_secid;
}
/*
@@ -4559,12 +4559,10 @@ static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
- int len = 0;
- len = smack_inode_getsecurity(inode, XATTR_SMACK_SUFFIX, ctx, true);
+ struct smack_known *skp = smk_of_inode(inode);
- if (len < 0)
- return len;
- *ctxlen = len;
+ *ctx = skp->smk_known;
+ *ctxlen = strlen(skp->smk_known);
return 0;
}