Diffstat (limited to 'drivers/pci/host')
-rw-r--r--  drivers/pci/host/Kconfig                |   55
-rw-r--r--  drivers/pci/host/Makefile               |    2
-rw-r--r--  drivers/pci/host/pci-aardvark.c         |    7
-rw-r--r--  drivers/pci/host/pci-ftpci100.c         |    6
-rw-r--r--  drivers/pci/host/pci-host-common.c      |   13
-rw-r--r--  drivers/pci/host/pci-host-generic.c     |    1
-rw-r--r--  drivers/pci/host/pci-hyperv.c           |  162
-rw-r--r--  drivers/pci/host/pci-mvebu.c            |    2
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c        |    2
-rw-r--r--  drivers/pci/host/pci-tegra.c            |    2
-rw-r--r--  drivers/pci/host/pci-v3-semi.c          |    5
-rw-r--r--  drivers/pci/host/pci-versatile.c        |    5
-rw-r--r--  drivers/pci/host/pci-xgene.c            |    5
-rw-r--r--  drivers/pci/host/pcie-altera.c          |    7
-rw-r--r--  drivers/pci/host/pcie-iproc-platform.c  |    5
-rw-r--r--  drivers/pci/host/pcie-mediatek.c        |  236
-rw-r--r--  drivers/pci/host/pcie-mobiveil.c        |  866
-rw-r--r--  drivers/pci/host/pcie-rcar.c            |  284
-rw-r--r--  drivers/pci/host/pcie-rockchip-ep.c     |  642
-rw-r--r--  drivers/pci/host/pcie-rockchip-host.c   | 1142
-rw-r--r--  drivers/pci/host/pcie-rockchip.c        | 1580
-rw-r--r--  drivers/pci/host/pcie-rockchip.h        |  338
-rw-r--r--  drivers/pci/host/pcie-xilinx-nwl.c      |    6
-rw-r--r--  drivers/pci/host/pcie-xilinx.c          |    6
-rw-r--r--  drivers/pci/host/vmd.c                  |   91
25 files changed, 3678 insertions, 1792 deletions
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 0d0177ce436cc..a96e23bda6640 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -5,13 +5,14 @@ menu "PCI host controller drivers"
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
- depends on ARCH_MVEBU || ARCH_DOVE
+ depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
+ depends on MVEBU_MBUS
depends on ARM
depends on OF
config PCI_AARDVARK
bool "Aardvark PCIe controller"
- depends on ARCH_MVEBU && ARM64
+ depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
help
@@ -21,7 +22,7 @@ config PCI_AARDVARK
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
- depends on ARCH_ZYNQMP
+ depends on ARCH_ZYNQMP || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say 'Y' here if you want kernel support for Xilinx
@@ -32,12 +33,11 @@ config PCIE_XILINX_NWL
config PCI_FTPCI100
bool "Faraday Technology FTPCI100 PCI controller"
depends on OF
- depends on ARM
default ARCH_GEMINI
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want support for the PCIe host controller found
@@ -45,8 +45,8 @@ config PCI_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARM
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on ARM
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
@@ -54,7 +54,7 @@ config PCI_RCAR_GEN2
config PCIE_RCAR
bool "Renesas R-Car PCIe controller"
- depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe controller support on R-Car SoCs.
@@ -65,25 +65,25 @@ config PCI_HOST_COMMON
config PCI_HOST_GENERIC
bool "Generic PCI host controller"
- depends on (ARM || ARM64) && OF
+ depends on OF
select PCI_HOST_COMMON
select IRQ_DOMAIN
+ select PCI_DOMAINS
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
- depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC)
+ depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
config PCI_XGENE
bool "X-Gene PCIe controller"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
- select PCIEPORTBUS
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
There are 5 internal PCIe ports available. Each port is GEN3 capable
@@ -101,7 +101,7 @@ config PCI_XGENE_MSI
config PCI_V3_SEMI
bool "V3 Semiconductor PCI controller"
depends on OF
- depends on ARM
+ depends on ARM || COMPILE_TEST
default ARCH_INTEGRATOR_AP
config PCI_VERSATILE
@@ -147,8 +147,7 @@ config PCIE_IPROC_MSI
config PCIE_ALTERA
bool "Altera PCIe controller"
- depends on ARM || NIOS2
- depends on OF_PCI
+ depends on ARM || NIOS2 || COMPILE_TEST
select PCI_DOMAINS
help
Say Y here if you want to enable PCIe controller support on Altera
@@ -164,7 +163,7 @@ config PCIE_ALTERA_MSI
config PCI_HOST_THUNDER_PEM
bool "Cavium Thunder PCIe controller to off-chip devices"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
@@ -172,29 +171,45 @@ config PCI_HOST_THUNDER_PEM
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
config PCIE_ROCKCHIP
- tristate "Rockchip PCIe controller"
+ bool
+ depends on PCI
+
+config PCIE_ROCKCHIP_HOST
+ tristate "Rockchip PCIe host controller"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
select MFD_SYSCON
+ select PCIE_ROCKCHIP
help
Say Y here if you want internal PCI support on Rockchip SoC.
There is 1 internal PCIe port available to support GEN2 with
4 slots.
+config PCIE_ROCKCHIP_EP
+ bool "Rockchip PCIe endpoint controller"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select MFD_SYSCON
+ select PCIE_ROCKCHIP
+ help
+ Say Y here if you want to support Rockchip PCIe controller in
+ endpoint mode on Rockchip SoC. There is 1 internal PCIe port
+ available to support GEN2 with 4 slots.
+
config PCIE_MEDIATEK
bool "MediaTek PCIe controller"
- depends on (ARM || ARM64) && (ARCH_MEDIATEK || COMPILE_TEST)
+ depends on ARCH_MEDIATEK || COMPILE_TEST
depends on OF
- depends on PCI
- select PCIEPORTBUS
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 3b10591908675..11d21b026d373 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
+obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 9abf549631b4d..d3172d5d3d352 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -19,6 +19,8 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include "../pci.h"
+
/* PCIe core registers */
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
@@ -822,14 +824,13 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct resource_entry *win, *tmp;
resource_size_t iobase;
INIT_LIST_HEAD(&pcie->resources);
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
- &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, &iobase);
if (err)
return err;
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 5008fd87956a9..a1ebe9ed441f0 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -28,6 +28,8 @@
#include <linux/irq.h>
#include <linux/clk.h>
+#include "../pci.h"
+
/*
* Special configuration registers directly in the first few words
* in I/O space.
@@ -476,8 +478,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
- &res, &io_base);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &res, &io_base);
if (ret)
return ret;
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 5d028f53fdcdb..d8f10451f2730 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -101,5 +101,18 @@ int pci_host_common_probe(struct platform_device *pdev,
return ret;
}
+ platform_set_drvdata(pdev, bridge->bus);
+ return 0;
+}
+
+int pci_host_common_remove(struct platform_device *pdev)
+{
+ struct pci_bus *bus = platform_get_drvdata(pdev);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bus);
+ pci_remove_root_bus(bus);
+ pci_unlock_rescan_remove();
+
return 0;
}
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 45319ee3b484b..dea3ec7592a23 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -95,5 +95,6 @@ static struct platform_driver gen_pci_driver = {
.suppress_bind_attrs = true,
},
.probe = gen_pci_probe,
+ .remove = pci_host_common_remove,
};
builtin_platform_driver(gen_pci_driver);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 50cdefe3f6d3c..6cc5036ac83cf 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -433,7 +433,7 @@ enum hv_pcibus_state {
struct hv_pcibus_device {
struct pci_sysdata sysdata;
enum hv_pcibus_state state;
- atomic_t remove_lock;
+ refcount_t remove_lock;
struct hv_device *hdev;
resource_size_t low_mmio_space;
resource_size_t high_mmio_space;
@@ -488,17 +488,6 @@ enum hv_pcichild_state {
hv_pcichild_maximum
};
-enum hv_pcidev_ref_reason {
- hv_pcidev_ref_invalid = 0,
- hv_pcidev_ref_initial,
- hv_pcidev_ref_by_slot,
- hv_pcidev_ref_packet,
- hv_pcidev_ref_pnp,
- hv_pcidev_ref_childlist,
- hv_pcidev_irqdata,
- hv_pcidev_ref_max
-};
-
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
@@ -548,14 +537,41 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp,
static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
u32 wslot);
-static void get_pcichild(struct hv_pci_dev *hv_pcidev,
- enum hv_pcidev_ref_reason reason);
-static void put_pcichild(struct hv_pci_dev *hv_pcidev,
- enum hv_pcidev_ref_reason reason);
+
+static void get_pcichild(struct hv_pci_dev *hpdev)
+{
+ refcount_inc(&hpdev->refs);
+}
+
+static void put_pcichild(struct hv_pci_dev *hpdev)
+{
+ if (refcount_dec_and_test(&hpdev->refs))
+ kfree(hpdev);
+}
static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+/*
+ * There is no good way to get notified from vmbus_onoffer_rescind(),
+ * so let's use polling here, since this is not a hot path.
+ */
+static int wait_for_response(struct hv_device *hdev,
+ struct completion *comp)
+{
+ while (true) {
+ if (hdev->channel->rescind) {
+ dev_warn_once(&hdev->device, "The device is gone.\n");
+ return -ENODEV;
+ }
+
+ if (wait_for_completion_timeout(comp, HZ / 10))
+ break;
+ }
+
+ return 0;
+}
+
/**
* devfn_to_wslot() - Convert from Linux PCI slot to Windows
* @devfn: The Linux representation of PCI slot
@@ -762,7 +778,7 @@ static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
_hv_pcifront_read_config(hpdev, where, size, val);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return PCIBIOS_SUCCESSFUL;
}
@@ -790,7 +806,7 @@ static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
_hv_pcifront_write_config(hpdev, where, size, val);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return PCIBIOS_SUCCESSFUL;
}
@@ -856,7 +872,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
}
hv_int_desc_free(hpdev, int_desc);
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
}
static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
@@ -1186,13 +1202,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_lo = comp.int_desc.address & 0xffffffff;
msg->data = comp.int_desc.data;
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return;
free_int_desc:
kfree(int_desc);
drop_reference:
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
return_null_message:
msg->address_hi = 0;
msg->address_lo = 0;
@@ -1283,7 +1299,6 @@ static u64 get_bar_size(u64 bar_val)
*/
static void survey_child_resources(struct hv_pcibus_device *hbus)
{
- struct list_head *iter;
struct hv_pci_dev *hpdev;
resource_size_t bar_size = 0;
unsigned long flags;
@@ -1309,8 +1324,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
* for a child device are a power of 2 in size and aligned in memory,
* so it's sufficient to just add them up without tracking alignment.
*/
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev, list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
for (i = 0; i < 6; i++) {
if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
dev_err(&hbus->hdev->device,
@@ -1363,7 +1377,6 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
resource_size_t low_base = 0;
resource_size_t bar_size;
struct hv_pci_dev *hpdev;
- struct list_head *iter;
unsigned long flags;
u64 bar_val;
u32 command;
@@ -1385,9 +1398,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
/* Pick addresses for the BARs. */
do {
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
for (i = 0; i < 6; i++) {
bar_val = hpdev->probed_bar[i];
if (bar_val == 0)
@@ -1508,19 +1519,6 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
complete(&completion->host_event);
}
-static void get_pcichild(struct hv_pci_dev *hpdev,
- enum hv_pcidev_ref_reason reason)
-{
- refcount_inc(&hpdev->refs);
-}
-
-static void put_pcichild(struct hv_pci_dev *hpdev,
- enum hv_pcidev_ref_reason reason)
-{
- if (refcount_dec_and_test(&hpdev->refs))
- kfree(hpdev);
-}
-
/**
* new_pcichild_device() - Create a new child device
* @hbus: The internal struct tracking this root PCI bus.
@@ -1568,24 +1566,14 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
if (ret)
goto error;
- wait_for_completion(&comp_pkt.host_event);
+ if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
+ goto error;
hpdev->desc = *desc;
refcount_set(&hpdev->refs, 1);
- get_pcichild(hpdev, hv_pcidev_ref_childlist);
+ get_pcichild(hpdev);
spin_lock_irqsave(&hbus->device_list_lock, flags);
- /*
- * When a device is being added to the bus, we set the PCI domain
- * number to be the device serial number, which is non-zero and
- * unique on the same VM. The serial numbers start with 1, and
- * increase by 1 for each device. So device names including this
- * can have shorter names than based on the bus instance UUID.
- * Only the first device serial number is used for domain, so the
- * domain number will not change after the first device is added.
- */
- if (list_empty(&hbus->children))
- hbus->sysdata.domain = desc->ser;
list_add_tail(&hpdev->list_entry, &hbus->children);
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
return hpdev;
@@ -1618,7 +1606,7 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
list_for_each_entry(iter, &hbus->children, list_entry) {
if (iter->desc.win_slot.slot == wslot) {
hpdev = iter;
- get_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ get_pcichild(hpdev);
break;
}
}
@@ -1654,7 +1642,6 @@ static void pci_devices_present_work(struct work_struct *work)
{
u32 child_no;
bool found;
- struct list_head *iter;
struct pci_function_description *new_desc;
struct hv_pci_dev *hpdev;
struct hv_pcibus_device *hbus;
@@ -1691,10 +1678,8 @@ static void pci_devices_present_work(struct work_struct *work)
/* First, mark all existing children as reported missing. */
spin_lock_irqsave(&hbus->device_list_lock, flags);
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
- hpdev->reported_missing = true;
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ hpdev->reported_missing = true;
}
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
@@ -1704,11 +1689,8 @@ static void pci_devices_present_work(struct work_struct *work)
new_desc = &dr->func[child_no];
spin_lock_irqsave(&hbus->device_list_lock, flags);
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
- if ((hpdev->desc.win_slot.slot ==
- new_desc->win_slot.slot) &&
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
(hpdev->desc.v_id == new_desc->v_id) &&
(hpdev->desc.d_id == new_desc->d_id) &&
(hpdev->desc.ser == new_desc->ser)) {
@@ -1730,12 +1712,10 @@ static void pci_devices_present_work(struct work_struct *work)
spin_lock_irqsave(&hbus->device_list_lock, flags);
do {
found = false;
- list_for_each(iter, &hbus->children) {
- hpdev = container_of(iter, struct hv_pci_dev,
- list_entry);
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
if (hpdev->reported_missing) {
found = true;
- put_pcichild(hpdev, hv_pcidev_ref_childlist);
+ put_pcichild(hpdev);
list_move_tail(&hpdev->list_entry, &removed);
break;
}
@@ -1748,7 +1728,7 @@ static void pci_devices_present_work(struct work_struct *work)
hpdev = list_first_entry(&removed, struct hv_pci_dev,
list_entry);
list_del(&hpdev->list_entry);
- put_pcichild(hpdev, hv_pcidev_ref_initial);
+ put_pcichild(hpdev);
}
switch (hbus->state) {
@@ -1883,8 +1863,8 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
- put_pcichild(hpdev, hv_pcidev_ref_childlist);
- put_pcichild(hpdev, hv_pcidev_ref_pnp);
+ put_pcichild(hpdev);
+ put_pcichild(hpdev);
put_hvpcibus(hpdev->hbus);
}
@@ -1899,7 +1879,7 @@ static void hv_eject_device_work(struct work_struct *work)
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
hpdev->state = hv_pcichild_ejecting;
- get_pcichild(hpdev, hv_pcidev_ref_pnp);
+ get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
get_hvpcibus(hpdev->hbus);
queue_work(hpdev->hbus->wq, &hpdev->wrk);
@@ -1999,8 +1979,7 @@ static void hv_pci_onchannelcallback(void *context)
dev_message->wslot.slot);
if (hpdev) {
hv_pci_eject_device(hpdev);
- put_pcichild(hpdev,
- hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
}
break;
@@ -2069,15 +2048,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
sizeof(struct pci_version_request),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
if (ret) {
dev_err(&hdev->device,
- "PCI Pass-through VSP failed sending version reqquest: %#x",
+ "PCI Pass-through VSP failed to request version: %d",
ret);
goto exit;
}
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status >= 0) {
pci_protocol_version = pci_protocol_versions[i];
dev_info(&hdev->device,
@@ -2286,11 +2266,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
+
if (ret)
goto exit;
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -2330,11 +2311,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
0, VM_PKT_DATA_INBAND, 0);
- if (ret)
- return ret;
+ if (!ret)
+ ret = wait_for_response(hdev, &comp);
- wait_for_completion(&comp);
- return 0;
+ return ret;
}
/**
@@ -2398,17 +2378,17 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
ret = vmbus_sendpacket(hdev->channel, &pkt->message,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ ret = wait_for_response(hdev, &comp_pkt.host_event);
if (ret)
break;
- wait_for_completion(&comp_pkt.host_event);
-
if (comp_pkt.completion_status < 0) {
ret = -EPROTO;
dev_err(&hdev->device,
@@ -2446,7 +2426,7 @@ static int hv_send_resources_released(struct hv_device *hdev)
pkt.message_type.type = PCI_RESOURCES_RELEASED;
pkt.wslot.slot = hpdev->desc.win_slot.slot;
- put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ put_pcichild(hpdev);
ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
VM_PKT_DATA_INBAND, 0);
@@ -2459,12 +2439,12 @@ static int hv_send_resources_released(struct hv_device *hdev)
static void get_hvpcibus(struct hv_pcibus_device *hbus)
{
- atomic_inc(&hbus->remove_lock);
+ refcount_inc(&hbus->remove_lock);
}
static void put_hvpcibus(struct hv_pcibus_device *hbus)
{
- if (atomic_dec_and_test(&hbus->remove_lock))
+ if (refcount_dec_and_test(&hbus->remove_lock))
complete(&hbus->remove_event);
}
@@ -2508,7 +2488,7 @@ static int hv_pci_probe(struct hv_device *hdev,
hdev->dev_instance.b[8] << 8;
hbus->hdev = hdev;
- atomic_inc(&hbus->remove_lock);
+ refcount_set(&hbus->remove_lock, 1);
INIT_LIST_HEAD(&hbus->children);
INIT_LIST_HEAD(&hbus->dr_list);
INIT_LIST_HEAD(&hbus->resources_for_children);
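(Aside: the pci-hyperv.c hunks above drop the enum hv_pcidev_ref_reason tags and move both the per-child and per-bus counts to refcount_t. For reference only — this sketch is not part of the patch and the names are made up — the bare refcount_t idiom they converge on is:

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_child {
	refcount_t refs;	/* was atomic_t; refcount_t saturates instead of wrapping */
	/* ... device payload ... */
};

static void example_get_child(struct example_child *c)
{
	refcount_inc(&c->refs);			/* WARNs on increment-from-zero or saturation */
}

static void example_put_child(struct example_child *c)
{
	if (refcount_dec_and_test(&c->refs))	/* true only for the final reference */
		kfree(c);
}

Compared with atomic_inc()/atomic_dec_and_test(), the refcount API traps overflow and increment-from-zero, which is the usual motivation for conversions like the one above.)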
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 5d4dccfc9d813..23e270839e6a8 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -21,6 +21,8 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include "../pci.h"
+
/*
* PCIe unit register offsets.
*/
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index dd4f1a6b57c56..326171cb1a978 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -21,6 +21,8 @@
#include <linux/sizes.h>
#include <linux/slab.h>
+#include "../pci.h"
+
/* AHB-PCI Bridge PCI communication registers */
#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 389e74be846ce..f4f53d092e005 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -40,6 +40,8 @@
#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>
+#include "../pci.h"
+
#define INT_PCI_MSI_NR (8 * 32)
/* register definitions */
diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c
index 0a4dea7966638..68b8bfbdb867d 100644
--- a/drivers/pci/host/pci-v3-semi.c
+++ b/drivers/pci/host/pci-v3-semi.c
@@ -33,6 +33,8 @@
#include <linux/regmap.h>
#include <linux/clk.h>
+#include "../pci.h"
+
#define V3_PCI_VENDOR 0x00000000
#define V3_PCI_DEVICE 0x00000002
#define V3_PCI_CMD 0x00000004
@@ -791,7 +793,8 @@ static int v3_pci_probe(struct platform_device *pdev)
if (IS_ERR(v3->config_base))
return PTR_ERR(v3->config_base);
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &io_base);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &io_base);
if (ret)
return ret;
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 5b3876f5312bd..994f32061b325 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -15,6 +15,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
static void __iomem *versatile_pci_base;
static void __iomem *versatile_cfg_base[2];
@@ -64,11 +66,10 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *res)
{
int err, mem = 1, res_valid = 0;
- struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win, *tmp;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
if (err)
return err;
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 0a0d7ee6d3c92..d854d67e873cc 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -22,6 +22,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define PCIECORE_CTLANDSTATUS 0x50
#define PIM1_1L 0x80
#define IBAR2 0x98
@@ -632,7 +634,8 @@ static int xgene_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (ret)
return ret;
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index a6af62e0256dc..7d05e51205b38 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -17,6 +17,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define RP_TX_REG0 0x2000
#define RP_TX_REG1 0x2004
#define RP_TX_CNTRL 0x2008
@@ -488,11 +490,10 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
{
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct resource_entry *win;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
- NULL);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, NULL);
if (err)
return err;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index e764a2a2693ce..f30f5f3fb5c1e 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -16,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
+#include "../pci.h"
#include "pcie-iproc.h"
static const struct of_device_id iproc_pcie_of_match_table[] = {
@@ -99,8 +100,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
pcie->phy = NULL;
}
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
- &iobase);
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
+ &iobase);
if (ret) {
dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
index a8b20c5012a98..0baabe30858fd 100644
--- a/drivers/pci/host/pcie-mediatek.c
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -11,8 +11,10 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
@@ -22,6 +24,8 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include "../pci.h"
+
/* PCIe shared registers */
#define PCIE_SYS_CFG 0x00
#define PCIE_INT_ENABLE 0x0c
@@ -66,6 +70,10 @@
/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR 0x0c0
+
+#define PCIE_CONF_VEND_ID 0x100
+#define PCIE_CONF_CLASS_ID 0x106
+
#define PCIE_INT_MASK 0x420
#define INTX_MASK GENMASK(19, 16)
#define INTX_SHIFT 16
@@ -125,13 +133,13 @@ struct mtk_pcie_port;
/**
* struct mtk_pcie_soc - differentiate between host generations
- * @has_msi: whether this host supports MSI interrupts or not
+ * @need_fix_class_id: whether this host's class ID needed to be fixed or not
* @ops: pointer to configuration access functions
* @startup: pointer to controller setting functions
* @setup_irq: pointer to initialize IRQ functions
*/
struct mtk_pcie_soc {
- bool has_msi;
+ bool need_fix_class_id;
struct pci_ops *ops;
int (*startup)(struct mtk_pcie_port *port);
int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
@@ -155,7 +163,9 @@ struct mtk_pcie_soc {
* @lane: lane count
* @slot: port slot
* @irq_domain: legacy INTx IRQ domain
+ * @inner_domain: inner IRQ domain
* @msi_domain: MSI IRQ domain
+ * @lock: protect the msi_irq_in_use bitmap
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
struct mtk_pcie_port {
@@ -173,7 +183,9 @@ struct mtk_pcie_port {
u32 lane;
u32 slot;
struct irq_domain *irq_domain;
+ struct irq_domain *inner_domain;
struct irq_domain *msi_domain;
+ struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
@@ -375,6 +387,7 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
struct resource *mem = &pcie->mem;
+ const struct mtk_pcie_soc *soc = port->pcie->soc;
u32 val;
size_t size;
int err;
@@ -403,6 +416,15 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
PCIE_MAC_SRSTB | PCIE_CRSTB;
writel(val, port->base + PCIE_RST_CTRL);
+ /* Set up vendor ID and class code */
+ if (soc->need_fix_class_id) {
+ val = PCI_VENDOR_ID_MEDIATEK;
+ writew(val, port->base + PCIE_CONF_VEND_ID);
+
+ val = PCI_CLASS_BRIDGE_HOST;
+ writew(val, port->base + PCIE_CONF_CLASS_ID);
+ }
+
/* 100ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
!!(val & PCIE_PORT_LINKUP_V2), 20,
@@ -430,103 +452,130 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
return 0;
}
-static int mtk_pcie_msi_alloc(struct mtk_pcie_port *port)
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- int msi;
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr;
- msi = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
- if (msi < MTK_MSI_IRQS_NUM)
- set_bit(msi, port->msi_irq_in_use);
- else
- return -ENOSPC;
+ /* MT2712/MT7622 only support 32-bit MSI addresses */
+ addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+ msg->address_hi = 0;
+ msg->address_lo = lower_32_bits(addr);
+
+ msg->data = data->hwirq;
- return msi;
+ dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static void mtk_pcie_msi_free(struct mtk_pcie_port *port, unsigned long hwirq)
+static int mtk_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- clear_bit(hwirq, port->msi_irq_in_use);
+ return -EINVAL;
}
-static int mtk_pcie_msi_setup_irq(struct msi_controller *chip,
- struct pci_dev *pdev, struct msi_desc *desc)
+static void mtk_msi_ack_irq(struct irq_data *data)
{
- struct mtk_pcie_port *port;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
- phys_addr_t msg_addr;
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ u32 hwirq = data->hwirq;
- port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
- if (!port)
- return -EINVAL;
+ writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
+}
- hwirq = mtk_pcie_msi_alloc(port);
- if (hwirq < 0)
- return hwirq;
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+ .name = "MTK MSI",
+ .irq_compose_msi_msg = mtk_compose_msi_msg,
+ .irq_set_affinity = mtk_msi_set_affinity,
+ .irq_ack = mtk_msi_ack_irq,
+};
- irq = irq_create_mapping(port->msi_domain, hwirq);
- if (!irq) {
- mtk_pcie_msi_free(port, hwirq);
- return -EINVAL;
- }
+static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct mtk_pcie_port *port = domain->host_data;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&port->lock);
- chip->dev = &pdev->dev;
+ bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+ if (bit >= MTK_MSI_IRQS_NUM) {
+ mutex_unlock(&port->lock);
+ return -ENOSPC;
+ }
- irq_set_msi_desc(irq, desc);
+ __set_bit(bit, port->msi_irq_in_use);
- /* MT2712/MT7622 only support 32-bit MSI addresses */
- msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
- msg.address_hi = 0;
- msg.address_lo = lower_32_bits(msg_addr);
- msg.data = hwirq;
+ mutex_unlock(&port->lock);
- pci_write_msi_msg(irq, &msg);
+ irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq,
+ NULL, NULL);
return 0;
}
-static void mtk_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
- struct pci_dev *pdev = to_pci_dev(chip->dev);
- struct irq_data *d = irq_get_irq_data(irq);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct mtk_pcie_port *port;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
- port = mtk_pcie_find_port(pdev->bus, pdev->devfn);
- if (!port)
- return;
+ mutex_lock(&port->lock);
- irq_dispose_mapping(irq);
- mtk_pcie_msi_free(port, hwirq);
+ if (!test_bit(d->hwirq, port->msi_irq_in_use))
+ dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ else
+ __clear_bit(d->hwirq, port->msi_irq_in_use);
+
+ mutex_unlock(&port->lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static struct msi_controller mtk_pcie_msi_chip = {
- .setup_irq = mtk_pcie_msi_setup_irq,
- .teardown_irq = mtk_msi_teardown_irq,
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mtk_pcie_irq_domain_alloc,
+ .free = mtk_pcie_irq_domain_free,
};
static struct irq_chip mtk_msi_irq_chip = {
- .name = "MTK PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
+ .name = "MTK PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
};
-static int mtk_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static struct msi_domain_info mtk_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &mtk_msi_irq_chip,
+};
+
+static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- irq_set_chip_and_handler(irq, &mtk_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
+ struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+
+ mutex_init(&port->lock);
+
+ port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
+ &msi_domain_ops, port);
+ if (!port->inner_domain) {
+ dev_err(port->pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
+ port->inner_domain);
+ if (!port->msi_domain) {
+ dev_err(port->pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(port->inner_domain);
+ return -ENOMEM;
+ }
return 0;
}
-static const struct irq_domain_ops msi_domain_ops = {
- .map = mtk_pcie_msi_map,
-};
-
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
u32 val;
@@ -559,6 +608,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
{
struct device *dev = port->pcie->dev;
struct device_node *pcie_intc_node;
+ int ret;
/* Setup INTx */
pcie_intc_node = of_get_next_child(node, NULL);
@@ -575,27 +625,28 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- port->msi_domain = irq_domain_add_linear(node, MTK_MSI_IRQS_NUM,
- &msi_domain_ops,
- &mtk_pcie_msi_chip);
- if (!port->msi_domain) {
- dev_err(dev, "failed to create MSI IRQ domain\n");
- return -ENODEV;
- }
+ ret = mtk_pcie_allocate_msi_domains(port);
+ if (ret)
+ return ret;
+
mtk_pcie_enable_msi(port);
}
return 0;
}
-static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
+static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
- struct mtk_pcie_port *port = (struct mtk_pcie_port *)data;
+ struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
u32 virq;
u32 bit = INTX_SHIFT;
- while ((status = readl(port->base + PCIE_INT_STATUS)) & INTX_MASK) {
+ chained_irq_enter(irqchip, desc);
+
+ status = readl(port->base + PCIE_INT_STATUS);
+ if (status & INTX_MASK) {
for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
/* Clear the INTx */
writel(1 << bit, port->base + PCIE_INT_STATUS);
@@ -606,14 +657,12 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- while ((status = readl(port->base + PCIE_INT_STATUS)) & MSI_STATUS) {
+ if (status & MSI_STATUS){
unsigned long imsi_status;
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
- /* Clear the MSI */
- writel(1 << bit, port->base + PCIE_IMSI_STATUS);
- virq = irq_find_mapping(port->msi_domain, bit);
+ virq = irq_find_mapping(port->inner_domain, bit);
generic_handle_irq(virq);
}
}
@@ -622,7 +671,9 @@ static irqreturn_t mtk_pcie_intr_handler(int irq, void *data)
}
}
- return IRQ_HANDLED;
+ chained_irq_exit(irqchip, desc);
+
+ return;
}
static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
@@ -633,20 +684,15 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
struct platform_device *pdev = to_platform_device(dev);
int err, irq;
- irq = platform_get_irq(pdev, port->slot);
- err = devm_request_irq(dev, irq, mtk_pcie_intr_handler,
- IRQF_SHARED, "mtk-pcie", port);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", irq);
- return err;
- }
-
err = mtk_pcie_init_irq_domain(port, node);
if (err) {
dev_err(dev, "failed to init PCIe IRQ domain\n");
return err;
}
+ irq = platform_get_irq(pdev, port->slot);
+ irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+
return 0;
}
@@ -1080,8 +1126,6 @@ static int mtk_pcie_register_host(struct pci_host_bridge *host)
host->map_irq = of_irq_parse_and_map_pci;
host->swizzle_irq = pci_common_swizzle;
host->sysdata = pcie;
- if (IS_ENABLED(CONFIG_PCI_MSI) && pcie->soc->has_msi)
- host->msi = &mtk_pcie_msi_chip;
err = pci_scan_root_bus_bridge(host);
if (err < 0)
@@ -1142,8 +1186,14 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
.startup = mtk_pcie_startup_port,
};
-static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
- .has_msi = true,
+static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_v2,
+ .setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
+ .need_fix_class_id = true,
.ops = &mtk_pcie_ops_v2,
.startup = mtk_pcie_startup_port_v2,
.setup_irq = mtk_pcie_setup_irq,
@@ -1152,8 +1202,8 @@ static const struct mtk_pcie_soc mtk_pcie_soc_v2 = {
static const struct of_device_id mtk_pcie_ids[] = {
{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
- { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_v2 },
- { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_v2 },
+ { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
+ { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
{},
};
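(Aside: the pcie-mediatek.c conversion above replaces a devm_request_irq() handler with a chained flow handler that demultiplexes into the new MSI/INTx domains. A minimal, hypothetical sketch of that chained-handler pattern — illustrative names and register offsets only, not taken from the patch:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

struct example_port {
	void __iomem *base;		/* hypothetical per-port registers */
	struct irq_domain *domain;	/* domain the demuxed sources live in */
};

static void example_port_isr(struct irq_desc *desc)
{
	struct example_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status;
	u32 bit;

	chained_irq_enter(chip, desc);		/* ack/mask the parent line */
	status = readl(port->base);		/* hypothetical status register */
	for_each_set_bit(bit, &status, 32)
		generic_handle_irq(irq_find_mapping(port->domain, bit));
	chained_irq_exit(chip, desc);		/* eoi/unmask the parent line */
}

static void example_port_setup_irq(struct example_port *port, int irq)
{
	/* unlike devm_request_irq(), this returns nothing and takes the line exclusively */
	irq_set_chained_handler_and_data(irq, example_port_isr, port);
}

A chained handler owns the parent interrupt exclusively, so it cannot be shared the way IRQF_SHARED handlers can, which is consistent with the IRQF_SHARED request being removed in the hunk above.)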
diff --git a/drivers/pci/host/pcie-mobiveil.c b/drivers/pci/host/pcie-mobiveil.c
new file mode 100644
index 0000000000000..4d6c20e47bed4
--- /dev/null
+++ b/drivers/pci/host/pcie-mobiveil.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* register offsets and bit positions */
+
+/*
+ * translation tables are grouped into windows, each window registers are
+ * grouped into blocks of 4 or 16 registers each
+ */
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
+
+#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
+
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_EN 0xc00
+#define PAGE_SEL_OFFSET_SHIFT 10
+
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
+
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
+
+#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_INTX_MASK 0x01e0
+#define PAB_INTP_MSI_MASK 0x8
+
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+
+#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
+#define AXI_WINDOW_ALIGN_MASK 3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS 0x474
+
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START 5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI 16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE (256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+struct mobiveil_msi { /* MSI information */
+ struct mutex lock; /* protect bitmap variable */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ phys_addr_t msi_pages_phys;
+ int num_of_vectors;
+ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_pcie {
+ struct platform_device *pdev;
+ struct list_head resources;
+ void __iomem *config_axi_slave_base; /* endpoint config base */
+ void __iomem *csr_axi_slave_base; /* root port config base */
+ void __iomem *apb_csr_base; /* MSI register base */
+ void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */
+ struct irq_domain *intx_domain;
+ raw_spinlock_t intx_mask_lock;
+ int irq;
+ int apio_wins;
+ int ppio_wins;
+ int ob_wins_configured; /* configured outbound windows */
+ int ib_wins_configured; /* configured inbound windows */
+ struct resource *ob_io_res;
+ char root_bus_nr;
+ struct mobiveil_msi msi;
+};
+
+static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->csr_axi_slave_base + reg);
+}
+
+static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->csr_axi_slave_base + reg);
+}
+
+static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+ return (csr_readl(pcie, LTSSM_STATUS) &
+ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+
+ /* Only one device down on each root port */
+ if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
+ return false;
+
+ /*
+ * Do not read more than one device on the bus directly
+ * attached to RC
+ */
+ if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
+ return false;
+
+ return true;
+}
+
+/*
+ * mobiveil_pcie_map_bus - routine to get the configuration base of either
+ * root port or endpoint
+ */
+static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct mobiveil_pcie *pcie = bus->sysdata;
+
+ if (!mobiveil_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ if (bus->number == pcie->root_bus_nr) {
+ /* RC config access */
+ return pcie->csr_axi_slave_base + where;
+ }
+
+ /*
+ * EP config access (in Config/APIO space)
+ * Program PEX Address base (31..16 bits) with appropriate value
+ * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
+ * Relies on pci_lock serialization
+ */
+ csr_writel(pcie, bus->number << PAB_BUS_SHIFT |
+ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
+ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT,
+ PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ return pcie->config_axi_slave_base + where;
+}
+
+static struct pci_ops mobiveil_pcie_ops = {
+ .map_bus = mobiveil_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void mobiveil_pcie_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct device *dev = &pcie->pdev->dev;
+ struct mobiveil_msi *msi = &pcie->msi;
+ u32 msi_data, msi_addr_lo, msi_addr_hi;
+ u32 intr_status, msi_status;
+ unsigned long shifted_status;
+ u32 bit, virq, val, mask;
+
+ /*
+ * The core provides a single interrupt for both INTx/MSI messages.
+ * So we'll read both INTx and MSI status
+ */
+
+ chained_irq_enter(chip, desc);
+
+ /* read INTx status */
+ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ intr_status = val & mask;
+
+ /* Handle INTx */
+ if (intr_status & PAB_INTP_INTX_MASK) {
+ shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >>
+ PAB_INTX_START;
+ do {
+ for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
+ virq = irq_find_mapping(pcie->intx_domain,
+ bit + 1);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err_ratelimited(dev,
+ "unexpected IRQ, INT%d\n", bit);
+
+ /* clear interrupt */
+ csr_writel(pcie,
+ shifted_status << PAB_INTX_START,
+ PAB_INTP_AMBA_MISC_STAT);
+ }
+ } while ((shifted_status >> PAB_INTX_START) != 0);
+ }
+
+ /* read extra MSI status register */
+ msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
+
+ /* handle MSI interrupts */
+ while (msi_status & 1) {
+ msi_data = readl_relaxed(pcie->apb_csr_base
+ + MSI_DATA_OFFSET);
+
+ /*
+ * MSI_STATUS_OFFSET register gets updated to zero
+ * once we pop not only the MSI data but also address
+ * from MSI hardware FIFO. So keeping these following
+ * two dummy reads.
+ */
+ msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_L_OFFSET);
+ msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
+ MSI_ADDR_H_OFFSET);
+ dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
+ msi_data, msi_addr_hi, msi_addr_lo);
+
+ virq = irq_find_mapping(msi->dev_domain, msi_data);
+ if (virq)
+ generic_handle_irq(virq);
+
+ msi_status = readl_relaxed(pcie->apb_csr_base +
+ MSI_STATUS_OFFSET);
+ }
+
+ /* Clear the interrupt status */
+ csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+ chained_irq_exit(chip, desc);
+}
+
+static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct platform_device *pdev = pcie->pdev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ const char *type;
+
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ /* map config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "config_axi_slave");
+ pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->config_axi_slave_base))
+ return PTR_ERR(pcie->config_axi_slave_base);
+ pcie->ob_io_res = res;
+
+ /* map csr resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "csr_axi_slave");
+ pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->csr_axi_slave_base))
+ return PTR_ERR(pcie->csr_axi_slave_base);
+ pcie->pcie_reg_base = res->start;
+
+ /* map MSI config resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
+ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie->apb_csr_base))
+ return PTR_ERR(pcie->apb_csr_base);
+
+ /* read the number of windows requested */
+ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
+ pcie->apio_wins = MAX_PIO_WINDOWS;
+
+ if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
+ pcie->ppio_wins = MAX_PIO_WINDOWS;
+
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq <= 0) {
+ dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
+
+ return 0;
+}
+
+/*
+ * select_paged_register - routine to access paged register of root complex
+ *
+ * registers of RC are paged, for this scheme to work
+ * extracted higher 6 bits of the offset will be written to pg_sel
+ * field of PAB_CTRL register and rest of the lower 10 bits enabled with
+ * PAGE_SEL_EN are used as offset of the register.
+ */
+static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+ int pab_ctrl_dw, pg_sel;
+
+ /* clear pg_sel field */
+ pab_ctrl_dw = csr_readl(pcie, PAB_CTRL);
+ pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT));
+
+ /* set pg_sel field */
+ pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
+ pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT));
+ csr_writel(pcie, pab_ctrl_dw, PAB_CTRL);
+}
+
+static void write_paged_register(struct mobiveil_pcie *pcie,
+ u32 val, u32 offset)
+{
+ u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+ select_paged_register(pcie, offset);
+ csr_writel(pcie, val, off);
+}
+
+static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset)
+{
+ u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
+
+ select_paged_register(pcie, offset);
+ return csr_readl(pcie, off);
+}
+
+static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+ int pci_addr, u32 type, u64 size)
+{
+ int pio_ctrl_val;
+ int amap_ctrl_dw;
+ u64 size64 = ~(size - 1);
+
+ if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max inbound windows reached !\n");
+ return;
+ }
+
+ pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ csr_writel(pcie,
+ pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL);
+ amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT));
+ amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT));
+
+ write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64),
+ PAB_PEX_AMAP_CTRL(win_num));
+
+ write_paged_register(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+ write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
+ write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num));
+}
+
+/*
+ * routine to program the outbound windows
+ */
+static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+ u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size)
+{
+
+ u32 value, type;
+ u64 size64 = ~(size - 1);
+
+ if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max outbound windows reached !\n");
+ return;
+ }
+
+ /*
+ * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
+ * to 4 KB in PAB_AXI_AMAP_CTRL register
+ */
+ type = config_io_bit;
+ value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num));
+
+ write_paged_register(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
+
+ /*
+ * program AXI window base with appropriate value in
+ * PAB_AXI_AMAP_AXI_WIN0 register
+ */
+ value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num));
+ csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+
+ value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+ pcie->ob_wins_configured++;
+}
+
+static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (mobiveil_pcie_link_up(pcie))
+ return 0;
+
+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+ }
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+ return -ETIMEDOUT;
+}
+
+static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
+{
+ phys_addr_t msg_addr = pcie->pcie_reg_base;
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ pcie->msi.num_of_vectors = PCI_NUM_MSI;
+ msi->msi_pages_phys = (phys_addr_t)msg_addr;
+
+ writel_relaxed(lower_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
+ writel_relaxed(upper_32_bits(msg_addr),
+ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
+ writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
+ writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
+}
+
+static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+{
+ u32 value, pab_ctrl, type = 0;
+ int err;
+ struct resource_entry *win, *tmp;
+
+ err = mobiveil_bringup_link(pcie);
+ if (err) {
+ dev_info(&pcie->pdev->dev, "link bring-up failed\n");
+ return err;
+ }
+
+ /*
+ * program Bus Master Enable Bit in Command Register in PAB Config
+ * Space
+ */
+ value = csr_readl(pcie, PCI_COMMAND);
+ csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER, PCI_COMMAND);
+
+ /*
+ * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
+ * register
+ */
+ pab_ctrl = csr_readl(pcie, PAB_CTRL);
+ csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) |
+ (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL);
+
+ csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
+
+ /*
+ * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
+ * PAB_AXI_PIO_CTRL Register
+ */
+ value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL);
+
+ /*
+ * we'll program one outbound window for config reads and
+ * another default inbound window for all the upstream traffic
+ * rest of the outbound windows will be configured according to
+ * the "ranges" field defined in device tree
+ */
+
+ /* config outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE,
+ resource_size(pcie->ob_io_res));
+
+ /* memory inbound translation window */
+ program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
+ type = 0;
+ if (resource_type(win->res) == IORESOURCE_MEM)
+ type = MEM_WINDOW_TYPE;
+ if (resource_type(win->res) == IORESOURCE_IO)
+ type = IO_WINDOW_TYPE;
+ if (type) {
+ /* configure outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ win->res->start, 0, type,
+ resource_size(win->res));
+ }
+ }
+
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
+
+ return err;
+}
+
+static void mobiveil_mask_intx_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct mobiveil_pcie *pcie;
+ unsigned long flags;
+ u32 mask, shifted_val;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static void mobiveil_unmask_intx_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct mobiveil_pcie *pcie;
+ unsigned long flags;
+ u32 shifted_val, mask;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+ raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB);
+ raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static struct irq_chip intx_irq_chip = {
+ .name = "mobiveil_pcie:intx",
+ .irq_enable = mobiveil_unmask_intx_irq,
+ .irq_disable = mobiveil_mask_intx_irq,
+ .irq_mask = mobiveil_mask_intx_irq,
+ .irq_unmask = mobiveil_unmask_intx_irq,
+};
+
+/* routine to set up the INTx-related data */
+static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ return 0;
+}
+
+/* INTx domain operations structure */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = mobiveil_pcie_intx_map,
+};
+
+static struct irq_chip mobiveil_msi_irq_chip = {
+ .name = "Mobiveil PCIe MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mobiveil_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .chip = &mobiveil_msi_irq_chip,
+};
+
+static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
+
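+	/* Each MSI vector targets its own 4-byte doorbell within the capture window */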
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip mobiveil_msi_bottom_irq_chip = {
+ .name = "Mobiveil MSI",
+ .irq_compose_msi_msg = mobiveil_compose_msi_msg,
+ .irq_set_affinity = mobiveil_msi_set_affinity,
+};
+
+static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs, void *args)
+{
+ struct mobiveil_pcie *pcie = domain->host_data;
+ struct mobiveil_msi *msi = &pcie->msi;
+ unsigned long bit;
+
+ WARN_ON(nr_irqs != 1);
+ mutex_lock(&msi->lock);
+
+ bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
+ if (bit >= msi->num_of_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->msi_irq_in_use);
+
+ mutex_unlock(&msi->lock);
+
+ irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
+ domain->host_data, handle_level_irq,
+ NULL, NULL);
+ return 0;
+}
+
+static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (!test_bit(d->hwirq, msi->msi_irq_in_use)) {
+ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
+ d->hwirq);
+ } else {
+ __clear_bit(d->hwirq, msi->msi_irq_in_use);
+ }
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = mobiveil_irq_msi_domain_alloc,
+ .free = mobiveil_irq_msi_domain_free,
+};
+
+static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct mobiveil_msi *msi = &pcie->msi;
+
+ mutex_init(&pcie->msi.lock);
+ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, pcie);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &mobiveil_msi_domain_info, msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ /* setup INTx */
+ pcie->intx_domain = irq_domain_add_linear(node,
+ PCI_NUM_INTX, &intx_domain_ops, pcie);
+
+ if (!pcie->intx_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ raw_spin_lock_init(&pcie->intx_mask_lock);
+
+ /* setup MSI */
+ ret = mobiveil_allocate_msi_domains(pcie);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+ struct mobiveil_pcie *pcie;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+ resource_size_t iobase;
+ int ret;
+
+ /* allocate the PCIe port */
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENODEV;
+
+ pcie = pci_host_bridge_priv(bridge);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+
+ ret = mobiveil_pcie_parse_dt(pcie);
+ if (ret) {
+		dev_err(dev, "Parsing DT failed, ret: %d\n", ret);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&pcie->resources);
+
+ /* parse the host bridge base addresses from the device tree file */
+ ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &pcie->resources, &iobase);
+ if (ret) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * configure all inbound and outbound windows and prepare the RC for
+ * config access
+ */
+ ret = mobiveil_host_init(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to initialize host\n");
+ goto error;
+ }
+
+	/* fixup for PCIe class register: program the PCI-to-PCI bridge class code */
+ csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
+
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ goto error;
+ }
+
+ ret = devm_request_pci_bus_resources(dev, &pcie->resources);
+ if (ret)
+ goto error;
+
+ /* Initialize bridge */
+ list_splice_init(&pcie->resources, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = pcie->root_bus_nr;
+ bridge->ops = &mobiveil_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ /* setup the kernel resources for the newly added PCIe root bus */
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret)
+ goto error;
+
+ bus = bridge->bus;
+
+ pci_assign_unassigned_bus_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bus);
+
+ return 0;
+error:
+ pci_free_resource_list(&pcie->resources);
+ return ret;
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+ {.compatible = "mbvl,gpex40-pcie",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+ .probe = mobiveil_pcie_probe,
+ .driver = {
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 6ab28f29ac6a0..874d75c9ee4ac 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -11,6 +11,7 @@
* Author: Phil Edworthy <phil.edworthy@renesas.com>
*/
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -24,18 +25,23 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include "../pci.h"
+
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
-#define CONFIG_SEND_ENABLE (1 << 31)
+#define CONFIG_SEND_ENABLE BIT(31)
#define TYPE0 (0 << 8)
-#define TYPE1 (1 << 8)
+#define TYPE1 BIT(8)
#define PCIECDR 0x000020
#define PCIEMSR 0x000028
#define PCIEINTXR 0x000400
+#define PCIEPHYSR 0x0007f0
+#define PHYRDY BIT(0)
#define PCIEMSITXR 0x000840
/* Transfer control */
@@ -44,7 +50,7 @@
#define PCIETSTR 0x02004
#define DATA_LINK_ACTIVE 1
#define PCIEERRFR 0x02020
-#define UNSUPPORTED_REQUEST (1 << 4)
+#define UNSUPPORTED_REQUEST BIT(4)
#define PCIEMSIFR 0x02044
#define PCIEMSIALR 0x02048
#define MSIFE 1
@@ -57,17 +63,17 @@
/* local address reg & mask */
#define PCIELAR(x) (0x02200 + ((x) * 0x20))
#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
-#define LAM_PREFETCH (1 << 3)
-#define LAM_64BIT (1 << 2)
-#define LAR_ENABLE (1 << 1)
+#define LAM_PREFETCH BIT(3)
+#define LAM_64BIT BIT(2)
+#define LAR_ENABLE BIT(1)
/* PCIe address reg & mask */
#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
-#define PAR_ENABLE (1 << 31)
-#define IO_SPACE (1 << 8)
+#define PAR_ENABLE BIT(31)
+#define IO_SPACE BIT(8)
/* Configuration */
#define PCICONF(x) (0x010000 + ((x) * 0x4))
@@ -79,47 +85,46 @@
#define IDSETR1 0x011004
#define TLCTLR 0x011048
#define MACSR 0x011054
-#define SPCHGFIN (1 << 4)
-#define SPCHGFAIL (1 << 6)
-#define SPCHGSUC (1 << 7)
+#define SPCHGFIN BIT(4)
+#define SPCHGFAIL BIT(6)
+#define SPCHGSUC BIT(7)
#define LINK_SPEED (0xf << 16)
#define LINK_SPEED_2_5GTS (1 << 16)
#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
-#define SPEED_CHANGE (1 << 24)
-#define SCRAMBLE_DISABLE (1 << 27)
+#define SPEED_CHANGE BIT(24)
+#define SCRAMBLE_DISABLE BIT(27)
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
-#define SPCNGRSN (1 << 31)
+#define SPCNGRSN BIT(31)
/* R-Car H1 PHY */
#define H1_PCIEPHYADRR 0x04000c
-#define WRITE_CMD (1 << 16)
-#define PHY_ACK (1 << 24)
+#define WRITE_CMD BIT(16)
+#define PHY_ACK BIT(24)
#define RATE_POS 12
#define LANE_POS 8
#define ADR_POS 0
#define H1_PCIEPHYDOUTR 0x040014
-#define H1_PCIEPHYSR 0x040018
/* R-Car Gen2 PHY */
#define GEN2_PCIEPHYADDR 0x780
#define GEN2_PCIEPHYDATA 0x784
#define GEN2_PCIEPHYCTRL 0x78c
-#define INT_PCI_MSI_NR 32
+#define INT_PCI_MSI_NR 32
-#define RCONF(x) (PCICONF(0)+(x))
-#define RPMCAP(x) (PMCAP(0)+(x))
-#define REXPCAP(x) (EXPCAP(0)+(x))
-#define RVCCAP(x) (VCCAP(0)+(x))
+#define RCONF(x) (PCICONF(0) + (x))
+#define RPMCAP(x) (PMCAP(0) + (x))
+#define REXPCAP(x) (EXPCAP(0) + (x))
+#define RVCCAP(x) (VCCAP(0) + (x))
-#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
-#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
-#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
-#define RCAR_PCI_MAX_RESOURCES 4
-#define MAX_NR_INBOUND_MAPS 6
+#define RCAR_PCI_MAX_RESOURCES 4
+#define MAX_NR_INBOUND_MAPS 6
struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
@@ -139,10 +144,10 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
/* Structure representing the PCIe interface */
struct rcar_pcie {
struct device *dev;
+ struct phy *phy;
void __iomem *base;
struct list_head resources;
int root_bus_nr;
- struct clk *clk;
struct clk *bus_clk;
struct rcar_msi msi;
};
@@ -527,12 +532,12 @@ static void phy_write_reg(struct rcar_pcie *pcie,
phy_wait_for_ack(pcie);
}
-static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
{
unsigned int timeout = 10;
while (timeout--) {
- if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
return 0;
msleep(5);
@@ -541,6 +546,21 @@ static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
return -ETIMEDOUT;
}
+static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10000;
+
+ while (timeout--) {
+ if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ return 0;
+
+ udelay(5);
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
int err;
@@ -551,6 +571,10 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
/* Set mode */
rcar_pci_write_reg(pcie, 1, PCIEMSR);
+ err = rcar_pcie_wait_for_phyrdy(pcie);
+ if (err)
+ return err;
+
/*
* Initial header for port config space is type 1, set the device
* class to match. Hardware takes care of propagating the IDSETR
@@ -605,10 +629,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
return 0;
}
-static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
+static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
{
- unsigned int timeout = 10;
-
/* Initialize the phy */
phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
@@ -627,17 +649,10 @@ static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
- while (timeout--) {
- if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
- return rcar_pcie_hw_init(pcie);
-
- msleep(5);
- }
-
- return -ETIMEDOUT;
+ return 0;
}
-static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
+static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie)
{
/*
* These settings come from the R-Car Series, 2nd Generation User's
@@ -654,7 +669,18 @@ static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie)
rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
- return rcar_pcie_hw_init(pcie);
+ return 0;
+}
+
+static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
+{
+ int err;
+
+ err = phy_init(pcie->phy);
+ if (err)
+ return err;
+
+ return phy_power_on(pcie->phy);
}
static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -842,6 +868,20 @@ static const struct irq_domain_ops msi_domain_ops = {
.map = rcar_msi_map,
};
+static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie)
+{
+ struct rcar_msi *msi = &pcie->msi;
+ int i, irq;
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++) {
+ irq = irq_find_mapping(msi->domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(msi->domain);
+}
+
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -896,16 +936,35 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
return 0;
err:
- irq_domain_remove(msi->domain);
+ rcar_pcie_unmap_msi(pcie);
return err;
}
+static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
+{
+ struct rcar_msi *msi = &pcie->msi;
+
+ /* Disable all MSI interrupts */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+ /* Disable address decoding of the MSI interrupt, MSIFE */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
+
+ free_pages(msi->pages, 0);
+
+ rcar_pcie_unmap_msi(pcie);
+}
+
static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct resource res;
int err, i;
+ pcie->phy = devm_phy_optional_get(dev, "pcie");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
return err;
@@ -914,30 +973,17 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- pcie->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(pcie->clk)) {
- dev_err(dev, "cannot get platform clock\n");
- return PTR_ERR(pcie->clk);
- }
- err = clk_prepare_enable(pcie->clk);
- if (err)
- return err;
-
pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(pcie->bus_clk)) {
dev_err(dev, "cannot get pcie bus clock\n");
- err = PTR_ERR(pcie->bus_clk);
- goto fail_clk;
+ return PTR_ERR(pcie->bus_clk);
}
- err = clk_prepare_enable(pcie->bus_clk);
- if (err)
- goto fail_clk;
i = irq_of_parse_and_map(dev->of_node, 0);
if (!i) {
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
- goto err_map_reg;
+ goto err_irq1;
}
pcie->msi.irq1 = i;
@@ -945,17 +991,15 @@ static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
if (!i) {
dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
- goto err_map_reg;
+ goto err_irq2;
}
pcie->msi.irq2 = i;
return 0;
-err_map_reg:
- clk_disable_unprepare(pcie->bus_clk);
-fail_clk:
- clk_disable_unprepare(pcie->clk);
-
+err_irq2:
+ irq_dispose_mapping(pcie->msi.irq1);
+err_irq1:
return err;
}
@@ -1051,63 +1095,28 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
}
static const struct of_device_id rcar_pcie_of_match[] = {
- { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
+ { .compatible = "renesas,pcie-r8a7779",
+ .data = rcar_pcie_phy_init_h1 },
{ .compatible = "renesas,pcie-r8a7790",
- .data = rcar_pcie_hw_init_gen2 },
+ .data = rcar_pcie_phy_init_gen2 },
{ .compatible = "renesas,pcie-r8a7791",
- .data = rcar_pcie_hw_init_gen2 },
+ .data = rcar_pcie_phy_init_gen2 },
{ .compatible = "renesas,pcie-rcar-gen2",
- .data = rcar_pcie_hw_init_gen2 },
- { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
- { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7795",
+ .data = rcar_pcie_phy_init_gen3 },
+ { .compatible = "renesas,pcie-rcar-gen3",
+ .data = rcar_pcie_phy_init_gen3 },
{},
};
-static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
-{
- int err;
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- resource_size_t iobase;
- struct resource_entry *win, *tmp;
-
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
- &iobase);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, &pci->resources);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
- struct resource *res = win->res;
-
- if (resource_type(res) == IORESOURCE_IO) {
- err = pci_remap_iospace(res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
-
- resource_list_destroy_entry(win);
- }
- }
- }
-
- return 0;
-
-out_release_res:
- pci_free_resource_list(&pci->resources);
- return err;
-}
-
static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rcar_pcie *pcie;
unsigned int data;
int err;
- int (*hw_init_fn)(struct rcar_pcie *);
+ int (*phy_init_fn)(struct rcar_pcie *);
struct pci_host_bridge *bridge;
bridge = pci_alloc_host_bridge(sizeof(*pcie));
@@ -1118,36 +1127,45 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
- INIT_LIST_HEAD(&pcie->resources);
-
- err = rcar_pcie_parse_request_of_pci_ranges(pcie);
+ err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
if (err)
goto err_free_bridge;
+ pm_runtime_enable(pcie->dev);
+ err = pm_runtime_get_sync(pcie->dev);
+ if (err < 0) {
+ dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+ goto err_pm_disable;
+ }
+
err = rcar_pcie_get_resources(pcie);
if (err < 0) {
dev_err(dev, "failed to request resources: %d\n", err);
- goto err_free_resource_list;
+ goto err_pm_put;
+ }
+
+ err = clk_prepare_enable(pcie->bus_clk);
+ if (err) {
+ dev_err(dev, "failed to enable bus clock: %d\n", err);
+ goto err_unmap_msi_irqs;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
- goto err_free_resource_list;
+ goto err_clk_disable;
- pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
- if (err < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
- goto err_pm_disable;
+ phy_init_fn = of_device_get_match_data(dev);
+ err = phy_init_fn(pcie);
+ if (err) {
+ dev_err(dev, "failed to init PCIe PHY\n");
+ goto err_clk_disable;
}
/* Failure to get a link might just be that no cards are inserted */
- hw_init_fn = of_device_get_match_data(dev);
- err = hw_init_fn(pcie);
- if (err) {
+ if (rcar_pcie_hw_init(pcie)) {
dev_info(dev, "PCIe link down\n");
err = -ENODEV;
- goto err_pm_put;
+ goto err_clk_disable;
}
data = rcar_pci_read_reg(pcie, MACSR);
@@ -1159,24 +1177,34 @@ static int rcar_pcie_probe(struct platform_device *pdev)
dev_err(dev,
"failed to enable MSI support: %d\n",
err);
- goto err_pm_put;
+ goto err_clk_disable;
}
}
err = rcar_pcie_enable(pcie);
if (err)
- goto err_pm_put;
+ goto err_msi_teardown;
return 0;
+err_msi_teardown:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pcie_teardown_msi(pcie);
+
+err_clk_disable:
+ clk_disable_unprepare(pcie->bus_clk);
+
+err_unmap_msi_irqs:
+ irq_dispose_mapping(pcie->msi.irq2);
+ irq_dispose_mapping(pcie->msi.irq1);
+
err_pm_put:
pm_runtime_put(dev);
err_pm_disable:
pm_runtime_disable(dev);
-
-err_free_resource_list:
pci_free_resource_list(&pcie->resources);
+
err_free_bridge:
pci_free_host_bridge(bridge);
diff --git a/drivers/pci/host/pcie-rockchip-ep.c b/drivers/pci/host/pcie-rockchip-ep.c
new file mode 100644
index 0000000000000..fc267a49a932e
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-ep.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe endpoint controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/configfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epf.h>
+#include <linux/sizes.h>
+
+#include "pcie-rockchip.h"
+
+/**
+ * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
+ * @rockchip: Rockchip PCIe controller
+ * @epc: PCI endpoint controller (EPC) associated with this endpoint
+ * @max_regions: maximum number of outbound regions supported by the hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ *		   dedicated outbound region is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ * the sending of a memory write (MSI) / normal message (legacy
+ * IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ * the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct rockchip_pcie_ep {
+ struct rockchip_pcie rockchip;
+ struct pci_epc *epc;
+ u32 max_regions;
+ unsigned long ob_region_map;
+ phys_addr_t *ob_addr;
+ phys_addr_t irq_phys_addr;
+ void __iomem *irq_cpu_addr;
+ u64 irq_pci_addr;
+ u8 irq_pci_fn;
+ u8 irq_pending;
+};
+
+static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
+ u32 region)
+{
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region));
+}
+
+static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
+ u32 r, u32 type, u64 cpu_addr,
+ u64 pci_addr, size_t size)
+{
+ u64 sz = 1ULL << fls64(size - 1);
+ int num_pass_bits = ilog2(sz);
+ u32 addr0, addr1, desc0, desc1;
+ bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG);
+
+ /* The minimal region size is 1MB */
+ if (num_pass_bits < 8)
+ num_pass_bits = 8;
+
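+	/* The CPU-side address is programmed as an offset from the start of the memory aperture */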
+ cpu_addr -= rockchip->mem_res->start;
+ addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) &
+ PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+ (lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+ addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr);
+ desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type;
+ desc1 = 0;
+
+ if (is_nor_msg) {
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+ rockchip_pcie_write(rockchip, 0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+ rockchip_pcie_write(rockchip, desc0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+ rockchip_pcie_write(rockchip, desc1,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+ } else {
+ /* PCI bus address region */
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+ rockchip_pcie_write(rockchip, desc0,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+ rockchip_pcie_write(rockchip, desc1,
+ ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+
+ addr0 =
+ ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+ (lower_32_bits(cpu_addr) &
+ PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+ addr1 = upper_32_bits(cpu_addr);
+ }
+
+ /* CPU bus address region */
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
+}
+
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+ struct pci_epf_header *hdr)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ /* All functions share the same vendor ID with function 0 */
+ if (fn == 0) {
+ u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
+ (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
+
+ rockchip_pcie_write(rockchip, vid_regs,
+ PCIE_CORE_CONFIG_VENDOR);
+ }
+
+ rockchip_pcie_write(rockchip, hdr->deviceid << 16,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID);
+
+ rockchip_pcie_write(rockchip,
+ hdr->revid |
+ hdr->progif_code << 8 |
+ hdr->subclass_code << 16 |
+ hdr->baseclass_code << 24,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
+ rockchip_pcie_write(rockchip, hdr->cache_line_size,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_CACHE_LINE_SIZE);
+ rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_SUBSYSTEM_VENDOR_ID);
+ rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ PCI_INTERRUPT_LINE);
+
+ return 0;
+}
+
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ dma_addr_t bar_phys = epf_bar->phys_addr;
+ enum pci_barno bar = epf_bar->barno;
+ int flags = epf_bar->flags;
+ u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+ u64 sz;
+
+ /* BAR size is 2^(aperture + 7) */
+ sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);
+
+ /*
+ * roundup_pow_of_two() returns an unsigned long, which is not suited
+ * for 64bit values.
+ */
+ sz = 1ULL << fls64(sz - 1);
+ aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
+
+ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
+ } else {
+ bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+ bool is_64bits = sz > SZ_2G;
+
+ if (is_64bits && (bar & 1))
+ return -EINVAL;
+
+ if (is_64bits && is_prefetch)
+ ctrl =
+ ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+ else if (is_prefetch)
+ ctrl =
+ ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+ else if (is_64bits)
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
+ else
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
+ }
+
+ if (bar < BAR_4) {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+
+ addr0 = lower_32_bits(bar_phys);
+ addr1 = upper_32_bits(bar_phys);
+
+ cfg = rockchip_pcie_read(rockchip, reg);
+ cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+
+ rockchip_pcie_write(rockchip, cfg, reg);
+ rockchip_pcie_write(rockchip, addr0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+ rockchip_pcie_write(rockchip, addr1,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 reg, cfg, b, ctrl;
+ enum pci_barno bar = epf_bar->barno;
+
+ if (bar < BAR_4) {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+
+ ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
+ cfg = rockchip_pcie_read(rockchip, reg);
+ cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+
+ rockchip_pcie_write(rockchip, cfg, reg);
+ rockchip_pcie_write(rockchip, 0x0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+ rockchip_pcie_write(rockchip, 0x0,
+ ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+}
+
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr, u64 pci_addr,
+ size_t size)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *pcie = &ep->rockchip;
+ u32 r;
+
+ r = find_first_zero_bit(&ep->ob_region_map,
+ sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ /*
+ * Region 0 is reserved for configuration space and shouldn't
+ * be used elsewhere per TRM, so leave it out.
+ */
+ if (r >= ep->max_regions - 1) {
+ dev_err(&epc->dev, "no free outbound region\n");
+ return -EINVAL;
+ }
+
+ rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr,
+ pci_addr, size);
+
+ set_bit(r, &ep->ob_region_map);
+ ep->ob_addr[r] = addr;
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 r;
+
+ for (r = 0; r < ep->max_regions - 1; r++)
+ if (ep->ob_addr[r] == addr)
+ break;
+
+ /*
+ * Region 0 is reserved for configuration space and shouldn't
+ * be used elsewhere per TRM, so leave it out.
+ */
+ if (r == ep->max_regions - 1)
+ return;
+
+ rockchip_pcie_clear_ep_ob_atu(rockchip, r);
+
+ ep->ob_addr[r] = 0;
+ clear_bit(r, &ep->ob_region_map);
+}
+
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+ u8 multi_msg_cap)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u16 flags;
+
+ flags = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
+ flags |=
+ ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ PCI_MSI_FLAGS_64BIT;
+ flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
+ rockchip_pcie_write(rockchip, flags,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ return 0;
+}
+
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u16 flags;
+
+ flags = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+ return -EINVAL;
+
+ return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+}
+
+static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx, bool is_asserted)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 r = ep->max_regions - 1;
+ u32 offset;
+ u16 status;
+ u8 msg_code;
+
+ if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
+ ep->irq_pci_fn != fn)) {
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
+ AXI_WRAPPER_NOR_MSG,
+ ep->irq_phys_addr, 0, 0);
+ ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR;
+ ep->irq_pci_fn = fn;
+ }
+
+ intx &= 3;
+ if (is_asserted) {
+ ep->irq_pending |= BIT(intx);
+ msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx;
+ } else {
+ ep->irq_pending &= ~BIT(intx);
+ msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx;
+ }
+
+ status = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+ status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+
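+
+	/* Flip the Interrupt Status bit only when the aggregate pending state changes */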
+ if ((status != 0) ^ (ep->irq_pending != 0)) {
+ status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS;
+ rockchip_pcie_write(rockchip, status,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+ }
+
+ offset =
+ ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) |
+ ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA;
+ writel(0, ep->irq_cpu_addr + offset);
+}
+
+static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx)
+{
+ u16 cmd;
+
+ cmd = rockchip_pcie_read(&ep->rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_CMD_STATUS);
+
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ return -EINVAL;
+
+ /*
+	 * The TRM vaguely states that some delay, tied to a number of AHB bus
+	 * clock cycles, is needed between asserting and deasserting INTx, so
+	 * add a generous 1 ms here.
+ */
+ rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
+ mdelay(1);
+ rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
+ return 0;
+}
+
+static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 interrupt_num)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u16 flags, mme, data, data_mask;
+ u8 msi_count;
+ u64 pci_addr, pci_addr_mask = 0xff;
+
+ /* Check MSI enable bit */
+ flags = rockchip_pcie_read(&ep->rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+ if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+ return -EINVAL;
+
+ /* Get MSI numbers from MME */
+ mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Set MSI private data */
+ data_mask = msi_count - 1;
+ data = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_DATA_64);
+ data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
+
+ /* Get MSI PCI address */
+ pci_addr = rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= rockchip_pcie_read(rockchip,
+ ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+ ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+ PCI_MSI_ADDRESS_LO);
+ pci_addr &= GENMASK_ULL(63, 2);
+
+ /* Set the outbound region if needed. */
+ if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
+ ep->irq_pci_fn != fn)) {
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1,
+ AXI_WRAPPER_MEM_WRITE,
+ ep->irq_phys_addr,
+ pci_addr & ~pci_addr_mask,
+ pci_addr_mask + 1);
+ ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
+ ep->irq_pci_fn = fn;
+ }
+
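+	/* Fire the MSI by writing the message data to the mapped doorbell address */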
+ writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+ return 0;
+}
+
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+ enum pci_epc_irq_type type,
+ u8 interrupt_num)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
+ case PCI_EPC_IRQ_MSI:
+ return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rockchip_pcie_ep_start(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct pci_epf *epf;
+ u32 cfg;
+
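+	/* Enable function 0 plus every function that has an EPF bound to this EPC */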
+ cfg = BIT(0);
+ list_for_each_entry(epf, &epc->pci_epf, list)
+ cfg |= BIT(epf->func_no);
+
+ rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+
+ list_for_each_entry(epf, &epc->pci_epf, list)
+ pci_epf_linkup(epf);
+
+ return 0;
+}
+
+static const struct pci_epc_ops rockchip_pcie_epc_ops = {
+ .write_header = rockchip_pcie_ep_write_header,
+ .set_bar = rockchip_pcie_ep_set_bar,
+ .clear_bar = rockchip_pcie_ep_clear_bar,
+ .map_addr = rockchip_pcie_ep_map_addr,
+ .unmap_addr = rockchip_pcie_ep_unmap_addr,
+ .set_msi = rockchip_pcie_ep_set_msi,
+ .get_msi = rockchip_pcie_ep_get_msi,
+ .raise_irq = rockchip_pcie_ep_raise_irq,
+ .start = rockchip_pcie_ep_start,
+};
+
+static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
+ struct rockchip_pcie_ep *ep)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_get_phys(rockchip);
+ if (err)
+ return err;
+
+ err = of_property_read_u32(dev->of_node,
+ "rockchip,max-outbound-regions",
+ &ep->max_regions);
+ if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
+ ep->max_regions = MAX_REGION_LIMIT;
+
+ err = of_property_read_u8(dev->of_node, "max-functions",
+ &ep->epc->max_functions);
+ if (err < 0)
+ ep->epc->max_functions = 1;
+
+ return 0;
+}
+
+static const struct of_device_id rockchip_pcie_ep_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie-ep"},
+ {},
+};
+
+static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie_ep *ep;
+ struct rockchip_pcie *rockchip;
+ struct pci_epc *epc;
+ size_t max_regions;
+ int err;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ rockchip = &ep->rockchip;
+ rockchip->is_rc = false;
+ rockchip->dev = dev;
+
+ epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ err = rockchip_pcie_parse_ep_dt(rockchip, ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ goto err_disable_clocks;
+
+ /* Establish the link automatically */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ max_regions = ep->max_regions;
+ ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr),
+ GFP_KERNEL);
+
+ if (!ep->ob_addr) {
+ err = -ENOMEM;
+ goto err_uninit_port;
+ }
+
+ /* Only enable function 0 by default */
+ rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+
+ err = pci_epc_mem_init(epc, rockchip->mem_res->start,
+ resource_size(rockchip->mem_res));
+ if (err < 0) {
+ dev_err(dev, "failed to initialize the memory space\n");
+ goto err_uninit_port;
+ }
+
+ ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+ SZ_128K);
+ if (!ep->irq_cpu_addr) {
+ dev_err(dev, "failed to reserve memory space for MSI\n");
+ err = -ENOMEM;
+ goto err_epc_mem_exit;
+ }
+
+ ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+
+ return 0;
+err_epc_mem_exit:
+ pci_epc_mem_exit(epc);
+err_uninit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_disable_clocks:
+ rockchip_pcie_disable_clocks(rockchip);
+ return err;
+}
+
+static struct platform_driver rockchip_pcie_ep_driver = {
+ .driver = {
+ .name = "rockchip-pcie-ep",
+ .of_match_table = rockchip_pcie_ep_of_match,
+ },
+ .probe = rockchip_pcie_ep_probe,
+};
+
+builtin_platform_driver(rockchip_pcie_ep_driver);
diff --git a/drivers/pci/host/pcie-rockchip-host.c b/drivers/pci/host/pcie-rockchip-host.c
new file mode 100644
index 0000000000000..1372d270764f9
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip-host.c
@@ -0,0 +1,1142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#include "../pci.h"
+#include "pcie-rockchip.h"
+
+static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+
+ /* Update Tx credit maximum update interval */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
+ val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
+ val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
+ rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
+}
+
+static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, int dev)
+{
+ /* access only one slot on each root port */
+ if (bus->number == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ /*
+ * do not read more than one device on the bus directly attached
+ * to RC's downstream side.
+ */
+ if (bus->primary == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ return 1;
+}
+
+static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+ u8 map;
+
+ if (rockchip->legacy_phy)
+ return GENMASK(MAX_LANE_NUM - 1, 0);
+
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
+ map = val & PCIE_CORE_LANE_MAP_MASK;
+
+ /* The link may be using a reverse-indexed mapping. */
+ if (val & PCIE_CORE_LANE_MAP_REVERSE)
+ map = bitrev8(map) >> 4;
+
+ return map;
+}
+
+static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(addr);
+ } else if (size == 2) {
+ *val = readw(addr);
+ } else if (size == 1) {
+ *val = readb(addr);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 val)
+{
+ u32 mask, tmp, offset;
+ void __iomem *addr;
+
+ offset = where & ~0x3;
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+
+ if (size == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+
+ /*
+ * N.B. This read/modify/write isn't safe in general because it can
+ * corrupt RW1C bits in adjacent registers. But the hardware
+ * doesn't support smaller writes.
+ */
+ tmp = readl(addr) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ if (!IS_ALIGNED(busdev, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (bus->parent->number == rockchip->root_bus_nr)
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
+ if (size == 4) {
+ *val = readl(rockchip->reg_base + busdev);
+ } else if (size == 2) {
+ *val = readw(rockchip->reg_base + busdev);
+ } else if (size == 1) {
+ *val = readb(rockchip->reg_base + busdev);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+ if (!IS_ALIGNED(busdev, size))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (bus->parent->number == rockchip->root_bus_nr)
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
+ if (size == 4)
+ writel(val, rockchip->reg_base + busdev);
+ else if (size == 2)
+ writew(val, rockchip->reg_base + busdev);
+ else if (size == 1)
+ writeb(val, rockchip->reg_base + busdev);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size,
+ val);
+}
+
+static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size,
+ val);
+}
+
+static struct pci_ops rockchip_pcie_ops = {
+ .read = rockchip_pcie_rd_conf,
+ .write = rockchip_pcie_wr_conf,
+};
+
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+ int curr;
+ u32 status, scale, power;
+
+ if (IS_ERR(rockchip->vpcie3v3))
+ return;
+
+ /*
+	 * Set the RC's captured slot power limit and scale if vpcie3v3 is
+	 * available. The default values are both zero, which means software
+	 * must set them according to the actual power supply.
+ */
+ curr = regulator_get_current_limit(rockchip->vpcie3v3);
+ if (curr <= 0)
+ return;
+
+ scale = 3; /* 0.001x */
+ curr = curr / 1000; /* convert to mA */
+ power = (curr * 3300) / 1000; /* milliwatt */
+ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ if (!scale) {
+ dev_warn(rockchip->dev, "invalid power supply\n");
+ return;
+ }
+ scale--;
+ power = power / 10;
+ }
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+ status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+ (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+}
+
+/**
+ * rockchip_pcie_host_init_port - Initialize hardware
+ * @rockchip: PCIe port information
+ */
+static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err, i = MAX_LANE_NUM;
+ u32 status;
+
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ return err;
+
+ /* Fix the transmitted FTS count desired to exit from L0s. */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
+ status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+ (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+
+ rockchip_pcie_set_power_limit(rockchip);
+
+ /* Set RC's clock architecture as common clock */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKSTA_SLC << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Set RC's RCB to 128 */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RCB;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Enable Gen1 training */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+
+ /* 500ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+ status, PCIE_LINK_UP(status), 20,
+ 500 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(dev, "PCIe link training gen1 timeout!\n");
+ goto err_power_off_phy;
+ }
+
+ if (rockchip->link_gen == 2) {
+ /*
+ * Enable retrain for gen2. This should be configured only after
+ * gen1 finished.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ status, PCIE_LINK_IS_GEN2(status), 20,
+ 500 * USEC_PER_MSEC);
+ if (err)
+			dev_dbg(dev, "PCIe link training gen2 timed out, falling back to gen1!\n");
+ }
+
+ /* Check the final link width from negotiated lane counter from MGMT */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+ PCIE_CORE_PL_CONF_LANE_SHIFT);
+ dev_dbg(dev, "current link width is x%d\n", status);
+
+ /* Power off unused lane(s) */
+ rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ if (!(rockchip->lanes_map & BIT(i))) {
+ dev_dbg(dev, "idling lane %d\n", i);
+ phy_power_off(rockchip->phys[i]);
+ }
+ }
+
+ rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ PCIE_CORE_CONFIG_VENDOR);
+ rockchip_pcie_write(rockchip,
+ PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
+ PCIE_RC_CONFIG_RID_CCR);
+
+ /* Clear THP cap's next cap pointer to remove L1 substate cap */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+ status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
+ /* Clear L0s from RC's link cap */
+ if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
+ status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
+ }
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+ status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+ status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
+
+ return 0;
+err_power_off_phy:
+ while (i--)
+ phy_power_off(rockchip->phys[i]);
+ i = MAX_LANE_NUM;
+ while (i--)
+ phy_exit(rockchip->phys[i]);
+ return err;
+}
+
+static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 sub_reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LOCAL) {
+ dev_dbg(dev, "local interrupt received\n");
+ sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
+ if (sub_reg & PCIE_CORE_INT_PRFPE)
+ dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFPE)
+ dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_RRPE)
+ dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_PRFO)
+ dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFO)
+ dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_RT)
+ dev_dbg(dev, "replay timer timed out\n");
+
+ if (sub_reg & PCIE_CORE_INT_RTR)
+ dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
+
+ if (sub_reg & PCIE_CORE_INT_PE)
+ dev_dbg(dev, "phy error detected on receive side\n");
+
+ if (sub_reg & PCIE_CORE_INT_MTR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_UCR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_FCE)
+ dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
+
+ if (sub_reg & PCIE_CORE_INT_CT)
+ dev_dbg(dev, "a request timed out waiting for completion\n");
+
+ if (sub_reg & PCIE_CORE_INT_UTC)
+ dev_dbg(dev, "unmapped TC error\n");
+
+ if (sub_reg & PCIE_CORE_INT_MMVC)
+ dev_dbg(dev, "MSI mask register changes\n");
+
+ rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
+ } else if (reg & PCIE_CLIENT_INT_PHY) {
+ dev_dbg(dev, "phy link changes\n");
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_clr_bw_int(rockchip);
+ }
+
+ rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
+ dev_dbg(dev, "legacy done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_MSG)
+ dev_dbg(dev, "message done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_HOT_RST)
+ dev_dbg(dev, "hot reset interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_DPA)
+ dev_dbg(dev, "dpa interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_FATAL_ERR)
+ dev_dbg(dev, "fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
+		dev_dbg(dev, "non-fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_CORR_ERR)
+ dev_dbg(dev, "correctable error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_PHY)
+ dev_dbg(dev, "phy interrupt received\n");
+
+ rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
+ PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
+ PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
+ PCIE_CLIENT_INT_NFATAL_ERR |
+ PCIE_CLIENT_INT_CORR_ERR |
+ PCIE_CLIENT_INT_PHY),
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 hwirq;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
+
+ while (reg) {
+ hwirq = ffs(reg) - 1;
+ reg &= ~BIT(hwirq);
+
+ virq = irq_find_mapping(rockchip->irq_domain, hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+{
+ int irq, err;
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0) {
+ dev_err(dev, "missing sys IRQ resource\n");
+ return irq;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
+ IRQF_SHARED, "pcie-sys", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+ return err;
+ }
+
+ irq = platform_get_irq_byname(pdev, "legacy");
+ if (irq < 0) {
+ dev_err(dev, "missing legacy IRQ resource\n");
+ return irq;
+ }
+
+ irq_set_chained_handler_and_data(irq,
+ rockchip_pcie_legacy_int_handler,
+ rockchip);
+
+ irq = platform_get_irq_byname(pdev, "client");
+ if (irq < 0) {
+ dev_err(dev, "missing client IRQ resource\n");
+ return irq;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
+ IRQF_SHARED, "pcie-client", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe client IRQ\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * rockchip_pcie_parse_host_dt - Parse Device Tree
+ * @rockchip: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_setup_irq(rockchip);
+ if (err)
+ return err;
+
+ rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+ if (IS_ERR(rockchip->vpcie12v)) {
+ if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie12v regulator found\n");
+ }
+
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie3v3 regulator found\n");
+ }
+
+ rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8)) {
+ if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie1v8 regulator found\n");
+ }
+
+ rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9)) {
+ if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie0v9 regulator found\n");
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ if (!IS_ERR(rockchip->vpcie12v)) {
+ err = regulator_enable(rockchip->vpcie12v);
+ if (err) {
+			dev_err(dev, "failed to enable vpcie12v regulator\n");
+ goto err_out;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie3v3)) {
+ err = regulator_enable(rockchip->vpcie3v3);
+ if (err) {
+			dev_err(dev, "failed to enable vpcie3v3 regulator\n");
+ goto err_disable_12v;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie1v8)) {
+ err = regulator_enable(rockchip->vpcie1v8);
+ if (err) {
+			dev_err(dev, "failed to enable vpcie1v8 regulator\n");
+ goto err_disable_3v3;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie0v9)) {
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+			dev_err(dev, "failed to enable vpcie0v9 regulator\n");
+ goto err_disable_1v8;
+ }
+ }
+
+ return 0;
+
+err_disable_1v8:
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+err_disable_3v3:
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+err_disable_12v:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+err_out:
+ return err;
+}
+
+static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
+ (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
+ rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
+ PCIE_CORE_INT_MASK);
+
+ rockchip_pcie_enable_bw_int(rockchip);
+}
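
The two register writes above use the controller's write-mask convention (the same scheme the driver documents for PCIE_CLIENT_CONFIG): the upper 16 bits of the written value select which of the lower 16 bits take effect. So (bits << 16) & ~bits clears (unmasks) exactly those interrupt bits and, as in the suspend path below, (bits << 16) | bits sets (masks) them, with no read-modify-write needed. A minimal stand-alone sketch of that arithmetic follows; it is an editorial illustration, not part of the patch, and 0x7fe4 is only an example mask, not the real PCIE_CLIENT_INT_CLI.

#include <stdint.h>
#include <stdio.h>

/* Write-mask helpers: upper 16 bits = which bits to change, lower 16 = their new value. */
static uint32_t hiword_clear_bits(uint32_t bits)
{
	return (bits << 16) & ~bits;	/* touch "bits", write them as 0 (unmask) */
}

static uint32_t hiword_set_bits(uint32_t bits)
{
	return (bits << 16) | bits;	/* touch "bits", write them as 1 (mask) */
}

int main(void)
{
	uint32_t example_int_bits = 0x7fe4;	/* example interrupt bits, not the real define */

	printf("unmask word: 0x%08x\n", (unsigned int)hiword_clear_bits(example_int_bits));
	printf("mask word:   0x%08x\n", (unsigned int)hiword_set_bits(example_int_bits));
	return 0;
}
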
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct device_node *intc = of_get_next_child(dev->of_node, NULL);
+
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ if (!rockchip->irq_domain) {
+		dev_err(dev, "failed to get an INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
+ int region_no, int type, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ob_addr_0;
+ u32 ob_addr_1;
+ u32 ob_desc_0;
+ u32 aw_offset;
+
+ if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < 8)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+ if (region_no == 0) {
+ if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+ if (region_no != 0) {
+ if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+
+ aw_offset = (region_no << OB_REG_SIZE_SHIFT);
+
+ ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
+ ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
+ ob_addr_1 = upper_addr;
+ ob_desc_0 = (1 << 23 | type);
+
+ rockchip_pcie_write(rockchip, ob_addr_0,
+ PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_addr_1,
+ PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_desc_0,
+ PCIE_CORE_OB_REGION_DESC0 + aw_offset);
+ rockchip_pcie_write(rockchip, 0,
+ PCIE_CORE_OB_REGION_DESC1 + aw_offset);
+
+ return 0;
+}
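
The checks above tie the window size to num_pass_bits: the low num_pass_bits + 1 address bits pass through untranslated, so a region spans 2 << num_pass_bits bytes, which is why the 1 MiB windows programmed in rockchip_pcie_cfg_atu() use "20 - 1". A tiny stand-alone sketch of that relationship (editorial illustration, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Outbound window size implied by num_pass_bits: 2^(num_pass_bits + 1) bytes. */
static uint64_t ob_window_size(uint8_t num_pass_bits)
{
	return 2ULL << num_pass_bits;
}

int main(void)
{
	/* "20 - 1" pass bits give a 1 MiB window, matching AXI_REGION_SIZE (BIT(20)). */
	assert(ob_window_size(20 - 1) == 1ULL << 20);
	printf("window size: %llu bytes\n",
	       (unsigned long long)ob_window_size(20 - 1));
	return 0;
}
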
+
+static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
+ int region_no, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ib_addr_0;
+ u32 ib_addr_1;
+ u32 aw_offset;
+
+ if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+
+ aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
+
+ ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
+ ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
+ ib_addr_1 = upper_addr;
+
+ rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
+
+ return 0;
+}
+
+static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int offset;
+ int err;
+ int reg_no;
+
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+
+ for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+ AXI_WRAPPER_MEM_WRITE,
+ 20 - 1,
+ rockchip->mem_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC mem outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+ if (err) {
+ dev_err(dev, "program RC mem inbound ATU failed\n");
+ return err;
+ }
+
+ offset = rockchip->mem_size >> 20;
+ for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip,
+ reg_no + 1 + offset,
+ AXI_WRAPPER_IO_WRITE,
+ 20 - 1,
+ rockchip->io_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC io outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ /* assign message regions */
+ rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
+ AXI_WRAPPER_NOR_MSG,
+ 20 - 1, 0, 0);
+
+ rockchip->msg_bus_addr = rockchip->mem_bus_addr +
+ ((reg_no + offset) << 20);
+ return err;
+}
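
For reference, the function above hands out outbound regions in a fixed order: region 0 stays reserved for configuration accesses, then one 1 MiB region per MiB of MEM space, then one per MiB of I/O space, and finally a single message region right after them (msg_bus_addr records the start of that last window). A stand-alone sketch of the index arithmetic with example sizes (editorial illustration, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* Example window counts in 1 MiB units, i.e. mem_size >> 20 and io_size >> 20. */
	unsigned int mem_mb = 32, io_mb = 1;
	unsigned int reg_no;

	printf("region 0: configuration accesses\n");
	for (reg_no = 0; reg_no < mem_mb; reg_no++)
		printf("region %u: MEM window %u\n", reg_no + 1, reg_no);
	for (reg_no = 0; reg_no < io_mb; reg_no++)
		printf("region %u: I/O window %u\n", reg_no + 1 + mem_mb, reg_no);
	printf("region %u: message window\n", io_mb + 1 + mem_mb);
	return 0;
}
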
+
+static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
+{
+ u32 value;
+ int err;
+
+ /* send PME_TURN_OFF message */
+ writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+
+ /* read LTSSM and wait for falling into L2 link state */
+ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
+ value, PCIE_LINK_IS_L2(value), 20,
+ jiffies_to_usecs(5 * HZ));
+ if (err) {
+ dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
+{
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+ int ret;
+
+ /* disable core and cli int since we don't need to ack PME_ACK */
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
+ PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
+ rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
+
+ ret = rockchip_pcie_wait_l2(rockchip);
+ if (ret) {
+ rockchip_pcie_enable_interrupts(rockchip);
+ return ret;
+ }
+
+ rockchip_pcie_deinit_phys(rockchip);
+
+ rockchip_pcie_disable_clocks(rockchip);
+
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+
+ return ret;
+}
+
+static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
+{
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+ int err;
+
+ if (!IS_ERR(rockchip->vpcie0v9)) {
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+			dev_err(dev, "failed to enable vpcie0v9 regulator\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ goto err_disable_0v9;
+
+ err = rockchip_pcie_host_init_port(rockchip);
+ if (err)
+ goto err_pcie_resume;
+
+ err = rockchip_pcie_cfg_atu(rockchip);
+ if (err)
+		goto err_deinit_port;
+
+ /* Need this to enter L1 again */
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ return 0;
+
+err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_pcie_resume:
+ rockchip_pcie_disable_clocks(rockchip);
+err_disable_0v9:
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+ return err;
+}
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct rockchip_pcie *rockchip;
+ struct device *dev = &pdev->dev;
+ struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *win;
+ resource_size_t io_base;
+ struct resource *mem;
+ struct resource *io;
+ int err;
+
+ LIST_HEAD(res);
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
+ if (!bridge)
+ return -ENOMEM;
+
+ rockchip = pci_host_bridge_priv(bridge);
+
+ platform_set_drvdata(pdev, rockchip);
+ rockchip->dev = dev;
+ rockchip->is_rc = true;
+
+ err = rockchip_pcie_parse_host_dt(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_set_vpcie(rockchip);
+ if (err) {
+ dev_err(dev, "failed to set vpcie regulator\n");
+ goto err_set_vpcie;
+ }
+
+ err = rockchip_pcie_host_init_port(rockchip);
+ if (err)
+ goto err_vpcie;
+
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ err = rockchip_pcie_init_irq_domain(rockchip);
+ if (err < 0)
+ goto err_deinit_port;
+
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+ &res, &io_base);
+ if (err)
+ goto err_remove_irq_domain;
+
+ err = devm_request_pci_bus_resources(dev, &res);
+ if (err)
+ goto err_free_res;
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "I/O";
+ rockchip->io_size = resource_size(io);
+ rockchip->io_bus_addr = io->start - win->offset;
+ err = pci_remap_iospace(io, io_base);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, io);
+ continue;
+ }
+ rockchip->io = io;
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ mem->name = "MEM";
+ rockchip->mem_size = resource_size(mem);
+ rockchip->mem_bus_addr = mem->start - win->offset;
+ break;
+ case IORESOURCE_BUS:
+ rockchip->root_bus_nr = win->res->start;
+ break;
+ default:
+ continue;
+ }
+ }
+
+ err = rockchip_pcie_cfg_atu(rockchip);
+ if (err)
+ goto err_unmap_iospace;
+
+ rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
+ if (!rockchip->msg_region) {
+ err = -ENOMEM;
+ goto err_unmap_iospace;
+ }
+
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = rockchip;
+ bridge->busnr = 0;
+ bridge->ops = &rockchip_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err < 0)
+ goto err_unmap_iospace;
+
+ bus = bridge->bus;
+
+ rockchip->root_bus = bus;
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+ return 0;
+
+err_unmap_iospace:
+ pci_unmap_iospace(rockchip->io);
+err_free_res:
+ pci_free_resource_list(&res);
+err_remove_irq_domain:
+ irq_domain_remove(rockchip->irq_domain);
+err_deinit_port:
+ rockchip_pcie_deinit_phys(rockchip);
+err_vpcie:
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+err_set_vpcie:
+ rockchip_pcie_disable_clocks(rockchip);
+ return err;
+}
+
+static int rockchip_pcie_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+
+ pci_stop_root_bus(rockchip->root_bus);
+ pci_remove_root_bus(rockchip->root_bus);
+ pci_unmap_iospace(rockchip->io);
+ irq_domain_remove(rockchip->irq_domain);
+
+ rockchip_pcie_deinit_phys(rockchip);
+
+ rockchip_pcie_disable_clocks(rockchip);
+
+ if (!IS_ERR(rockchip->vpcie12v))
+ regulator_disable(rockchip->vpcie12v);
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+
+ return 0;
+}
+
+static const struct dev_pm_ops rockchip_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
+ rockchip_pcie_resume_noirq)
+};
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .pm = &rockchip_pcie_pm_ops,
+ },
+ .probe = rockchip_pcie_probe,
+ .remove = rockchip_pcie_remove,
+};
+module_platform_driver(rockchip_pcie_driver);
+
+MODULE_AUTHOR("Rockchip Inc");
+MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index f1e8f97ea1fb1..c53d1322a3d6c 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -11,535 +11,154 @@
* ARM PCI Host generic driver.
*/
-#include <linux/bitrev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/iopoll.h>
-#include <linux/irq.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_pci.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <linux/regmap.h>
-/*
- * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
- * bits. This allows atomic updates of the register without locking.
- */
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
-
-#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
-#define MAX_LANE_NUM 4
-
-#define PCIE_CLIENT_BASE 0x0
-#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
-#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
-#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
-#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
-#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
-#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
-#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
-#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
-#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
-#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
-#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
-#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
-#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
-#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
-#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
-#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
-#define PCIE_CLIENT_INTR_SHIFT 5
-#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
-#define PCIE_CLIENT_INT_MSG BIT(14)
-#define PCIE_CLIENT_INT_HOT_RST BIT(13)
-#define PCIE_CLIENT_INT_DPA BIT(12)
-#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
-#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
-#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
-#define PCIE_CLIENT_INT_INTD BIT(8)
-#define PCIE_CLIENT_INT_INTC BIT(7)
-#define PCIE_CLIENT_INT_INTB BIT(6)
-#define PCIE_CLIENT_INT_INTA BIT(5)
-#define PCIE_CLIENT_INT_LOCAL BIT(4)
-#define PCIE_CLIENT_INT_UDMA BIT(3)
-#define PCIE_CLIENT_INT_PHY BIT(2)
-#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
-#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
-
-#define PCIE_CLIENT_INT_LEGACY \
- (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
- PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
-
-#define PCIE_CLIENT_INT_CLI \
- (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
- PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
- PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
- PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
- PCIE_CLIENT_INT_PHY)
-
-#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
-#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
-#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
-#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
-#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
-#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
-#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
-#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
-#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
-#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
-#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
-#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
-#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
-#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
- (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
-#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
-#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
-#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
-#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
-#define PCIE_CORE_INT_PRFPE BIT(0)
-#define PCIE_CORE_INT_CRFPE BIT(1)
-#define PCIE_CORE_INT_RRPE BIT(2)
-#define PCIE_CORE_INT_PRFO BIT(3)
-#define PCIE_CORE_INT_CRFO BIT(4)
-#define PCIE_CORE_INT_RT BIT(5)
-#define PCIE_CORE_INT_RTR BIT(6)
-#define PCIE_CORE_INT_PE BIT(7)
-#define PCIE_CORE_INT_MTR BIT(8)
-#define PCIE_CORE_INT_UCR BIT(9)
-#define PCIE_CORE_INT_FCE BIT(10)
-#define PCIE_CORE_INT_CT BIT(11)
-#define PCIE_CORE_INT_UTC BIT(18)
-#define PCIE_CORE_INT_MMVC BIT(19)
-#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44)
-#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
-#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
-
-#define PCIE_CORE_INT \
- (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
- PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
- PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
- PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
- PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
- PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
- PCIE_CORE_INT_MMVC)
-
-#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
-#define PCIE_RC_CONFIG_BASE 0xa00000
-#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
-#define PCIE_RC_CONFIG_SCC_SHIFT 16
-#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
-#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
-#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
-#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
-#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
-#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
-#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
-#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
-#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
-#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
-#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
-#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
-#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
-
-#define PCIE_CORE_AXI_CONF_BASE 0xc00000
-#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
-#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
-#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
-#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
-#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
-#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
-
-#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
-#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
-#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
-#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
-#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
-
-/* Size of one AXI Region (not Region 0) */
-#define AXI_REGION_SIZE BIT(20)
-/* Size of Region 0, equal to sum of sizes of other regions */
-#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
-#define OB_REG_SIZE_SHIFT 5
-#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
-#define AXI_WRAPPER_IO_WRITE 0x6
-#define AXI_WRAPPER_MEM_WRITE 0x2
-#define AXI_WRAPPER_TYPE0_CFG 0xa
-#define AXI_WRAPPER_TYPE1_CFG 0xb
-#define AXI_WRAPPER_NOR_MSG 0xc
-
-#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
-#define MIN_AXI_ADDR_BITS_PASSED 8
-#define PCIE_RC_SEND_PME_OFF 0x11960
-#define ROCKCHIP_VENDOR_ID 0x1d87
-#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
-#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
-#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
-#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
-#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
- (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
- PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
-#define PCIE_LINK_IS_L2(x) \
- (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
-#define PCIE_LINK_UP(x) \
- (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
-#define PCIE_LINK_IS_GEN2(x) \
- (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
-
-#define RC_REGION_0_ADDR_TRANS_H 0x00000000
-#define RC_REGION_0_ADDR_TRANS_L 0x00000000
-#define RC_REGION_0_PASS_BITS (25 - 1)
-#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
-#define MAX_AXI_WRAPPER_REGION_NUM 33
-
-struct rockchip_pcie {
- void __iomem *reg_base; /* DT axi-base */
- void __iomem *apb_base; /* DT apb-base */
- bool legacy_phy;
- struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
- struct regulator *vpcie12v; /* 12V power supply */
- struct regulator *vpcie3v3; /* 3.3V power supply */
- struct regulator *vpcie1v8; /* 1.8V power supply */
- struct regulator *vpcie0v9; /* 0.9V power supply */
- struct gpio_desc *ep_gpio;
- u32 lanes;
- u8 lanes_map;
- u8 root_bus_nr;
- int link_gen;
- struct device *dev;
- struct irq_domain *irq_domain;
- int offset;
- struct pci_bus *root_bus;
- struct resource *io;
- phys_addr_t io_bus_addr;
- u32 io_size;
- void __iomem *msg_region;
- u32 mem_size;
- phys_addr_t msg_bus_addr;
- phys_addr_t mem_bus_addr;
-};
-
-static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
-{
- return readl(rockchip->apb_base + reg);
-}
-
-static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
- u32 reg)
-{
- writel(val, rockchip->apb_base + reg);
-}
-
-static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
-{
- u32 status;
-
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-}
-
-static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
-{
- u32 status;
-
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-}
-
-static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
-{
- u32 val;
-
- /* Update Tx credit maximum update interval */
- val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
- val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
- val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
- rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
-}
+#include "../pci.h"
+#include "pcie-rockchip.h"
-static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
- struct pci_bus *bus, int dev)
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
{
- /* access only one slot on each root port */
- if (bus->number == rockchip->root_bus_nr && dev > 0)
- return 0;
-
- /*
- * do not read more than one device on the bus directly attached
- * to RC's downstream side.
- */
- if (bus->primary == rockchip->root_bus_nr && dev > 0)
- return 0;
-
- return 1;
-}
-
-static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
-{
- u32 val;
- u8 map;
-
- if (rockchip->legacy_phy)
- return GENMASK(MAX_LANE_NUM - 1, 0);
-
- val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
- map = val & PCIE_CORE_LANE_MAP_MASK;
-
- /* The link may be using a reverse-indexed mapping. */
- if (val & PCIE_CORE_LANE_MAP_REVERSE)
- map = bitrev8(map) >> 4;
-
- return map;
-}
-
-static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
- int where, int size, u32 *val)
-{
- void __iomem *addr;
-
- addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
-
- if (!IS_ALIGNED((uintptr_t)addr, size)) {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node;
+ struct resource *regs;
+ int err;
- if (size == 4) {
- *val = readl(addr);
- } else if (size == 2) {
- *val = readw(addr);
- } else if (size == 1) {
- *val = readb(addr);
+ if (rockchip->is_rc) {
+ regs = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "axi-base");
+ rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
+ if (IS_ERR(rockchip->reg_base))
+ return PTR_ERR(rockchip->reg_base);
} else {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ rockchip->mem_res =
+ platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mem-base");
+ if (!rockchip->mem_res)
+ return -EINVAL;
}
- return PCIBIOS_SUCCESSFUL;
-}
-static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
- int where, int size, u32 val)
-{
- u32 mask, tmp, offset;
- void __iomem *addr;
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "apb-base");
+ rockchip->apb_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
- offset = where & ~0x3;
- addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+ err = rockchip_pcie_get_phys(rockchip);
+ if (err)
+ return err;
- if (size == 4) {
- writel(val, addr);
- return PCIBIOS_SUCCESSFUL;
+ rockchip->lanes = 1;
+ err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
+ if (!err && (rockchip->lanes == 0 ||
+ rockchip->lanes == 3 ||
+ rockchip->lanes > 4)) {
+		dev_warn(dev, "invalid num-lanes, defaulting to one lane\n");
+ rockchip->lanes = 1;
}
- mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
-
- /*
- * N.B. This read/modify/write isn't safe in general because it can
- * corrupt RW1C bits in adjacent registers. But the hardware
- * doesn't support smaller writes.
- */
- tmp = readl(addr) & mask;
- tmp |= val << ((where & 0x3) * 8);
- writel(tmp, addr);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static void rockchip_pcie_cfg_configuration_accesses(
- struct rockchip_pcie *rockchip, u32 type)
-{
- u32 ob_desc_0;
-
- /* Configuration Accesses for region 0 */
- rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
-
- rockchip_pcie_write(rockchip,
- (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
- PCIE_CORE_OB_REGION_ADDR0);
- rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
- PCIE_CORE_OB_REGION_ADDR1);
- ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
- ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
- ob_desc_0 |= (type | (0x1 << 23));
- rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
- rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
-}
-
-static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
- struct pci_bus *bus, u32 devfn,
- int where, int size, u32 *val)
-{
- u32 busdev;
-
- busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
+ rockchip->link_gen = of_pci_get_max_link_speed(node);
+ if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+ rockchip->link_gen = 2;
- if (!IS_ALIGNED(busdev, size)) {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
+ if (IS_ERR(rockchip->core_rst)) {
+ if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing core reset property in node\n");
+ return PTR_ERR(rockchip->core_rst);
}
- if (bus->parent->number == rockchip->root_bus_nr)
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE0_CFG);
- else
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE1_CFG);
-
- if (size == 4) {
- *val = readl(rockchip->reg_base + busdev);
- } else if (size == 2) {
- *val = readw(rockchip->reg_base + busdev);
- } else if (size == 1) {
- *val = readb(rockchip->reg_base + busdev);
- } else {
- *val = 0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
+ rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
+ if (IS_ERR(rockchip->mgmt_rst)) {
+ if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_rst);
}
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
- struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
-{
- u32 busdev;
-
- busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
- if (!IS_ALIGNED(busdev, size))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (bus->parent->number == rockchip->root_bus_nr)
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE0_CFG);
- else
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE1_CFG);
-
- if (size == 4)
- writel(val, rockchip->reg_base + busdev);
- else if (size == 2)
- writew(val, rockchip->reg_base + busdev);
- else if (size == 1)
- writeb(val, rockchip->reg_base + busdev);
- else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- return PCIBIOS_SUCCESSFUL;
-}
-static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
- int size, u32 *val)
-{
- struct rockchip_pcie *rockchip = bus->sysdata;
-
- if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
+ rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
+ "mgmt-sticky");
+ if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+ if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt-sticky reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_sticky_rst);
}
- if (bus->number == rockchip->root_bus_nr)
- return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
-
- return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val);
-}
+ rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(rockchip->pipe_rst)) {
+ if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pipe reset property in node\n");
+ return PTR_ERR(rockchip->pipe_rst);
+ }
-static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
-{
- struct rockchip_pcie *rockchip = bus->sysdata;
+ rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
+ if (IS_ERR(rockchip->pm_rst)) {
+ if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pm reset property in node\n");
+ return PTR_ERR(rockchip->pm_rst);
+ }
- if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
+ rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
+ if (IS_ERR(rockchip->pclk_rst)) {
+ if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pclk reset property in node\n");
+ return PTR_ERR(rockchip->pclk_rst);
+ }
- if (bus->number == rockchip->root_bus_nr)
- return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+ rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_rst)) {
+ if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing aclk reset property in node\n");
+ return PTR_ERR(rockchip->aclk_rst);
+ }
- return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val);
-}
+ if (rockchip->is_rc) {
+ rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->ep_gpio)) {
+ dev_err(dev, "missing ep-gpios property in node\n");
+ return PTR_ERR(rockchip->ep_gpio);
+ }
+ }
-static struct pci_ops rockchip_pcie_ops = {
- .read = rockchip_pcie_rd_conf,
- .write = rockchip_pcie_wr_conf,
-};
+ rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_pcie)) {
+ dev_err(dev, "aclk clock not found\n");
+ return PTR_ERR(rockchip->aclk_pcie);
+ }
-static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
-{
- int curr;
- u32 status, scale, power;
+ rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
+ if (IS_ERR(rockchip->aclk_perf_pcie)) {
+ dev_err(dev, "aclk_perf clock not found\n");
+ return PTR_ERR(rockchip->aclk_perf_pcie);
+ }
- if (IS_ERR(rockchip->vpcie3v3))
- return;
+ rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
+ if (IS_ERR(rockchip->hclk_pcie)) {
+ dev_err(dev, "hclk clock not found\n");
+ return PTR_ERR(rockchip->hclk_pcie);
+ }
- /*
- * Set RC's captured slot power limit and scale if
- * vpcie3v3 available. The default values are both zero
- * which means the software should set these two according
- * to the actual power supply.
- */
- curr = regulator_get_current_limit(rockchip->vpcie3v3);
- if (curr <= 0)
- return;
-
- scale = 3; /* 0.001x */
- curr = curr / 1000; /* convert to mA */
- power = (curr * 3300) / 1000; /* milliwatt */
- while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
- if (!scale) {
- dev_warn(rockchip->dev, "invalid power supply\n");
- return;
- }
- scale--;
- power = power / 10;
+ rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
+ if (IS_ERR(rockchip->clk_pcie_pm)) {
+ dev_err(dev, "pm clock not found\n");
+ return PTR_ERR(rockchip->clk_pcie_pm);
}
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
- status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
- (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
-/**
- * rockchip_pcie_init_port - Initialize hardware
- * @rockchip: PCIe port information
- */
-static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int err, i;
- u32 status;
-
- gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+ u32 regs;
err = reset_control_assert(rockchip->aclk_rst);
if (err) {
@@ -618,13 +237,15 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
PCIE_CLIENT_CONFIG);
- rockchip_pcie_write(rockchip,
- PCIE_CLIENT_CONF_ENABLE |
- PCIE_CLIENT_LINK_TRAIN_ENABLE |
- PCIE_CLIENT_ARI_ENABLE |
- PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
- PCIE_CLIENT_MODE_RC,
- PCIE_CLIENT_CONFIG);
+ regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+ PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
+
+ if (rockchip->is_rc)
+ regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+ else
+ regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
+
+ rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG);
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_power_on(rockchip->phys[i]);
@@ -662,93 +283,6 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
goto err_power_off_phy;
}
- /* Fix the transmitted FTS count desired to exit from L0s. */
- status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
- status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
- (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
-
- rockchip_pcie_set_power_limit(rockchip);
-
- /* Set RC's clock architecture as common clock */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKSTA_SLC << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
- /* Set RC's RCB to 128 */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKCTL_RCB;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
- /* Enable Gen1 training */
- rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
- PCIE_CLIENT_CONFIG);
-
- gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
-
- /* 500ms timeout value should be enough for Gen1/2 training */
- err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
- status, PCIE_LINK_UP(status), 20,
- 500 * USEC_PER_MSEC);
- if (err) {
- dev_err(dev, "PCIe link training gen1 timeout!\n");
- goto err_power_off_phy;
- }
-
- if (rockchip->link_gen == 2) {
- /*
- * Enable retrain for gen2. This should be configured only after
- * gen1 finished.
- */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKCTL_RL;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
-
- err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
- status, PCIE_LINK_IS_GEN2(status), 20,
- 500 * USEC_PER_MSEC);
- if (err)
- dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
- }
-
- /* Check the final link width from negotiated lane counter from MGMT */
- status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
- status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
- PCIE_CORE_PL_CONF_LANE_SHIFT);
- dev_dbg(dev, "current link width is x%d\n", status);
-
- /* Power off unused lane(s) */
- rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
- for (i = 0; i < MAX_LANE_NUM; i++) {
- if (!(rockchip->lanes_map & BIT(i))) {
- dev_dbg(dev, "idling lane %d\n", i);
- phy_power_off(rockchip->phys[i]);
- }
- }
-
- rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
- PCIE_CORE_CONFIG_VENDOR);
- rockchip_pcie_write(rockchip,
- PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
- PCIE_RC_CONFIG_RID_CCR);
-
- /* Clear THP cap's next cap pointer to remove L1 substate cap */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
- status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
-
- /* Clear L0s from RC's link cap */
- if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
- status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
- }
-
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
- status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
- status |= PCIE_RC_CONFIG_DCSR_MPS_256;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
-
return 0;
err_power_off_phy:
while (i--)
@@ -759,156 +293,9 @@ err_exit_phy:
phy_exit(rockchip->phys[i]);
return err;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_init_port);
-static void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
-{
- int i;
-
- for (i = 0; i < MAX_LANE_NUM; i++) {
- /* inactive lanes are already powered off */
- if (rockchip->lanes_map & BIT(i))
- phy_power_off(rockchip->phys[i]);
- phy_exit(rockchip->phys[i]);
- }
-}
-
-static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
-{
- struct rockchip_pcie *rockchip = arg;
- struct device *dev = rockchip->dev;
- u32 reg;
- u32 sub_reg;
-
- reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
- if (reg & PCIE_CLIENT_INT_LOCAL) {
- dev_dbg(dev, "local interrupt received\n");
- sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
- if (sub_reg & PCIE_CORE_INT_PRFPE)
- dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
-
- if (sub_reg & PCIE_CORE_INT_CRFPE)
- dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
-
- if (sub_reg & PCIE_CORE_INT_RRPE)
- dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
-
- if (sub_reg & PCIE_CORE_INT_PRFO)
- dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
-
- if (sub_reg & PCIE_CORE_INT_CRFO)
- dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
-
- if (sub_reg & PCIE_CORE_INT_RT)
- dev_dbg(dev, "replay timer timed out\n");
-
- if (sub_reg & PCIE_CORE_INT_RTR)
- dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
-
- if (sub_reg & PCIE_CORE_INT_PE)
- dev_dbg(dev, "phy error detected on receive side\n");
-
- if (sub_reg & PCIE_CORE_INT_MTR)
- dev_dbg(dev, "malformed TLP received from the link\n");
-
- if (sub_reg & PCIE_CORE_INT_UCR)
- dev_dbg(dev, "malformed TLP received from the link\n");
-
- if (sub_reg & PCIE_CORE_INT_FCE)
- dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
-
- if (sub_reg & PCIE_CORE_INT_CT)
- dev_dbg(dev, "a request timed out waiting for completion\n");
-
- if (sub_reg & PCIE_CORE_INT_UTC)
- dev_dbg(dev, "unmapped TC error\n");
-
- if (sub_reg & PCIE_CORE_INT_MMVC)
- dev_dbg(dev, "MSI mask register changes\n");
-
- rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
- } else if (reg & PCIE_CLIENT_INT_PHY) {
- dev_dbg(dev, "phy link changes\n");
- rockchip_pcie_update_txcredit_mui(rockchip);
- rockchip_pcie_clr_bw_int(rockchip);
- }
-
- rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
- PCIE_CLIENT_INT_STATUS);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
-{
- struct rockchip_pcie *rockchip = arg;
- struct device *dev = rockchip->dev;
- u32 reg;
-
- reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
- if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
- dev_dbg(dev, "legacy done interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_MSG)
- dev_dbg(dev, "message done interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_HOT_RST)
- dev_dbg(dev, "hot reset interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_DPA)
- dev_dbg(dev, "dpa interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_FATAL_ERR)
- dev_dbg(dev, "fatal error interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
- dev_dbg(dev, "no fatal error interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_CORR_ERR)
- dev_dbg(dev, "correctable error interrupt received\n");
-
- if (reg & PCIE_CLIENT_INT_PHY)
- dev_dbg(dev, "phy interrupt received\n");
-
- rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
- PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
- PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
- PCIE_CLIENT_INT_NFATAL_ERR |
- PCIE_CLIENT_INT_CORR_ERR |
- PCIE_CLIENT_INT_PHY),
- PCIE_CLIENT_INT_STATUS);
-
- return IRQ_HANDLED;
-}
-
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
- struct device *dev = rockchip->dev;
- u32 reg;
- u32 hwirq;
- u32 virq;
-
- chained_irq_enter(chip, desc);
-
- reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
- reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
-
- while (reg) {
- hwirq = ffs(reg) - 1;
- reg &= ~BIT(hwirq);
-
- virq = irq_find_mapping(rockchip->irq_domain, hwirq);
- if (virq)
- generic_handle_irq(virq);
- else
- dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
struct phy *phy;
@@ -948,452 +335,22 @@ static int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys);
-static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
{
- int irq, err;
- struct device *dev = rockchip->dev;
- struct platform_device *pdev = to_platform_device(dev);
-
- irq = platform_get_irq_byname(pdev, "sys");
- if (irq < 0) {
- dev_err(dev, "missing sys IRQ resource\n");
- return irq;
- }
-
- err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
- IRQF_SHARED, "pcie-sys", rockchip);
- if (err) {
- dev_err(dev, "failed to request PCIe subsystem IRQ\n");
- return err;
- }
-
- irq = platform_get_irq_byname(pdev, "legacy");
- if (irq < 0) {
- dev_err(dev, "missing legacy IRQ resource\n");
- return irq;
- }
-
- irq_set_chained_handler_and_data(irq,
- rockchip_pcie_legacy_int_handler,
- rockchip);
-
- irq = platform_get_irq_byname(pdev, "client");
- if (irq < 0) {
- dev_err(dev, "missing client IRQ resource\n");
- return irq;
- }
-
- err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
- IRQF_SHARED, "pcie-client", rockchip);
- if (err) {
- dev_err(dev, "failed to request PCIe client IRQ\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * rockchip_pcie_parse_dt - Parse Device Tree
- * @rockchip: PCIe port information
- *
- * Return: '0' on success and error value on failure
- */
-static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct device_node *node = dev->of_node;
- struct resource *regs;
- int err;
-
- regs = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- "axi-base");
- rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
- if (IS_ERR(rockchip->reg_base))
- return PTR_ERR(rockchip->reg_base);
-
- regs = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- "apb-base");
- rockchip->apb_base = devm_ioremap_resource(dev, regs);
- if (IS_ERR(rockchip->apb_base))
- return PTR_ERR(rockchip->apb_base);
-
- err = rockchip_pcie_get_phys(rockchip);
- if (err)
- return err;
-
- rockchip->lanes = 1;
- err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
- if (!err && (rockchip->lanes == 0 ||
- rockchip->lanes == 3 ||
- rockchip->lanes > 4)) {
- dev_warn(dev, "invalid num-lanes, default to use one lane\n");
- rockchip->lanes = 1;
- }
-
- rockchip->link_gen = of_pci_get_max_link_speed(node);
- if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
- rockchip->link_gen = 2;
-
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
-
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
-
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
-
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
-
- rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
- if (IS_ERR(rockchip->ep_gpio)) {
- dev_err(dev, "missing ep-gpios property in node\n");
- return PTR_ERR(rockchip->ep_gpio);
- }
-
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
-
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
-
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
-
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
-
- err = rockchip_pcie_setup_irq(rockchip);
- if (err)
- return err;
-
- rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
- if (IS_ERR(rockchip->vpcie12v)) {
- if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie12v regulator found\n");
- }
-
- rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
- if (IS_ERR(rockchip->vpcie3v3)) {
- if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie3v3 regulator found\n");
- }
-
- rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
- if (IS_ERR(rockchip->vpcie1v8)) {
- if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie1v8 regulator found\n");
- }
-
- rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
- if (IS_ERR(rockchip->vpcie0v9)) {
- if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(dev, "no vpcie0v9 regulator found\n");
- }
-
- return 0;
-}
-
-static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- int err;
-
- if (!IS_ERR(rockchip->vpcie12v)) {
- err = regulator_enable(rockchip->vpcie12v);
- if (err) {
- dev_err(dev, "fail to enable vpcie12v regulator\n");
- goto err_out;
- }
- }
-
- if (!IS_ERR(rockchip->vpcie3v3)) {
- err = regulator_enable(rockchip->vpcie3v3);
- if (err) {
- dev_err(dev, "fail to enable vpcie3v3 regulator\n");
- goto err_disable_12v;
- }
- }
-
- if (!IS_ERR(rockchip->vpcie1v8)) {
- err = regulator_enable(rockchip->vpcie1v8);
- if (err) {
- dev_err(dev, "fail to enable vpcie1v8 regulator\n");
- goto err_disable_3v3;
- }
- }
-
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- goto err_disable_1v8;
- }
- }
-
- return 0;
-
-err_disable_1v8:
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
-err_disable_3v3:
- if (!IS_ERR(rockchip->vpcie3v3))
- regulator_disable(rockchip->vpcie3v3);
-err_disable_12v:
- if (!IS_ERR(rockchip->vpcie12v))
- regulator_disable(rockchip->vpcie12v);
-err_out:
- return err;
-}
-
-static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
-{
- rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
- (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
- rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
- PCIE_CORE_INT_MASK);
-
- rockchip_pcie_enable_bw_int(rockchip);
-}
-
-static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops intx_domain_ops = {
- .map = rockchip_pcie_intx_map,
-};
-
-static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- struct device_node *intc = of_get_next_child(dev->of_node, NULL);
-
- if (!intc) {
- dev_err(dev, "missing child interrupt-controller node\n");
- return -EINVAL;
- }
-
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
- if (!rockchip->irq_domain) {
- dev_err(dev, "failed to get a INTx IRQ domain\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
- int region_no, int type, u8 num_pass_bits,
- u32 lower_addr, u32 upper_addr)
-{
- u32 ob_addr_0;
- u32 ob_addr_1;
- u32 ob_desc_0;
- u32 aw_offset;
-
- if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
- return -EINVAL;
- if (num_pass_bits + 1 < 8)
- return -EINVAL;
- if (num_pass_bits > 63)
- return -EINVAL;
- if (region_no == 0) {
- if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
- return -EINVAL;
- }
- if (region_no != 0) {
- if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
- return -EINVAL;
- }
-
- aw_offset = (region_no << OB_REG_SIZE_SHIFT);
-
- ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
- ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
- ob_addr_1 = upper_addr;
- ob_desc_0 = (1 << 23 | type);
-
- rockchip_pcie_write(rockchip, ob_addr_0,
- PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
- rockchip_pcie_write(rockchip, ob_addr_1,
- PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
- rockchip_pcie_write(rockchip, ob_desc_0,
- PCIE_CORE_OB_REGION_DESC0 + aw_offset);
- rockchip_pcie_write(rockchip, 0,
- PCIE_CORE_OB_REGION_DESC1 + aw_offset);
-
- return 0;
-}
-
-static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
- int region_no, u8 num_pass_bits,
- u32 lower_addr, u32 upper_addr)
-{
- u32 ib_addr_0;
- u32 ib_addr_1;
- u32 aw_offset;
-
- if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
- return -EINVAL;
- if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
- return -EINVAL;
- if (num_pass_bits > 63)
- return -EINVAL;
-
- aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
-
- ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
- ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
- ib_addr_1 = upper_addr;
-
- rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
- rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
-
- return 0;
-}
-
-static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->dev;
- int offset;
- int err;
- int reg_no;
-
- rockchip_pcie_cfg_configuration_accesses(rockchip,
- AXI_WRAPPER_TYPE0_CFG);
-
- for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
- AXI_WRAPPER_MEM_WRITE,
- 20 - 1,
- rockchip->mem_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC mem outbound ATU failed\n");
- return err;
- }
- }
-
- err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
- if (err) {
- dev_err(dev, "program RC mem inbound ATU failed\n");
- return err;
- }
-
- offset = rockchip->mem_size >> 20;
- for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip,
- reg_no + 1 + offset,
- AXI_WRAPPER_IO_WRITE,
- 20 - 1,
- rockchip->io_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC io outbound ATU failed\n");
- return err;
- }
- }
-
- /* assign message regions */
- rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
- AXI_WRAPPER_NOR_MSG,
- 20 - 1, 0, 0);
-
- rockchip->msg_bus_addr = rockchip->mem_bus_addr +
- ((reg_no + offset) << 20);
- return err;
-}
-
-static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
-{
- u32 value;
- int err;
-
- /* send PME_TURN_OFF message */
- writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+ int i;
- /* read LTSSM and wait for falling into L2 link state */
- err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
- value, PCIE_LINK_IS_L2(value), 20,
- jiffies_to_usecs(5 * HZ));
- if (err) {
- dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
- return err;
+ for (i = 0; i < MAX_LANE_NUM; i++) {
+ /* inactive lanes are already powered off */
+ if (rockchip->lanes_map & BIT(i))
+ phy_power_off(rockchip->phys[i]);
+ phy_exit(rockchip->phys[i]);
}
-
- return 0;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys);
 
-static int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int err;
@@ -1432,8 +389,9 @@ err_aclk_perf_pcie:
clk_disable_unprepare(rockchip->aclk_pcie);
return err;
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
 
-static void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(void *data)
{
struct rockchip_pcie *rockchip = data;
@@ -1442,267 +400,25 @@ static void rockchip_pcie_disable_clocks(void *data)
clk_disable_unprepare(rockchip->aclk_perf_pcie);
clk_disable_unprepare(rockchip->aclk_pcie);
}
+EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
 
-static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
-{
- struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
- int ret;
-
- /* disable core and cli int since we don't need to ack PME_ACK */
- rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
- PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
- rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
-
- ret = rockchip_pcie_wait_l2(rockchip);
- if (ret) {
- rockchip_pcie_enable_interrupts(rockchip);
- return ret;
- }
-
- rockchip_pcie_deinit_phys(rockchip);
-
- rockchip_pcie_disable_clocks(rockchip);
-
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
-
- return ret;
-}
-
-static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
-{
- struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
- int err;
-
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- return err;
- }
- }
-
- err = rockchip_pcie_enable_clocks(rockchip);
- if (err)
- goto err_disable_0v9;
-
- err = rockchip_pcie_init_port(rockchip);
- if (err)
- goto err_pcie_resume;
-
- err = rockchip_pcie_cfg_atu(rockchip);
- if (err)
- goto err_err_deinit_port;
-
- /* Need this to enter L1 again */
- rockchip_pcie_update_txcredit_mui(rockchip);
- rockchip_pcie_enable_interrupts(rockchip);
-
- return 0;
-
-err_err_deinit_port:
- rockchip_pcie_deinit_phys(rockchip);
-err_pcie_resume:
- rockchip_pcie_disable_clocks(rockchip);
-err_disable_0v9:
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
- return err;
-}
-
-static int rockchip_pcie_probe(struct platform_device *pdev)
-{
- struct rockchip_pcie *rockchip;
- struct device *dev = &pdev->dev;
- struct pci_bus *bus, *child;
- struct pci_host_bridge *bridge;
- struct resource_entry *win;
- resource_size_t io_base;
- struct resource *mem;
- struct resource *io;
- int err;
-
- LIST_HEAD(res);
-
- if (!dev->of_node)
- return -ENODEV;
-
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
- if (!bridge)
- return -ENOMEM;
-
- rockchip = pci_host_bridge_priv(bridge);
-
- platform_set_drvdata(pdev, rockchip);
- rockchip->dev = dev;
-
- err = rockchip_pcie_parse_dt(rockchip);
- if (err)
- return err;
-
- err = rockchip_pcie_enable_clocks(rockchip);
- if (err)
- return err;
-
- err = rockchip_pcie_set_vpcie(rockchip);
- if (err) {
- dev_err(dev, "failed to set vpcie regulator\n");
- goto err_set_vpcie;
- }
-
- err = rockchip_pcie_init_port(rockchip);
- if (err)
- goto err_vpcie;
-
- rockchip_pcie_enable_interrupts(rockchip);
-
- err = rockchip_pcie_init_irq_domain(rockchip);
- if (err < 0)
- goto err_deinit_port;
-
- err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
- &res, &io_base);
- if (err)
- goto err_remove_irq_domain;
-
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto err_free_res;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- io = win->res;
- io->name = "I/O";
- rockchip->io_size = resource_size(io);
- rockchip->io_bus_addr = io->start - win->offset;
- err = pci_remap_iospace(io, io_base);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, io);
- continue;
- }
- rockchip->io = io;
- break;
- case IORESOURCE_MEM:
- mem = win->res;
- mem->name = "MEM";
- rockchip->mem_size = resource_size(mem);
- rockchip->mem_bus_addr = mem->start - win->offset;
- break;
- case IORESOURCE_BUS:
- rockchip->root_bus_nr = win->res->start;
- break;
- default:
- continue;
- }
- }
-
- err = rockchip_pcie_cfg_atu(rockchip);
- if (err)
- goto err_unmap_iospace;
-
- rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
- if (!rockchip->msg_region) {
- err = -ENOMEM;
- goto err_unmap_iospace;
- }
-
- list_splice_init(&res, &bridge->windows);
- bridge->dev.parent = dev;
- bridge->sysdata = rockchip;
- bridge->busnr = 0;
- bridge->ops = &rockchip_pcie_ops;
- bridge->map_irq = of_irq_parse_and_map_pci;
- bridge->swizzle_irq = pci_common_swizzle;
-
- err = pci_scan_root_bus_bridge(bridge);
- if (err < 0)
- goto err_unmap_iospace;
-
- bus = bridge->bus;
-
- rockchip->root_bus = bus;
-
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(bus);
- return 0;
-
-err_unmap_iospace:
- pci_unmap_iospace(rockchip->io);
-err_free_res:
- pci_free_resource_list(&res);
-err_remove_irq_domain:
- irq_domain_remove(rockchip->irq_domain);
-err_deinit_port:
- rockchip_pcie_deinit_phys(rockchip);
-err_vpcie:
- if (!IS_ERR(rockchip->vpcie12v))
- regulator_disable(rockchip->vpcie12v);
- if (!IS_ERR(rockchip->vpcie3v3))
- regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
-err_set_vpcie:
- rockchip_pcie_disable_clocks(rockchip);
- return err;
-}
-
-static int rockchip_pcie_remove(struct platform_device *pdev)
+void rockchip_pcie_cfg_configuration_accesses(
+ struct rockchip_pcie *rockchip, u32 type)
{
-	struct device *dev = &pdev->dev;
-	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
-
-	pci_stop_root_bus(rockchip->root_bus);
-	pci_remove_root_bus(rockchip->root_bus);
-	pci_unmap_iospace(rockchip->io);
-	irq_domain_remove(rockchip->irq_domain);
-
-	rockchip_pcie_deinit_phys(rockchip);
-
-	rockchip_pcie_disable_clocks(rockchip);
-	if (!IS_ERR(rockchip->vpcie12v))
-		regulator_disable(rockchip->vpcie12v);
-	if (!IS_ERR(rockchip->vpcie3v3))
-		regulator_disable(rockchip->vpcie3v3);
-	if (!IS_ERR(rockchip->vpcie1v8))
-		regulator_disable(rockchip->vpcie1v8);
-	if (!IS_ERR(rockchip->vpcie0v9))
-		regulator_disable(rockchip->vpcie0v9);
-
-	return 0;
+	u32 ob_desc_0;
+
+	/* Configuration Accesses for region 0 */
+	rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
+
+	rockchip_pcie_write(rockchip,
+			    (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+			    PCIE_CORE_OB_REGION_ADDR0);
+	rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+			    PCIE_CORE_OB_REGION_ADDR1);
+	ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
+	ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
+	ob_desc_0 |= (type | (0x1 << 23));
+	rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
+	rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
}
-
-static const struct dev_pm_ops rockchip_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
- rockchip_pcie_resume_noirq)
-};
-
-static const struct of_device_id rockchip_pcie_of_match[] = {
- { .compatible = "rockchip,rk3399-pcie", },
- {}
-};
-MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
-
-static struct platform_driver rockchip_pcie_driver = {
- .driver = {
- .name = "rockchip-pcie",
- .of_match_table = rockchip_pcie_of_match,
- .pm = &rockchip_pcie_pm_ops,
- },
- .probe = rockchip_pcie_probe,
- .remove = rockchip_pcie_remove,
-};
-module_platform_driver(rockchip_pcie_driver);
-
-MODULE_AUTHOR("Rockchip Inc");
-MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
-MODULE_LICENSE("GPL v2");
+EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses);
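The hunks above turn the former static helpers into exported symbols, apparently so that the host and endpoint flavours of the driver can link against the shared core. A minimal sketch of how a consumer might chain those helpers, assuming only the declarations in the new pcie-rockchip.h below; example_bring_up_port() is a hypothetical name and the error handling is far leaner than in the real drivers:

#include "pcie-rockchip.h"

static int example_bring_up_port(struct rockchip_pcie *rockchip)
{
	int err;

	/* pick up clocks, resets, PHYs and regulators from the DT node */
	err = rockchip_pcie_parse_dt(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		return err;

	/* deassert resets and power up the PHYs */
	err = rockchip_pcie_init_port(rockchip);
	if (err) {
		rockchip_pcie_disable_clocks(rockchip);
		return err;
	}

	/* route outbound region 0 to type 0 configuration requests */
	rockchip_pcie_cfg_configuration_accesses(rockchip,
						 AXI_WRAPPER_TYPE0_CFG);
	return 0;
}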
diff --git a/drivers/pci/host/pcie-rockchip.h b/drivers/pci/host/pcie-rockchip.h
new file mode 100644
index 0000000000000..8e87a059ce73d
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip.h
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *
+ */
+
+#ifndef _PCIE_ROCKCHIP_H
+#define _PCIE_ROCKCHIP_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
+ * bits. This allows atomic updates of the register without locking.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+
+#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+#define MAX_LANE_NUM 4
+#define MAX_REGION_LIMIT 32
+#define MIN_EP_APERTURE 28
+
+#define PCIE_CLIENT_BASE 0x0
+#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
+#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
+#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
+#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
+#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
+#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
+#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
+#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
+#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
+#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
+#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
+#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
+#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
+#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
+#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
+#define PCIE_CLIENT_INTR_SHIFT 5
+#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
+#define PCIE_CLIENT_INT_MSG BIT(14)
+#define PCIE_CLIENT_INT_HOT_RST BIT(13)
+#define PCIE_CLIENT_INT_DPA BIT(12)
+#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
+#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
+#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
+#define PCIE_CLIENT_INT_INTD BIT(8)
+#define PCIE_CLIENT_INT_INTC BIT(7)
+#define PCIE_CLIENT_INT_INTB BIT(6)
+#define PCIE_CLIENT_INT_INTA BIT(5)
+#define PCIE_CLIENT_INT_LOCAL BIT(4)
+#define PCIE_CLIENT_INT_UDMA BIT(3)
+#define PCIE_CLIENT_INT_PHY BIT(2)
+#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
+#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
+
+#define PCIE_CLIENT_INT_LEGACY \
+ (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
+ PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
+
+#define PCIE_CLIENT_INT_CLI \
+ (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
+ PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
+ PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
+ PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
+ PCIE_CLIENT_INT_PHY)
+
+#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
+#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
+#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
+#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
+#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
+#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
+#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
+#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
+#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
+#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
+ (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
+#define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
+#define PCIE_CORE_LANE_MAP_MASK 0x0000000f
+#define PCIE_CORE_LANE_MAP_REVERSE BIT(16)
+#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
+#define PCIE_CORE_INT_PRFPE BIT(0)
+#define PCIE_CORE_INT_CRFPE BIT(1)
+#define PCIE_CORE_INT_RRPE BIT(2)
+#define PCIE_CORE_INT_PRFO BIT(3)
+#define PCIE_CORE_INT_CRFO BIT(4)
+#define PCIE_CORE_INT_RT BIT(5)
+#define PCIE_CORE_INT_RTR BIT(6)
+#define PCIE_CORE_INT_PE BIT(7)
+#define PCIE_CORE_INT_MTR BIT(8)
+#define PCIE_CORE_INT_UCR BIT(9)
+#define PCIE_CORE_INT_FCE BIT(10)
+#define PCIE_CORE_INT_CT BIT(11)
+#define PCIE_CORE_INT_UTC BIT(18)
+#define PCIE_CORE_INT_MMVC BIT(19)
+#define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44)
+#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
+#define PCIE_CORE_PHY_FUNC_CFG (PCIE_CORE_CTRL_MGMT_BASE + 0x2c0)
+#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED 0x0
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS 0x1
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS 0x4
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS 0x6
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
+
+#define PCIE_CORE_INT \
+ (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
+ PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
+ PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
+ PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
+ PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
+ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
+ PCIE_CORE_INT_MMVC)
+
+#define PCIE_RC_RP_ATS_BASE 0x400000
+#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
+#define PCIE_RC_CONFIG_BASE 0xa00000
+#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
+#define PCIE_RC_CONFIG_SCC_SHIFT 16
+#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
+#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
+#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
+#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
+#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
+#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
+#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
+#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
+#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
+#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
+#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
+
+#define PCIE_CORE_AXI_CONF_BASE 0xc00000
+#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
+#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
+#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
+#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
+
+#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
+#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
+#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
+
+/* Size of one AXI Region (not Region 0) */
+#define AXI_REGION_SIZE BIT(20)
+/* Size of Region 0, equal to sum of sizes of other regions */
+#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
+#define OB_REG_SIZE_SHIFT 5
+#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
+#define AXI_WRAPPER_IO_WRITE 0x6
+#define AXI_WRAPPER_MEM_WRITE 0x2
+#define AXI_WRAPPER_TYPE0_CFG 0xa
+#define AXI_WRAPPER_TYPE1_CFG 0xb
+#define AXI_WRAPPER_NOR_MSG 0xc
+
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
+#define MIN_AXI_ADDR_BITS_PASSED 8
+#define PCIE_RC_SEND_PME_OFF 0x11960
+#define ROCKCHIP_VENDOR_ID 0x1d87
+#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
+#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
+#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
+#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
+#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
+ (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
+ PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
+#define PCIE_LINK_IS_L2(x) \
+ (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_UP(x) \
+ (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
+#define PCIE_LINK_IS_GEN2(x) \
+ (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
+
+#define RC_REGION_0_ADDR_TRANS_H 0x00000000
+#define RC_REGION_0_ADDR_TRANS_L 0x00000000
+#define RC_REGION_0_PASS_BITS (25 - 1)
+#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
+#define MAX_AXI_WRAPPER_REGION_NUM 33
+
+#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2
+#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3
+#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4
+#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27
+#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5)
+#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
+ (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
+#define ROCKCHIP_PCIE_MSG_CODE_MASK GENMASK(15, 8)
+#define ROCKCHIP_PCIE_MSG_CODE(code) \
+ (((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK)
+#define ROCKCHIP_PCIE_MSG_NO_DATA BIT(16)
+
+#define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4
+#define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24)
+#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
+#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
+#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+ (PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ (((devfn) << 12) & \
+ ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ (((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+ (((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+ (PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
+
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \
+ (PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \
+ (PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+ (GENMASK(4, 0) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << ((b) * 8)) & \
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+ (GENMASK(7, 5) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 8 + 5)) & \
+ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+struct rockchip_pcie {
+ void __iomem *reg_base; /* DT axi-base */
+ void __iomem *apb_base; /* DT apb-base */
+ bool legacy_phy;
+ struct phy *phys[MAX_LANE_NUM];
+ struct reset_control *core_rst;
+ struct reset_control *mgmt_rst;
+ struct reset_control *mgmt_sticky_rst;
+ struct reset_control *pipe_rst;
+ struct reset_control *pm_rst;
+ struct reset_control *aclk_rst;
+ struct reset_control *pclk_rst;
+ struct clk *aclk_pcie;
+ struct clk *aclk_perf_pcie;
+ struct clk *hclk_pcie;
+ struct clk *clk_pcie_pm;
+ struct regulator *vpcie12v; /* 12V power supply */
+ struct regulator *vpcie3v3; /* 3.3V power supply */
+ struct regulator *vpcie1v8; /* 1.8V power supply */
+ struct regulator *vpcie0v9; /* 0.9V power supply */
+ struct gpio_desc *ep_gpio;
+ u32 lanes;
+ u8 lanes_map;
+ u8 root_bus_nr;
+ int link_gen;
+ struct device *dev;
+ struct irq_domain *irq_domain;
+ int offset;
+ struct pci_bus *root_bus;
+ struct resource *io;
+ phys_addr_t io_bus_addr;
+ u32 io_size;
+ void __iomem *msg_region;
+ u32 mem_size;
+ phys_addr_t msg_bus_addr;
+ phys_addr_t mem_bus_addr;
+ bool is_rc;
+ struct resource *mem_res;
+};
+
+static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
+{
+ return readl(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
+{
+ writel(val, rockchip->apb_base + reg);
+}
+
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip);
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
+void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_cfg_configuration_accesses(
+ struct rockchip_pcie *rockchip, u32 type);
+
+#endif /* _PCIE_ROCKCHIP_H */
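The comment at the top of the header explains the HIWORD_UPDATE write-mask scheme, and the PCIE_ECAM_* macros pack bus/device/function/register into a configuration-space offset. A standalone arithmetic check (plain userspace C, not part of the patch) that reproduces those encodings with the same macro definitions:

#include <stdio.h>
#include <stdint.h>

/* copies of the definitions from pcie-rockchip.h above */
#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)

#define PCIE_ECAM_BUS(x)	(((x) & 0xff) << 20)
#define PCIE_ECAM_DEV(x)	(((x) & 0x1f) << 15)
#define PCIE_ECAM_FUNC(x)	(((x) & 0x7) << 12)
#define PCIE_ECAM_REG(x)	(((x) & 0xfff) << 0)
#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
	(PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
	 PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))

int main(void)
{
	/*
	 * Mask 0x0001 in the high half, value in the low half: only bit 0
	 * of PCIE_CLIENT_CONFIG is written, no read-modify-write needed.
	 */
	uint32_t conf_enable  = HIWORD_UPDATE_BIT(0x0001);	/* 0x00010001 */
	uint32_t conf_disable = HIWORD_UPDATE(0x0001, 0);	/* 0x00010000 */
	/* bus 1, device 0, function 0, register 0x10 (BAR0) */
	uint32_t ecam = PCIE_ECAM_ADDR(1, 0, 0, 0x10);		/* 0x00100010 */

	printf("CONF_ENABLE  = 0x%08x\n", conf_enable);
	printf("CONF_DISABLE = 0x%08x\n", conf_disable);
	printf("ECAM offset  = 0x%08x\n", ecam);
	return 0;
}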
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4839ae578711b..6a4bbb5b3de00 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
+#include "../pci.h"
+
/* Bridge core config registers */
#define BRCFG_PCIE_RX0 0x00000000
#define BRCFG_INTERRUPT 0x00000010
@@ -825,7 +827,6 @@ static const struct of_device_id nwl_pcie_of_match[] = {
static int nwl_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
struct nwl_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
@@ -855,7 +856,8 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
- err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 0ad188effc091..b110a3a814e35 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -23,6 +23,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
/* Register definitions */
#define XILINX_PCIE_REG_BIR 0x00000130
#define XILINX_PCIE_REG_IDR 0x00000138
@@ -643,8 +645,8 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return err;
}
- err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, &res,
- &iobase);
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
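Both Xilinx hunks make the same conversion: of_pci_get_host_bridge_resources() is replaced by the device-managed devm_of_pci_get_host_bridge_resources(), which is why both files gain the "../pci.h" include. A sketch of the resulting pattern, assuming a driver living alongside these in drivers/pci/host; example_parse_bridge_windows() is a hypothetical name:

#include <linux/pci.h>
#include <linux/platform_device.h>

#include "../pci.h"

static int example_parse_bridge_windows(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	resource_size_t iobase = 0;
	LIST_HEAD(res);
	int err;

	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
						    &iobase);
	if (err)
		return err;

	/*
	 * The resource entries are devres-managed now, so error paths and
	 * driver teardown no longer need an explicit
	 * pci_free_resource_list().
	 */
	return devm_request_pci_bus_resources(dev, &res);
}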
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 930a8fa08bd66..942b64fc7f1f0 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -24,6 +24,28 @@
#define VMD_MEMBAR1 2
#define VMD_MEMBAR2 4
+#define PCI_REG_VMCAP 0x40
+#define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1)
+#define PCI_REG_VMCONFIG 0x44
+#define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3)
+#define PCI_REG_VMLOCK 0x70
+#define MB2_SHADOW_EN(vmlock) (vmlock & 0x2)
+
+enum vmd_features {
+ /*
+ * Device may contain registers which hint the physical location of the
+ * membars, in order to allow proper address translation during
+ * resource assignment to enable guest virtualization
+ */
+ VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0),
+
+ /*
+ * Device may provide root port configuration information which limits
+ * bus numbering
+ */
+ VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1),
+};
+
/*
* Lock for manipulating VMD IRQ lists.
*/
@@ -546,7 +568,7 @@ static int vmd_find_free_domain(void)
return domain + 1;
}
-static int vmd_enable_domain(struct vmd_dev *vmd)
+static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
struct pci_sysdata *sd = &vmd->sysdata;
struct fwnode_handle *fn;
@@ -554,12 +576,57 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
u32 upper_bits;
unsigned long flags;
LIST_HEAD(resources);
+ resource_size_t offset[2] = {0};
+ resource_size_t membar2_offset = 0x2000, busn_start = 0;
+
+ /*
+ * Shadow registers may exist in certain VMD device ids which allow
+ * guests to correctly assign host physical addresses to the root ports
+ * and child devices. These registers will either return the host value
+ * or 0, depending on an enable bit in the VMD device.
+ */
+ if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
+ u32 vmlock;
+ int ret;
+
+ membar2_offset = 0x2018;
+ ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
+ if (ret || vmlock == ~0)
+ return -ENODEV;
+
+ if (MB2_SHADOW_EN(vmlock)) {
+ void __iomem *membar2;
+
+ membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
+ if (!membar2)
+ return -ENOMEM;
+ offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
+ readq(membar2 + 0x2008);
+ offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
+ readq(membar2 + 0x2010);
+ pci_iounmap(vmd->dev, membar2);
+ }
+ }
+
+ /*
+ * Certain VMD devices may have a root port configuration option which
+ * limits the bus range to between 0-127 or 128-255
+ */
+ if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
+ u32 vmcap, vmconfig;
+
+ pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
+ pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
+ if (BUS_RESTRICT_CAP(vmcap) &&
+ (BUS_RESTRICT_CFG(vmconfig) == 0x1))
+ busn_start = 128;
+ }
res = &vmd->dev->resource[VMD_CFGBAR];
vmd->resources[0] = (struct resource) {
.name = "VMD CFGBAR",
- .start = 0,
- .end = (resource_size(res) >> 20) - 1,
+ .start = busn_start,
+ .end = busn_start + (resource_size(res) >> 20) - 1,
.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
};
@@ -600,7 +667,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
flags &= ~IORESOURCE_MEM_64;
vmd->resources[2] = (struct resource) {
.name = "VMD MEMBAR2",
- .start = res->start + 0x2000,
+ .start = res->start + membar2_offset,
.end = res->end,
.flags = flags,
.parent = res,
@@ -624,10 +691,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
return -ENODEV;
pci_add_resource(&resources, &vmd->resources[0]);
- pci_add_resource(&resources, &vmd->resources[1]);
- pci_add_resource(&resources, &vmd->resources[2]);
- vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
- &resources);
+ pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
+ pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
+
+ vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
+ sd, &resources);
if (!vmd->bus) {
pci_free_resource_list(&resources);
irq_domain_remove(vmd->irq_domain);
@@ -713,7 +781,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
- err = vmd_enable_domain(vmd);
+ err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
if (err)
return err;
@@ -778,7 +846,10 @@ static int vmd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
static const struct pci_device_id vmd_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
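The two feature flags introduced above gate the new VMCAP/VMCONFIG/VMLOCK decoding in vmd_enable_domain(). A standalone illustration (plain userspace C, register values invented for the example) of how those bit fields translate into the starting bus number and the MEMBAR shadow decision:

#include <stdio.h>
#include <stdint.h>

/* copies of the macros added above */
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

int main(void)
{
	uint32_t vmcap = 0x1;		/* device can restrict the bus range */
	uint32_t vmconfig = 0x100;	/* firmware selected the 128-255 range */
	uint32_t vmlock = 0x2;		/* MEMBAR2 shadow registers enabled */
	uint32_t busn_start = 0;

	if (BUS_RESTRICT_CAP(vmcap) && BUS_RESTRICT_CFG(vmconfig) == 0x1)
		busn_start = 128;

	printf("child buses start at %u\n", busn_start);
	printf("membar shadow: %s\n",
	       MB2_SHADOW_EN(vmlock) ? "read offsets from MEMBAR2"
				     : "not used");
	return 0;
}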