Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mci/Kconfig                   |   9
-rw-r--r--  drivers/mci/Makefile                  |   1
-rw-r--r--  drivers/mci/stm32_sdmmc2.c            | 672
-rw-r--r--  drivers/mfd/stpmic1.c                 |   1
-rw-r--r--  drivers/net/Kconfig                   |  27
-rw-r--r--  drivers/net/Makefile                  |   3
-rw-r--r--  drivers/net/designware_eqos.c         | 876
-rw-r--r--  drivers/net/designware_eqos.h         |  84
-rw-r--r--  drivers/net/designware_stm32.c        | 245
-rw-r--r--  drivers/net/designware_tegra186.c     | 347
-rw-r--r--  drivers/nvmem/Kconfig                 |   8
-rw-r--r--  drivers/nvmem/Makefile                |   5
-rw-r--r--  drivers/nvmem/bsec.c                  | 221
-rw-r--r--  drivers/pinctrl/pinctrl-at91-pio4.c   |   4
-rw-r--r--  drivers/pinctrl/pinctrl-bcm2835.c     |   2
-rw-r--r--  drivers/pinctrl/pinctrl-stm32.c       | 182
-rw-r--r--  drivers/regulator/Kconfig             |   9
-rw-r--r--  drivers/regulator/Makefile            |   4
-rw-r--r--  drivers/regulator/helpers.c           | 186
-rw-r--r--  drivers/regulator/of_regulator.c      | 229
-rw-r--r--  drivers/regulator/stpmic1_regulator.c | 436
-rw-r--r--  drivers/watchdog/stpmic1_wdt.c        |   5
22 files changed, 3466 insertions(+), 90 deletions(-)
diff --git a/drivers/mci/Kconfig b/drivers/mci/Kconfig
index 08c8c84e8c..4a71a46097 100644
--- a/drivers/mci/Kconfig
+++ b/drivers/mci/Kconfig
@@ -153,4 +153,13 @@ config MMC_SPI_CRC_ON
help
Enable CRC protection for transfers
+config MCI_STM32_SDMMC2
+ bool "STMicroelectronics STM32H7 SD/MMC Host Controller support"
+ depends on ARM_AMBA
+ depends on RESET_CONTROLLER
+ help
+ This selects support for the SD/MMC controller on STM32H7 SoCs.
+ If you have a board based on such a SoC with an SD/MMC slot,
+ say Y or M here.
+
endif
diff --git a/drivers/mci/Makefile b/drivers/mci/Makefile
index 25a1d073dc..04c1287fee 100644
--- a/drivers/mci/Makefile
+++ b/drivers/mci/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_MCI_TEGRA) += tegra-sdmmc.o
obj-$(CONFIG_MCI_SPI) += mci_spi.o
obj-$(CONFIG_MCI_DW) += dw_mmc.o
obj-$(CONFIG_MCI_MMCI) += mmci.o
+obj-$(CONFIG_MCI_STM32_SDMMC2) += stm32_sdmmc2.o
diff --git a/drivers/mci/stm32_sdmmc2.c b/drivers/mci/stm32_sdmmc2.c
new file mode 100644
index 0000000000..1a41c34d24
--- /dev/null
+++ b/drivers/mci/stm32_sdmmc2.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Patrice Chotard, <patrice.chotard@st.com> for STMicroelectronics.
+ */
+
+#include <common.h>
+#include <dma.h>
+#include <init.h>
+#include <linux/amba/bus.h>
+#include <linux/iopoll.h>
+#include <linux/log2.h>
+#include <linux/reset.h>
+#include <mci.h>
+
+#define DRIVER_NAME "stm32_sdmmc"
+
+/* SDMMC REGISTERS OFFSET */
+#define SDMMC_POWER 0x00 /* SDMMC power control */
+#define SDMMC_CLKCR 0x04 /* SDMMC clock control */
+#define SDMMC_ARG 0x08 /* SDMMC argument */
+#define SDMMC_CMD 0x0C /* SDMMC command */
+#define SDMMC_RESP1 0x14 /* SDMMC response 1 */
+#define SDMMC_RESP2 0x18 /* SDMMC response 2 */
+#define SDMMC_RESP3 0x1C /* SDMMC response 3 */
+#define SDMMC_RESP4 0x20 /* SDMMC response 4 */
+#define SDMMC_DTIMER 0x24 /* SDMMC data timer */
+#define SDMMC_DLEN 0x28 /* SDMMC data length */
+#define SDMMC_DCTRL 0x2C /* SDMMC data control */
+#define SDMMC_DCOUNT 0x30 /* SDMMC data counter */
+#define SDMMC_STA 0x34 /* SDMMC status */
+#define SDMMC_ICR 0x38 /* SDMMC interrupt clear */
+#define SDMMC_MASK 0x3C /* SDMMC mask */
+#define SDMMC_IDMACTRL 0x50 /* SDMMC DMA control */
+#define SDMMC_IDMABASE0 0x58 /* SDMMC DMA buffer 0 base address */
+
+/* SDMMC_POWER register */
+#define SDMMC_POWER_PWRCTRL_MASK GENMASK(1, 0)
+#define SDMMC_POWER_PWRCTRL_OFF 0
+#define SDMMC_POWER_PWRCTRL_CYCLE 2
+#define SDMMC_POWER_PWRCTRL_ON 3
+#define SDMMC_POWER_VSWITCH BIT(2)
+#define SDMMC_POWER_VSWITCHEN BIT(3)
+#define SDMMC_POWER_DIRPOL BIT(4)
+
+/* SDMMC_CLKCR register */
+#define SDMMC_CLKCR_CLKDIV GENMASK(9, 0)
+#define SDMMC_CLKCR_CLKDIV_MAX SDMMC_CLKCR_CLKDIV
+#define SDMMC_CLKCR_PWRSAV BIT(12)
+#define SDMMC_CLKCR_WIDBUS_4 BIT(14)
+#define SDMMC_CLKCR_WIDBUS_8 BIT(15)
+#define SDMMC_CLKCR_NEGEDGE BIT(16)
+#define SDMMC_CLKCR_HWFC_EN BIT(17)
+#define SDMMC_CLKCR_DDR BIT(18)
+#define SDMMC_CLKCR_BUSSPEED BIT(19)
+#define SDMMC_CLKCR_SELCLKRX_MASK GENMASK(21, 20)
+#define SDMMC_CLKCR_SELCLKRX_CK 0
+#define SDMMC_CLKCR_SELCLKRX_CKIN BIT(20)
+#define SDMMC_CLKCR_SELCLKRX_FBCK BIT(21)
+
+/* SDMMC_CMD register */
+#define SDMMC_CMD_CMDINDEX GENMASK(5, 0)
+#define SDMMC_CMD_CMDTRANS BIT(6)
+#define SDMMC_CMD_CMDSTOP BIT(7)
+#define SDMMC_CMD_WAITRESP GENMASK(9, 8)
+#define SDMMC_CMD_WAITRESP_0 BIT(8)
+#define SDMMC_CMD_WAITRESP_1 BIT(9)
+#define SDMMC_CMD_WAITINT BIT(10)
+#define SDMMC_CMD_WAITPEND BIT(11)
+#define SDMMC_CMD_CPSMEN BIT(12)
+#define SDMMC_CMD_DTHOLD BIT(13)
+#define SDMMC_CMD_BOOTMODE BIT(14)
+#define SDMMC_CMD_BOOTEN BIT(15)
+#define SDMMC_CMD_CMDSUSPEND BIT(16)
+
+/* SDMMC_DCTRL register */
+#define SDMMC_DCTRL_DTEN BIT(0)
+#define SDMMC_DCTRL_DTDIR BIT(1)
+#define SDMMC_DCTRL_DTMODE GENMASK(3, 2)
+#define SDMMC_DCTRL_DBLOCKSIZE GENMASK(7, 4)
+#define SDMMC_DCTRL_DBLOCKSIZE_SHIFT 4
+#define SDMMC_DCTRL_RWSTART BIT(8)
+#define SDMMC_DCTRL_RWSTOP BIT(9)
+#define SDMMC_DCTRL_RWMOD BIT(10)
+#define SDMMC_DCTRL_SDMMCEN BIT(11)
+#define SDMMC_DCTRL_BOOTACKEN BIT(12)
+#define SDMMC_DCTRL_FIFORST BIT(13)
+
+/* SDMMC_STA register */
+#define SDMMC_STA_CCRCFAIL BIT(0)
+#define SDMMC_STA_DCRCFAIL BIT(1)
+#define SDMMC_STA_CTIMEOUT BIT(2)
+#define SDMMC_STA_DTIMEOUT BIT(3)
+#define SDMMC_STA_TXUNDERR BIT(4)
+#define SDMMC_STA_RXOVERR BIT(5)
+#define SDMMC_STA_CMDREND BIT(6)
+#define SDMMC_STA_CMDSENT BIT(7)
+#define SDMMC_STA_DATAEND BIT(8)
+#define SDMMC_STA_DHOLD BIT(9)
+#define SDMMC_STA_DBCKEND BIT(10)
+#define SDMMC_STA_DABORT BIT(11)
+#define SDMMC_STA_DPSMACT BIT(12)
+#define SDMMC_STA_CPSMACT BIT(13)
+#define SDMMC_STA_TXFIFOHE BIT(14)
+#define SDMMC_STA_RXFIFOHF BIT(15)
+#define SDMMC_STA_TXFIFOF BIT(16)
+#define SDMMC_STA_RXFIFOF BIT(17)
+#define SDMMC_STA_TXFIFOE BIT(18)
+#define SDMMC_STA_RXFIFOE BIT(19)
+#define SDMMC_STA_BUSYD0 BIT(20)
+#define SDMMC_STA_BUSYD0END BIT(21)
+#define SDMMC_STA_SDMMCIT BIT(22)
+#define SDMMC_STA_ACKFAIL BIT(23)
+#define SDMMC_STA_ACKTIMEOUT BIT(24)
+#define SDMMC_STA_VSWEND BIT(25)
+#define SDMMC_STA_CKSTOP BIT(26)
+#define SDMMC_STA_IDMATE BIT(27)
+#define SDMMC_STA_IDMABTC BIT(28)
+
+/* SDMMC_ICR register */
+#define SDMMC_ICR_CCRCFAILC BIT(0)
+#define SDMMC_ICR_DCRCFAILC BIT(1)
+#define SDMMC_ICR_CTIMEOUTC BIT(2)
+#define SDMMC_ICR_DTIMEOUTC BIT(3)
+#define SDMMC_ICR_TXUNDERRC BIT(4)
+#define SDMMC_ICR_RXOVERRC BIT(5)
+#define SDMMC_ICR_CMDRENDC BIT(6)
+#define SDMMC_ICR_CMDSENTC BIT(7)
+#define SDMMC_ICR_DATAENDC BIT(8)
+#define SDMMC_ICR_DHOLDC BIT(9)
+#define SDMMC_ICR_DBCKENDC BIT(10)
+#define SDMMC_ICR_DABORTC BIT(11)
+#define SDMMC_ICR_BUSYD0ENDC BIT(21)
+#define SDMMC_ICR_SDMMCITC BIT(22)
+#define SDMMC_ICR_ACKFAILC BIT(23)
+#define SDMMC_ICR_ACKTIMEOUTC BIT(24)
+#define SDMMC_ICR_VSWENDC BIT(25)
+#define SDMMC_ICR_CKSTOPC BIT(26)
+#define SDMMC_ICR_IDMATEC BIT(27)
+#define SDMMC_ICR_IDMABTCC BIT(28)
+#define SDMMC_ICR_STATIC_FLAGS ((GENMASK(28, 21)) | (GENMASK(11, 0)))
+
+/* SDMMC_MASK register */
+#define SDMMC_MASK_CCRCFAILIE BIT(0)
+#define SDMMC_MASK_DCRCFAILIE BIT(1)
+#define SDMMC_MASK_CTIMEOUTIE BIT(2)
+#define SDMMC_MASK_DTIMEOUTIE BIT(3)
+#define SDMMC_MASK_TXUNDERRIE BIT(4)
+#define SDMMC_MASK_RXOVERRIE BIT(5)
+#define SDMMC_MASK_CMDRENDIE BIT(6)
+#define SDMMC_MASK_CMDSENTIE BIT(7)
+#define SDMMC_MASK_DATAENDIE BIT(8)
+#define SDMMC_MASK_DHOLDIE BIT(9)
+#define SDMMC_MASK_DBCKENDIE BIT(10)
+#define SDMMC_MASK_DABORTIE BIT(11)
+#define SDMMC_MASK_TXFIFOHEIE BIT(14)
+#define SDMMC_MASK_RXFIFOHFIE BIT(15)
+#define SDMMC_MASK_RXFIFOFIE BIT(17)
+#define SDMMC_MASK_TXFIFOEIE BIT(18)
+#define SDMMC_MASK_BUSYD0ENDIE BIT(21)
+#define SDMMC_MASK_SDMMCITIE BIT(22)
+#define SDMMC_MASK_ACKFAILIE BIT(23)
+#define SDMMC_MASK_ACKTIMEOUTIE BIT(24)
+#define SDMMC_MASK_VSWENDIE BIT(25)
+#define SDMMC_MASK_CKSTOPIE BIT(26)
+#define SDMMC_MASK_IDMABTCIE BIT(28)
+
+/* SDMMC_IDMACTRL register */
+#define SDMMC_IDMACTRL_IDMAEN BIT(0)
+
+#define SDMMC_CMD_TIMEOUT 0xFFFFFFFF
+#define SDMMC_BUSYD0END_TIMEOUT_US 2000000
+
+#define IS_RISING_EDGE(reg) ((reg) & SDMMC_CLKCR_NEGEDGE ? 0 : 1)
+
+struct stm32_sdmmc2_priv {
+ void __iomem *base;
+ struct mci_host mci;
+ struct device_d *dev;
+ struct clk *clk;
+ struct reset_control *reset_ctl;
+ u32 clk_reg_msk;
+ u32 pwr_reg_msk;
+};
+
+#define to_mci_host(mci) container_of(mci, struct stm32_sdmmc2_priv, mci)
+
+/*
+ * Reset the SDMMC with the RCC.SDMMCxRST register bit.
+ * This will reset the SDMMC to the reset state and the CPSM and DPSM
+ * to the Idle state. The SDMMC is disabled and its signals are Hi-Z.
+ */
+static int stm32_sdmmc2_reset(struct mci_host *mci, struct device_d *mci_dev)
+{
+ struct stm32_sdmmc2_priv *priv = to_mci_host(mci);
+
+ reset_control_assert(priv->reset_ctl);
+ udelay(2);
+ reset_control_deassert(priv->reset_ctl);
+
+ /* init the needed SDMMC register after reset */
+ writel(priv->pwr_reg_msk, priv->base + SDMMC_POWER);
+
+ return 0;
+}
+
+/*
+ * Set the SDMMC in power-cycle state.
+ * This drives SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK low, to prevent
+ * the card from being powered through the signal lines.
+ */
+static void stm32_sdmmc2_pwrcycle(struct stm32_sdmmc2_priv *priv)
+{
+ if ((readl(priv->base + SDMMC_POWER) & SDMMC_POWER_PWRCTRL_MASK) ==
+ SDMMC_POWER_PWRCTRL_CYCLE)
+ return;
+
+ stm32_sdmmc2_reset(&priv->mci, priv->dev);
+ writel(SDMMC_POWER_PWRCTRL_CYCLE | priv->pwr_reg_msk,
+ priv->base + SDMMC_POWER);
+}
+
+/*
+ * Set the SDMMC to the Power-on state (the card is clocked).
+ * The driver walks the SDMMC power state machine:
+ * Reset => Power-Cycle (PWRCTRL=10) => Power-Off (PWRCTRL=00) => Power-On (PWRCTRL=11)
+ */
+static void stm32_sdmmc2_pwron(struct stm32_sdmmc2_priv *priv)
+{
+ u32 pwrctrl =
+ readl(priv->base + SDMMC_POWER) & SDMMC_POWER_PWRCTRL_MASK;
+
+ if (pwrctrl == SDMMC_POWER_PWRCTRL_ON)
+ return;
+
+ /* Warning: PWRCTRL reads the same value after reset and in the
+ * power-off state; here it can only be the reset state, the only
+ * one managed by the driver.
+ */
+ if (pwrctrl == SDMMC_POWER_PWRCTRL_OFF) {
+ writel(SDMMC_POWER_PWRCTRL_CYCLE | priv->pwr_reg_msk,
+ priv->base + SDMMC_POWER);
+ }
+
+ /*
+ * The remaining case is SDMMC_POWER_PWRCTRL_CYCLE:
+ * switch to the Power-off state: SDMMC disabled, signals driven high.
+ */
+ writel(SDMMC_POWER_PWRCTRL_OFF | priv->pwr_reg_msk,
+ priv->base + SDMMC_POWER);
+
+ /* after a 1 ms delay, set the SDMMC to power-on */
+ mdelay(1);
+ writel(SDMMC_POWER_PWRCTRL_ON | priv->pwr_reg_msk,
+ priv->base + SDMMC_POWER);
+
+ /* during the first 74 SDMMC_CK cycles the SDMMC is still disabled. */
+}
+
+static void stm32_sdmmc2_start_data(struct stm32_sdmmc2_priv *priv,
+ struct mci_data *data, u32 data_length)
+{
+ unsigned int num_bytes = data->blocks * data->blocksize;
+ u32 data_ctrl, idmabase0;
+
+ /* Configure the SDMMC DPSM (Data Path State Machine) */
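+ /* DBLOCKSIZE holds log2 of the block size, e.g. 9 for the usual 512-byte blocks */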
+ data_ctrl = (__ilog2_u32(data->blocksize) <<
+ SDMMC_DCTRL_DBLOCKSIZE_SHIFT) &
+ SDMMC_DCTRL_DBLOCKSIZE;
+
+ if (data->flags & MMC_DATA_READ) {
+ data_ctrl |= SDMMC_DCTRL_DTDIR;
+ idmabase0 = (u32)data->dest;
+ } else {
+ idmabase0 = (u32)data->src;
+ }
+
+ /* Set the SDMMC DataLength value */
+ writel(data_length, priv->base + SDMMC_DLEN);
+
+ /* Write to SDMMC DCTRL */
+ writel(data_ctrl, priv->base + SDMMC_DCTRL);
+
+ if (data->flags & MMC_DATA_WRITE)
+ dma_sync_single_for_device((unsigned long)idmabase0,
+ num_bytes, DMA_TO_DEVICE);
+ else
+ dma_sync_single_for_device((unsigned long)idmabase0,
+ num_bytes, DMA_FROM_DEVICE);
+
+ /* Enable internal DMA */
+ writel(idmabase0, priv->base + SDMMC_IDMABASE0);
+ writel(SDMMC_IDMACTRL_IDMAEN, priv->base + SDMMC_IDMACTRL);
+}
+
+static void stm32_sdmmc2_start_cmd(struct stm32_sdmmc2_priv *priv,
+ struct mci_cmd *cmd, u32 cmd_param,
+ u32 data_length)
+{
+ u32 timeout = 0;
+
+ if (readl(priv->base + SDMMC_CMD) & SDMMC_CMD_CPSMEN)
+ writel(0, priv->base + SDMMC_CMD);
+
+ cmd_param |= cmd->cmdidx | SDMMC_CMD_CPSMEN;
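+ /*
+ * Response format: both WAITRESP bits for long (136-bit) responses,
+ * WAITRESP_0 for short responses with CRC, WAITRESP_1 for those without.
+ */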
+ if (cmd->resp_type & MMC_RSP_PRESENT) {
+ if (cmd->resp_type & MMC_RSP_136)
+ cmd_param |= SDMMC_CMD_WAITRESP;
+ else if (cmd->resp_type & MMC_RSP_CRC)
+ cmd_param |= SDMMC_CMD_WAITRESP_0;
+ else
+ cmd_param |= SDMMC_CMD_WAITRESP_1;
+ }
+
+ /*
+ * SDMMC_DTIMER must be set in two cases:
+ * - on data transfers,
+ * - on busy requests.
+ * If it is not set or is too short, the DTIMEOUT flag is raised, the DPSM
+ * stays enabled/busy and waits for an abort (stop transmission command).
+ * No further data command is possible while the DPSM is active.
+ */
+ if (data_length) {
+ timeout = SDMMC_CMD_TIMEOUT;
+ } else {
+ writel(0, priv->base + SDMMC_DCTRL);
+
+ if (cmd->resp_type & MMC_RSP_BUSY)
+ timeout = SDMMC_CMD_TIMEOUT;
+ }
+
+ /* Set the SDMMC Data TimeOut value */
+ writel(timeout, priv->base + SDMMC_DTIMER);
+
+ /* Clear flags */
+ writel(SDMMC_ICR_STATIC_FLAGS, priv->base + SDMMC_ICR);
+
+ /* Set SDMMC argument value */
+ writel(cmd->cmdarg, priv->base + SDMMC_ARG);
+
+ /* Set SDMMC command parameters */
+ writel(cmd_param, priv->base + SDMMC_CMD);
+}
+
+static int stm32_sdmmc2_end_cmd(struct stm32_sdmmc2_priv *priv,
+ struct mci_cmd *cmd)
+{
+ u32 mask = SDMMC_STA_CTIMEOUT;
+ u32 status;
+ int ret;
+
+ if (cmd->resp_type & MMC_RSP_PRESENT) {
+ mask |= SDMMC_STA_CMDREND;
+ if (cmd->resp_type & MMC_RSP_CRC)
+ mask |= SDMMC_STA_CCRCFAIL;
+ } else {
+ mask |= SDMMC_STA_CMDSENT;
+ }
+
+ /* Polling status register */
+ ret = readl_poll_timeout(priv->base + SDMMC_STA, status, status & mask,
+ SDMMC_BUSYD0END_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(priv->dev, "timeout reading SDMMC_STA register\n");
+ return ret;
+ }
+
+ /* Check status */
+ if (status & SDMMC_STA_CTIMEOUT) {
+ dev_err(priv->dev, "%s: error SDMMC_STA_CTIMEOUT (0x%x) for cmd %d\n",
+ __func__, status, cmd->cmdidx);
+ return -ETIMEDOUT;
+ }
+
+ if (status & SDMMC_STA_CCRCFAIL && cmd->resp_type & MMC_RSP_CRC) {
+ dev_err(priv->dev, "%s: error SDMMC_STA_CCRCFAIL (0x%x) for cmd %d\n",
+ __func__, status, cmd->cmdidx);
+ return -EILSEQ;
+ }
+
+ if (status & SDMMC_STA_CMDREND && cmd->resp_type & MMC_RSP_PRESENT) {
+ cmd->response[0] = readl(priv->base + SDMMC_RESP1);
+ if (cmd->resp_type & MMC_RSP_136) {
+ cmd->response[1] = readl(priv->base + SDMMC_RESP2);
+ cmd->response[2] = readl(priv->base + SDMMC_RESP3);
+ cmd->response[3] = readl(priv->base + SDMMC_RESP4);
+ }
+
+ /* Wait for BUSYD0END flag if busy status is detected */
+ if (cmd->resp_type & MMC_RSP_BUSY &&
+ status & SDMMC_STA_BUSYD0) {
+ mask = SDMMC_STA_DTIMEOUT | SDMMC_STA_BUSYD0END;
+
+ /* Polling status register */
+ ret = readl_poll_timeout(priv->base + SDMMC_STA,
+ status, status & mask,
+ SDMMC_BUSYD0END_TIMEOUT_US);
+
+ if (ret < 0) {
+ dev_err(priv->dev, "%s: timeout reading SDMMC_STA\n",
+ __func__);
+ return ret;
+ }
+
+ if (status & SDMMC_STA_DTIMEOUT) {
+ dev_err(priv->dev, "%s: error SDMMC_STA_DTIMEOUT (0x%x)\n",
+ __func__, status);
+ return -ETIMEDOUT;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int stm32_sdmmc2_end_data(struct stm32_sdmmc2_priv *priv,
+ struct mci_cmd *cmd,
+ struct mci_data *data)
+{
+ u32 mask = SDMMC_STA_DCRCFAIL | SDMMC_STA_DTIMEOUT |
+ SDMMC_STA_IDMATE | SDMMC_STA_DATAEND;
+ unsigned int num_bytes = data->blocks * data->blocksize;
+ u32 status;
+ int ret;
+
+ if (data->flags & MMC_DATA_READ)
+ mask |= SDMMC_STA_RXOVERR;
+ else
+ mask |= SDMMC_STA_TXUNDERR;
+
+ ret = readl_poll_timeout(priv->base + SDMMC_STA, status, status & mask,
+ SDMMC_BUSYD0END_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(priv->dev, "Time out on waiting for SDMMC_STA. cmd %d\n",
+ cmd->cmdidx);
+ return ret;
+ }
+
+ if (data->flags & MMC_DATA_WRITE)
+ dma_sync_single_for_cpu((unsigned long)data->src,
+ num_bytes, DMA_TO_DEVICE);
+ else
+ dma_sync_single_for_cpu((unsigned long)data->dest,
+ num_bytes, DMA_FROM_DEVICE);
+
+ if (status & SDMMC_STA_DCRCFAIL) {
+ dev_err(priv->dev, "error SDMMC_STA_DCRCFAIL (0x%x) for cmd %d\n",
+ status, cmd->cmdidx);
+ return -EILSEQ;
+ }
+
+ if (status & SDMMC_STA_DTIMEOUT) {
+ dev_err(priv->dev, "error SDMMC_STA_DTIMEOUT (0x%x) for cmd %d\n",
+ status, cmd->cmdidx);
+ return -ETIMEDOUT;
+ }
+
+ if (status & SDMMC_STA_TXUNDERR) {
+ dev_err(priv->dev, "error SDMMC_STA_TXUNDERR (0x%x) for cmd %d\n",
+ status, cmd->cmdidx);
+ return -EIO;
+ }
+
+ if (status & SDMMC_STA_RXOVERR) {
+ dev_err(priv->dev, "error SDMMC_STA_RXOVERR (0x%x) for cmd %d\n",
+ status, cmd->cmdidx);
+ return -EIO;
+ }
+
+ if (status & SDMMC_STA_IDMATE) {
+ dev_err(priv->dev, "%s: error SDMMC_STA_IDMATE (0x%x) for cmd %d\n",
+ __func__, status, cmd->cmdidx);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int stm32_sdmmc2_send_cmd(struct mci_host *mci, struct mci_cmd *cmd,
+ struct mci_data *data)
+{
+ struct stm32_sdmmc2_priv *priv = to_mci_host(mci);
+ u32 cmdat = data ? SDMMC_CMD_CMDTRANS : 0;
+ u32 data_length;
+ int ret, retry = 3;
+
+retry_cmd:
+ data_length = 0;
+
+ if (data) {
+ data_length = data->blocks * data->blocksize;
+ stm32_sdmmc2_start_data(priv, data, data_length);
+ }
+
+ stm32_sdmmc2_start_cmd(priv, cmd, cmdat, data_length);
+
+ dev_dbg(priv->dev, "%s: send cmd %d data: 0x%x @ 0x%x\n", __func__,
+ cmd->cmdidx, data ? data_length : 0, (unsigned int)data);
+
+ ret = stm32_sdmmc2_end_cmd(priv, cmd);
+
+ if (data && !ret)
+ ret = stm32_sdmmc2_end_data(priv, cmd, data);
+
+ /* Clear flags */
+ writel(SDMMC_ICR_STATIC_FLAGS, priv->base + SDMMC_ICR);
+ if (data)
+ writel(0x0, priv->base + SDMMC_IDMACTRL);
+
+ /*
+ * To stop the Data Path State Machine, a stop_transmission command
+ * must be sent on command or data errors.
+ */
+ if (ret && cmd->cmdidx != MMC_CMD_STOP_TRANSMISSION) {
+ struct mci_cmd stop_cmd;
+
+ stop_cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
+ stop_cmd.cmdarg = 0;
+ stop_cmd.resp_type = MMC_RSP_R1b;
+
+ dev_dbg(priv->dev, "%s: send STOP command to abort dpsm treatments\n",
+ __func__);
+
+ data_length = 0;
+
+ stm32_sdmmc2_start_cmd(priv, &stop_cmd,
+ SDMMC_CMD_CMDSTOP, data_length);
+ ret = stm32_sdmmc2_end_cmd(priv, &stop_cmd);
+
+ writel(SDMMC_ICR_STATIC_FLAGS, priv->base + SDMMC_ICR);
+ }
+
+ if (ret && retry) {
+ dev_warn(priv->dev, "%s: cmd %d failed, retrying ...\n",
+ __func__, cmd->cmdidx);
+
+ stm32_sdmmc2_pwrcycle(priv);
+ stm32_sdmmc2_pwron(priv);
+ retry--;
+
+ goto retry_cmd;
+ }
+
+ dev_dbg(priv->dev, "%s: end for CMD %d, ret = %d\n", __func__,
+ cmd->cmdidx, ret);
+
+ return ret;
+}
+
+static void stm32_sdmmc2_set_ios(struct mci_host *mci, struct mci_ios *ios)
+{
+ struct stm32_sdmmc2_priv *priv = to_mci_host(mci);
+ u32 desired = mci->clock;
+ u32 sys_clock = clk_get_rate(priv->clk);
+ u32 clk = 0;
+
+ dev_dbg(priv->dev, "%s: bus_with = %d, clock = %d\n", __func__,
+ mci->bus_width, mci->clock);
+
+ if (mci->clock)
+ stm32_sdmmc2_pwron(priv);
+ else
+ stm32_sdmmc2_pwrcycle(priv);
+
+ /*
+ * clk_div = 0 => command and data generated on SDMMCCLK falling edge
+ * clk_div > 0 and NEGEDGE = 0 => command and data generated on
+ * SDMMCCLK rising edge
+ * clk_div > 0 and NEGEDGE = 1 => command and data generated on
+ * SDMMCCLK falling edge
+ */
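+ /*
+ * Example: with a 99 MHz kernel clock and a requested 25 MHz card clock,
+ * clk = DIV_ROUND_UP(99 MHz, 2 * 25 MHz) = 2, i.e. SDMMC_CK = 99 / (2 * 2) = 24.75 MHz.
+ */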
+ if (desired && (sys_clock > desired ||
+ IS_RISING_EDGE(priv->clk_reg_msk))) {
+ clk = DIV_ROUND_UP(sys_clock, 2 * desired);
+ if (clk > SDMMC_CLKCR_CLKDIV_MAX)
+ clk = SDMMC_CLKCR_CLKDIV_MAX;
+ }
+
+ if (mci->bus_width == 4)
+ clk |= SDMMC_CLKCR_WIDBUS_4;
+ if (mci->bus_width == 8)
+ clk |= SDMMC_CLKCR_WIDBUS_8;
+
+ writel(clk | priv->clk_reg_msk | SDMMC_CLKCR_HWFC_EN,
+ priv->base + SDMMC_CLKCR);
+}
+
+static int stm32_sdmmc2_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ struct device_d *dev = &adev->dev;
+ struct device_node *np = dev->device_node;
+ struct stm32_sdmmc2_priv *priv;
+ struct mci_host *mci;
+ int ret;
+
+ priv = xzalloc(sizeof(*priv));
+
+ priv->base = amba_get_mem_region(adev);
+ priv->dev = dev;
+
+ mci = &priv->mci;
+ mci->send_cmd = stm32_sdmmc2_send_cmd;
+ mci->set_ios = stm32_sdmmc2_set_ios;
+ mci->init = stm32_sdmmc2_reset;
+ mci->hw_dev = dev;
+
+ priv->clk = clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto priv_free;
+ }
+
+ ret = clk_enable(priv->clk);
+ if (ret)
+ goto priv_free;
+
+ if (of_get_property(np, "st,neg-edge", NULL))
+ priv->clk_reg_msk |= SDMMC_CLKCR_NEGEDGE;
+ if (of_get_property(np, "st,sig-dir", NULL))
+ priv->pwr_reg_msk |= SDMMC_POWER_DIRPOL;
+ if (of_get_property(np, "st,use-ckin", NULL))
+ priv->clk_reg_msk |= SDMMC_CLKCR_SELCLKRX_CKIN;
+
+ priv->reset_ctl = reset_control_get(dev, NULL);
+ if (IS_ERR(priv->reset_ctl))
+ priv->reset_ctl = NULL;
+
+ mci->f_min = 400000;
+ /* f_max is taken from kernel v5.3 variant_stm32_sdmmc */
+ mci->f_max = 208000000;
+ mci->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
+
+ mci_of_parse(&priv->mci);
+
+ if (mci->host_caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
+ dev_notice(dev, "Fixing bus-width to 1 due to driver limitation\n");
+ mci->host_caps &= ~(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
+ }
+
+ return mci_register(&priv->mci);
+
+priv_free:
+ free(priv);
+
+ return ret;
+}
+
+static struct amba_id stm32_sdmmc2_ids[] = {
+ /* ST Micro STM32MP157C */
+ {
+ .id = 0x10153180,
+ .mask = 0xf0ffffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver stm32_sdmmc2_driver = {
+ .drv = {
+ .name = DRIVER_NAME,
+ },
+ .probe = stm32_sdmmc2_probe,
+ .id_table = stm32_sdmmc2_ids,
+};
+
+static int stm32_sdmmc2_init(void)
+{
+ amba_driver_register(&stm32_sdmmc2_driver);
+ return 0;
+}
+device_initcall(stm32_sdmmc2_init);
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index 88c7921990..eae6fe3a4e 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -69,7 +69,6 @@ static int __init stpmic1_probe(struct device_d *dev)
stpmic1->client = to_i2c_client(dev);
regmap = regmap_init(dev, &regmap_stpmic1_i2c_bus,
stpmic1, &stpmic1_regmap_i2c_config);
- dev->priv = regmap;
ret = regmap_register_cdev(regmap, NULL);
if (ret)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 57f0b57d64..62e522a302 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -61,7 +61,7 @@ config DRIVER_NET_DAVINCI_EMAC
select PHYLIB
config DRIVER_NET_DESIGNWARE
- bool "Designware Universal MAC ethernet platform support"
+ bool "Designware Universal MAC1000 ethernet platform support"
depends on HAS_DMA
select PHYLIB
help
@@ -87,6 +87,31 @@ config DRIVER_NET_DESIGNWARE_SOCFPGA
endif
+config DRIVER_NET_DESIGNWARE_EQOS
+ bool "Designware Designware Ethernet QoS support"
+ depends on HAS_DMA
+ select PHYLIB
+ select OFTREE
+ help
+ This option enables support for the Synopsys
+ Designware Ethernet Quality-of-Service (GMAC4).
+
+if DRIVER_NET_DESIGNWARE_EQOS
+
+config DRIVER_NET_DESIGNWARE_STM32
+ bool "Designware EQOS STM32 driver"
+ select MFD_SYSCON
+ help
+ This option enables support for the Ethernet MAC on STM32MP platforms.
+
+config DRIVER_NET_DESIGNWARE_TEGRA186
+ bool "Designware Universal MAC ethernet driver for Tegra 186 platforms"
+ select RESET_CONTROLLER
+ help
+ This option enables support for the Ethernet MAC on Tegra186 and Tegra194 SoCs.
+
+endif
+
config DRIVER_NET_DM9K
bool "Davicom dm9k[E|A|B] ethernet driver"
depends on HAS_DM9000
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f6a8213613..656d45a868 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -11,6 +11,9 @@ obj-$(CONFIG_DRIVER_NET_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_DRIVER_NET_DESIGNWARE) += designware.o
obj-$(CONFIG_DRIVER_NET_DESIGNWARE_GENERIC) += designware_generic.o
obj-$(CONFIG_DRIVER_NET_DESIGNWARE_SOCFPGA) += designware_socfpga.o
+obj-$(CONFIG_DRIVER_NET_DESIGNWARE_EQOS) += designware_eqos.o
+obj-$(CONFIG_DRIVER_NET_DESIGNWARE_STM32) += designware_stm32.o
+obj-$(CONFIG_DRIVER_NET_DESIGNWARE_TEGRA186) += designware_tegra186.o
obj-$(CONFIG_DRIVER_NET_DM9K) += dm9k.o
obj-$(CONFIG_DRIVER_NET_E1000) += e1000/regio.o e1000/main.o e1000/eeprom.o
obj-$(CONFIG_DRIVER_NET_ENC28J60) += enc28j60.o
diff --git a/drivers/net/designware_eqos.c b/drivers/net/designware_eqos.c
new file mode 100644
index 0000000000..a49239e057
--- /dev/null
+++ b/drivers/net/designware_eqos.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION.
+ * Copyright (c) 2019, Ahmad Fatoum, Pengutronix
+ *
+ * Portions based on U-Boot's rtl8169.c and dwc_eth_qos.
+ */
+
+#include <common.h>
+#include <init.h>
+#include <dma.h>
+#include <net.h>
+#include <of_net.h>
+#include <linux/iopoll.h>
+#include <linux/time.h>
+#include <linux/sizes.h>
+
+#include "designware_eqos.h"
+
+/* Core registers */
+
+#define EQOS_MAC_REGS_BASE 0x000
+struct eqos_mac_regs {
+ u32 config; /* 0x000 */
+ u32 ext_config; /* 0x004 */
+ u32 unused_004[(0x070 - 0x008) / 4]; /* 0x008 */
+ u32 q0_tx_flow_ctrl; /* 0x070 */
+ u32 unused_070[(0x090 - 0x074) / 4]; /* 0x074 */
+ u32 rx_flow_ctrl; /* 0x090 */
+ u32 unused_094; /* 0x094 */
+ u32 txq_prty_map0; /* 0x098 */
+ u32 unused_09c; /* 0x09c */
+ u32 rxq_ctrl0; /* 0x0a0 */
+ u32 unused_0a4; /* 0x0a4 */
+ u32 rxq_ctrl2; /* 0x0a8 */
+ u32 unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */
+ u32 us_tic_counter; /* 0x0dc */
+ u32 unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */
+ u32 hw_feature0; /* 0x11c */
+ u32 hw_feature1; /* 0x120 */
+ u32 hw_feature2; /* 0x124 */
+ u32 unused_128[(0x200 - 0x128) / 4]; /* 0x128 */
+ u32 mdio_address; /* 0x200 */
+ u32 mdio_data; /* 0x204 */
+ u32 unused_208[(0x300 - 0x208) / 4]; /* 0x208 */
+ u32 macaddr0hi; /* 0x300 */
+ u32 macaddr0lo; /* 0x304 */
+};
+
+#define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23)
+#define EQOS_MAC_CONFIGURATION_CST BIT(21)
+#define EQOS_MAC_CONFIGURATION_ACS BIT(20)
+#define EQOS_MAC_CONFIGURATION_WD BIT(19)
+#define EQOS_MAC_CONFIGURATION_JD BIT(17)
+#define EQOS_MAC_CONFIGURATION_JE BIT(16)
+#define EQOS_MAC_CONFIGURATION_PS BIT(15)
+#define EQOS_MAC_CONFIGURATION_FES BIT(14)
+#define EQOS_MAC_CONFIGURATION_DM BIT(13)
+#define EQOS_MAC_CONFIGURATION_TE BIT(1)
+#define EQOS_MAC_CONFIGURATION_RE BIT(0)
+
+#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16
+#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff
+#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1)
+
+#define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
+
+#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0
+#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff
+
+#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0
+#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff
+
+#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6
+#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f
+#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0
+#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f
+
+#define EQOS_MTL_REGS_BASE 0xd00
+struct eqos_mtl_regs {
+ u32 txq0_operation_mode; /* 0xd00 */
+ u32 unused_d04; /* 0xd04 */
+ u32 txq0_debug; /* 0xd08 */
+ u32 unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */
+ u32 txq0_quantum_weight; /* 0xd18 */
+ u32 unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */
+ u32 rxq0_operation_mode; /* 0xd30 */
+ u32 unused_d34; /* 0xd34 */
+ u32 rxq0_debug; /* 0xd38 */
+};
+
+#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16
+#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff
+#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2
+#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3
+#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2
+#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1)
+#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0)
+
+#define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4)
+#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1
+#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3
+
+#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20
+#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff
+#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14
+#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f
+#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8
+#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f
+#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7)
+#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5)
+
+#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16
+#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff
+#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4
+#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3
+
+#define EQOS_DMA_REGS_BASE 0x1000
+struct eqos_dma_regs {
+ u32 mode; /* 0x1000 */
+ u32 sysbus_mode; /* 0x1004 */
+ u32 unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */
+ u32 ch0_control; /* 0x1100 */
+ u32 ch0_tx_control; /* 0x1104 */
+ u32 ch0_rx_control; /* 0x1108 */
+ u32 unused_110c; /* 0x110c */
+ u32 ch0_txdesc_list_haddress; /* 0x1110 */
+ u32 ch0_txdesc_list_address; /* 0x1114 */
+ u32 ch0_rxdesc_list_haddress; /* 0x1118 */
+ u32 ch0_rxdesc_list_address; /* 0x111c */
+ u32 ch0_txdesc_tail_pointer; /* 0x1120 */
+ u32 unused_1124; /* 0x1124 */
+ u32 ch0_rxdesc_tail_pointer; /* 0x1128 */
+ u32 ch0_txdesc_ring_length; /* 0x112c */
+ u32 ch0_rxdesc_ring_length; /* 0x1130 */
+};
+
+#define EQOS_DMA_MODE_SWR BIT(0)
+
+#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16
+#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf
+#define EQOS_DMA_SYSBUS_MODE_EAME BIT(11)
+#define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3)
+#define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2)
+#define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1)
+
+#define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16)
+
+#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16
+#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f
+#define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4)
+#define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0)
+
+#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16
+#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f
+#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1
+#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff
+#define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0)
+
+/* Descriptors */
+
+#define EQOS_DESCRIPTOR_WORDS 4
+#define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4)
+/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
+#define EQOS_DESCRIPTOR_ALIGN 64
+#define EQOS_DESCRIPTORS_TX 4
+#define EQOS_DESCRIPTORS_RX 4
+#define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
+#define EQOS_DESCRIPTORS_SIZE ALIGN(EQOS_DESCRIPTORS_NUM * \
+ EQOS_DESCRIPTOR_SIZE, EQOS_DESCRIPTOR_ALIGN)
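+/* i.e. ALIGN((4 + 4) * 16, 64) = 128 bytes for the whole descriptor ring */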
+#define EQOS_BUFFER_ALIGN EQOS_DESCRIPTOR_ALIGN
+#define EQOS_MAX_PACKET_SIZE ALIGN(1568, EQOS_DESCRIPTOR_ALIGN)
+
+struct eqos_desc {
+ u32 des0; /* PA of buffer 1 or TSO header */
+ u32 des1; /* PA of buffer 2 with descriptor rings */
+ u32 des2; /* Length, VLAN, Timestamps, Interrupts */
+ u32 des3; /* All other flags */
+};
+
+#define EQOS_DESC3_OWN BIT(31)
+#define EQOS_DESC3_FD BIT(29)
+#define EQOS_DESC3_LD BIT(28)
+#define EQOS_DESC3_BUF1V BIT(24)
+
+#define EQOS_MDIO_ADDR(addr) (((addr) << 21) & GENMASK(25, 21))
+#define EQOS_MDIO_REG(reg) (((reg) << 16) & GENMASK(20, 16))
+#define EQOS_MDIO_CLK_CSR(clk_csr) (((clk_csr) << 8) & GENMASK(11, 8))
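+/* e.g. addr 3, reg 1, clk_csr 5 pack to (3 << 21) | (1 << 16) | (5 << 8) in the MDIO address register */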
+
+#define MII_BUSY (1 << 0)
+
+static int eqos_mdio_wait_idle(struct eqos *eqos)
+{
+ u32 idle;
+ return readl_poll_timeout(&eqos->mac_regs->mdio_address, idle,
+ !(idle & MII_BUSY), 10 * USEC_PER_MSEC);
+}
+
+static int eqos_mdio_read(struct mii_bus *bus, int addr, int reg)
+{
+ struct eqos *eqos = bus->priv;
+ u32 miiaddr;
+ int ret;
+
+ ret = eqos_mdio_wait_idle(eqos);
+ if (ret) {
+ eqos_err(eqos, "MDIO not idle at entry\n");
+ return ret;
+ }
+
+ miiaddr = readl(&eqos->mac_regs->mdio_address);
+ miiaddr &= EQOS_MDIO_ADDR_SKAP | EQOS_MDIO_ADDR_C45E;
+ miiaddr |= EQOS_MDIO_ADDR_GOC_READ << EQOS_MDIO_ADDR_GOC_SHIFT;
+
+ miiaddr |= EQOS_MDIO_CLK_CSR(eqos->ops->clk_csr);
+ miiaddr |= EQOS_MDIO_ADDR(addr) | EQOS_MDIO_REG(reg);
+ miiaddr |= MII_BUSY;
+
+ writel(miiaddr, &eqos->mac_regs->mdio_address);
+
+ udelay(eqos->ops->mdio_wait_us);
+
+ ret = eqos_mdio_wait_idle(eqos);
+ if (ret) {
+ eqos_err(eqos, "MDIO read didn't complete\n");
+ return ret;
+ }
+
+ return readl(&eqos->mac_regs->mdio_data) & 0xffff;
+}
+
+static int eqos_mdio_write(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct eqos *eqos = bus->priv;
+ u32 miiaddr = 0;
+ int ret;
+
+ ret = eqos_mdio_wait_idle(eqos);
+ if (ret) {
+ eqos_err(eqos, "MDIO not idle at entry\n");
+ return ret;
+ }
+
+ miiaddr = readl(&eqos->mac_regs->mdio_address);
+ miiaddr &= EQOS_MDIO_ADDR_SKAP | EQOS_MDIO_ADDR_C45E;
+ miiaddr |= EQOS_MDIO_ADDR_GOC_WRITE << EQOS_MDIO_ADDR_GOC_SHIFT;
+
+ miiaddr |= EQOS_MDIO_CLK_CSR(eqos->ops->clk_csr);
+ miiaddr |= EQOS_MDIO_ADDR(addr) | EQOS_MDIO_REG(reg);
+ miiaddr |= MII_BUSY;
+
+ writel(val, &eqos->mac_regs->mdio_data);
+ writel(miiaddr, &eqos->mac_regs->mdio_address);
+
+ udelay(eqos->ops->mdio_wait_us);
+
+ ret = eqos_mdio_wait_idle(eqos);
+ if (ret) {
+ eqos_err(eqos, "MDIO read didn't complete\n");
+ return ret;
+ }
+
+ /* Needed as a fix for ST-Phy */
+ eqos_mdio_read(bus, addr, reg);
+ return 0;
+}
+
+
+static inline void eqos_set_full_duplex(struct eqos *eqos)
+{
+ setbits_le32(&eqos->mac_regs->config, EQOS_MAC_CONFIGURATION_DM);
+}
+
+static inline void eqos_set_half_duplex(struct eqos *eqos)
+{
+ clrbits_le32(&eqos->mac_regs->config, EQOS_MAC_CONFIGURATION_DM);
+
+ /* WAR: Flush TX queue when switching to half-duplex */
+ setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
+ EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);
+}
+
+static inline void eqos_set_gmii_speed(struct eqos *eqos)
+{
+ clrbits_le32(&eqos->mac_regs->config,
+ EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);
+}
+
+static inline void eqos_set_mii_speed_100(struct eqos *eqos)
+{
+ setbits_le32(&eqos->mac_regs->config,
+ EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);
+}
+
+static inline void eqos_set_mii_speed_10(struct eqos *eqos)
+{
+ clrsetbits_le32(&eqos->mac_regs->config,
+ EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);
+}
+
+void eqos_adjust_link(struct eth_device *edev)
+{
+ struct eqos *eqos = edev->priv;
+ unsigned speed = edev->phydev->speed;
+
+ if (edev->phydev->duplex)
+ eqos_set_full_duplex(eqos);
+ else
+ eqos_set_half_duplex(eqos);
+
+ switch (speed) {
+ case SPEED_1000:
+ eqos_set_gmii_speed(eqos);
+ break;
+ case SPEED_100:
+ eqos_set_mii_speed_100(eqos);
+ break;
+ case SPEED_10:
+ eqos_set_mii_speed_10(eqos);
+ break;
+ default:
+ eqos_warn(eqos, "invalid speed %d\n", speed);
+ return;
+ }
+}
+
+int eqos_get_ethaddr(struct eth_device *edev, unsigned char *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+int eqos_set_ethaddr(struct eth_device *edev, const unsigned char *mac)
+{
+ struct eqos *eqos = edev->priv;
+ __le32 mac_hi, mac_lo;
+
+ memcpy(eqos->macaddr, mac, ETH_ALEN);
+
+ /* Update the MAC address */
+ memcpy(&mac_hi, &mac[4], 2);
+ memcpy(&mac_lo, &mac[0], 4);
+
+ __raw_writel(mac_hi, &eqos->mac_regs->macaddr0hi);
+ __raw_writel(mac_lo, &eqos->mac_regs->macaddr0lo);
+
+ return 0;
+}
+
+/* Get PHY out of power saving mode. If this is needed elsewhere then
+ * consider making it part of phy-core and adding a resume method to
+ * the phy device ops. */
+static int phy_resume(struct phy_device *phydev)
+{
+ int bmcr;
+
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_PDOWN) {
+ bmcr &= ~BMCR_PDOWN;
+ return phy_write(phydev, MII_BMCR, bmcr);
+ }
+
+ return 0;
+}
+
+int eqos_start(struct eth_device *edev)
+{
+ struct eqos *eqos = edev->priv;
+ u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
+ unsigned long last_rx_desc;
+ unsigned long rate;
+ u32 mode_set;
+ int ret;
+ int i;
+
+ setbits_le32(&eqos->dma_regs->mode, EQOS_DMA_MODE_SWR);
+
+ ret = readl_poll_timeout(&eqos->dma_regs->mode, mode_set,
+ !(mode_set & EQOS_DMA_MODE_SWR),
+ 100 * USEC_PER_MSEC);
+ if (ret) {
+ eqos_err(eqos, "EQOS_DMA_MODE_SWR stuck: 0x%08x\n", mode_set);
+ return ret;
+ }
+
+ /* Reset above clears MAC address */
+ eqos_set_ethaddr(edev, eqos->macaddr);
+
+ /* Required for accurate time keeping with EEE counters */
+ rate = eqos->ops->get_csr_clk_rate(eqos);
+
+ val = (rate / USEC_PER_SEC) - 1; /* -1 because the data sheet says so */
+ writel(val, &eqos->mac_regs->us_tic_counter);
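+ /* e.g. a 250 MHz CSR clock gives 250 - 1 = 249 */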
+
+ ret = phy_device_connect(edev, &eqos->miibus, eqos->phy_addr,
+ eqos->ops->adjust_link, 0, eqos->interface);
+ if (ret)
+ return ret;
+
+ /* Before we reset the MAC, we must ensure the PHY is not powered down,
+ * as the DWC controller needs all clock domains to be running, including
+ * the PHY clock, to come out of a MAC reset. */
+ ret = phy_resume(edev->phydev);
+ if (ret)
+ return ret;
+
+ /* Configure MTL */
+
+ /* Enable Store and Forward mode for TX */
+ /* Program Tx operating mode */
+ setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
+ EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
+ (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
+ EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));
+
+ /* Transmit Queue weight */
+ writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);
+
+ /* Enable Store and Forward mode for RX, since no jumbo frame */
+ setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
+ EQOS_MTL_RXQ0_OPERATION_MODE_RSF);
+
+ /* Transmit/Receive queue fifo size; use all RAM for 1 queue */
+ val = readl(&eqos->mac_regs->hw_feature1);
+ tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
+ EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
+ rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
+ EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;
+
+ /*
+ * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
+ * r/tqs is encoded as (n / 256) - 1.
+ */
+ tqs = (128 << tx_fifo_sz) / 256 - 1;
+ rqs = (128 << rx_fifo_sz) / 256 - 1;
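+ /* e.g. a 4 KiB FIFO is encoded as 5 (128 << 5 = 4096), so tqs/rqs = 4096 / 256 - 1 = 15 */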
+
+ clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
+ EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
+ EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
+ tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
+
+ clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
+ EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
+ EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
+ rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);
+
+ /* Flow control used only if each channel gets 4KB or more FIFO */
+ if (rqs >= ((SZ_4K / 256) - 1)) {
+ u32 rfd, rfa;
+
+ setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
+ EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);
+
+ /*
+ * Set the threshold for activating flow control: space for min 2
+ * frames, i.e. (1500 * 1) = 1500 bytes.
+ *
+ * Set the threshold for deactivating flow control: space for min
+ * 1 frame (frame size 1500 bytes) in the receive FIFO.
+ */
+ if (rqs == ((SZ_4K / 256) - 1)) {
+ /*
+ * This violates the above formula because of the FIFO size
+ * limit, so overflow may occur in spite of this.
+ */
+ rfd = 0x3; /* Full-3K */
+ rfa = 0x1; /* Full-1.5K */
+ } else if (rqs == ((SZ_8K / 256) - 1)) {
+ rfd = 0x6; /* Full-4K */
+ rfa = 0xa; /* Full-6K */
+ } else if (rqs == ((16384 / 256) - 1)) {
+ rfd = 0x6; /* Full-4K */
+ rfa = 0x12; /* Full-10K */
+ } else {
+ rfd = 0x6; /* Full-4K */
+ rfa = 0x1E; /* Full-16K */
+ }
+
+ clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
+ (EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
+ EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
+ (EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
+ EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
+ (rfd <<
+ EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
+ (rfa <<
+ EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
+ }
+
+ /* Configure MAC */
+
+ clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
+ EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
+ EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
+ eqos->ops->config_mac <<
+ EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);
+
+ /* Set TX flow control parameters */
+ /* Set Pause Time */
+ setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
+ 0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
+ /* Assign priority for TX flow control */
+ clrbits_le32(&eqos->mac_regs->txq_prty_map0,
+ EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
+ EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
+ /* Assign priority for RX flow control */
+ clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
+ EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
+ EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
+ /* Enable flow control */
+ setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
+ EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
+ setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
+ EQOS_MAC_RX_FLOW_CTRL_RFE);
+
+ clrsetbits_le32(&eqos->mac_regs->config,
+ EQOS_MAC_CONFIGURATION_GPSLCE |
+ EQOS_MAC_CONFIGURATION_WD |
+ EQOS_MAC_CONFIGURATION_JD |
+ EQOS_MAC_CONFIGURATION_JE,
+ EQOS_MAC_CONFIGURATION_CST |
+ EQOS_MAC_CONFIGURATION_ACS);
+
+ /* Configure DMA */
+
+ /* Enable OSP mode */
+ setbits_le32(&eqos->dma_regs->ch0_tx_control,
+ EQOS_DMA_CH0_TX_CONTROL_OSP);
+
+ /* RX buffer size. Must be a multiple of bus width */
+ clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
+ EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
+ EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
+ EQOS_MAX_PACKET_SIZE <<
+ EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);
+
+ setbits_le32(&eqos->dma_regs->ch0_control,
+ EQOS_DMA_CH0_CONTROL_PBLX8);
+
+ /*
+ * Burst length must be < 1/2 FIFO size.
+ * FIFO size in tqs is encoded as (n / 256) - 1.
+ * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
+ * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
+ */
+ pbl = tqs + 1;
+ if (pbl > 32)
+ pbl = 32;
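+ /* e.g. tqs = 15 (4 KiB FIFO) gives pbl = 16, i.e. 16 * 8 * 16 = 2048 byte bursts, half the FIFO */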
+ clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
+ EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
+ EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
+ pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);
+
+ clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
+ EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
+ EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
+ 8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);
+
+ /* DMA performance configuration */
+ val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
+ EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
+ EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
+ writel(val, &eqos->dma_regs->sysbus_mode);
+
+ /* Set up descriptors */
+
+ eqos->tx_currdescnum = eqos->rx_currdescnum = 0;
+
+ for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
+ struct eqos_desc *rx_desc = &eqos->rx_descs[i];
+
+ writel(EQOS_DESC3_BUF1V | EQOS_DESC3_OWN, &rx_desc->des3);
+ }
+
+ writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
+ writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
+ writel(EQOS_DESCRIPTORS_TX - 1,
+ &eqos->dma_regs->ch0_txdesc_ring_length);
+
+ writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
+ writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
+ writel(EQOS_DESCRIPTORS_RX - 1,
+ &eqos->dma_regs->ch0_rxdesc_ring_length);
+
+ /* Enable everything */
+
+ setbits_le32(&eqos->mac_regs->config,
+ EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);
+
+ setbits_le32(&eqos->dma_regs->ch0_tx_control,
+ EQOS_DMA_CH0_TX_CONTROL_ST);
+ setbits_le32(&eqos->dma_regs->ch0_rx_control,
+ EQOS_DMA_CH0_RX_CONTROL_SR);
+
+ /* TX tail pointer not written until we need to TX a packet */
+ /*
+ * Point RX tail pointer at last descriptor. Ideally, we'd point at the
+ * first descriptor, implying all descriptors were available. However,
+ * that's not distinguishable from none of the descriptors being
+ * available.
+ */
+ last_rx_desc = (ulong)&eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)];
+ writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
+
+ barrier();
+
+ eqos->started = true;
+
+ return 0;
+}
+
+void eqos_stop(struct eth_device *edev)
+{
+ struct eqos *eqos = edev->priv;
+ int i;
+
+ if (!eqos->started)
+ return;
+
+ eqos->started = false;
+
+ barrier();
+
+ /* Disable TX DMA */
+ clrbits_le32(&eqos->dma_regs->ch0_tx_control,
+ EQOS_DMA_CH0_TX_CONTROL_ST);
+
+ /* Wait for all TX packets to drain out of the MTL */
+ for (i = 0; i < 1000000; i++) {
+ u32 val = readl(&eqos->mtl_regs->txq0_debug);
+ u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
+ EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
+ u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;
+ if ((trcsts != 1) && (!txqsts))
+ break;
+ }
+
+ /* Turn off MAC TX and RX */
+ clrbits_le32(&eqos->mac_regs->config,
+ EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);
+
+ /* Wait for all RX packets to drain out of MTL */
+ for (i = 0; i < 1000000; i++) {
+ u32 val = readl(&eqos->mtl_regs->rxq0_debug);
+ u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
+ EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
+ u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
+ EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;
+ if ((!prxq) && (!rxqsts))
+ break;
+ }
+
+ /* Turn off RX DMA */
+ clrbits_le32(&eqos->dma_regs->ch0_rx_control,
+ EQOS_DMA_CH0_RX_CONTROL_SR);
+}
+
+static int eqos_send(struct eth_device *edev, void *packet, int length)
+{
+ struct eqos *eqos = edev->priv;
+ struct device_d *dev = &eqos->netdev.dev;
+ struct eqos_desc *tx_desc;
+ dma_addr_t dma;
+ u32 des3;
+ int ret;
+
+ tx_desc = &eqos->tx_descs[eqos->tx_currdescnum];
+ eqos->tx_currdescnum++;
+ eqos->tx_currdescnum %= EQOS_DESCRIPTORS_TX;
+
+ dma = dma_map_single(dev, packet, length, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ return -EFAULT;
+
+ tx_desc->des0 = (unsigned long)dma;
+ tx_desc->des1 = 0;
+ tx_desc->des2 = length;
+ /*
+ * Make sure the compiler doesn't reorder the _OWN write below, before
+ * the writes to the rest of the descriptor.
+ */
+ barrier();
+
+ writel(EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length, &tx_desc->des3);
+ writel((ulong)(tx_desc + 1), &eqos->dma_regs->ch0_txdesc_tail_pointer);
+
+ ret = readl_poll_timeout(&tx_desc->des3, des3,
+ !(des3 & EQOS_DESC3_OWN),
+ 100 * USEC_PER_MSEC);
+
+ dma_unmap_single(dev, dma, length, DMA_TO_DEVICE);
+
+ if (ret == -ETIMEDOUT)
+ eqos_dbg(eqos, "TX timeout\n");
+
+ return ret;
+}
+
+static int eqos_recv(struct eth_device *edev)
+{
+ struct eqos *eqos = edev->priv;
+ struct eqos_desc *rx_desc;
+ void *frame;
+ int length;
+
+ rx_desc = &eqos->rx_descs[eqos->rx_currdescnum];
+ if (readl(&rx_desc->des3) & EQOS_DESC3_OWN)
+ return 0;
+
+ frame = phys_to_virt(rx_desc->des0);
+ length = rx_desc->des3 & 0x7fff;
+
+ dma_sync_single_for_cpu((unsigned long)frame, length, DMA_FROM_DEVICE);
+ net_receive(edev, frame, length);
+ dma_sync_single_for_device((unsigned long)frame, length, DMA_FROM_DEVICE);
+
+ rx_desc->des0 = (unsigned long)frame;
+ rx_desc->des1 = 0;
+ rx_desc->des2 = 0;
+ /*
+ * Make sure that if HW sees the _OWN write below, it will see all the
+ * writes to the rest of the descriptor too.
+ */
+ rx_desc->des3 |= EQOS_DESC3_BUF1V;
+ rx_desc->des3 |= EQOS_DESC3_OWN;
+ barrier();
+
+ writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
+
+ eqos->rx_currdescnum++;
+ eqos->rx_currdescnum %= EQOS_DESCRIPTORS_RX;
+
+ return 0;
+}
+
+static int eqos_init_resources(struct eqos *eqos)
+{
+ struct device_d *dev = eqos->netdev.parent;
+ int ret = -ENOMEM;
+ void *descs;
+ void *p;
+ int i;
+
+ descs = dma_alloc_coherent(EQOS_DESCRIPTORS_SIZE, DMA_ADDRESS_BROKEN);
+ if (!descs)
+ goto err;
+
+ eqos->tx_descs = (struct eqos_desc *)descs;
+ eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
+
+ p = dma_alloc(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE);
+ if (!p)
+ goto err_free_desc;
+
+ for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
+ struct eqos_desc *rx_desc = &eqos->rx_descs[i];
+ dma_addr_t dma;
+
+ dma = dma_map_single(dev, p, EQOS_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ ret = -EFAULT;
+ goto err_free_rx_bufs;
+ }
+
+ rx_desc->des0 = dma;
+
+ p += EQOS_MAX_PACKET_SIZE;
+ }
+
+ return 0;
+
+err_free_rx_bufs:
+ dma_free(phys_to_virt(eqos->rx_descs[0].des0));
+err_free_desc:
+ dma_free_coherent(descs, 0, EQOS_DESCRIPTORS_SIZE);
+err:
+
+ return ret;
+}
+
+static int eqos_init(struct device_d *dev, struct eqos *eqos)
+{
+ int ret;
+
+ ret = eqos_init_resources(eqos);
+ if (ret) {
+ dev_err(dev, "eqos_init_resources() failed: %s\n", strerror(-ret));
+ return ret;
+ }
+
+ if (eqos->ops->init)
+ ret = eqos->ops->init(dev, eqos);
+
+ return ret;
+}
+
+static void eqos_probe_dt(struct device_d *dev, struct eqos *eqos)
+{
+ struct device_node *child;
+
+ eqos->interface = of_get_phy_mode(dev->device_node);
+ eqos->phy_addr = -1;
+
+ /* Set MDIO bus device node, if present. */
+ for_each_child_of_node(dev->device_node, child) {
+ if (of_device_is_compatible(child, "snps,dwmac-mdio") ||
+ (child->name && !of_node_cmp(child->name, "mdio"))) {
+ eqos->miibus.dev.device_node = child;
+ break;
+ }
+ }
+}
+
+int eqos_probe(struct device_d *dev, const struct eqos_ops *ops, void *priv)
+{
+ struct mii_bus *miibus;
+ struct resource *iores;
+ struct eqos *eqos;
+ struct eth_device *edev;
+ int ret;
+
+ eqos = xzalloc(sizeof(*eqos));
+
+ iores = dev_request_mem_resource(dev, 0);
+ if (IS_ERR(iores))
+ return PTR_ERR(iores);
+ eqos->regs = IOMEM(iores->start);
+
+ eqos->mac_regs = eqos->regs + EQOS_MAC_REGS_BASE;
+ eqos->mtl_regs = eqos->regs + EQOS_MTL_REGS_BASE;
+ eqos->dma_regs = eqos->regs + EQOS_DMA_REGS_BASE;
+ eqos->ops = ops;
+ eqos->priv = priv;
+
+ eqos_probe_dt(dev, eqos);
+
+ edev = &eqos->netdev;
+
+ dev->priv = edev->priv = eqos;
+
+ edev->parent = dev;
+ edev->open = ops->start;
+ edev->send = eqos_send;
+ edev->recv = eqos_recv;
+ edev->halt = ops->stop;
+ edev->get_ethaddr = ops->get_ethaddr;
+ edev->set_ethaddr = ops->set_ethaddr;
+
+ miibus = &eqos->miibus;
+ miibus->parent = edev->parent;
+ miibus->read = eqos_mdio_read;
+ miibus->write = eqos_mdio_write;
+ miibus->priv = eqos;
+
+ ret = eqos_init(dev, eqos);
+ if (ret)
+ return ret;
+
+ ret = mdiobus_register(miibus);
+ if (ret)
+ return ret;
+
+ return eth_register(edev);
+}
+
+void eqos_remove(struct device_d *dev)
+{
+ struct eqos *eqos = dev->priv;
+
+ mdiobus_unregister(&eqos->miibus);
+
+ dma_free(phys_to_virt(eqos->rx_descs[0].des0));
+ dma_free_coherent(eqos->tx_descs, 0, EQOS_DESCRIPTORS_SIZE);
+}
diff --git a/drivers/net/designware_eqos.h b/drivers/net/designware_eqos.h
new file mode 100644
index 0000000000..969a524c0a
--- /dev/null
+++ b/drivers/net/designware_eqos.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Ahmad Fatoum, Pengutronix
+ */
+
+#ifndef __EQOS_H_
+#define __EQOS_H_
+
+struct eqos;
+struct eth_device;
+
+struct eqos_ops {
+ int (*init)(struct device_d *dev, struct eqos *priv);
+ int (*start)(struct eth_device *edev);
+ void (*stop)(struct eth_device *edev);
+ int (*get_ethaddr)(struct eth_device *dev, unsigned char *mac);
+ int (*set_ethaddr)(struct eth_device *edev, const unsigned char *mac);
+ void (*adjust_link)(struct eth_device *edev);
+ unsigned long (*get_csr_clk_rate)(struct eqos *);
+
+ bool enh_desc;
+ int mdio_wait_us;
+
+#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0
+#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3
+#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0
+#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2
+#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1
+ unsigned clk_csr;
+
+#define EQOS_MDIO_ADDR_CR_20_35 2
+#define EQOS_MDIO_ADDR_CR_250_300 5
+#define EQOS_MDIO_ADDR_SKAP BIT(4)
+#define EQOS_MDIO_ADDR_GOC_SHIFT 2
+#define EQOS_MDIO_ADDR_GOC_READ 3
+#define EQOS_MDIO_ADDR_GOC_WRITE 1
+#define EQOS_MDIO_ADDR_C45E BIT(1)
+ unsigned config_mac;
+};
+
+struct eqos_desc;
+struct eqos_dma_regs;
+struct eqos_mac_regs;
+struct eqos_mtl_regs;
+
+struct eqos {
+ struct eth_device netdev;
+ struct mii_bus miibus;
+
+ u8 macaddr[6];
+
+ u32 tx_currdescnum, rx_currdescnum;
+
+ struct eqos_desc *tx_descs, *rx_descs;
+
+ void __iomem *regs;
+ struct eqos_mac_regs __iomem *mac_regs;
+ struct eqos_dma_regs __iomem *dma_regs;
+ struct eqos_mtl_regs __iomem *mtl_regs;
+
+ int phy_addr;
+ phy_interface_t interface;
+
+ const struct eqos_ops *ops;
+ void *priv;
+ bool started;
+};
+
+struct device_d;
+int eqos_probe(struct device_d *dev, const struct eqos_ops *ops, void *priv);
+void eqos_remove(struct device_d *dev);
+int eqos_reset(struct eqos *priv);
+
+int eqos_get_ethaddr(struct eth_device *edev, unsigned char *mac);
+int eqos_set_ethaddr(struct eth_device *edev, const unsigned char *mac);
+int eqos_start(struct eth_device *edev);
+void eqos_stop(struct eth_device *edev);
+void eqos_adjust_link(struct eth_device *edev);
+
+#define eqos_dbg(eqos, ...) dev_dbg(&eqos->netdev.dev, __VA_ARGS__)
+#define eqos_warn(eqos, ...) dev_warn(&eqos->netdev.dev, __VA_ARGS__)
+#define eqos_err(eqos, ...) dev_err(&eqos->netdev.dev, __VA_ARGS__)
+
+#endif
diff --git a/drivers/net/designware_stm32.c b/drivers/net/designware_stm32.c
new file mode 100644
index 0000000000..5b087ad5a3
--- /dev/null
+++ b/drivers/net/designware_stm32.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION.
+ * Copyright (c) 2019, Ahmad Fatoum, Pengutronix
+ *
+ * Portions based on U-Boot's rtl8169.c and dwc_eth_qos.
+ */
+
+#include <common.h>
+#include <init.h>
+#include <net.h>
+#include <linux/clk.h>
+#include <mfd/syscon.h>
+
+#include "designware_eqos.h"
+
+#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16)
+#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17)
+
+/* Ethernet PHY interface selection in register SYSCFG Configuration
+ *-------------------------------------------------
+ * src   | BIT(23) | BIT(22) | BIT(21) | BIT(20) |
+ *-------------------------------------------------
+ * MII   |    0    |    0    |    0    |    1    |
+ *-------------------------------------------------
+ * GMII  |    0    |    0    |    0    |    0    |
+ *-------------------------------------------------
+ * RGMII |    0    |    0    |    1    |   n/a   |
+ *-------------------------------------------------
+ * RMII  |    1    |    0    |    0    |   n/a   |
+ *-------------------------------------------------
+ */
+#define SYSCFG_PMCR_ETH_SEL_MII BIT(20)
+#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21)
+#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23)
+#define SYSCFG_PMCR_ETH_SEL_GMII 0
+#define SYSCFG_MCU_ETH_SEL_MII 0
+#define SYSCFG_MCU_ETH_SEL_RMII 1
+
+/* SYSCFG PMC register fields and offsets */
+
+#define SYSCFG_MCU_ETH_MASK BIT(23)
+#define SYSCFG_MP1_ETH_MASK GENMASK(23, 16)
+#define SYSCFG_PMCCLRR_OFFSET 0x40
+
+struct eqos_stm32 {
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct regmap *regmap;
+ u32 mode_reg;
+ int eth_clk_sel_reg;
+ int eth_ref_clk_sel_reg;
+};
+
+static inline struct eqos_stm32 *to_stm32(struct eqos *eqos)
+{
+ return eqos->priv;
+}
+
+enum { CLK_STMMACETH, CLK_MAX_RX, CLK_MAX_TX, CLK_SYSCFG, };
+static const struct clk_bulk_data stm32_clks[] = {
+ [CLK_STMMACETH] = { .id = "stmmaceth" },
+ [CLK_MAX_RX] = { .id = "mac-clk-rx" },
+ [CLK_MAX_TX] = { .id = "mac-clk-tx" },
+ [CLK_SYSCFG] = { .id = "syscfg-clk" },
+};
+
+static unsigned long eqos_get_csr_clk_rate_stm32(struct eqos *eqos)
+{
+ return clk_get_rate(to_stm32(eqos)->clks[CLK_STMMACETH].clk);
+}
+
+static int eqos_set_mode_stm32(struct eqos_stm32 *priv, phy_interface_t interface)
+{
+ u32 val, reg = priv->mode_reg;
+ int ret;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = SYSCFG_PMCR_ETH_SEL_MII;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ val = SYSCFG_PMCR_ETH_SEL_GMII;
+ if (priv->eth_clk_sel_reg)
+ val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = SYSCFG_PMCR_ETH_SEL_RMII;
+ if (priv->eth_ref_clk_sel_reg)
+ val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = SYSCFG_PMCR_ETH_SEL_RGMII;
+ if (priv->eth_clk_sel_reg)
+ val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Need to update PMCCLRR (clear register) */
+ ret = regmap_write(priv->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
+ SYSCFG_MP1_ETH_MASK);
+ if (ret)
+ return -EIO;
+
+ /* Update PMCSETR (set register) */
+ regmap_update_bits(priv->regmap, reg, GENMASK(23, 16), val);
+
+ return 0;
+}
+
+static int eqos_init_stm32(struct device_d *dev, struct eqos *eqos)
+{
+ struct device_node *np = dev->device_node;
+ struct eqos_stm32 *priv = to_stm32(eqos);
+ struct clk_bulk_data *eth_ck;
+ int ret;
+
+ /* Gigabit Ethernet 125MHz clock selection. */
+ priv->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel");
+
+ /* Ethernet 50 MHz RMII clock selection */
+ priv->eth_ref_clk_sel_reg =
+ of_property_read_bool(np, "st,eth-ref-clk-sel");
+
+ priv->regmap = syscon_regmap_lookup_by_phandle(dev->device_node,
+ "st,syscon");
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "Could not get st,syscon node\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = of_property_read_u32_index(dev->device_node, "st,syscon",
+ 1, &priv->mode_reg);
+ if (ret) {
+ dev_err(dev, "Can't get sysconfig mode offset (%s)\n",
+ strerror(-ret));
+ return -EINVAL;
+ }
+
+ ret = eqos_set_mode_stm32(priv, eqos->interface);
+ if (ret)
+ dev_warn(dev, "Configuring syscfg failed: %s\n", strerror(-ret));
+
+ priv->num_clks = ARRAY_SIZE(stm32_clks) + 1;
+ priv->clks = xmalloc(priv->num_clks * sizeof(*priv->clks));
+ memcpy(priv->clks, stm32_clks, sizeof stm32_clks);
+
+ ret = clk_bulk_get(dev, ARRAY_SIZE(stm32_clks), priv->clks);
+ if (ret) {
+ dev_err(dev, "Failed to get clks: %s\n", strerror(-ret));
+ return ret;
+ }
+
+ eth_ck = &priv->clks[ARRAY_SIZE(stm32_clks)];
+ eth_ck->id = "eth-ck";
+ eth_ck->clk = clk_get(dev, eth_ck->id);
+ if (IS_ERR(eth_ck->clk)) {
+ priv->num_clks--;
+ dev_dbg(dev, "No phy clock provided. Continuing without.\n");
+ }
+
+ return 0;
+}
+
+static int eqos_start_stm32(struct eth_device *edev)
+{
+ struct eqos *eqos = edev->priv;
+ struct eqos_stm32 *priv = to_stm32(eqos);
+ int ret;
+
+ ret = clk_bulk_enable(priv->num_clks, priv->clks);
+ if (ret < 0) {
+ eqos_err(eqos, "clk_bulk_enable() failed: %s\n",
+ strerror(-ret));
+ return ret;
+ }
+
+ udelay(10);
+
+ ret = eqos_start(edev);
+ if (ret)
+ goto err_stop_clks;
+
+ return 0;
+
+err_stop_clks:
+ clk_bulk_disable(priv->num_clks, priv->clks);
+
+ return ret;
+}
+
+static void eqos_stop_stm32(struct eth_device *edev)
+{
+ struct eqos_stm32 *priv = to_stm32(edev->priv);
+
+ clk_bulk_disable(priv->num_clks, priv->clks);
+}
+
+static struct eqos_ops stm32_ops = {
+ .init = eqos_init_stm32,
+ .get_ethaddr = eqos_get_ethaddr,
+ .set_ethaddr = eqos_set_ethaddr,
+ .start = eqos_start_stm32,
+ .stop = eqos_stop_stm32,
+ .adjust_link = eqos_adjust_link,
+ .get_csr_clk_rate = eqos_get_csr_clk_rate_stm32,
+
+ .mdio_wait_us = 10 * USEC_PER_MSEC,
+ .clk_csr = EQOS_MDIO_ADDR_CR_250_300,
+ .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
+};
+
+static int eqos_probe_stm32(struct device_d *dev)
+{
+ return eqos_probe(dev, &stm32_ops, xzalloc(sizeof(struct eqos_stm32)));
+}
+
+static void eqos_remove_stm32(struct device_d *dev)
+{
+ struct eqos_stm32 *priv = to_stm32(dev->priv);
+
+ eqos_remove(dev);
+
+ clk_bulk_put(priv->num_clks, priv->clks);
+}
+
+static const struct of_device_id eqos_stm32_ids[] = {
+ { .compatible = "st,stm32mp1-dwmac" },
+ { /* sentinel */ }
+};
+
+static struct driver_d eqos_stm32_driver = {
+ .name = "eqos-stm32",
+ .probe = eqos_probe_stm32,
+ .remove = eqos_remove_stm32,
+ .of_compatible = DRV_OF_COMPAT(eqos_stm32_ids),
+};
+device_platform_driver(eqos_stm32_driver);
diff --git a/drivers/net/designware_tegra186.c b/drivers/net/designware_tegra186.c
new file mode 100644
index 0000000000..58484d4095
--- /dev/null
+++ b/drivers/net/designware_tegra186.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION.
+ * Copyright (c) 2019, Ahmad Fatoum, Pengutronix
+ *
+ * Portions based on U-Boot's rtl8169.c and dwc_eth_qos.
+ */
+
+#include <common.h>
+#include <init.h>
+#include <gpio.h>
+#include <of_gpio.h>
+#include <linux/clk.h>
+#include <net.h>
+#include <linux/reset.h>
+
+#include "designware_eqos.h"
+
+/* These registers are Tegra186-specific */
+#define EQOS_TEGRA186_REGS_BASE 0x8800
+struct eqos_tegra186_regs {
+ uint32_t sdmemcomppadctrl; /* 0x8800 */
+ uint32_t auto_cal_config; /* 0x8804 */
+ uint32_t unused_8808; /* 0x8808 */
+ uint32_t auto_cal_status; /* 0x880c */
+};
+
+#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
+
+#define EQOS_AUTO_CAL_CONFIG_START BIT(31)
+#define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29)
+
+#define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+struct eqos_tegra186 {
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *rst;
+ struct eqos_tegra186_regs __iomem *tegra186_regs;
+ int phy_reset_gpio;
+};
+
+static inline struct eqos_tegra186 *to_tegra186(struct eqos *eqos)
+{
+ return eqos->priv;
+}
+
+enum { CLK_SLAVE_BUS, CLK_MASTER_BUS, CLK_RX, CLK_PTP_REF, CLK_TX };
+static const struct clk_bulk_data tegra186_clks[] = {
+ [CLK_SLAVE_BUS] = { .id = "slave_bus" },
+ [CLK_MASTER_BUS] = { .id = "master_bus" },
+ [CLK_RX] = { .id = "rx" },
+ [CLK_PTP_REF] = { .id = "ptp_ref" },
+ [CLK_TX] = { .id = "tx" },
+};
+
+static int eqos_clks_set_rate_tegra186(struct eqos_tegra186 *priv)
+{
+ return clk_set_rate(priv->clks[CLK_PTP_REF].clk, 125 * 1000 * 1000);
+}
+
+static int eqos_reset_tegra186(struct eqos_tegra186 *priv, bool reset)
+{
+ int ret;
+
+ if (reset) {
+ reset_control_assert(priv->rst);
+ gpio_set_value(priv->phy_reset_gpio, 1);
+ return 0;
+ }
+
+ gpio_set_value(priv->phy_reset_gpio, 1);
+
+ udelay(2);
+
+ gpio_set_value(priv->phy_reset_gpio, 0);
+
+ ret = reset_control_assert(priv->rst);
+ if (ret < 0)
+ return ret;
+
+ udelay(2);
+
+ return reset_control_deassert(priv->rst);
+}
+
+static int eqos_calibrate_pads_tegra186(struct eqos *eqos)
+{
+ struct eqos_tegra186 *priv = to_tegra186(eqos);
+ u32 active;
+ int ret;
+
+ setbits_le32(&priv->tegra186_regs->sdmemcomppadctrl,
+ EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);
+
+ udelay(1);
+
+ setbits_le32(&priv->tegra186_regs->auto_cal_config,
+ EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);
+
+ ret = readl_poll_timeout(&priv->tegra186_regs->auto_cal_status, active,
+ active & EQOS_AUTO_CAL_STATUS_ACTIVE,
+ 10000);
+ if (ret) {
+ eqos_err(eqos, "calibrate didn't start\n");
+ goto failed;
+ }
+
+ ret = readl_poll_timeout(&priv->tegra186_regs->auto_cal_status, active,
+ !(active & EQOS_AUTO_CAL_STATUS_ACTIVE),
+ 10000);
+ if (ret) {
+ eqos_err(eqos, "calibrate didn't finish\n");
+ goto failed;
+ }
+
+failed:
+ clrbits_le32(&priv->tegra186_regs->sdmemcomppadctrl,
+ EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);
+
+ return ret;
+}
+
+static int eqos_calibrate_link_tegra186(struct eqos *eqos, unsigned speed)
+{
+ struct eqos_tegra186 *priv = to_tegra186(eqos);
+ int ret = 0;
+ unsigned long rate;
+ bool calibrate;
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 125 * 1000 * 1000;
+ calibrate = true;
+ break;
+ case SPEED_100:
+ rate = 25 * 1000 * 1000;
+ calibrate = true;
+ break;
+ case SPEED_10:
+ rate = 2500 * 1000;
+ calibrate = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (calibrate) {
+ ret = eqos_calibrate_pads_tegra186(eqos);
+ if (ret)
+ return ret;
+ } else {
+ clrbits_le32(&priv->tegra186_regs->auto_cal_config,
+ EQOS_AUTO_CAL_CONFIG_ENABLE);
+ }
+
+ ret = clk_set_rate(priv->clks[CLK_TX].clk, rate);
+ if (ret < 0) {
+ eqos_err(eqos, "clk_set_rate(tx_clk, %lu) failed: %d\n", rate, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static unsigned long eqos_get_csr_clk_rate_tegra186(struct eqos *eqos)
+{
+ return clk_get_rate(to_tegra186(eqos)->clks[CLK_SLAVE_BUS].clk);
+}
+
+static int eqos_set_ethaddr_tegra186(struct eth_device *edev, const unsigned char *mac)
+{
+ struct eqos *eqos = edev->priv;
+
+ /*
+ * This function may be called before start() or after stop(). At that
+ * time, on at least some configurations of the EQoS HW, all clocks to
+ * the EQoS HW block will be stopped, and a reset signal applied. If
+ * any register access is attempted in this state, bus timeouts or CPU
+ * hangs may occur. This check prevents that.
+ *
+ * A simple solution to this problem would be to not implement
+ * set_ethaddr(), since start() always writes the MAC address into HW
+ * anyway. However, it is desirable to implement set_ethaddr() to
+ * support software that runs after the bootloader and expects the MAC
+ * address to already be programmed into the EQoS registers. This must
+ * happen irrespective of whether the user (or scripts) actually made
+ * use of the EQoS device, and hence irrespective of whether start()
+ * was ever called.
+ *
+ * Note that this requirement by subsequent SW is not valid for
+ * Tegra186, and is likely not valid for any non-PCI instantiation of
+ * the EQoS HW block. This function is implemented solely as
+ * future-proofing with the expectation the driver will eventually be
+ * ported to some system where the expectation above is true.
+ */
+
+ if (!eqos->started) {
+ memcpy(eqos->macaddr, mac, 6);
+ return 0;
+ }
+
+ return eqos_set_ethaddr(edev, mac);
+}
+
+static int eqos_init_tegra186(struct device_d *dev, struct eqos *eqos)
+{
+ struct eqos_tegra186 *priv = to_tegra186(eqos);
+ int phy_reset;
+ int ret;
+
+ priv->tegra186_regs = IOMEM(eqos->regs + EQOS_TEGRA186_REGS_BASE);
+
+ priv->rst = reset_control_get(dev, "eqos");
+ if (IS_ERR(priv->rst)) {
+ ret = PTR_ERR(priv->rst);
+ dev_err(dev, "reset_get_by_name(rst) failed: %s\n", strerror(-ret));
+ return ret;
+ }
+
+ phy_reset = of_get_named_gpio(dev->device_node, "phy-reset-gpios", 0);
+ if (gpio_is_valid(phy_reset)) {
+ ret = gpio_request(phy_reset, "phy-reset");
+ if (ret)
+ goto release_res;
+
+ priv->phy_reset_gpio = phy_reset;
+ }
+
+ priv->clks = xmemdup(tegra186_clks, sizeof(tegra186_clks));
+ priv->num_clks = ARRAY_SIZE(tegra186_clks);
+
+ return 0;
+
+release_res:
+ reset_control_put(priv->rst);
+ return ret;
+}
+
+static int eqos_start_tegra186(struct eth_device *edev)
+{
+ struct eqos *eqos = edev->priv;
+ struct eqos_tegra186 *priv = to_tegra186(eqos);
+ int ret;
+
+ ret = clk_bulk_enable(priv->num_clks, priv->clks);
+ if (ret < 0) {
+ eqos_err(eqos, "clk_bulk_enable() failed: %s\n", strerror(-ret));
+ return ret;
+ }
+
+ ret = eqos_clks_set_rate_tegra186(priv);
+ if (ret < 0) {
+ eqos_err(eqos, "clks_set_rate() failed: %s\n", strerror(-ret));
+ goto err;
+ }
+
+ ret = eqos_reset_tegra186(priv, false);
+ if (ret < 0) {
+ eqos_err(eqos, "reset(0) failed: %s\n", strerror(-ret));
+ goto err_stop_clks;
+ }
+
+ udelay(10);
+
+ ret = eqos_start(edev);
+ if (ret)
+ goto err_stop_resets;
+
+ return 0;
+
+err_stop_resets:
+ eqos_reset_tegra186(priv, true);
+err_stop_clks:
+ clk_bulk_disable(priv->num_clks, priv->clks);
+err:
+ return ret;
+}
+
+static void eqos_stop_tegra186(struct eth_device *edev)
+{
+ struct eqos_tegra186 *priv = to_tegra186(edev->priv);
+
+ eqos_reset_tegra186(priv, true);
+
+ clk_bulk_disable(priv->num_clks, priv->clks);
+}
+
+static void eqos_adjust_link_tegra186(struct eth_device *edev)
+{
+ struct eqos *eqos = edev->priv;
+ unsigned speed = edev->phydev->speed;
+ int ret;
+
+ eqos_adjust_link(edev);
+
+ ret = eqos_calibrate_link_tegra186(eqos, speed);
+ if (ret < 0) {
+ eqos_err(eqos, "eqos_calibrate_link_tegra186() failed: %d\n", ret);
+ return;
+ }
+}
+
+static const struct eqos_ops tegra186_ops = {
+ .init = eqos_init_tegra186,
+ .get_ethaddr = eqos_get_ethaddr,
+ .set_ethaddr = eqos_set_ethaddr_tegra186,
+ .start = eqos_start_tegra186,
+ .stop = eqos_stop_tegra186,
+ .adjust_link = eqos_adjust_link_tegra186,
+ .get_csr_clk_rate = eqos_get_csr_clk_rate_tegra186,
+
+ .mdio_wait_us = 10,
+ .clk_csr = EQOS_MDIO_ADDR_CR_20_35,
+ .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
+};
+
+static int eqos_probe_tegra186(struct device_d *dev)
+{
+ return eqos_probe(dev, &tegra186_ops, xzalloc(sizeof(struct eqos_tegra186)));
+}
+
+static void eqos_remove_tegra186(struct device_d *dev)
+{
+ struct eqos_tegra186 *priv = to_tegra186(dev->priv);
+
+ eqos_remove(dev);
+
+ clk_bulk_put(priv->num_clks, priv->clks);
+
+ gpio_free(priv->phy_reset_gpio);
+ reset_control_put(priv->rst);
+}
+
+static const struct of_device_id eqos_tegra186_ids[] = {
+ { .compatible = "nvidia,tegra186-eqos" },
+ { /* sentinel */ }
+};
+
+static struct driver_d eqos_tegra186_driver = {
+ .name = "eqos-tegra186",
+ .probe = eqos_probe_tegra186,
+ .remove = eqos_remove_tegra186,
+ .of_compatible = DRV_OF_COMPAT(eqos_tegra186_ids),
+};
+device_platform_driver(eqos_tegra186_driver);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index c28a6d4e43..968342b281 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -51,4 +51,12 @@ config EEPROM_93XX46
supports both read and write commands and also the command to
erase the whole EEPROM.
+config STM32_BSEC
+ tristate "STM32 Boot and security and OTP control"
+ depends on ARCH_STM32MP
+ depends on OFDEVICE
+ help
+ This adds support for the STM32 OTP controller. Reads and writes
+ go to the shadow RAM, not to the OTP fuses themselves.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index abf9dae429..7101c5aca4 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -16,4 +16,7 @@ obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
obj-$(CONFIG_EEPROM_93XX46) += nvmem_eeprom_93xx46.o
-nvmem_eeprom_93xx46-y := eeprom_93xx46.o \ No newline at end of file
+nvmem_eeprom_93xx46-y := eeprom_93xx46.o
+
+obj-$(CONFIG_STM32_BSEC) += nvmem_bsec.o
+nvmem_bsec-y := bsec.o
diff --git a/drivers/nvmem/bsec.c b/drivers/nvmem/bsec.c
new file mode 100644
index 0000000000..8235d468d1
--- /dev/null
+++ b/drivers/nvmem/bsec.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, STMicroelectronics - All Rights Reserved
+ * Copyright (c) 2019 Ahmad Fatoum, Pengutronix
+ */
+
+#include <common.h>
+#include <driver.h>
+#include <malloc.h>
+#include <xfuncs.h>
+#include <errno.h>
+#include <init.h>
+#include <net.h>
+#include <io.h>
+#include <of.h>
+#include <regmap.h>
+#include <mach/bsec.h>
+#include <machine_id.h>
+#include <linux/nvmem-provider.h>
+
+#define BSEC_OTP_SERIAL 13
+
+struct bsec_priv {
+ struct regmap *map;
+ u32 svc_id;
+ struct device_d dev;
+ struct regmap_config map_config;
+ struct nvmem_config config;
+};
+
+struct stm32_bsec_data {
+ unsigned long svc_id;
+ int num_regs;
+};
+
+static int bsec_smc(struct bsec_priv *priv, u8 op, enum bsec_field field,
+ unsigned data2, unsigned *val)
+{
+ enum bsec_smc ret = stm32mp_smc(priv->svc_id, op, field / 4, data2, val);
+
+ switch (ret) {
+ case BSEC_SMC_OK:
+ return 0;
+ case BSEC_SMC_ERROR:
+ case BSEC_SMC_DISTURBED:
+ case BSEC_SMC_PROG_FAIL:
+ case BSEC_SMC_LOCK_FAIL:
+ case BSEC_SMC_WRITE_FAIL:
+ case BSEC_SMC_SHADOW_FAIL:
+ return -EIO;
+ case BSEC_SMC_INVALID_PARAM:
+ return -EINVAL;
+ case BSEC_SMC_TIMEOUT:
+ return -ETIMEDOUT;
+ }
+
+ return -ENXIO;
+}
+
+static int stm32_bsec_read_shadow(void *ctx, unsigned reg, unsigned *val)
+{
+ return bsec_smc(ctx, BSEC_SMC_READ_SHADOW, reg, 0, val);
+}
+
+static int stm32_bsec_reg_write_shadow(void *ctx, unsigned reg, unsigned val)
+{
+ return bsec_smc(ctx, BSEC_SMC_WRITE_SHADOW, reg, val, NULL);
+}
+
+static struct regmap_bus stm32_bsec_regmap_bus = {
+ .reg_write = stm32_bsec_reg_write_shadow,
+ .reg_read = stm32_bsec_read_shadow,
+};
+
+static int stm32_bsec_write(struct device_d *dev, int offset,
+ const void *val, int bytes)
+{
+ struct bsec_priv *priv = dev->parent->priv;
+
+ return regmap_bulk_write(priv->map, offset, val, bytes);
+}
+
+static int stm32_bsec_read(struct device_d *dev, int offset,
+ void *val, int bytes)
+{
+ struct bsec_priv *priv = dev->parent->priv;
+
+ return regmap_bulk_read(priv->map, offset, val, bytes);
+}
+
+static const struct nvmem_bus stm32_bsec_nvmem_bus = {
+ .write = stm32_bsec_write,
+ .read = stm32_bsec_read,
+};
+
+static void stm32_bsec_set_unique_machine_id(struct regmap *map)
+{
+ u32 unique_id[3];
+ int ret;
+
+ ret = regmap_bulk_read(map, BSEC_OTP_SERIAL * 4,
+ unique_id, sizeof(unique_id));
+ if (ret)
+ return;
+
+ machine_id_set_hashable(unique_id, sizeof(unique_id));
+}
+
+static int stm32_bsec_read_mac(struct regmap *map, int offset, u8 *mac)
+{
+ u8 res[8];
+ int ret;
+
+ ret = regmap_bulk_read(map, offset * 4, res, 8);
+ if (ret)
+ return ret;
+
+ memcpy(mac, res, ETH_ALEN);
+ return 0;
+}
+
+static void stm32_bsec_init_dt(struct bsec_priv *priv)
+{
+ struct device_node *node = priv->dev.parent->device_node;
+ struct device_node *rnode;
+ u32 phandle, offset;
+ char mac[ETH_ALEN];
+ const __be32 *prop;
+
+ int len;
+ int ret;
+
+ if (!node)
+ return;
+
+ prop = of_get_property(node, "barebox,provide-mac-address", &len);
+ if (!prop)
+ return;
+
+ if (len != 2 * sizeof(__be32))
+ return;
+
+ phandle = be32_to_cpup(prop++);
+
+ rnode = of_find_node_by_phandle(phandle);
+ offset = be32_to_cpup(prop++);
+
+ ret = stm32_bsec_read_mac(priv->map, offset, mac);
+ if (ret) {
+ dev_warn(&priv->dev, "error setting MAC address: %s\n",
+ strerror(-ret));
+ return;
+ }
+
+ of_eth_register_ethaddr(rnode, mac);
+}
+
+static int stm32_bsec_probe(struct device_d *dev)
+{
+ struct bsec_priv *priv;
+ int ret = 0;
+ const struct stm32_bsec_data *data;
+ struct nvmem_device *nvmem;
+
+ ret = dev_get_drvdata(dev, (const void **)&data);
+ if (ret)
+ return ret;
+
+ priv = xzalloc(sizeof(*priv));
+
+ priv->svc_id = data->svc_id;
+
+ dev_set_name(&priv->dev, "bsec");
+ priv->dev.parent = dev;
+ register_device(&priv->dev);
+
+ priv->map_config.reg_bits = 32;
+ priv->map_config.val_bits = 32;
+ priv->map_config.reg_stride = 4;
+ priv->map_config.max_register = data->num_regs;
+
+ priv->map = regmap_init(dev, &stm32_bsec_regmap_bus, priv, &priv->map_config);
+ if (IS_ERR(priv->map))
+ return PTR_ERR(priv->map);
+
+ priv->config.name = "stm32-bsec";
+ priv->config.dev = dev;
+ priv->config.stride = 4;
+ priv->config.word_size = 4;
+ priv->config.size = data->num_regs;
+ priv->config.bus = &stm32_bsec_nvmem_bus;
+ dev->priv = priv;
+
+ nvmem = nvmem_register(&priv->config);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ if (IS_ENABLED(CONFIG_MACHINE_ID))
+ stm32_bsec_set_unique_machine_id(priv->map);
+
+ stm32_bsec_init_dt(priv);
+
+ return 0;
+}
+
+static struct stm32_bsec_data stm32mp15_bsec_data = {
+ .num_regs = 95 * 4,
+ .svc_id = STM32_SMC_BSEC,
+};
+
+static __maybe_unused struct of_device_id stm32_bsec_dt_ids[] = {
+ { .compatible = "st,stm32mp15-bsec", .data = &stm32mp15_bsec_data },
+ { /* sentinel */ }
+};
+
+static struct driver_d stm32_bsec_driver = {
+ .name = "stm32_bsec",
+ .probe = stm32_bsec_probe,
+ .of_compatible = DRV_OF_COMPAT(stm32_bsec_dt_ids),
+};
+postcore_platform_driver(stm32_bsec_driver);
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 9bc259f84c..b527114f1b 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -260,7 +260,7 @@ static int pinctrl_at91_pio4_gpiochip_add(struct device_d *dev,
return ret;
}
- dev_info(dev, "gpio driver registered\n");
+ dev_dbg(dev, "gpio driver registered\n");
return 0;
}
@@ -290,7 +290,7 @@ static int pinctrl_at91_pio4_probe(struct device_d *dev)
if (ret)
return ret;
- dev_info(dev, "pinctrl driver registered\n");
+ dev_dbg(dev, "pinctrl driver registered\n");
if (of_get_property(np, "gpio-controller", NULL))
return pinctrl_at91_pio4_gpiochip_add(dev, pinctrl);
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
index 5fd5740e81..b8e9b60372 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/pinctrl-bcm2835.c
@@ -171,7 +171,7 @@ static int bcm2835_gpio_probe(struct device_d *dev)
goto err;
}
- dev_info(dev, "probed gpiochip%d with base %d\n", dev->id, bcmgpio->chip.base);
+ dev_dbg(dev, "probed gpiochip%d with base %d\n", dev->id, bcmgpio->chip.base);
if (IS_ENABLED(CONFIG_PINCTRL)) {
ret = pinctrl_register(&bcmgpio->pctl);
diff --git a/drivers/pinctrl/pinctrl-stm32.c b/drivers/pinctrl/pinctrl-stm32.c
index 7f04cea50b..cdaed510c5 100644
--- a/drivers/pinctrl/pinctrl-stm32.c
+++ b/drivers/pinctrl/pinctrl-stm32.c
@@ -87,110 +87,128 @@ static inline u32 stm32_gpio_get_alt(u32 function)
return 0;
}
-static int stm32_pinctrl_set_state(struct pinctrl_device *pdev, struct device_node *group)
+static int __stm32_pinctrl_set_state(struct device_d *dev, struct device_node *pins)
{
- struct stm32_pinctrl *pinctrl = to_stm32_pinctrl(pdev);
- struct device_node *pins;
int ret;
- ret = hwspinlock_lock_timeout(&pinctrl->hws, 10);
- if (ret == -ETIMEDOUT) {
- dev_err(pdev->dev, "hw spinlock timeout\n");
- return ret;
+ int num_pins = 0, i;
+ u32 slew_rate;
+ bool adjust_slew_rate = false;
+ enum stm32_pin_bias bias = -1;
+ enum stm32_pin_out_type out_type = -1;
+ enum { PIN_INPUT, PIN_OUTPUT_LOW, PIN_OUTPUT_HIGH } dir = -1;
+
+ of_get_property(pins, "pinmux", &num_pins);
+ num_pins /= sizeof(__be32);
+ if (!num_pins) {
+ dev_err(dev, "Invalid pinmux property in %s\n",
+ pins->full_name);
+ return -EINVAL;
}
- for_each_child_of_node(group, pins) {
- int num_pins = 0, i;
- u32 slew_rate;
- bool adjust_slew_rate = false;
- enum stm32_pin_bias bias = -1;
- enum stm32_pin_out_type out_type = -1;
- enum { PIN_INPUT, PIN_OUTPUT_LOW, PIN_OUTPUT_HIGH } dir = -1;
-
- of_get_property(pins, "pinmux", &num_pins);
- num_pins /= sizeof(__be32);
- if (!num_pins) {
- dev_err(pdev->dev, "Invalid pinmux property in %s\n",
- pins->full_name);
- return -EINVAL;
- }
-
- ret = of_property_read_u32(pins, "slew-rate", &slew_rate);
- if (!ret)
- adjust_slew_rate = true;
-
- if (of_get_property(pins, "bias-disable", NULL))
- bias = STM32_PIN_NO_BIAS;
- else if (of_get_property(pins, "bias-pull-up", NULL))
- bias = STM32_PIN_PULL_UP;
- else if (of_get_property(pins, "bias-pull-down", NULL))
- bias = STM32_PIN_PULL_DOWN;
+ ret = of_property_read_u32(pins, "slew-rate", &slew_rate);
+ if (!ret)
+ adjust_slew_rate = true;
+
+ if (of_get_property(pins, "bias-disable", NULL))
+ bias = STM32_PIN_NO_BIAS;
+ else if (of_get_property(pins, "bias-pull-up", NULL))
+ bias = STM32_PIN_PULL_UP;
+ else if (of_get_property(pins, "bias-pull-down", NULL))
+ bias = STM32_PIN_PULL_DOWN;
+
+ if (of_get_property(pins, "drive-push-pull", NULL))
+ out_type = STM32_PIN_OUT_PUSHPULL;
+ else if (of_get_property(pins, "drive-open-drain", NULL))
+ out_type = STM32_PIN_OUT_OPENDRAIN;
+
+ if (of_get_property(pins, "input-enable", NULL))
+ dir = PIN_INPUT;
+ else if (of_get_property(pins, "output-low", NULL))
+ dir = PIN_OUTPUT_LOW;
+ else if (of_get_property(pins, "output-high", NULL))
+ dir = PIN_OUTPUT_HIGH;
+
+ dev_dbg(dev, "%s: multiplexing %d pins\n", pins->full_name, num_pins);
+
+ for (i = 0; i < num_pins; i++) {
+ struct stm32_gpio_bank *bank = NULL;
+ u32 pinfunc, mode, alt;
+ unsigned func;
+ int offset;
+
+ ret = of_property_read_u32_index(pins, "pinmux",
+ i, &pinfunc);
+ if (ret)
+ return ret;
- if (of_get_property(pins, "drive-push-pull", NULL))
- out_type = STM32_PIN_OUT_PUSHPULL;
- else if (of_get_property(pins, "drive-open-drain", NULL))
- out_type = STM32_PIN_OUT_OPENDRAIN;
+ func = STM32_GET_PIN_FUNC(pinfunc);
+ offset = stm32_gpio_pin(STM32_GET_PIN_NO(pinfunc), &bank);
+ if (offset < 0)
+ return -ENODEV;
- if (of_get_property(pins, "input-enable", NULL))
- dir = PIN_INPUT;
- else if (of_get_property(pins, "output-low", NULL))
- dir = PIN_OUTPUT_LOW;
- else if (of_get_property(pins, "output-high", NULL))
- dir = PIN_OUTPUT_HIGH;
+ mode = stm32_gpio_get_mode(func);
+ alt = stm32_gpio_get_alt(func);
- dev_dbg(pdev->dev, "%s: multiplexing %d pins\n",
- pins->full_name, num_pins);
+ dev_dbg(dev, "configuring port %s pin %u with:\n\t"
+ "fn %u, mode %u, alt %u\n",
+ bank->name, offset, func, mode, alt);
- for (i = 0; i < num_pins; i++) {
- struct stm32_gpio_bank *bank = NULL;
- u32 pinfunc, mode, alt;
- unsigned func;
- int offset;
+ clk_enable(bank->clk);
- ret = of_property_read_u32_index(pins, "pinmux",
- i, &pinfunc);
- if (ret)
- return ret;
+ __stm32_pmx_set_mode(bank->base, offset, mode, alt);
- func = STM32_GET_PIN_FUNC(pinfunc);
- offset = stm32_gpio_pin(STM32_GET_PIN_NO(pinfunc), &bank);
- if (offset < 0)
- return -ENODEV;
+ if (adjust_slew_rate)
+ __stm32_pmx_set_speed(bank->base, offset, slew_rate);
- dev_dbg(pdev->dev, "configuring port %s pin %u with:\n\t"
- "fn %u, mode %u, alt %u\n",
- bank->name, offset, func, mode, alt);
+ if (bias != -1)
+ __stm32_pmx_set_bias(bank->base, offset, bias);
- mode = stm32_gpio_get_mode(func);
- alt = stm32_gpio_get_alt(func);
+ if (out_type != -1)
+ __stm32_pmx_set_output_type(bank->base, offset, out_type);
- clk_enable(bank->clk);
+ if (dir == PIN_INPUT)
+ __stm32_pmx_gpio_input(bank->base, offset);
+ else if (dir == PIN_OUTPUT_LOW)
+ __stm32_pmx_gpio_output(bank->base, offset, 0);
+ else if (dir == PIN_OUTPUT_HIGH)
+ __stm32_pmx_gpio_output(bank->base, offset, 1);
- __stm32_pmx_set_mode(bank->base, offset, mode, alt);
+ clk_disable(bank->clk);
+ }
- if (adjust_slew_rate)
- __stm32_pmx_set_speed(bank->base, offset, slew_rate);
+ return 0;
+}
- if (bias != -1)
- __stm32_pmx_set_bias(bank->base, offset, bias);
+static int stm32_pinctrl_set_state(struct pinctrl_device *pdev, struct device_node *np)
+{
+ struct stm32_pinctrl *pinctrl = to_stm32_pinctrl(pdev);
+ struct device_d *dev = pdev->dev;
+ struct device_node *pins;
+ void *prop;
+ int ret;
- if (out_type != -1)
- __stm32_pmx_set_output_type(bank->base, offset, out_type);
+ ret = hwspinlock_lock_timeout(&pinctrl->hws, 10);
+ if (ret == -ETIMEDOUT) {
+ dev_err(dev, "hw spinlock timeout\n");
+ return ret;
+ }
- if (dir == PIN_INPUT)
- __stm32_pmx_gpio_input(bank->base, offset);
- else if (dir == PIN_OUTPUT_LOW)
- __stm32_pmx_gpio_output(bank->base, offset, 0);
- else if (dir == PIN_OUTPUT_HIGH)
- __stm32_pmx_gpio_output(bank->base, offset, 1);
+ prop = of_find_property(np, "pinmux", NULL);
+ if (prop) {
+ ret = __stm32_pinctrl_set_state(dev, np);
+ goto out;
+ }
- clk_disable(bank->clk);
- }
+ for_each_child_of_node(np, pins) {
+ ret = __stm32_pinctrl_set_state(dev, pins);
+ if (ret)
+ goto out;
}
+out:
hwspinlock_unlock(&pinctrl->hws);
-
- return 0;
+ return ret;
}
/* GPIO functions */
@@ -401,7 +419,7 @@ static int stm32_pinctrl_probe(struct device_d *dev)
}
}
- dev_info(dev, "pinctrl/gpio driver registered\n");
+ dev_dbg(dev, "pinctrl/gpio driver registered\n");
return 0;
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c734ef5ef9..28bd69a2a5 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -21,6 +21,15 @@ config REGULATOR_PFUZE
depends on I2C
depends on ARCH_IMX6
+config REGULATOR_STPMIC1
+ tristate "STMicroelectronics STPMIC1 PMIC Regulators"
+ depends on MFD_STPMIC1
+ help
+ This driver supports STMicroelectronics STPMIC1 PMIC voltage
+ regulators and switches. The STPMIC1 regulators supply power to
+ an application processor as well as to external system
+ peripherals such as DDR, Flash memories and system devices.
+
config REGULATOR_ANATOP
tristate "Freescale i.MX on-chip ANATOP LDO regulators"
depends on MFD_SYSCON
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b2fc5b79b6..e27e155cf6 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_REGULATOR) += core.o helpers.o
+obj-$(CONFIG_OFDEVICE) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED) += fixed.o
obj-$(CONFIG_REGULATOR_BCM283X) += bcm2835.o
obj-$(CONFIG_REGULATOR_PFUZE) += pfuze.o
-obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o \ No newline at end of file
+obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
+obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index f22d21b35d..c4877cecf7 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -184,3 +184,189 @@ int regulator_list_voltage_linear(struct regulator_dev *rdev,
return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
}
EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
+
+/**
+ * regulator_desc_list_voltage_linear_range - List voltages for linear ranges
+ *
+ * @desc: Regulator desc for the regulator whose voltages are to be listed
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with a series of simple linear mappings between voltages
+ * and selectors that have set linear_ranges in the regulator descriptor
+ * can use this function prior to regulator registration to list voltages.
+ * This is useful when voltages need to be listed during device-tree
+ * parsing.
+ */
+int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
+ unsigned int selector)
+{
+ const struct regulator_linear_range *range;
+ int i;
+
+ if (!desc->n_linear_ranges) {
+ BUG_ON(!desc->n_linear_ranges);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < desc->n_linear_ranges; i++) {
+ range = &desc->linear_ranges[i];
+
+ if (!(selector >= range->min_sel &&
+ selector <= range->max_sel))
+ continue;
+
+ selector -= range->min_sel;
+
+ return range->min_uV + (range->uV_step * selector);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_desc_list_voltage_linear_range);
+
+/**
+ * regulator_list_voltage_linear_range - List voltages for linear ranges
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with a series of simple linear mappings between voltages
+ * and selectors can set linear_ranges in the regulator descriptor and
+ * then use this function as their list_voltage() operation,
+ */
+int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ return regulator_desc_list_voltage_linear_range(rdev->desc, selector);
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_linear_range);
+
+/**
+ * regulator_map_voltage_linear_range - map_voltage() for multiple linear ranges
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers providing linear_ranges in their descriptor can use this as
+ * their map_voltage() callback.
+ */
+int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ const struct regulator_linear_range *range;
+ int ret = -EINVAL;
+ int voltage, i;
+
+ if (!rdev->desc->n_linear_ranges) {
+ BUG_ON(!rdev->desc->n_linear_ranges);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+ int linear_max_uV;
+
+ range = &rdev->desc->linear_ranges[i];
+ linear_max_uV = range->min_uV +
+ (range->max_sel - range->min_sel) * range->uV_step;
+
+ if (!(min_uV <= linear_max_uV && max_uV >= range->min_uV))
+ continue;
+
+ if (min_uV <= range->min_uV)
+ min_uV = range->min_uV;
+
+ /* range->uV_step == 0 means fixed voltage range */
+ if (range->uV_step == 0) {
+ ret = 0;
+ } else {
+ ret = DIV_ROUND_UP(min_uV - range->min_uV,
+ range->uV_step);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret += range->min_sel;
+
+ /*
+ * Map back into a voltage to verify we're still in bounds.
+ * If we are not, then continue checking rest of the ranges.
+ */
+ voltage = rdev->desc->ops->list_voltage(rdev, ret);
+ if (voltage >= min_uV && voltage <= max_uV)
+ break;
+ }
+
+ if (i == rdev->desc->n_linear_ranges)
+ return -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_linear_range);
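
As an aside for readers new to these helpers, a minimal sketch of how a driver is expected to wire them up is shown below. It is illustrative only and not part of the patch: the ranges, values and names are invented, while the descriptor fields and helper names are the same ones the stpmic1 driver further down in this series uses.

	/* Illustrative sketch only, not part of the patch. */
	static const struct regulator_linear_range demo_ranges[] = {
		/* selectors 0..15: 800 mV + sel * 50 mV */
		REGULATOR_LINEAR_RANGE(800000, 0, 15, 50000),
		/* selectors 16..31: fixed 1.6 V */
		REGULATOR_LINEAR_RANGE(1600000, 16, 31, 0),
	};

	static const struct regulator_ops demo_ops = {
		.list_voltage = regulator_list_voltage_linear_range,
		.map_voltage  = regulator_map_voltage_linear_range,
	};

	static const struct regulator_desc demo_desc = {
		.n_voltages      = 32,
		.ops             = &demo_ops,
		.linear_ranges   = demo_ranges,
		.n_linear_ranges = ARRAY_SIZE(demo_ranges),
	};

With this descriptor, regulator_desc_list_voltage_linear_range(&demo_desc, 10) evaluates to 800000 + 10 * 50000 = 1300000 uV, and mapping a 1.25 V to 1.35 V request resolves to selector 9 (1.25 V).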
+
+/**
+ * regulator_get_voltage_sel_regmap - standard get_voltage_sel for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * vsel_reg and vsel_mask fields in their descriptor and then use this
+ * as their get_voltage_sel() operation, saving some code.
+ */
+int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ val &= rdev->desc->vsel_mask;
+ val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
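
A similarly minimal sketch of the regmap-backed selector read, again with invented register values; the stpmic1 descriptors below fill vsel_reg and vsel_mask from the real PMIC register map.

	/* Illustrative sketch only, not part of the patch. */
	static const struct regulator_ops demo_regmap_ops = {
		.get_voltage_sel = regulator_get_voltage_sel_regmap,
	};

	static const struct regulator_desc demo_regmap_desc = {
		.vsel_reg  = 0x20,	/* invented register offset */
		.vsel_mask = 0xfc,	/* selector stored in bits 7:2 */
		.ops       = &demo_regmap_ops,
	};

Here ffs(0xfc) - 1 is 2, so a raw register value of 0x8a is masked to 0x88 and shifted down to selector 0x22 (34). rdev->regmap must have been set beforehand, as stpmic1_regulator_register() does below.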
+
+/**
+ * regulator_map_voltage_iterate - map_voltage() based on list_voltage()
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers implementing set_voltage_sel() and list_voltage() can use
+ * this as their map_voltage() operation. It will find a suitable
+ * voltage by calling list_voltage() until it gets something in bounds
+ * for the requested voltages.
+ */
+int regulator_map_voltage_iterate(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int best_val = INT_MAX;
+ int selector = 0;
+ int i, ret;
+
+ /* Find the smallest voltage that falls within the specified
+ * range.
+ */
+ for (i = 0; i < rdev->desc->n_voltages; i++) {
+ ret = rdev->desc->ops->list_voltage(rdev, i);
+ if (ret < 0)
+ continue;
+
+ if (ret < best_val && ret >= min_uV && ret <= max_uV) {
+ best_val = ret;
+ selector = i;
+ }
+ }
+
+ if (best_val != INT_MAX)
+ return selector;
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_iterate);
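
regulator_map_voltage_iterate() only needs a working list_voltage() callback and desc->n_voltages, so it also suits voltage maps that are not expressed as linear ranges at all. A hypothetical table-based descriptor could look like the sketch below; the stpmic1 LDO3 further down instead pairs it with the linear-range list helper.

	/* Illustrative sketch only, not part of the patch. */
	static const int demo_table_uV[] = { 1200000, 1800000, 2500000, 3300000 };

	static int demo_list_voltage(struct regulator_dev *rdev, unsigned int sel)
	{
		if (sel >= ARRAY_SIZE(demo_table_uV))
			return -EINVAL;

		return demo_table_uV[sel];
	}

	static const struct regulator_ops demo_table_ops = {
		.list_voltage = demo_list_voltage,
		.map_voltage  = regulator_map_voltage_iterate,
	};

	static const struct regulator_desc demo_table_desc = {
		.n_voltages = ARRAY_SIZE(demo_table_uV),
		.ops        = &demo_table_ops,
	};

Requesting anything between 1.7 V and 2.6 V then resolves to selector 1, the smallest listed voltage inside the window.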
+
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
new file mode 100644
index 0000000000..3e8caa8710
--- /dev/null
+++ b/drivers/regulator/of_regulator.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OF helpers for regulator framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Rajendra Nayak <rnayak@ti.com>
+ */
+
+#include <common.h>
+#include <of.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+
+static int of_get_regulation_constraints(struct device_d *dev,
+ struct device_node *np,
+ struct regulator_init_data **init_data,
+ const struct regulator_desc *desc)
+{
+ struct regulation_constraints *constraints = &(*init_data)->constraints;
+ int ret;
+ u32 pval;
+
+ constraints->name = of_get_property(np, "regulator-name", NULL);
+
+ if (!of_property_read_u32(np, "regulator-min-microvolt", &pval))
+ constraints->min_uV = pval;
+
+ if (!of_property_read_u32(np, "regulator-max-microvolt", &pval))
+ constraints->max_uV = pval;
+
+ /* Voltage change possible? */
+ if (constraints->min_uV != constraints->max_uV)
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
+
+ /* Do we have a voltage range, if so try to apply it? */
+ if (constraints->min_uV && constraints->max_uV)
+ constraints->apply_uV = true;
+
+ if (!of_property_read_u32(np, "regulator-microvolt-offset", &pval))
+ constraints->uV_offset = pval;
+ if (!of_property_read_u32(np, "regulator-min-microamp", &pval))
+ constraints->min_uA = pval;
+ if (!of_property_read_u32(np, "regulator-max-microamp", &pval))
+ constraints->max_uA = pval;
+
+ if (!of_property_read_u32(np, "regulator-input-current-limit-microamp",
+ &pval))
+ constraints->ilim_uA = pval;
+
+ /* Current change possible? */
+ if (constraints->min_uA != constraints->max_uA)
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_CURRENT;
+
+ constraints->boot_on = of_property_read_bool(np, "regulator-boot-on");
+ constraints->always_on = of_property_read_bool(np, "regulator-always-on");
+ if (!constraints->always_on) /* status change should be possible. */
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+
+ constraints->pull_down = of_property_read_bool(np, "regulator-pull-down");
+
+ if (of_property_read_bool(np, "regulator-allow-bypass"))
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_BYPASS;
+
+ if (of_property_read_bool(np, "regulator-allow-set-load"))
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_DRMS;
+
+ ret = of_property_read_u32(np, "regulator-ramp-delay", &pval);
+ if (!ret) {
+ if (pval)
+ constraints->ramp_delay = pval;
+ else
+ constraints->ramp_disable = true;
+ }
+
+ ret = of_property_read_u32(np, "regulator-settling-time-us", &pval);
+ if (!ret)
+ constraints->settling_time = pval;
+
+ ret = of_property_read_u32(np, "regulator-settling-time-up-us", &pval);
+ if (!ret)
+ constraints->settling_time_up = pval;
+ if (constraints->settling_time_up && constraints->settling_time) {
+ pr_warn("%pOFn: ambiguous configuration for settling time, ignoring 'regulator-settling-time-up-us'\n",
+ np);
+ constraints->settling_time_up = 0;
+ }
+
+ ret = of_property_read_u32(np, "regulator-settling-time-down-us",
+ &pval);
+ if (!ret)
+ constraints->settling_time_down = pval;
+ if (constraints->settling_time_down && constraints->settling_time) {
+ pr_warn("%pOFn: ambiguous configuration for settling time, ignoring 'regulator-settling-time-down-us'\n",
+ np);
+ constraints->settling_time_down = 0;
+ }
+
+ ret = of_property_read_u32(np, "regulator-enable-ramp-delay", &pval);
+ if (!ret)
+ constraints->enable_time = pval;
+
+ constraints->soft_start = of_property_read_bool(np,
+ "regulator-soft-start");
+ ret = of_property_read_u32(np, "regulator-active-discharge", &pval);
+ if (!ret) {
+ constraints->active_discharge =
+ (pval) ? REGULATOR_ACTIVE_DISCHARGE_ENABLE :
+ REGULATOR_ACTIVE_DISCHARGE_DISABLE;
+ }
+
+ if (!of_property_read_u32(np, "regulator-system-load", &pval))
+ constraints->system_load = pval;
+
+ if (!of_property_read_u32(np, "regulator-max-step-microvolt",
+ &pval))
+ constraints->max_uV_step = pval;
+
+ constraints->over_current_protection = of_property_read_bool(np,
+ "regulator-over-current-protection");
+
+ return 0;
+}
+
+/**
+ * of_get_regulator_init_data - extract regulator_init_data structure info
+ * @dev: device requesting for regulator_init_data
+ * @node: regulator device node
+ * @desc: regulator description
+ *
+ * Populates regulator_init_data structure by extracting data from device
+ * tree node, returns a pointer to the populated structure or NULL if memory
+ * alloc fails.
+ */
+struct regulator_init_data *of_get_regulator_init_data(struct device_d *dev,
+ struct device_node *node,
+ const struct regulator_desc *desc)
+{
+ struct regulator_init_data *init_data;
+
+ if (!node)
+ return NULL;
+
+ init_data = xzalloc(sizeof(*init_data));
+
+ if (of_get_regulation_constraints(dev, node, &init_data, desc))
+ return NULL;
+
+ return init_data;
+}
+EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
+
+struct devm_of_regulator_matches {
+ struct of_regulator_match *matches;
+ unsigned int num_matches;
+};
+
+/**
+ * of_regulator_match - extract multiple regulator init data from device tree.
+ * @dev: device requesting the data
+ * @node: parent device node of the regulators
+ * @matches: match table for the regulators
+ * @num_matches: number of entries in match table
+ *
+ * This function uses a match table specified by the regulator driver to
+ * parse regulator init data from the device tree. @node is expected to
+ * contain a set of child nodes, each providing the init data for one
+ * regulator. The data parsed from a child node will be matched to a regulator
+ * based on either the deprecated property regulator-compatible if present,
+ * or otherwise the child node's name. Note that the match table is modified
+ * in place and an additional of_node reference is taken for each matched
+ * regulator.
+ *
+ * Returns the number of matches found or a negative error code on failure.
+ */
+int of_regulator_match(struct device_d *dev, struct device_node *node,
+ struct of_regulator_match *matches,
+ unsigned int num_matches)
+{
+ unsigned int count = 0;
+ unsigned int i;
+ const char *name;
+ struct device_node *child;
+ struct devm_of_regulator_matches *devm_matches;
+
+ if (!dev || !node)
+ return -EINVAL;
+
+ devm_matches = xzalloc(sizeof(struct devm_of_regulator_matches));
+
+ devm_matches->matches = matches;
+ devm_matches->num_matches = num_matches;
+
+ for (i = 0; i < num_matches; i++) {
+ struct of_regulator_match *match = &matches[i];
+ match->init_data = NULL;
+ match->of_node = NULL;
+ }
+
+ for_each_child_of_node(node, child) {
+ name = of_get_property(child,
+ "regulator-compatible", NULL);
+ if (!name)
+ name = child->name;
+
+ for (i = 0; i < num_matches; i++) {
+ struct of_regulator_match *match = &matches[i];
+ if (match->of_node)
+ continue;
+
+ if (strcmp(match->name, name))
+ continue;
+
+ match->init_data = of_get_regulator_init_data(dev, child,
+ match->desc);
+ if (!match->init_data) {
+ dev_err(dev,
+ "failed to parse DT for regulator %pOFn\n",
+ child);
+ return -EINVAL;
+ }
+ match->of_node = child;
+ count++;
+ break;
+ }
+ }
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(of_regulator_match);
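
Condensed to its essentials, a consumer of this helper matches its descriptors against the child nodes and then registers each regulator against the matched node. The stpmic1 driver added below is the real in-tree user; the skeleton here only illustrates the calling sequence with invented names.

	/* Illustrative sketch only, not part of the patch. */

	/* ops, ranges and enable/vsel registers would come from the PMIC data sheet */
	static const struct regulator_desc demo_ldo_desc;

	static struct regulator_dev demo_ldo_rdev;

	static struct of_regulator_match demo_matches[] = {
		{ .name = "ldo1", .desc = &demo_ldo_desc },
	};

	static int demo_pmic_probe(struct device_d *dev)
	{
		int ret;

		/* fills .init_data and .of_node of every matched child node */
		ret = of_regulator_match(dev, dev->device_node, demo_matches,
					 ARRAY_SIZE(demo_matches));
		if (ret < 0)
			return ret;

		demo_ldo_rdev.desc = &demo_ldo_desc;

		return of_regulator_register(&demo_ldo_rdev, demo_matches[0].of_node);
	}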
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
new file mode 100644
index 0000000000..aaaba092c1
--- /dev/null
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) STMicroelectronics 2018
+// Author: Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
+
+#include <common.h>
+#include <init.h>
+#include <of_device.h>
+#include <regmap.h>
+#include <linux/regulator/of_regulator.h>
+#include <regulator.h>
+#include <linux/mfd/stpmic1.h>
+
+#include <dt-bindings/mfd/st,stpmic1.h>
+
+/**
+ * stpmic1 regulator description: this structure is used as driver data
+ * @dev: device the regulators belong to
+ * @rdev: regulator device registered with the regulator framework
+ * @desc: regulator framework description
+ * @mask_reset_reg: mask reset register address
+ * @mask_reset_mask: mask rank and mask reset register mask
+ * @icc_reg: icc register address
+ * @icc_mask: icc register mask
+ */
+struct stpmic1_regulator_cfg {
+ struct device_d *dev;
+ struct regulator_dev rdev;
+ struct regulator_desc desc;
+ u8 mask_reset_reg;
+ u8 mask_reset_mask;
+ u8 icc_reg;
+ u8 icc_mask;
+};
+
+enum {
+ STPMIC1_BUCK1 = 0,
+ STPMIC1_BUCK2 = 1,
+ STPMIC1_BUCK3 = 2,
+ STPMIC1_BUCK4 = 3,
+ STPMIC1_LDO1 = 4,
+ STPMIC1_LDO2 = 5,
+ STPMIC1_LDO3 = 6,
+ STPMIC1_LDO4 = 7,
+ STPMIC1_LDO5 = 8,
+ STPMIC1_LDO6 = 9,
+ STPMIC1_VREF_DDR = 10,
+ STPMIC1_BOOST = 11,
+ STPMIC1_VBUS_OTG = 12,
+ STPMIC1_SW_OUT = 13,
+};
+
+/* Enable time worst case is 5000mV/(2250uV/uS) */
+#define PMIC_ENABLE_TIME_US 2200
+
+static const struct regulator_linear_range buck1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(725000, 0, 4, 0),
+ REGULATOR_LINEAR_RANGE(725000, 5, 36, 25000),
+ REGULATOR_LINEAR_RANGE(1500000, 37, 63, 0),
+};
+
+static const struct regulator_linear_range buck2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0, 17, 0),
+ REGULATOR_LINEAR_RANGE(1050000, 18, 19, 0),
+ REGULATOR_LINEAR_RANGE(1100000, 20, 21, 0),
+ REGULATOR_LINEAR_RANGE(1150000, 22, 23, 0),
+ REGULATOR_LINEAR_RANGE(1200000, 24, 25, 0),
+ REGULATOR_LINEAR_RANGE(1250000, 26, 27, 0),
+ REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0),
+ REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0),
+ REGULATOR_LINEAR_RANGE(1400000, 32, 33, 0),
+ REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 36, 63, 0),
+};
+
+static const struct regulator_linear_range buck3_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0, 19, 0),
+ REGULATOR_LINEAR_RANGE(1100000, 20, 23, 0),
+ REGULATOR_LINEAR_RANGE(1200000, 24, 27, 0),
+ REGULATOR_LINEAR_RANGE(1300000, 28, 31, 0),
+ REGULATOR_LINEAR_RANGE(1400000, 32, 35, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 36, 55, 100000),
+ REGULATOR_LINEAR_RANGE(3400000, 56, 63, 0),
+};
+
+static const struct regulator_linear_range buck4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 27, 25000),
+ REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0),
+ REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0),
+ REGULATOR_LINEAR_RANGE(1400000, 32, 33, 0),
+ REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 36, 60, 100000),
+ REGULATOR_LINEAR_RANGE(3900000, 61, 63, 0),
+};
+
+static const struct regulator_linear_range ldo1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
+ REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
+};
+
+static const struct regulator_linear_range ldo2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
+ REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
+};
+
+static const struct regulator_linear_range ldo3_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
+ REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
+ /* with index 31 LDO3 is in DDR mode */
+ REGULATOR_LINEAR_RANGE(500000, 31, 31, 0),
+};
+
+static const struct regulator_linear_range ldo5_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
+ REGULATOR_LINEAR_RANGE(1700000, 8, 30, 100000),
+ REGULATOR_LINEAR_RANGE(3900000, 31, 31, 0),
+};
+
+static const struct regulator_linear_range ldo6_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0, 24, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
+};
+
+static const struct regulator_ops stpmic1_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops stpmic1_ldo3_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_iterate,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops stpmic1_ldo4_fixed_regul_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+static const struct regulator_ops stpmic1_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_ops stpmic1_vref_ddr_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+static const struct regulator_ops stpmic1_boost_regul_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+static const struct regulator_ops stpmic1_switch_regul_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+#define REG_LDO(ids, base) { \
+ .n_voltages = 32, \
+ .ops = &stpmic1_ldo_ops, \
+ .linear_ranges = base ## _ranges, \
+ .n_linear_ranges = ARRAY_SIZE(base ## _ranges), \
+ .vsel_reg = ids##_ACTIVE_CR, \
+ .vsel_mask = LDO_VOLTAGE_MASK, \
+ .enable_reg = ids##_ACTIVE_CR, \
+ .enable_mask = LDO_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+}
+
+#define REG_LDO3(ids, base) { \
+ .n_voltages = 32, \
+ .ops = &stpmic1_ldo3_ops, \
+ .linear_ranges = ldo3_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(ldo3_ranges), \
+ .vsel_reg = LDO3_ACTIVE_CR, \
+ .vsel_mask = LDO_VOLTAGE_MASK, \
+ .enable_reg = LDO3_ACTIVE_CR, \
+ .enable_mask = LDO_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+}
+
+#define REG_LDO4(ids, base) { \
+ .n_voltages = 1, \
+ .ops = &stpmic1_ldo4_fixed_regul_ops, \
+ .min_uV = 3300000, \
+ .enable_reg = LDO4_ACTIVE_CR, \
+ .enable_mask = LDO_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+}
+
+#define REG_BUCK(ids, base) { \
+ .ops = &stpmic1_buck_ops, \
+ .n_voltages = 64, \
+ .linear_ranges = base ## _ranges, \
+ .n_linear_ranges = ARRAY_SIZE(base ## _ranges), \
+ .vsel_reg = ids##_ACTIVE_CR, \
+ .vsel_mask = BUCK_VOLTAGE_MASK, \
+ .enable_reg = ids##_ACTIVE_CR, \
+ .enable_mask = BUCK_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+}
+
+#define REG_VREF_DDR(ids, base) { \
+ .n_voltages = 1, \
+ .ops = &stpmic1_vref_ddr_ops, \
+ .min_uV = 500000, \
+ .enable_reg = VREF_DDR_ACTIVE_CR, \
+ .enable_mask = BUCK_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+}
+
+#define REG_BOOST(ids, base) { \
+ .n_voltages = 1, \
+ .ops = &stpmic1_boost_regul_ops, \
+ .min_uV = 0, \
+ .enable_reg = BST_SW_CR, \
+ .enable_mask = BOOST_ENABLED, \
+ .enable_val = BOOST_ENABLED, \
+ .disable_val = 0, \
+}
+
+#define REG_VBUS_OTG(ids, base) { \
+ .n_voltages = 1, \
+ .ops = &stpmic1_switch_regul_ops, \
+ .min_uV = 0, \
+ .enable_reg = BST_SW_CR, \
+ .enable_mask = USBSW_OTG_SWITCH_ENABLED, \
+ .enable_val = USBSW_OTG_SWITCH_ENABLED, \
+ .disable_val = 0, \
+}
+
+#define REG_SW_OUT(ids, base) { \
+ .n_voltages = 1, \
+ .ops = &stpmic1_switch_regul_ops, \
+ .min_uV = 0, \
+ .enable_reg = BST_SW_CR, \
+ .enable_mask = SWIN_SWOUT_ENABLED, \
+ .enable_val = SWIN_SWOUT_ENABLED, \
+ .disable_val = 0, \
+}
+
+static struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = {
+ [STPMIC1_BUCK1] = {
+ .desc = REG_BUCK(BUCK1, buck1),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(0),
+ .mask_reset_reg = BUCKS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(0),
+ },
+ [STPMIC1_BUCK2] = {
+ .desc = REG_BUCK(BUCK2, buck2),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(1),
+ .mask_reset_reg = BUCKS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(1),
+ },
+ [STPMIC1_BUCK3] = {
+ .desc = REG_BUCK(BUCK3, buck3),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(2),
+ .mask_reset_reg = BUCKS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(2),
+ },
+ [STPMIC1_BUCK4] = {
+ .desc = REG_BUCK(BUCK4, buck4),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(3),
+ .mask_reset_reg = BUCKS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(3),
+ },
+ [STPMIC1_LDO1] = {
+ .desc = REG_LDO(LDO1, ldo1),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(0),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(0),
+ },
+ [STPMIC1_LDO2] = {
+ .desc = REG_LDO(LDO2, ldo2),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(1),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(1),
+ },
+ [STPMIC1_LDO3] = {
+ .desc = REG_LDO3(LDO3, ldo3),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(2),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(2),
+ },
+ [STPMIC1_LDO4] = {
+ .desc = REG_LDO4(LDO4, ldo4),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(3),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(3),
+ },
+ [STPMIC1_LDO5] = {
+ .desc = REG_LDO(LDO5, ldo5),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(4),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(4),
+ },
+ [STPMIC1_LDO6] = {
+ .desc = REG_LDO(LDO6, ldo6),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(5),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(5),
+ },
+ [STPMIC1_VREF_DDR] = {
+ .desc = REG_VREF_DDR(VREF_DDR, vref_ddr),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(6),
+ },
+ [STPMIC1_BOOST] = {
+ .desc = REG_BOOST(BOOST, boost),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(6),
+ },
+ [STPMIC1_VBUS_OTG] = {
+ .desc = REG_VBUS_OTG(VBUS_OTG, pwr_sw1),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(4),
+ },
+ [STPMIC1_SW_OUT] = {
+ .desc = REG_SW_OUT(SW_OUT, pwr_sw2),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(5),
+ },
+};
+
+#define MATCH(_name, _id) \
+ [STPMIC1_##_id] = { \
+ .name = #_name, \
+ .desc = &stpmic1_regulator_cfgs[STPMIC1_##_id].desc, \
+ }
+
+static struct of_regulator_match stpmic1_matches[] = {
+ MATCH(buck1, BUCK1),
+ MATCH(buck2, BUCK2),
+ MATCH(buck3, BUCK3),
+ MATCH(buck4, BUCK4),
+ MATCH(ldo1, LDO1),
+ MATCH(ldo2, LDO2),
+ MATCH(ldo3, LDO3),
+ MATCH(ldo4, LDO4),
+ MATCH(ldo5, LDO5),
+ MATCH(ldo6, LDO6),
+ MATCH(vref_ddr, VREF_DDR),
+ MATCH(boost, BOOST),
+ MATCH(pwr_sw1, VBUS_OTG),
+ MATCH(pwr_sw2, SW_OUT),
+};
+
+static int stpmic1_regulator_register(struct device_d *dev, int id,
+ struct of_regulator_match *match,
+ struct stpmic1_regulator_cfg *cfg)
+{
+ int ret;
+
+ cfg->dev = dev;
+ cfg->rdev.desc = &cfg->desc;
+ cfg->rdev.regmap = dev_get_regmap(dev->parent, NULL);
+ if (IS_ERR(cfg->rdev.regmap))
+ return PTR_ERR(cfg->rdev.regmap);
+
+ ret = of_regulator_register(&cfg->rdev, match->of_node);
+ if (ret) {
+ dev_err(dev, "failed to register %s regulator\n", match->name);
+ return ret;
+ }
+
+ dev_dbg(dev, "registered %s\n", match->name);
+
+ return 0;
+}
+
+static int stpmic1_regulator_probe(struct device_d *dev)
+{
+ int i, ret;
+
+ ret = of_regulator_match(dev, dev->device_node, stpmic1_matches,
+ ARRAY_SIZE(stpmic1_matches));
+ if (ret < 0) {
+ dev_err(dev, "Error in PMIC regulator device tree node");
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stpmic1_regulator_cfgs); i++) {
+ ret = stpmic1_regulator_register(dev, i, &stpmic1_matches[i],
+ &stpmic1_regulator_cfgs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ dev_dbg(dev, "probed\n");
+
+ return 0;
+}
+
+static __maybe_unused const struct of_device_id stpmic1_regulator_of_match[] = {
+ { .compatible = "st,stpmic1-regulators" },
+ { /* sentinel */ },
+};
+
+static struct driver_d stpmic1_regulator_driver = {
+ .name = "stpmic1-regulator",
+ .probe = stpmic1_regulator_probe,
+ .of_compatible = DRV_OF_COMPAT(stpmic1_regulator_of_match),
+};
+device_platform_driver(stpmic1_regulator_driver);
diff --git a/drivers/watchdog/stpmic1_wdt.c b/drivers/watchdog/stpmic1_wdt.c
index eb8c43f716..5d9720c230 100644
--- a/drivers/watchdog/stpmic1_wdt.c
+++ b/drivers/watchdog/stpmic1_wdt.c
@@ -169,7 +169,10 @@ static int stpmic1_wdt_probe(struct device_d *dev)
int ret;
wdt = xzalloc(sizeof(*wdt));
- wdt->regmap = dev->parent->priv;
+
+ wdt->regmap = dev_get_regmap(dev->parent, NULL);
+ if (IS_ERR(wdt->regmap))
+ return PTR_ERR(wdt->regmap);
wdd = &wdt->wdd;
wdd->hwdev = dev;