-rw-r--r-- arch/arm/Kconfig | 10
-rw-r--r-- arch/arm/Makefile | 1
-rw-r--r-- arch/arm/boards/Makefile | 2
-rw-r--r-- arch/arm/boards/ls1046ardb/Makefile | 4
-rw-r--r-- arch/arm/boards/ls1046ardb/board.c | 36
-rw-r--r-- arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth4.mode | 1
-rw-r--r-- arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth5.mode | 1
-rw-r--r-- arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth6.mode | 1
-rw-r--r-- arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth7.mode | 1
-rw-r--r-- arch/arm/boards/ls1046ardb/lowlevel.c | 231
-rw-r--r-- arch/arm/boards/ls1046ardb/ls1046ardb_pbi.cfg | 22
-rw-r--r-- arch/arm/boards/ls1046ardb/ls1046ardb_qspi_pbi.cfg | 26
-rw-r--r-- arch/arm/boards/ls1046ardb/ls1046ardb_rcw_emmc.cfg | 7
-rw-r--r-- arch/arm/boards/ls1046ardb/ls1046ardb_rcw_qspi.cfg | 7
-rw-r--r-- arch/arm/boards/ls1046ardb/ls1046ardb_rcw_sd.cfg | 7
-rw-r--r-- arch/arm/boards/ls1046ardb/start.S | 11
-rw-r--r-- arch/arm/boards/tqmls1046a/Makefile | 3
-rw-r--r-- arch/arm/boards/tqmls1046a/board.c | 32
-rw-r--r-- arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth4.mode | 1
-rw-r--r-- arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth5.mode | 1
-rw-r--r-- arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth6.mode | 1
-rw-r--r-- arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth7.mode | 1
-rw-r--r-- arch/arm/boards/tqmls1046a/lowlevel.c | 217
-rw-r--r-- arch/arm/boards/tqmls1046a/start.S | 12
-rw-r--r-- arch/arm/boards/tqmls1046a/tqmls1046a_pbi_qspi.cfg | 33
-rw-r--r-- arch/arm/boards/tqmls1046a/tqmls1046a_pbi_sd.cfg | 35
-rw-r--r-- arch/arm/boards/tqmls1046a/tqmls1046a_rcw_emmc_3333_5559.cfg | 84
-rw-r--r-- arch/arm/boards/tqmls1046a/tqmls1046a_rcw_qspi_3333_5559.cfg | 84
-rw-r--r-- arch/arm/boards/tqmls1046a/tqmls1046a_rcw_sd_3333_5559.cfg | 84
-rw-r--r-- arch/arm/configs/layerscape_defconfig | 111
-rw-r--r-- arch/arm/dts/Makefile | 2
-rw-r--r-- arch/arm/dts/fsl-ls1046a-rdb.dts | 98
-rw-r--r-- arch/arm/dts/fsl-tqmls1046a-mbls10xxa.dts | 240
-rw-r--r-- arch/arm/dts/fsl-tqmls1046a.dtsi | 54
-rw-r--r-- arch/arm/lib/pbl.lds.S | 6
-rw-r--r-- arch/arm/lib64/Makefile | 2
-rw-r--r-- arch/arm/lib64/pbl.c | 17
-rw-r--r-- arch/arm/mach-layerscape/Kconfig | 21
-rw-r--r-- arch/arm/mach-layerscape/Makefile | 4
-rw-r--r-- arch/arm/mach-layerscape/errata.c | 195
-rw-r--r-- arch/arm/mach-layerscape/icid.c | 243
-rw-r--r-- arch/arm/mach-layerscape/include/mach/debug_ll.h | 34
-rw-r--r-- arch/arm/mach-layerscape/include/mach/errata.h | 7
-rw-r--r-- arch/arm/mach-layerscape/include/mach/layerscape.h | 7
-rw-r--r-- arch/arm/mach-layerscape/include/mach/lowlevel.h | 7
-rw-r--r-- arch/arm/mach-layerscape/include/mach/xload.h | 6
-rw-r--r-- arch/arm/mach-layerscape/lowlevel-ls1046a.c | 246
-rw-r--r-- arch/arm/mach-layerscape/lowlevel.S | 18
-rw-r--r-- arch/mips/lib/pbl.lds.S | 5
-rw-r--r-- common/Kconfig | 8
-rw-r--r-- drivers/Kconfig | 1
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/clk/Makefile | 1
-rw-r--r-- drivers/clk/clk-qoric.c | 665
-rw-r--r-- drivers/ddr/Kconfig | 1
-rw-r--r-- drivers/ddr/Makefile | 1
-rw-r--r-- drivers/ddr/fsl/Kconfig | 16
-rw-r--r-- drivers/ddr/fsl/Makefile | 12
-rw-r--r-- drivers/ddr/fsl/arm_ddr_gen3.c | 204
-rw-r--r-- drivers/ddr/fsl/ctrl_regs.c | 2539
-rw-r--r-- drivers/ddr/fsl/ddr1_dimm_params.c | 319
-rw-r--r-- drivers/ddr/fsl/ddr2_dimm_params.c | 320
-rw-r--r-- drivers/ddr/fsl/ddr3_dimm_params.c | 325
-rw-r--r-- drivers/ddr/fsl/ddr4_dimm_params.c | 352
-rw-r--r-- drivers/ddr/fsl/fsl_ddr.h | 234
-rw-r--r-- drivers/ddr/fsl/fsl_ddr_gen4.c | 501
-rw-r--r-- drivers/ddr/fsl/lc_common_dimm_params.c | 542
-rw-r--r-- drivers/ddr/fsl/main.c | 444
-rw-r--r-- drivers/ddr/fsl/options.c | 1133
-rw-r--r-- drivers/ddr/fsl/util.c | 98
-rw-r--r-- drivers/i2c/busses/Kconfig | 2
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 4
-rw-r--r-- drivers/mci/imx-esdhc-pbl.c | 60
-rw-r--r-- drivers/mci/imx-esdhc.c | 3
-rw-r--r-- drivers/net/Kconfig | 8
-rw-r--r-- drivers/net/Makefile | 1
-rw-r--r-- drivers/net/fsl-fman.c | 1333
-rw-r--r-- drivers/watchdog/Kconfig | 2
-rw-r--r-- drivers/watchdog/Makefile | 1
-rw-r--r-- drivers/watchdog/imxwd.c | 46
-rw-r--r-- firmware/Makefile | 2
-rw-r--r-- images/Makefile | 35
-rw-r--r-- images/Makefile.imx | 3
-rw-r--r-- images/Makefile.layerscape | 59
-rw-r--r-- include/soc/fsl/fsl_ddr_sdram.h | 558
-rw-r--r-- include/soc/fsl/fsl_fman.h | 439
-rw-r--r-- include/soc/fsl/fsl_immap.h | 184
-rw-r--r-- include/soc/fsl/fsl_memac.h | 256
-rw-r--r-- include/soc/fsl/fsl_qbman.h | 74
-rw-r--r-- include/soc/fsl/immap_lsch2.h | 355
-rw-r--r-- include/soc/fsl/qe.h | 264
-rw-r--r-- lib/Kconfig | 4
-rw-r--r-- lib/Makefile | 1
-rw-r--r-- lib/fsl-qe-firmware.c | 64
-rw-r--r-- scripts/Makefile | 1
-rw-r--r-- scripts/Makefile.lib | 14
-rwxr-xr-x scripts/check_size | 20
-rwxr-xr-x scripts/extract_symbol_offset | 14
-rw-r--r-- scripts/pblimage.c | 432
99 files changed, 14212 insertions, 23 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9d3f5b2ca7..8565bbb458 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -113,6 +113,15 @@ config ARCH_IMX
select WATCHDOG_IMX_RESET_SOURCE
select HAS_DEBUG_LL
+config ARCH_LAYERSCAPE
+ bool "NXP Layerscape based"
+ select GPIOLIB
+ select HAS_DEBUG_LL
+ select HAVE_PBL_MULTI_IMAGES
+ select COMMON_CLK
+ select CLKDEV_LOOKUP
+ select COMMON_CLK_OF_PROVIDER
+
config ARCH_MVEBU
bool "Marvell EBU platforms"
select COMMON_CLK
@@ -275,6 +284,7 @@ source arch/arm/mach-digic/Kconfig
source arch/arm/mach-ep93xx/Kconfig
source arch/arm/mach-highbank/Kconfig
source arch/arm/mach-imx/Kconfig
+source arch/arm/mach-layerscape/Kconfig
source arch/arm/mach-mxs/Kconfig
source arch/arm/mach-mvebu/Kconfig
source arch/arm/mach-netx/Kconfig
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 0ce208128d..7dd5e1cd41 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -87,6 +87,7 @@ machine-$(CONFIG_ARCH_DIGIC) := digic
machine-$(CONFIG_ARCH_EP93XX) := ep93xx
machine-$(CONFIG_ARCH_HIGHBANK) := highbank
machine-$(CONFIG_ARCH_IMX) := imx
+machine-$(CONFIG_ARCH_LAYERSCAPE) := layerscape
machine-$(CONFIG_ARCH_MXS) := mxs
machine-$(CONFIG_ARCH_MVEBU) := mvebu
machine-$(CONFIG_ARCH_NOMADIK) := nomadik
diff --git a/arch/arm/boards/Makefile b/arch/arm/boards/Makefile
index fc883e3dea..d146d866d7 100644
--- a/arch/arm/boards/Makefile
+++ b/arch/arm/boards/Makefile
@@ -163,3 +163,5 @@ obj-$(CONFIG_MACH_ZII_IMX8MQ_DEV) += zii-imx8mq-dev/
obj-$(CONFIG_MACH_ZII_VF610_DEV) += zii-vf610-dev/
obj-$(CONFIG_MACH_ZII_IMX7D_RPU2) += zii-imx7d-rpu2/
obj-$(CONFIG_MACH_WAGO_PFC_AM35XX) += wago-pfc-am35xx/
+obj-$(CONFIG_MACH_LS1046ARDB) += ls1046ardb/
+obj-$(CONFIG_MACH_TQMLS1046A) += tqmls1046a/
\ No newline at end of file
diff --git a/arch/arm/boards/ls1046ardb/Makefile b/arch/arm/boards/ls1046ardb/Makefile
new file mode 100644
index 0000000000..03ac4ecca3
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/Makefile
@@ -0,0 +1,4 @@
+lwl-y += lowlevel.o
+obj-y += board.o
+lwl-y += start.o
+bbenv-y += defaultenv-ls1046ardb
diff --git a/arch/arm/boards/ls1046ardb/board.c b/arch/arm/boards/ls1046ardb/board.c
new file mode 100644
index 0000000000..483040957e
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/board.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <init.h>
+#include <envfs.h>
+#include <asm/memory.h>
+#include <linux/sizes.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <asm/system.h>
+
+static int rdb_mem_init(void)
+{
+ if (!of_machine_is_compatible("fsl,ls1046a-rdb"))
+ return 0;
+
+ arm_add_mem_device("ram0", 0x80000000, 0x80000000);
+ arm_add_mem_device("ram1", 0x880000000, 3ULL * SZ_2G);
+
+ printf("Current EL: %d\n", current_el());
+
+ return 0;
+}
+mem_initcall(rdb_mem_init);
+
+static int rdb_postcore_init(void)
+{
+ if (!of_machine_is_compatible("fsl,ls1046a-rdb"))
+ return 0;
+
+ defaultenv_append_directory(defaultenv_ls1046ardb);
+
+ return 0;
+}
+
+postcore_initcall(rdb_postcore_init);
diff --git a/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth4.mode b/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth4.mode
new file mode 100644
index 0000000000..7a68b11da8
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth4.mode
@@ -0,0 +1 @@
+disabled
diff --git a/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth5.mode b/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth5.mode
new file mode 100644
index 0000000000..7a68b11da8
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth5.mode
@@ -0,0 +1 @@
+disabled
diff --git a/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth6.mode b/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth6.mode
new file mode 100644
index 0000000000..7a68b11da8
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth6.mode
@@ -0,0 +1 @@
+disabled
diff --git a/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth7.mode b/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth7.mode
new file mode 100644
index 0000000000..7a68b11da8
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/defaultenv-ls1046ardb/nv/dev.eth7.mode
@@ -0,0 +1 @@
+disabled
diff --git a/arch/arm/boards/ls1046ardb/lowlevel.c b/arch/arm/boards/ls1046ardb/lowlevel.c
new file mode 100644
index 0000000000..6de16063a7
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/lowlevel.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <debug_ll.h>
+#include <ddr_spd.h>
+#include <platform_data/mmc-esdhc-imx.h>
+#include <i2c/i2c-early.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <soc/fsl/immap_lsch2.h>
+#include <asm/barebox-arm-head.h>
+#include <asm/barebox-arm.h>
+#include <asm/syscounter.h>
+#include <asm/cache.h>
+#include <mach/errata.h>
+#include <mach/lowlevel.h>
+#include <mach/xload.h>
+#include <mach/layerscape.h>
+
+struct board_specific_parameters {
+ u32 n_ranks;
+ u32 datarate_mhz_high;
+ u32 rank_gb;
+ u32 clk_adjust;
+ u32 wrlvl_start;
+ u32 wrlvl_ctl_2;
+ u32 wrlvl_ctl_3;
+};
+
+/*
+ * These tables contain all valid speeds we want to override with board
+ * specific parameters. datarate_mhz_high values need to be in ascending order
+ * for each n_ranks group.
+ */
+static const struct board_specific_parameters udimm0[] = {
+ /*
+ * memory controller 0
+ * num| hi| rank| clk| wrlvl | wrlvl | wrlvl
+ * ranks| mhz| GB |adjst| start | ctl2 | ctl3
+ */
+ {2, 1350, 0, 8, 6, 0x0708090B, 0x0C0D0E09,},
+ {2, 1666, 0, 8, 7, 0x08090A0C, 0x0D0F100B,},
+ {2, 1900, 0, 8, 7, 0x09090B0D, 0x0E10120B,},
+ {2, 2300, 0, 8, 9, 0x0A0B0C10, 0x1213140E,},
+ {}
+};
+
+static const struct board_specific_parameters *udimms[] = {
+ udimm0,
+};
+
+static const struct board_specific_parameters rdimm0[] = {
+ /*
+ * memory controller 0
+ * num| hi| rank| clk| wrlvl | wrlvl | wrlvl
+ * ranks| mhz| GB |adjst| start | ctl2 | ctl3
+ */
+ {2, 1666, 0, 0x8, 0x0D, 0x0C0B0A08, 0x0A0B0C08,},
+ {2, 1900, 0, 0x8, 0x0E, 0x0D0C0B09, 0x0B0C0D09,},
+ {2, 2300, 0, 0xa, 0x12, 0x100F0D0C, 0x0E0F100C,},
+ {1, 1666, 0, 0x8, 0x0D, 0x0C0B0A08, 0x0A0B0C08,},
+ {1, 1900, 0, 0x8, 0x0E, 0x0D0C0B09, 0x0B0C0D09,},
+ {1, 2300, 0, 0xa, 0x12, 0x100F0D0C, 0x0E0F100C,},
+ {}
+};
+
+static const struct board_specific_parameters *rdimms[] = {
+ rdimm0,
+};
+
+static void ddr_board_options(memctl_options_t *popts,
+ struct dimm_params *pdimm,
+ struct fsl_ddr_controller *c)
+{
+ const struct board_specific_parameters *pbsp, *pbsp_highest = NULL;
+ unsigned long ddr_freq;
+
+ if (!pdimm->n_ranks)
+ return;
+
+ if (popts->registered_dimm_en)
+ pbsp = rdimms[0];
+ else
+ pbsp = udimms[0];
+
+ /*
+ * Get clk_adjust, wrlvl_start and wrlvl_ctl according to the board DDR
+ * frequency and n_ranks specified in the board_specific_parameters table.
+ */
+ ddr_freq = c->ddr_freq / 1000000;
+ while (pbsp->datarate_mhz_high) {
+ if (pbsp->n_ranks == pdimm->n_ranks) {
+ if (ddr_freq <= pbsp->datarate_mhz_high) {
+ popts->clk_adjust = pbsp->clk_adjust;
+ popts->wrlvl_start = pbsp->wrlvl_start;
+ popts->wrlvl_ctl_2 = pbsp->wrlvl_ctl_2;
+ popts->wrlvl_ctl_3 = pbsp->wrlvl_ctl_3;
+ goto found;
+ }
+ pbsp_highest = pbsp;
+ }
+ pbsp++;
+ }
+
+ if (pbsp_highest) {
+ printf("Error: board specific timing not found for %lu MT/s\n",
+ ddr_freq);
+ printf("Trying to use the highest speed (%u) parameters\n",
+ pbsp_highest->datarate_mhz_high);
+ popts->clk_adjust = pbsp_highest->clk_adjust;
+ popts->wrlvl_start = pbsp_highest->wrlvl_start;
+ popts->wrlvl_ctl_2 = pbsp_highest->wrlvl_ctl_2;
+ popts->wrlvl_ctl_3 = pbsp_highest->wrlvl_ctl_3;
+ } else {
+ panic("DIMM is not supported by this board");
+ }
+found:
+ debug("Found timing match: n_ranks %d, data rate %d, rank_gb %d\n",
+ pbsp->n_ranks, pbsp->datarate_mhz_high, pbsp->rank_gb);
+
+ popts->data_bus_width = 0; /* 64-bit data bus */
+ popts->bstopre = 0; /* enable auto precharge */
+
+ /*
+ * Factors to consider for half-strength driver enable:
+ * - number of DIMMs installed
+ */
+ popts->half_strength_driver_enable = 0;
+ /*
+ * Write leveling override
+ */
+ popts->wrlvl_override = 1;
+ popts->wrlvl_sample = 0xf;
+
+ /*
+ * Rtt and Rtt_WR override
+ */
+ popts->rtt_override = 0;
+
+ /* Enable ZQ calibration */
+ popts->zq_en = 1;
+
+ popts->ddr_cdr1 = DDR_CDR1_DHC_EN | DDR_CDR1_ODT(DDR4_CDR_ODT_80ohm);
+ popts->ddr_cdr2 = DDR_CDR2_ODT(DDR4_CDR_ODT_80ohm) |
+ DDR_CDR2_VREF_TRAIN_EN | DDR_CDR2_VREF_RANGE_2;
+
+ /* optimize cpo for erratum A-009942 */
+ popts->cpo_sample = 0x61;
+}
+
+extern char __dtb_fsl_ls1046a_rdb_start[];
+
+static struct spd_eeprom spd_eeprom[] = {
+ {
+ /* filled during runtime */
+ },
+};
+
+static struct dimm_params dimm_params[] = {
+ {
+ /* filled during runtime */
+ },
+};
+
+static struct fsl_ddr_controller ddrc[] = {
+ {
+ .dimm_slots_per_ctrl = ARRAY_SIZE(dimm_params),
+ .spd_installed_dimms = spd_eeprom,
+ .dimm_params = dimm_params,
+ .memctl_opts.ddrtype = SDRAM_TYPE_DDR4,
+ .base = IOMEM(LSCH2_DDR_ADDR),
+ .ddr_freq = LS1046A_DDR_FREQ,
+ .erratum_A008511 = 1,
+ .erratum_A009803 = 1,
+ .erratum_A010165 = 1,
+ .erratum_A009801 = 1,
+ .erratum_A009942 = 1,
+ .chip_selects_per_ctrl = 4,
+ .board_options = ddr_board_options,
+ },
+};
+
+static struct fsl_ddr_info ls1046a_info = {
+ .num_ctrls = ARRAY_SIZE(ddrc),
+ .c = ddrc,
+};
+
+static noinline __noreturn void ls1046ardb_r_entry(unsigned long memsize)
+{
+ unsigned long membase = LS1046A_DDR_SDRAM_BASE;
+ struct fsl_i2c *i2c;
+ int ret;
+
+ if (get_pc() >= membase) {
+ if (memsize + membase >= 0x100000000)
+ memsize = 0x100000000 - membase;
+
+ barebox_arm_entry(membase, 0x80000000 - SZ_1M * 67,
+ __dtb_fsl_ls1046a_rdb_start);
+ }
+
+ arm_cpu_lowlevel_init();
+ debug_ll_init();
+ ls1046a_init_lowlevel();
+
+ i2c = ls1046_i2c_init(IOMEM(LSCH2_I2C1_BASE_ADDR));
+ ret = spd_read_eeprom(i2c, i2c_fsl_xfer, 0x51, &spd_eeprom);
+ if (ret) {
+ pr_err("Cannot read SPD EEPROM: %d\n", ret);
+ goto err;
+ }
+
+ memsize = fsl_ddr_sdram(&ls1046a_info);
+
+ ls1046a_errata_post_ddr();
+
+ ls1046a_esdhc_start_image(memsize, 0, 0);
+
+err:
+ pr_err("Booting failed\n");
+
+ while (1);
+}
+
+void ls1046ardb_entry(unsigned long r0, unsigned long r1, unsigned long r2);
+
+__noreturn void ls1046ardb_entry(unsigned long r0, unsigned long r1, unsigned long r2)
+{
+ relocate_to_current_adr();
+ setup_c();
+
+ ls1046ardb_r_entry(r0);
+}
diff --git a/arch/arm/boards/ls1046ardb/ls1046ardb_pbi.cfg b/arch/arm/boards/ls1046ardb/ls1046ardb_pbi.cfg
new file mode 100644
index 0000000000..5478217524
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/ls1046ardb_pbi.cfg
@@ -0,0 +1,22 @@
+#Configure Scratch register
+09570600 00000000
+09570604 10000000
+#Disable CCI barrier transaction
+09570178 0000e010
+09180000 00000008
+#USB PHY frequency sel
+09570418 0000009e
+0957041c 0000009e
+09570420 0000009e
+#Serdes SATA
+09eb1300 80104e20
+09eb08dc 00502880
+#PEX gen3 link
+09570158 00000300
+89400890 01048000
+89500890 01048000
+89600890 01048000
+#Alt base register
+09570158 00001000
+#flush PBI data
+096100c0 000fffff
diff --git a/arch/arm/boards/ls1046ardb/ls1046ardb_qspi_pbi.cfg b/arch/arm/boards/ls1046ardb/ls1046ardb_qspi_pbi.cfg
new file mode 100644
index 0000000000..735d46c9f9
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/ls1046ardb_qspi_pbi.cfg
@@ -0,0 +1,26 @@
+#QSPI clk
+0957015c 40100000
+#Configure Scratch register
+09570600 00000000
+09570604 10000000
+#Disable CCI barrier transaction
+09570178 0000e010
+09180000 00000008
+#USB PHY frequency sel
+09570418 0000009e
+0957041c 0000009e
+09570420 0000009e
+#Serdes SATA
+09eb1300 80104e20
+09eb08dc 00502880
+#PEX gen3 link
+09570158 00000300
+89400890 01048000
+89500890 01048000
+89600890 01048000
+#Alt base register
+09570158 00001000
+#flush PBI data
+096100c0 000fffff
+#Change endianness
+09550000 000f400c
diff --git a/arch/arm/boards/ls1046ardb/ls1046ardb_rcw_emmc.cfg b/arch/arm/boards/ls1046ardb/ls1046ardb_rcw_emmc.cfg
new file mode 100644
index 0000000000..ccedf87e84
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/ls1046ardb_rcw_emmc.cfg
@@ -0,0 +1,7 @@
+#PBL preamble and RCW header
+aa55aa55 01ee0100
+# RCW
+0c150012 0e000000 00000000 00000000
+11335559 40000012 60040000 c1000000
+00000000 00000000 00000000 00238800
+20124000 00003000 00000096 00000001
diff --git a/arch/arm/boards/ls1046ardb/ls1046ardb_rcw_qspi.cfg b/arch/arm/boards/ls1046ardb/ls1046ardb_rcw_qspi.cfg
new file mode 100644
index 0000000000..7b9be0ad3f
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/ls1046ardb_rcw_qspi.cfg
@@ -0,0 +1,7 @@
+#PBL preamble and RCW header
+aa55aa55 01ee0100
+# RCW
+0c150010 0e000000 00000000 00000000
+11335559 40005012 40025000 c1000000
+00000000 00000000 00000000 00238800
+20124000 00003101 00000096 00000001
diff --git a/arch/arm/boards/ls1046ardb/ls1046ardb_rcw_sd.cfg b/arch/arm/boards/ls1046ardb/ls1046ardb_rcw_sd.cfg
new file mode 100644
index 0000000000..d3b152282f
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/ls1046ardb_rcw_sd.cfg
@@ -0,0 +1,7 @@
+#PBL preamble and RCW header
+aa55aa55 01ee0100
+# RCW
+0c150012 0e000000 00000000 00000000
+11335559 40005012 60040000 c1000000
+00000000 00000000 00000000 00238800
+20124000 00003101 00000096 00000001
diff --git a/arch/arm/boards/ls1046ardb/start.S b/arch/arm/boards/ls1046ardb/start.S
new file mode 100644
index 0000000000..466782b278
--- /dev/null
+++ b/arch/arm/boards/ls1046ardb/start.S
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <linux/linkage.h>
+#include <asm/barebox-arm64.h>
+
+#define STACK_TOP 0x10020000
+
+ENTRY_PROC(start_ls1046ardb)
+ mov x3, #STACK_TOP
+ mov sp, x3
+ b ls1046ardb_entry
+ENTRY_PROC_END(start_ls1046ardb)
diff --git a/arch/arm/boards/tqmls1046a/Makefile b/arch/arm/boards/tqmls1046a/Makefile
new file mode 100644
index 0000000000..851a5dcb3d
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/Makefile
@@ -0,0 +1,3 @@
+lwl-y += lowlevel.o start.o
+obj-y += board.o
+bbenv-y += defaultenv-tqmls1046a
diff --git a/arch/arm/boards/tqmls1046a/board.c b/arch/arm/boards/tqmls1046a/board.c
new file mode 100644
index 0000000000..5d6d5ad62c
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/board.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <init.h>
+#include <envfs.h>
+#include <asm/memory.h>
+#include <linux/sizes.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+
+static int tqmls1046a_mem_init(void)
+{
+ if (!of_machine_is_compatible("tqc,tqmls1046a"))
+ return 0;
+
+ arm_add_mem_device("ram0", 0x80000000, SZ_2G);
+
+ return 0;
+}
+mem_initcall(tqmls1046a_mem_init);
+
+static int tqmls1046a_postcore_init(void)
+{
+ if (!of_machine_is_compatible("tqc,tqmls1046a"))
+ return 0;
+
+ defaultenv_append_directory(defaultenv_tqmls1046a);
+
+ return 0;
+}
+
+postcore_initcall(tqmls1046a_postcore_init);
diff --git a/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth4.mode b/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth4.mode
new file mode 100644
index 0000000000..7a68b11da8
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth4.mode
@@ -0,0 +1 @@
+disabled
diff --git a/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth5.mode b/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth5.mode
new file mode 100644
index 0000000000..7a68b11da8
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth5.mode
@@ -0,0 +1 @@
+disabled
diff --git a/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth6.mode b/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth6.mode
new file mode 100644
index 0000000000..7a68b11da8
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth6.mode
@@ -0,0 +1 @@
+disabled
diff --git a/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth7.mode b/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth7.mode
new file mode 100644
index 0000000000..7a68b11da8
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/defaultenv-tqmls1046a/nv/dev.eth7.mode
@@ -0,0 +1 @@
+disabled
diff --git a/arch/arm/boards/tqmls1046a/lowlevel.c b/arch/arm/boards/tqmls1046a/lowlevel.c
new file mode 100644
index 0000000000..044d6a418d
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/lowlevel.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <common.h>
+#include <debug_ll.h>
+#include <platform_data/mmc-esdhc-imx.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <soc/fsl/immap_lsch2.h>
+#include <asm/barebox-arm-head.h>
+#include <asm/barebox-arm.h>
+#include <asm/syscounter.h>
+#include <asm/cache.h>
+#include <mach/errata.h>
+#include <mach/lowlevel.h>
+#include <mach/xload.h>
+#include <mach/layerscape.h>
+
+struct board_specific_parameters {
+ u32 n_ranks;
+ u32 datarate_mhz_high;
+ u32 rank_gb;
+ u32 clk_adjust;
+ u32 wrlvl_start;
+ u32 wrlvl_ctl_2;
+ u32 wrlvl_ctl_3;
+};
+
+/*
+ * These tables contain all valid speeds we want to override with board
+ * specific parameters. datarate_mhz_high values need to be in ascending order
+ * for each n_ranks group.
+ */
+static const struct board_specific_parameters udimm0[] = {
+ /*
+ * memory controller 0
+ * num| hi| rank| clk| wrlvl | wrlvl | wrlvl | cpo |wrdata|2T
+ * ranks| mhz| GB |adjst| start | ctl2 | ctl3 | |delay |
+ */
+ {1, 2100, 0, 8, 9, 0x09080806, 0x07060606,},
+ {}
+};
+
+static const struct board_specific_parameters *udimms[] = {
+ udimm0,
+};
+
+static void ddr_board_options(memctl_options_t *popts,
+ struct dimm_params *pdimm,
+ struct fsl_ddr_controller *c)
+{
+ const struct board_specific_parameters *pbsp, *pbsp_highest = NULL;
+ unsigned long ddr_freq;
+
+ if (!pdimm->n_ranks)
+ return;
+
+ pbsp = udimms[0];
+
+ /*
+ * Get clk_adjust, wrlvl_start and wrlvl_ctl according to the board DDR
+ * frequency and n_ranks specified in the board_specific_parameters table.
+ */
+ ddr_freq = c->ddr_freq / 1000000;
+ while (pbsp->datarate_mhz_high) {
+ if (pbsp->n_ranks == pdimm->n_ranks) {
+ if (ddr_freq <= pbsp->datarate_mhz_high) {
+ popts->clk_adjust = pbsp->clk_adjust;
+ popts->wrlvl_start = pbsp->wrlvl_start;
+ popts->wrlvl_ctl_2 = pbsp->wrlvl_ctl_2;
+ popts->wrlvl_ctl_3 = pbsp->wrlvl_ctl_3;
+ goto found;
+ }
+ pbsp_highest = pbsp;
+ }
+ pbsp++;
+ }
+
+ if (pbsp_highest) {
+ printf("Error: board specific timing not found for %lu MT/s\n",
+ ddr_freq);
+ printf("Trying to use the highest speed (%u) parameters\n",
+ pbsp_highest->datarate_mhz_high);
+ popts->clk_adjust = pbsp_highest->clk_adjust;
+ popts->wrlvl_start = pbsp_highest->wrlvl_start;
+ popts->wrlvl_ctl_2 = pbsp_highest->wrlvl_ctl_2;
+ popts->wrlvl_ctl_3 = pbsp_highest->wrlvl_ctl_3;
+ } else {
+ panic("DIMM is not supported by this board");
+ }
+found:
+ debug("Found timing match: n_ranks %d, data rate %d, rank_gb %d\n",
+ pbsp->n_ranks, pbsp->datarate_mhz_high, pbsp->rank_gb);
+
+ popts->data_bus_width = 0; /* 64-bit data bus */
+ popts->bstopre = 0; /* enable auto precharge */
+
+ /*
+ * Factors to consider for half-strength driver enable:
+ * - number of DIMMs installed
+ */
+ popts->half_strength_driver_enable = 0;
+ /*
+ * Write leveling override
+ */
+ popts->wrlvl_override = 1;
+ popts->wrlvl_sample = 0xf;
+
+ /*
+ * Rtt and Rtt_WR override
+ */
+ popts->rtt_override = 0;
+
+ /* Enable ZQ calibration */
+ popts->zq_en = 1;
+
+ popts->ddr_cdr1 = DDR_CDR1_DHC_EN | DDR_CDR1_ODT(DDR4_CDR_ODT_60ohm);
+ popts->ddr_cdr2 = DDR_CDR2_ODT(DDR4_CDR_ODT_60ohm) |
+ DDR_CDR2_VREF_TRAIN_EN | DDR_CDR2_VREF_RANGE_2;
+
+ /* optimize cpo for erratum A-009942 */
+ popts->cpo_sample = 0x61;
+}
+
+static struct dimm_params dimm_params[] = {
+ {
+ .n_ranks = 1,
+ .rank_density = 2147483648u,
+ .capacity = 2147483648u,
+ .primary_sdram_width = 64,
+ .ec_sdram_width = 8,
+ .registered_dimm = 0,
+ .mirrored_dimm = 0,
+ .n_row_addr = 15,
+ .n_col_addr = 10,
+ .bank_addr_bits = 2,
+ .bank_group_bits = 0,
+ .edc_config = 2,
+ .burst_lengths_bitmask = 0x0c,
+
+ .tckmin_x_ps = 833,
+ .tckmax_ps = 1900,
+ .caslat_x = 0x000DFA00,
+ .taa_ps = 13320,
+ .trcd_ps = 13320,
+ .trp_ps = 13320,
+ .tras_ps = 32000,
+ .trc_ps = 45320,
+ .trfc1_ps = 260000,
+ .trfc2_ps = 160000,
+ .trfc4_ps = 110000,
+ .tfaw_ps = 21000,
+ .trrds_ps = 3300,
+ .trrdl_ps = 4900,
+ .tccdl_ps = 5000,
+ .trfc_slr_ps = 3500000,
+ .refresh_rate_ps = 7800000,
+ },
+};
+
+static struct fsl_ddr_controller ddrc[] = {
+ {
+ .dimm_slots_per_ctrl = ARRAY_SIZE(dimm_params),
+ .dimm_params = dimm_params,
+ .memctl_opts.ddrtype = SDRAM_TYPE_DDR4,
+ .base = IOMEM(LSCH2_DDR_ADDR),
+ .ddr_freq = LS1046A_DDR_FREQ,
+ .erratum_A008511 = 1,
+ .erratum_A009803 = 1,
+ .erratum_A010165 = 1,
+ .erratum_A009801 = 1,
+ .erratum_A009942 = 1,
+ .chip_selects_per_ctrl = 4,
+ .board_options = ddr_board_options,
+ },
+};
+
+static struct fsl_ddr_info ls1046a_info = {
+ .num_ctrls = ARRAY_SIZE(ddrc),
+ .c = ddrc,
+};
+
+extern char __dtb_fsl_tqmls1046a_mbls10xxa_start[];
+
+static noinline __noreturn void tqmls1046a_r_entry(unsigned long memsize)
+{
+ unsigned long membase = LS1046A_DDR_SDRAM_BASE;
+
+ if (get_pc() >= membase) {
+ if (memsize + membase >= 0x100000000)
+ memsize = 0x100000000 - membase;
+
+ barebox_arm_entry(membase, 0x80000000,
+ __dtb_fsl_tqmls1046a_mbls10xxa_start);
+ }
+
+ arm_cpu_lowlevel_init();
+ debug_ll_init();
+ ls1046a_init_lowlevel();
+
+ memsize = fsl_ddr_sdram(&ls1046a_info);
+
+ ls1046a_errata_post_ddr();
+
+ ls1046a_esdhc_start_image(memsize, 0, 0);
+
+ pr_err("Booting failed\n");
+
+ while (1);
+}
+
+void tqmls1046a_entry(unsigned long r0, unsigned long r1, unsigned long r2);
+
+__noreturn void tqmls1046a_entry(unsigned long r0, unsigned long r1, unsigned long r2)
+{
+ relocate_to_current_adr();
+ setup_c();
+
+ tqmls1046a_r_entry(r0);
+}
diff --git a/arch/arm/boards/tqmls1046a/start.S b/arch/arm/boards/tqmls1046a/start.S
new file mode 100644
index 0000000000..12b785af54
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/start.S
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <linux/linkage.h>
+#include <asm/barebox-arm64.h>
+
+#define STACK_TOP 0x10020000
+
+ENTRY_PROC(start_tqmls1046a)
+ mov x3, #STACK_TOP
+ mov sp, x3
+ b tqmls1046a_entry
+ENTRY_PROC_END(start_tqmls1046a)
+
diff --git a/arch/arm/boards/tqmls1046a/tqmls1046a_pbi_qspi.cfg b/arch/arm/boards/tqmls1046a/tqmls1046a_pbi_qspi.cfg
new file mode 100644
index 0000000000..32865ca2d0
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/tqmls1046a_pbi_qspi.cfg
@@ -0,0 +1,33 @@
+#Configure QSPI clock
+0957015c 40100000
+#Configure Scratch register
+09570600 00000000
+09570604 40010000
+#Disable CCI barrier transaction
+09570178 0000e010
+09180000 00000008
+#USB PHY frequency sel
+09570418 0000009c
+0957041c 0000009c
+09570420 0000009c
+#Serdes SATA
+09eb1300 80104e20
+09eb08dc 00502880
+#PEX gen3 link (errata A-010477)
+09570158 00000300
+89400890 01048000
+89500890 01048000
+89600890 01048000
+#PEX gen3 equalization preset values (errata A-008851)
+894008bc 01000000
+89400154 47474747
+89400158 47474747
+894008bc 00000000
+895008bc 01000000
+89500154 47474747
+89500158 47474747
+895008bc 00000000
+896008bc 01000000
+89600154 47474747
+89600158 47474747
+896008bc 00000000
diff --git a/arch/arm/boards/tqmls1046a/tqmls1046a_pbi_sd.cfg b/arch/arm/boards/tqmls1046a/tqmls1046a_pbi_sd.cfg
new file mode 100644
index 0000000000..7ac1398123
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/tqmls1046a_pbi_sd.cfg
@@ -0,0 +1,35 @@
+#Configure Scratch register
+09570600 00000000
+09570604 10000000
+#Disable CCI barrier transaction
+09570178 0000e010
+09180000 00000008
+#USB PHY frequency sel
+09570418 0000009c
+0957041c 0000009c
+09570420 0000009c
+#Serdes SATA
+09eb1300 80104e20
+09eb08dc 00502880
+#PEX gen3 link (errata A-010477)
+09570158 00000300
+89400890 01048000
+89500890 01048000
+89600890 01048000
+#PEX gen3 equalization preset values (errata A-008851)
+894008bc 01000000
+89400154 47474747
+89400158 47474747
+894008bc 00000000
+895008bc 01000000
+89500154 47474747
+89500158 47474747
+895008bc 00000000
+896008bc 01000000
+89600154 47474747
+89600158 47474747
+896008bc 00000000
+#Alt base register
+09570158 00001000
+#flush PBI data
+096100c0 000fffff
diff --git a/arch/arm/boards/tqmls1046a/tqmls1046a_rcw_emmc_3333_5559.cfg b/arch/arm/boards/tqmls1046a/tqmls1046a_rcw_emmc_3333_5559.cfg
new file mode 100644
index 0000000000..6c72d001c3
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/tqmls1046a_rcw_emmc_3333_5559.cfg
@@ -0,0 +1,84 @@
+# RCW values
+# 0: 1 - SYS_PLL_CFG : 0 [0x0 / 0b00]
+# 2: 6 - SYS_PLL_RAT : 6 [0x6 / 0b00110]
+# 8: 9 - MEM_PLL_CFG : 0 [0x0 / 0b00]
+# 10: 15 - MEM_PLL_RAT : 20 [0x14 / 0b010100]
+# 24: 25 - CGA_PLL1_CFG : 0 [0x0 / 0b00]
+# 26: 31 - CGA_PLL1_RAT : 16 [0x10 / 0b010000]
+# 32: 33 - CGA_PLL2_CFG : 0 [0x0 / 0b00]
+# 34: 39 - CGA_PLL2_RAT : 14 [0xe / 0b001110]
+# 96: 99 - C1_PLL_SEL : 0 [0x0 / 0b0000]
+# 128:143 - SRDS_PRTCL_S1 : 13107 [0x3333 / 0b0011001100110011]
+# 144:159 - SRDS_PRTCL_S2 : 21849 [0x5559 / 0b0101010101011001]
+# 160:161 - SRDS_PLL_REF_CLK_SEL_S1 : 3 [0x3 / 0b11]
+# 162:163 - SRDS_PLL_REF_CLK_SEL_S2 : 3 [0x3 / 0b11]
+# 168:169 - SRDS_PLL_PD_S1 : 0 [0x0 / 0b00]
+# 170:171 - SRDS_PLL_PD_S2 : 0 [0x0 / 0b00]
+# 176:177 - SRDS_DIV_PEX_S1 : 1 [0x1 / 0b01]
+# 178:179 - SRDS_DIV_PEX_S2 : 1 [0x1 / 0b01]
+# 186:187 - DDR_REFCLK_SEL : 0 [0x0 / 0b00]
+# 188:188 - SRDS_REFCLK_SEL_S1 : 0 [0x0 / 0b0]
+# 189:189 - SRDS_REFCLK_SEL_S2 : 0 [0x0 / 0b0]
+# 190:191 - DDR_FDBK_MULT : 2 [0x2 / 0b10]
+# 192:195 - PBI_SRC : 6 [0x6 / 0b0110]
+# 201:201 - BOOT_HO : 0 [0x0 / 0b0]
+# 202:202 - SB_EN : 0 [0x0 / 0b0]
+# 203:211 - IFC_MODE : 64 [0x40 / 0b001000000]
+# 224:226 - HWA_CGA_M1_CLK_SEL : 6 [0x6 / 0b110]
+# 230:231 - DRAM_LAT : 1 [0x1 / 0b01]
+# 232:232 - DDR_RATE : 0 [0x0 / 0b0]
+# 234:234 - DDR_RSV0 : 0 [0x0 / 0b0]
+# 242:242 - SYS_PLL_SPD : 0 [0x0 / 0b0]
+# 243:243 - MEM_PLL_SPD : 0 [0x0 / 0b0]
+# 244:244 - CGA_PLL1_SPD : 0 [0x0 / 0b0]
+# 245:245 - CGA_PLL2_SPD : 0 [0x0 / 0b0]
+# 264:266 - HOST_AGT_PEX : 0 [0x0 / 0b000]
+# 288:295 - GP_INFO1 : 0 [0x00 / 0b00000000]
+# 299:319 - GP_INFO2 : 0 [0x00000 / 0b000000000000000000000]
+# 354:356 - UART_EXT : 0 [0x0 / 0b000]
+# 357:359 - IRQ_EXT : 0 [0x0 / 0b000]
+# 360:362 - SPI_EXT : 0 [0x0 / 0b000]
+# 363:365 - SDHC_EXT : 0 [0x0 / 0b000]
+# 366:368 - UART_BASE : 5 [0x5 / 0b101]
+# 369:369 - ASLEEP : 0 [0x0 / 0b0]
+# 370:370 - RTC : 0 [0x0 / 0b0]
+# 371:371 - SDHC_BASE : 0 [0x0 / 0b0]
+# 372:372 - IRQ_OUT : 1 [0x1 / 0b1]
+# 373:381 - IRQ_BASE : 0 [0x00 / 0b000000000]
+# 382:383 - SPI_BASE : 0 [0x0 / 0b00]
+# 384:386 - IFC_GRP_A_EXT : 1 [0x1 / 0b001]
+# 393:395 - IFC_GRP_D_EXT : 0 [0x0 / 0b000]
+# 396:398 - IFC_GRP_E1_EXT : 0 [0x0 / 0b000]
+# 399:401 - IFC_GRP_F_EXT : 1 [0x1 / 0b001]
+# 405:405 - IFC_GRP_E1_BASE : 0 [0x0 / 0b0]
+# 407:407 - IFC_GRP_D_BASE : 0 [0x0 / 0b0]
+# 412:413 - IFC_GRP_A_BASE : 0 [0x0 / 0b00]
+# 415:415 - IFC_A_22_24 : 0 [0x0 / 0b0]
+# 416:418 - EC1 : 0 [0x0 / 0b000]
+# 419:421 - EC2 : 0 [0x0 / 0b000]
+# 422:423 - LVDD_VSEL : 1 [0x1 / 0b01]
+# 424:424 - I2C_IPGCLK_SEL : 0 [0x0 / 0b0]
+# 425:425 - EM1 : 0 [0x0 / 0b0]
+# 426:426 - EM2 : 0 [0x0 / 0b0]
+# 427:427 - EMI2_DMODE : 1 [0x1 / 0b1]
+# 428:428 - EMI2_CMODE : 0 [0x0 / 0b0]
+# 429:429 - USB_DRVVBUS : 0 [0x0 / 0b0]
+# 430:430 - USB_PWRFAULT : 0 [0x0 / 0b0]
+# 433:434 - TVDD_VSEL : 1 [0x1 / 0b01]
+# 435:436 - DVDD_VSEL : 2 [0x2 / 0b10]
+# 438:438 - EMI1_DMODE : 1 [0x1 / 0b1]
+# 439:440 - EVDD_VSEL : 0 [0x0 / 0b00]
+# 441:443 - IIC2_BASE : 0 [0x0 / 0b000]
+# 444:444 - EMI1_CMODE : 0 [0x0 / 0b0]
+# 445:447 - IIC2_EXT : 2 [0x2 / 0b010]
+# 472:481 - SYSCLK_FREQ : 600 [0x258 / 0b1001011000]
+# 509:511 - HWA_CGA_M2_CLK_SEL : 1 [0x1 / 0b001]
+
+
+#PBL preamble and RCW header
+aa55aa55 01ee0100
+# RCW
+0c140010 0e000000 00000000 00000000
+33335559 f0005002 60040000 c1000000
+00000000 00000000 00000000 00028800
+20004000 01103202 00000096 00000001
diff --git a/arch/arm/boards/tqmls1046a/tqmls1046a_rcw_qspi_3333_5559.cfg b/arch/arm/boards/tqmls1046a/tqmls1046a_rcw_qspi_3333_5559.cfg
new file mode 100644
index 0000000000..395c75c7d0
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/tqmls1046a_rcw_qspi_3333_5559.cfg
@@ -0,0 +1,84 @@
+# RCW values
+# 0: 1 - SYS_PLL_CFG : 0 [0x0 / 0b00]
+# 2: 6 - SYS_PLL_RAT : 6 [0x6 / 0b00110]
+# 8: 9 - MEM_PLL_CFG : 0 [0x0 / 0b00]
+# 10: 15 - MEM_PLL_RAT : 20 [0x14 / 0b010100]
+# 24: 25 - CGA_PLL1_CFG : 0 [0x0 / 0b00]
+# 26: 31 - CGA_PLL1_RAT : 16 [0x10 / 0b010000]
+# 32: 33 - CGA_PLL2_CFG : 0 [0x0 / 0b00]
+# 34: 39 - CGA_PLL2_RAT : 14 [0xe / 0b001110]
+# 96: 99 - C1_PLL_SEL : 0 [0x0 / 0b0000]
+# 128:143 - SRDS_PRTCL_S1 : 13107 [0x3333 / 0b0011001100110011]
+# 144:159 - SRDS_PRTCL_S2 : 21849 [0x5559 / 0b0101010101011001]
+# 160:161 - SRDS_PLL_REF_CLK_SEL_S1 : 3 [0x3 / 0b11]
+# 162:163 - SRDS_PLL_REF_CLK_SEL_S2 : 3 [0x3 / 0b11]
+# 168:169 - SRDS_PLL_PD_S1 : 0 [0x0 / 0b00]
+# 170:171 - SRDS_PLL_PD_S2 : 0 [0x0 / 0b00]
+# 176:177 - SRDS_DIV_PEX_S1 : 1 [0x1 / 0b01]
+# 178:179 - SRDS_DIV_PEX_S2 : 1 [0x1 / 0b01]
+# 186:187 - DDR_REFCLK_SEL : 0 [0x0 / 0b00]
+# 188:188 - SRDS_REFCLK_SEL_S1 : 0 [0x0 / 0b0]
+# 189:189 - SRDS_REFCLK_SEL_S2 : 0 [0x0 / 0b0]
+# 190:191 - DDR_FDBK_MULT : 2 [0x2 / 0b10]
+# 192:195 - PBI_SRC : 4 [0x4 / 0b0100]
+# 201:201 - BOOT_HO : 0 [0x0 / 0b0]
+# 202:202 - SB_EN : 0 [0x0 / 0b0]
+# 203:211 - IFC_MODE : 37 [0x25 / 0b000100101]
+# 224:226 - HWA_CGA_M1_CLK_SEL : 6 [0x6 / 0b110]
+# 230:231 - DRAM_LAT : 1 [0x1 / 0b01]
+# 232:232 - DDR_RATE : 0 [0x0 / 0b0]
+# 234:234 - DDR_RSV0 : 0 [0x0 / 0b0]
+# 242:242 - SYS_PLL_SPD : 0 [0x0 / 0b0]
+# 243:243 - MEM_PLL_SPD : 0 [0x0 / 0b0]
+# 244:244 - CGA_PLL1_SPD : 0 [0x0 / 0b0]
+# 245:245 - CGA_PLL2_SPD : 0 [0x0 / 0b0]
+# 264:266 - HOST_AGT_PEX : 0 [0x0 / 0b000]
+# 288:295 - GP_INFO1 : 0 [0x00 / 0b00000000]
+# 299:319 - GP_INFO2 : 0 [0x00000 / 0b000000000000000000000]
+# 354:356 - UART_EXT : 0 [0x0 / 0b000]
+# 357:359 - IRQ_EXT : 0 [0x0 / 0b000]
+# 360:362 - SPI_EXT : 0 [0x0 / 0b000]
+# 363:365 - SDHC_EXT : 0 [0x0 / 0b000]
+# 366:368 - UART_BASE : 5 [0x5 / 0b101]
+# 369:369 - ASLEEP : 0 [0x0 / 0b0]
+# 370:370 - RTC : 0 [0x0 / 0b0]
+# 371:371 - SDHC_BASE : 0 [0x0 / 0b0]
+# 372:372 - IRQ_OUT : 1 [0x1 / 0b1]
+# 373:381 - IRQ_BASE : 0 [0x00 / 0b000000000]
+# 382:383 - SPI_BASE : 0 [0x0 / 0b00]
+# 384:386 - IFC_GRP_A_EXT : 1 [0x1 / 0b001]
+# 393:395 - IFC_GRP_D_EXT : 0 [0x0 / 0b000]
+# 396:398 - IFC_GRP_E1_EXT : 0 [0x0 / 0b000]
+# 399:401 - IFC_GRP_F_EXT : 1 [0x1 / 0b001]
+# 405:405 - IFC_GRP_E1_BASE : 0 [0x0 / 0b0]
+# 407:407 - IFC_GRP_D_BASE : 0 [0x0 / 0b0]
+# 412:413 - IFC_GRP_A_BASE : 0 [0x0 / 0b00]
+# 415:415 - IFC_A_22_24 : 0 [0x0 / 0b0]
+# 416:418 - EC1 : 0 [0x0 / 0b000]
+# 419:421 - EC2 : 0 [0x0 / 0b000]
+# 422:423 - LVDD_VSEL : 1 [0x1 / 0b01]
+# 424:424 - I2C_IPGCLK_SEL : 0 [0x0 / 0b0]
+# 425:425 - EM1 : 0 [0x0 / 0b0]
+# 426:426 - EM2 : 0 [0x0 / 0b0]
+# 427:427 - EMI2_DMODE : 1 [0x1 / 0b1]
+# 428:428 - EMI2_CMODE : 0 [0x0 / 0b0]
+# 429:429 - USB_DRVVBUS : 0 [0x0 / 0b0]
+# 430:430 - USB_PWRFAULT : 0 [0x0 / 0b0]
+# 433:434 - TVDD_VSEL : 1 [0x1 / 0b01]
+# 435:436 - DVDD_VSEL : 2 [0x2 / 0b10]
+# 438:438 - EMI1_DMODE : 1 [0x1 / 0b1]
+# 439:440 - EVDD_VSEL : 0 [0x0 / 0b00]
+# 441:443 - IIC2_BASE : 0 [0x0 / 0b000]
+# 444:444 - EMI1_CMODE : 0 [0x0 / 0b0]
+# 445:447 - IIC2_EXT : 2 [0x2 / 0b010]
+# 472:481 - SYSCLK_FREQ : 600 [0x258 / 0b1001011000]
+# 509:511 - HWA_CGA_M2_CLK_SEL : 1 [0x1 / 0b001]
+
+
+#PBL preamble and RCW header
+aa55aa55 01ee0100
+# RCW
+0c140010 0e000000 00000000 00000000
+33335559 f0005002 40025000 c1000000
+00000000 00000000 00000000 00028800
+20004000 01103202 00000096 00000001
diff --git a/arch/arm/boards/tqmls1046a/tqmls1046a_rcw_sd_3333_5559.cfg b/arch/arm/boards/tqmls1046a/tqmls1046a_rcw_sd_3333_5559.cfg
new file mode 100644
index 0000000000..4ef6d576ed
--- /dev/null
+++ b/arch/arm/boards/tqmls1046a/tqmls1046a_rcw_sd_3333_5559.cfg
@@ -0,0 +1,84 @@
+# RCW values
+# 0: 1 - SYS_PLL_CFG : 0 [0x0 / 0b00]
+# 2: 6 - SYS_PLL_RAT : 6 [0x6 / 0b00110]
+# 8: 9 - MEM_PLL_CFG : 0 [0x0 / 0b00]
+# 10: 15 - MEM_PLL_RAT : 20 [0x14 / 0b010100]
+# 24: 25 - CGA_PLL1_CFG : 0 [0x0 / 0b00]
+# 26: 31 - CGA_PLL1_RAT : 16 [0x10 / 0b010000]
+# 32: 33 - CGA_PLL2_CFG : 0 [0x0 / 0b00]
+# 34: 39 - CGA_PLL2_RAT : 14 [0xe / 0b001110]
+# 96: 99 - C1_PLL_SEL : 0 [0x0 / 0b0000]
+# 128:143 - SRDS_PRTCL_S1 : 13107 [0x3333 / 0b0011001100110011]
+# 144:159 - SRDS_PRTCL_S2 : 21849 [0x5559 / 0b0101010101011001]
+# 160:161 - SRDS_PLL_REF_CLK_SEL_S1 : 3 [0x3 / 0b11]
+# 162:163 - SRDS_PLL_REF_CLK_SEL_S2 : 3 [0x3 / 0b11]
+# 168:169 - SRDS_PLL_PD_S1 : 0 [0x0 / 0b00]
+# 170:171 - SRDS_PLL_PD_S2 : 0 [0x0 / 0b00]
+# 176:177 - SRDS_DIV_PEX_S1 : 1 [0x1 / 0b01]
+# 178:179 - SRDS_DIV_PEX_S2 : 1 [0x1 / 0b01]
+# 186:187 - DDR_REFCLK_SEL : 0 [0x0 / 0b00]
+# 188:188 - SRDS_REFCLK_SEL_S1 : 0 [0x0 / 0b0]
+# 189:189 - SRDS_REFCLK_SEL_S2 : 0 [0x0 / 0b0]
+# 190:191 - DDR_FDBK_MULT : 2 [0x2 / 0b10]
+# 192:195 - PBI_SRC : 6 [0x6 / 0b0110]
+# 201:201 - BOOT_HO : 0 [0x0 / 0b0]
+# 202:202 - SB_EN : 0 [0x0 / 0b0]
+# 203:211 - IFC_MODE : 64 [0x40 / 0b001000000]
+# 224:226 - HWA_CGA_M1_CLK_SEL : 6 [0x6 / 0b110]
+# 230:231 - DRAM_LAT : 1 [0x1 / 0b01]
+# 232:232 - DDR_RATE : 0 [0x0 / 0b0]
+# 234:234 - DDR_RSV0 : 0 [0x0 / 0b0]
+# 242:242 - SYS_PLL_SPD : 0 [0x0 / 0b0]
+# 243:243 - MEM_PLL_SPD : 0 [0x0 / 0b0]
+# 244:244 - CGA_PLL1_SPD : 0 [0x0 / 0b0]
+# 245:245 - CGA_PLL2_SPD : 0 [0x0 / 0b0]
+# 264:266 - HOST_AGT_PEX : 0 [0x0 / 0b000]
+# 288:295 - GP_INFO1 : 0 [0x00 / 0b00000000]
+# 299:319 - GP_INFO2 : 0 [0x00000 / 0b000000000000000000000]
+# 354:356 - UART_EXT : 0 [0x0 / 0b000]
+# 357:359 - IRQ_EXT : 0 [0x0 / 0b000]
+# 360:362 - SPI_EXT : 0 [0x0 / 0b000]
+# 363:365 - SDHC_EXT : 0 [0x0 / 0b000]
+# 366:368 - UART_BASE : 5 [0x5 / 0b101]
+# 369:369 - ASLEEP : 0 [0x0 / 0b0]
+# 370:370 - RTC : 0 [0x0 / 0b0]
+# 371:371 - SDHC_BASE : 0 [0x0 / 0b0]
+# 372:372 - IRQ_OUT : 1 [0x1 / 0b1]
+# 373:381 - IRQ_BASE : 0 [0x00 / 0b000000000]
+# 382:383 - SPI_BASE : 0 [0x0 / 0b00]
+# 384:386 - IFC_GRP_A_EXT : 1 [0x1 / 0b001]
+# 393:395 - IFC_GRP_D_EXT : 0 [0x0 / 0b000]
+# 396:398 - IFC_GRP_E1_EXT : 0 [0x0 / 0b000]
+# 399:401 - IFC_GRP_F_EXT : 1 [0x1 / 0b001]
+# 405:405 - IFC_GRP_E1_BASE : 0 [0x0 / 0b0]
+# 407:407 - IFC_GRP_D_BASE : 0 [0x0 / 0b0]
+# 412:413 - IFC_GRP_A_BASE : 0 [0x0 / 0b00]
+# 415:415 - IFC_A_22_24 : 0 [0x0 / 0b0]
+# 416:418 - EC1 : 0 [0x0 / 0b000]
+# 419:421 - EC2 : 0 [0x0 / 0b000]
+# 422:423 - LVDD_VSEL : 1 [0x1 / 0b01]
+# 424:424 - I2C_IPGCLK_SEL : 0 [0x0 / 0b0]
+# 425:425 - EM1 : 0 [0x0 / 0b0]
+# 426:426 - EM2 : 0 [0x0 / 0b0]
+# 427:427 - EMI2_DMODE : 1 [0x1 / 0b1]
+# 428:428 - EMI2_CMODE : 0 [0x0 / 0b0]
+# 429:429 - USB_DRVVBUS : 0 [0x0 / 0b0]
+# 430:430 - USB_PWRFAULT : 0 [0x0 / 0b0]
+# 433:434 - TVDD_VSEL : 1 [0x1 / 0b01]
+# 435:436 - DVDD_VSEL : 2 [0x2 / 0b10]
+# 438:438 - EMI1_DMODE : 1 [0x1 / 0b1]
+# 439:440 - EVDD_VSEL : 2 [0x2 / 0b10]
+# 441:443 - IIC2_BASE : 0 [0x0 / 0b000]
+# 444:444 - EMI1_CMODE : 0 [0x0 / 0b0]
+# 445:447 - IIC2_EXT : 1 [0x1 / 0b001]
+# 472:481 - SYSCLK_FREQ : 600 [0x258 / 0b1001011000]
+# 509:511 - HWA_CGA_M2_CLK_SEL : 1 [0x1 / 0b001]
+
+
+#PBL preamble and RCW header
+aa55aa55 01ee0100
+# RCW
+0c140010 0e000000 00000000 00000000
+33335559 f0005002 60040000 c1000000
+00000000 00000000 00000000 00028800
+20004000 01103301 00000096 00000001
diff --git a/arch/arm/configs/layerscape_defconfig b/arch/arm/configs/layerscape_defconfig
new file mode 100644
index 0000000000..dadbcc214c
--- /dev/null
+++ b/arch/arm/configs/layerscape_defconfig
@@ -0,0 +1,111 @@
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_MACH_LS1046ARDB=y
+CONFIG_MACH_TQMLS1046A=y
+CONFIG_MMU=y
+CONFIG_MALLOC_SIZE=0x0
+CONFIG_MALLOC_TLSF=y
+CONFIG_KALLSYMS=y
+CONFIG_RELOCATABLE=y
+CONFIG_HUSH_FANCY_PROMPT=y
+CONFIG_CMDLINE_EDITING=y
+CONFIG_AUTO_COMPLETE=y
+CONFIG_MENU=y
+CONFIG_BOOTM_SHOW_TYPE=y
+CONFIG_BOOTM_VERBOSE=y
+CONFIG_BOOTM_INITRD=y
+CONFIG_BOOTM_OFTREE=y
+CONFIG_BOOTM_OFTREE_UIMAGE=y
+CONFIG_BLSPEC=y
+CONFIG_CONSOLE_ACTIVATE_NONE=y
+CONFIG_CONSOLE_ALLOW_COLOR=y
+CONFIG_PBL_CONSOLE=y
+CONFIG_PARTITION_DISK_EFI=y
+CONFIG_DEFAULT_ENVIRONMENT_GENERIC_NEW=y
+CONFIG_RESET_SOURCE=y
+CONFIG_DEBUG_LL=y
+CONFIG_CMD_DMESG=y
+CONFIG_LONGHELP=y
+CONFIG_CMD_IOMEM=y
+CONFIG_CMD_IMD=y
+CONFIG_CMD_MEMINFO=y
+CONFIG_CMD_REGULATOR=y
+CONFIG_CMD_MMC_EXTCSD=y
+CONFIG_CMD_GO=y
+CONFIG_CMD_RESET=y
+CONFIG_CMD_UIMAGE=y
+CONFIG_CMD_PARTITION=y
+CONFIG_CMD_EXPORT=y
+CONFIG_CMD_LOADENV=y
+CONFIG_CMD_PRINTENV=y
+CONFIG_CMD_MAGICVAR=y
+CONFIG_CMD_MAGICVAR_HELP=y
+CONFIG_CMD_SAVEENV=y
+CONFIG_CMD_FILETYPE=y
+CONFIG_CMD_LN=y
+CONFIG_CMD_MD5SUM=y
+CONFIG_CMD_UNCOMPRESS=y
+CONFIG_CMD_LET=y
+CONFIG_CMD_MSLEEP=y
+CONFIG_CMD_READF=y
+CONFIG_CMD_SLEEP=y
+CONFIG_CMD_DHCP=y
+CONFIG_CMD_MIITOOL=y
+CONFIG_CMD_PING=y
+CONFIG_CMD_ECHO_E=y
+CONFIG_CMD_EDIT=y
+CONFIG_CMD_MENU=y
+CONFIG_CMD_MENU_MANAGEMENT=y
+CONFIG_CMD_MENUTREE=y
+CONFIG_CMD_READLINE=y
+CONFIG_CMD_TIMEOUT=y
+CONFIG_CMD_CRC=y
+CONFIG_CMD_CRC_CMP=y
+CONFIG_CMD_MEMTEST=y
+CONFIG_CMD_MM=y
+CONFIG_CMD_CLK=y
+CONFIG_CMD_DETECT=y
+CONFIG_CMD_FLASH=y
+CONFIG_CMD_GPIO=y
+CONFIG_CMD_I2C=y
+CONFIG_CMD_LED=y
+CONFIG_CMD_SPI=y
+CONFIG_CMD_LED_TRIGGER=y
+CONFIG_CMD_WD=y
+CONFIG_CMD_BAREBOX_UPDATE=y
+CONFIG_CMD_OF_NODE=y
+CONFIG_CMD_OF_PROPERTY=y
+CONFIG_CMD_OFTREE=y
+CONFIG_CMD_TIME=y
+CONFIG_NET=y
+CONFIG_NET_NETCONSOLE=y
+CONFIG_OFDEVICE=y
+CONFIG_OF_BAREBOX_DRIVERS=y
+CONFIG_DRIVER_SERIAL_NS16550=y
+CONFIG_DRIVER_NET_FSL_FMAN=y
+CONFIG_DP83867_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_NET_DSA_MV88E6XXX=y
+CONFIG_I2C=y
+CONFIG_I2C_IMX=y
+CONFIG_MCI=y
+CONFIG_MCI_MMC_BOOT_PARTITIONS=y
+CONFIG_MCI_IMX_ESDHC=y
+CONFIG_LED=y
+CONFIG_LED_GPIO=y
+CONFIG_LED_GPIO_OF=y
+CONFIG_LED_TRIGGERS=y
+CONFIG_EEPROM_AT25=y
+CONFIG_EEPROM_AT24=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_IMX=y
+CONFIG_NVMEM=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED=y
+CONFIG_FS_EXT4=y
+CONFIG_FS_TFTP=y
+CONFIG_FS_NFS=y
+CONFIG_FS_FAT=y
+CONFIG_FS_FAT_WRITE=y
+CONFIG_FS_FAT_LFN=y
+CONFIG_ZLIB=y
+CONFIG_LZO_DECOMPRESS=y
diff --git a/arch/arm/dts/Makefile b/arch/arm/dts/Makefile
index f694871efc..a7654a39be 100644
--- a/arch/arm/dts/Makefile
+++ b/arch/arm/dts/Makefile
@@ -132,5 +132,7 @@ pbl-dtb-$(CONFIG_MACH_XILINX_ZCU104) += zynqmp-zcu104-revA.dtb.o
pbl-dtb-$(CONFIG_MACH_ZII_IMX7D_RPU2) += imx7d-zii-rpu2.dtb.o
pbl-dtb-$(CONFIG_MACH_WAGO_PFC_AM35XX) += am35xx-pfc-750_820x.dtb.o
+pbl-dtb-$(CONFIG_MACH_LS1046ARDB) += fsl-ls1046a-rdb.dtb.o
+pbl-dtb-$(CONFIG_MACH_TQMLS1046A) += fsl-tqmls1046a-mbls10xxa.dtb.o
clean-files := *.dtb *.dtb.S .*.dtc .*.pre .*.dts *.dtb.lzo
diff --git a/arch/arm/dts/fsl-ls1046a-rdb.dts b/arch/arm/dts/fsl-ls1046a-rdb.dts
new file mode 100644
index 0000000000..e16948bc8a
--- /dev/null
+++ b/arch/arm/dts/fsl-ls1046a-rdb.dts
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <arm64/freescale/fsl-ls1046a-rdb.dts>
+
+/ {
+ chosen {
+ stdout-path = &duart0;
+
+ environment {
+ compatible = "barebox,environment";
+ device-path = &environment_sd;
+ };
+ };
+
+ aliases {
+ mmc0 = &esdhc;
+ };
+};
+
+&esdhc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ environment_sd: partition@200000 {
+ label = "barebox-environment";
+ reg = <0x200000 0x20000>;
+ };
+};
+
+&fman0 {
+ ethernet@e0000 {
+ status = "disabled";
+ };
+
+ ethernet@e2000 {
+ status = "disabled";
+ };
+
+ ethernet@e4000 {
+ phy-mode = "rgmii-id";
+ };
+
+ ethernet@e6000 {
+ phy-mode = "rgmii-id";
+ };
+
+ ethernet@e8000 {
+ };
+
+ ethernet@ea000 {
+ };
+
+ ethernet@f0000 {
+ };
+
+ ethernet@f2000 {
+ };
+
+ mdio@fc000 {
+ };
+
+ mdio@fd000 {
+ };
+
+ mdio@e1000 {
+ status = "disabled";
+ };
+
+ mdio@e3000 {
+ status = "disabled";
+ };
+
+ mdio@e5000 {
+ status = "disabled";
+ };
+
+ mdio@e7000 {
+ status = "disabled";
+ };
+
+ mdio@e9000 {
+ status = "disabled";
+ };
+
+ mdio@eb000 {
+ status = "disabled";
+ };
+
+ mdio@f1000 {
+ status = "disabled";
+ };
+
+ mdio@f3000 {
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/dts/fsl-tqmls1046a-mbls10xxa.dts b/arch/arm/dts/fsl-tqmls1046a-mbls10xxa.dts
new file mode 100644
index 0000000000..f21479eef8
--- /dev/null
+++ b/arch/arm/dts/fsl-tqmls1046a-mbls10xxa.dts
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree Include file for TQMLS1046A SoM on MBLS10xxA from TQ
+ *
+ * Copyright 2018 TQ-Systems GmbH
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+
+#include "fsl-tqmls1046a.dtsi"
+
+/ {
+ model = "TQ TQMLS1046A SoM on MBLS10xxA board";
+ compatible = "tqc,tqmls1046a", "fsl,ls1046a";
+
+ aliases {
+ serial0 = &duart0;
+ serial1 = &duart1;
+ mmc0 = &esdhc;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ gpio-keys,name = "gpio-keys";
+ poll-interval = <100>;
+ autorepeat;
+
+ button0 {
+ label = "button0";
+ gpios = <&gpioexp3 5 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_F1>;
+ };
+
+ button1 {
+ label = "button1";
+ gpios = <&gpioexp3 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_F2>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ user {
+ gpios = <&gpioexp3 13 GPIO_ACTIVE_LOW>;
+ label = "led:user";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+};
+
+
+&duart0 {
+ status = "okay";
+};
+
+&duart1 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+
+ i2c-mux@70 {
+ compatible = "nxp,pca9544";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+
+ gpioexp1: pca9555@20 {
+ compatible = "nxp,pca9555";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpioexp2: pca9555@21 {
+ compatible = "nxp,pca9555";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpioexp3: pca9555@22 {
+ compatible = "nxp,pca9555";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+ };
+ };
+};
+
+&usb1 {
+ dr_mode = "otg";
+};
+
+#include <arm64/freescale/fsl-ls1046-post.dtsi>
+#include <dt-bindings/net/ti-dp83867.h>
+
+&fman0 {
+ status = "okay";
+
+ ethernet@e0000 {
+ status = "disabled";
+ };
+
+ ethernet@e2000 {
+ phy-handle = <&qsgmii1_phy2>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@e4000 {
+ phy-handle = <&rgmii_phy1>;
+ phy-connection-type = "rgmii";
+ phy-mode = "rgmii-id";
+ };
+
+ ethernet@e6000 {
+ phy-handle = <&rgmii_phy2>;
+ phy-connection-type = "rgmii";
+ phy-mode = "rgmii-id";
+ };
+
+ ethernet@e8000 {
+ status = "disabled";
+ };
+
+ ethernet@ea000 {
+ phy-handle = <&qsgmii2_phy2>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@f0000 {
+ phy-handle = <&qsgmii1_phy1>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@f2000 {
+ phy-handle = <&qsgmii2_phy1>;
+ phy-connection-type = "sgmii";
+ };
+
+ mdio@e1000 {
+ status = "disabled";
+ };
+
+ mdio@e3000 {
+ status = "disabled";
+ };
+
+ mdio@e5000 {
+ status = "disabled";
+ };
+
+ mdio@e7000 {
+ status = "disabled";
+ };
+
+ mdio@e9000 {
+ status = "disabled";
+ };
+
+ mdio@eb000 {
+ status = "disabled";
+ };
+
+ mdio@f1000 {
+ status = "disabled";
+ };
+
+ mdio@f3000 {
+ status = "disabled";
+ };
+
+ mdio@fc000 {
+ rgmii_phy1: ethernet-phy@0e {
+ reg = <0x0e>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
+ };
+
+ qsgmii1_phy1: ethernet-phy@1c {
+ reg = <0x1c>;
+ };
+
+ qsgmii1_phy2: ethernet-phy@1d {
+ reg = <0x1d>;
+ };
+ };
+
+ mdio@fd000 {
+ rgmii_phy2: ethernet-phy@0c {
+ reg = <0x0c>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
+ };
+
+ qsgmii2_phy1: ethernet-phy@00 {
+ reg = <0x00>;
+ };
+
+ qsgmii2_phy2: ethernet-phy@01 {
+ reg = <0x01>;
+ };
+ };
+};
diff --git a/arch/arm/dts/fsl-tqmls1046a.dtsi b/arch/arm/dts/fsl-tqmls1046a.dtsi
new file mode 100644
index 0000000000..4717e66857
--- /dev/null
+++ b/arch/arm/dts/fsl-tqmls1046a.dtsi
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree Include file for LS1046A based SoM of TQ
+ *
+ * Copyright 2018 TQ-Systems GmbH
+ */
+
+#include <arm64/freescale/fsl-ls1046a.dtsi>
+
+&i2c0 {
+ status = "okay";
+
+ temp-sensor@18 {
+ compatible = "jc42";
+ reg = <0x18>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf85063";
+ reg = <0x51>;
+ };
+
+ eeprom@57 {
+ compatible = "atmel,24c256";
+ reg = <0x57>;
+ };
+};
+
+&qspi {
+ num-cs = <2>;
+ bus-num = <0>;
+ status = "okay";
+
+ qflash0: mx66u51235f@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <108000000>;
+ reg = <0>;
+ };
+
+ qflash1: mx66u51235f@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <108000000>;
+ compatible = "jedec,spi-nor";
+ reg = <1>;
+ };
+};
diff --git a/arch/arm/lib/pbl.lds.S b/arch/arm/lib/pbl.lds.S
index 53c9ce0fe6..300671bb51 100644
--- a/arch/arm/lib/pbl.lds.S
+++ b/arch/arm/lib/pbl.lds.S
@@ -80,12 +80,16 @@ SECTIONS
.dynsym : { *(.dynsym) }
.__dynsym_end : { *(.__dynsym_end) }
+ pbl_code_size = . - BASE;
+
. = ALIGN(4);
.__bss_start : { *(.__bss_start) }
.bss : { *(.bss*) }
.__bss_stop : { *(.__bss_stop) }
_end = .;
+ pbl_memory_size = . - BASE;
+
. = ALIGN(4);
__piggydata_start = .;
.piggydata : {
@@ -95,6 +99,8 @@ SECTIONS
.image_end : { *(.__image_end) }
+ pbl_image_size = . - BASE;
+
_barebox_image_size = __image_end - BASE;
_barebox_pbl_size = __bss_start - BASE;
}
diff --git a/arch/arm/lib64/Makefile b/arch/arm/lib64/Makefile
index 4c0019fabe..5068431342 100644
--- a/arch/arm/lib64/Makefile
+++ b/arch/arm/lib64/Makefile
@@ -6,4 +6,4 @@ obj-$(CONFIG_ARM_OPTIMZED_STRING_FUNCTIONS) += memset.o string.o
extra-y += barebox.lds
obj-pbl-y += runtime-offset.o
-pbl-y += div0.o
+pbl-y += div0.o pbl.o
diff --git a/arch/arm/lib64/pbl.c b/arch/arm/lib64/pbl.c
new file mode 100644
index 0000000000..0cef08e4d2
--- /dev/null
+++ b/arch/arm/lib64/pbl.c
@@ -0,0 +1,17 @@
+#include <asm/system.h>
+#include <clock.h>
+#include <common.h>
+
+void udelay(unsigned long us)
+{
+ unsigned long cntfrq = get_cntfrq();
+ unsigned long ticks = (us * cntfrq) / 1000000;
+ unsigned long start = get_cntpct();
+
+ while ((long)(start + ticks - get_cntpct()) > 0);
+}
+
+void mdelay(unsigned long ms)
+{
+ udelay(ms * 1000);
+}
diff --git a/arch/arm/mach-layerscape/Kconfig b/arch/arm/mach-layerscape/Kconfig
new file mode 100644
index 0000000000..3a44f3fea1
--- /dev/null
+++ b/arch/arm/mach-layerscape/Kconfig
@@ -0,0 +1,21 @@
+if ARCH_LAYERSCAPE
+
+config ARCH_LS1046
+ select CPU_V8
+ select SYS_SUPPORTS_64BIT_KERNEL
+ bool
+
+config MACH_LS1046ARDB
+ bool "QorIQ LS1046A Reference Design Board"
+ select ARCH_LS1046
+ select DDR_SPD
+ select MCI_IMX_ESDHC_PBL
+ select I2C_IMX_EARLY
+ select DDR_FSL
+ select DDR_FSL_DDR4
+
+config MACH_TQMLS1046A
+ bool "TQ TQMLS1046A Board"
+ select ARCH_LS1046
+
+endif
diff --git a/arch/arm/mach-layerscape/Makefile b/arch/arm/mach-layerscape/Makefile
new file mode 100644
index 0000000000..269839254b
--- /dev/null
+++ b/arch/arm/mach-layerscape/Makefile
@@ -0,0 +1,4 @@
+obj- := __dummy__.o
+lwl-y += lowlevel.o errata.o
+lwl-$(CONFIG_ARCH_LS1046) += lowlevel-ls1046a.o
+obj-y += icid.o
diff --git a/arch/arm/mach-layerscape/errata.c b/arch/arm/mach-layerscape/errata.c
new file mode 100644
index 0000000000..4f4b759ddb
--- /dev/null
+++ b/arch/arm/mach-layerscape/errata.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <common.h>
+#include <io.h>
+#include <soc/fsl/immap_lsch2.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <asm/system.h>
+#include <mach/errata.h>
+#include <mach/lowlevel.h>
+
+#define scfg_clrsetbits32(addr, clear, set) clrsetbits_be32(addr, clear, set)
+#define scfg_clrbits32(addr, clear) clrbits_be32(addr, clear)
+
+static inline void set_usb_pcstxswingfull(u32 __iomem *scfg, u32 offset)
+{
+ scfg_clrsetbits32(scfg + offset / 4,
+ 0x7f << 9,
+ SCFG_USB_PCSTXSWINGFULL << 9);
+}
+
+static void erratum_a008997_ls1046a(void)
+{
+ u32 __iomem *scfg = (u32 __iomem *)LSCH2_SCFG_ADDR;
+
+ set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB1);
+ set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB2);
+ set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB3);
+}
+
+#define PROGRAM_USB_PHY_RX_OVRD_IN_HI(phy) \
+ out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_1); \
+ out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_2); \
+ out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_3); \
+ out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_4)
+
+static void erratum_a009007_ls1046a(void)
+{
+ void __iomem *usb_phy = IOMEM(SCFG_USB_PHY1);
+
+ PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);
+ usb_phy = (void __iomem *)SCFG_USB_PHY2;
+ PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);
+
+ usb_phy = (void __iomem *)SCFG_USB_PHY3;
+ PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);
+}
+
+static inline void set_usb_txvreftune(u32 __iomem *scfg, u32 offset)
+{
+ scfg_clrsetbits32(scfg + offset / 4, 0xf << 6, SCFG_USB_TXVREFTUNE << 6);
+}
+
+static void erratum_a009008_ls1046a(void)
+{
+ u32 __iomem *scfg = IOMEM(LSCH2_SCFG_ADDR);
+
+ set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB1);
+ set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB2);
+ set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB3);
+}
+
+static inline void set_usb_sqrxtune(u32 __iomem *scfg, u32 offset)
+{
+ scfg_clrbits32(scfg + offset / 4, SCFG_USB_SQRXTUNE_MASK << 23);
+}
+
+static void erratum_a009798_ls1046a(void)
+{
+ u32 __iomem *scfg = IOMEM(LSCH2_SCFG_ADDR);
+
+ set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB1);
+ set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB2);
+ set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB3);
+}
+
+static void erratum_a008850_early(void)
+{
+ /* part 1 of 2 */
+ struct ccsr_cci400 __iomem *cci = IOMEM(LSCH2_CCI400_ADDR);
+ struct ccsr_ddr __iomem *ddr = IOMEM(LSCH2_DDR_ADDR);
+
+ /* Skip if running at lower exception level */
+ if (current_el() < 3)
+ return;
+
+ /* disables propagation of barrier transactions to DDRC from CCI400 */
+ out_le32(&cci->ctrl_ord, CCI400_CTRLORD_TERM_BARRIER);
+
+ /* disable the re-ordering in DDRC */
+ ddr_out32(&ddr->eor, DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
+}
+
+/* erratum_a009942_check_cpo */
+
+void ls1046a_errata(void)
+{
+ erratum_a008850_early();
+ erratum_a009008_ls1046a();
+ erratum_a009798_ls1046a();
+ erratum_a008997_ls1046a();
+ erratum_a009007_ls1046a();
+}
+
+static void erratum_a008850_post(void)
+{
+ /* part 2 of 2 */
+ struct ccsr_cci400 __iomem *cci = IOMEM(LSCH2_CCI400_ADDR);
+ struct ccsr_ddr __iomem *ddr = IOMEM(LSCH2_DDR_ADDR);
+ u32 tmp;
+
+ /* Skip if running at lower exception level */
+ if (current_el() < 3)
+ return;
+
+ /* enable propagation of barrier transactions to DDRC from CCI400 */
+ out_le32(&cci->ctrl_ord, CCI400_CTRLORD_EN_BARRIER);
+
+ /* enable the re-ordering in DDRC */
+ tmp = ddr_in32(&ddr->eor);
+ tmp &= ~(DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
+ ddr_out32(&ddr->eor, tmp);
+}
+
+/*
+ * This additional workaround of A009942 checks the condition to determine if
+ * the CPO value set by the existing A009942 workaround needs to be updated.
+ * If so, print a warning to prompt the user to reconfigure DDR debug_29[24:31]
+ * with the expected optimal value; the optimal value is highly board dependent.
+ */
+static void erratum_a009942_check_cpo(void)
+{
+ struct ccsr_ddr __iomem *ddr =
+ (struct ccsr_ddr __iomem *)(LSCH2_DDR_ADDR);
+ u32 cpo, cpo_e, cpo_o, cpo_target, cpo_optimal;
+ u32 cpo_min = ddr_in32(&ddr->debug[9]) >> 24;
+ u32 cpo_max = cpo_min;
+ u32 sdram_cfg, i, tmp, lanes, ddr_type;
+ bool update_cpo = false, has_ecc = false;
+
+ sdram_cfg = ddr_in32(&ddr->sdram_cfg);
+ if (sdram_cfg & SDRAM_CFG_32_BE)
+ lanes = 4;
+ else if (sdram_cfg & SDRAM_CFG_16_BE)
+ lanes = 2;
+ else
+ lanes = 8;
+
+ if (sdram_cfg & SDRAM_CFG_ECC_EN)
+ has_ecc = true;
+
+ /* determine the maximum and minimum CPO values */
+ for (i = 9; i < 9 + lanes / 2; i++) {
+ cpo = ddr_in32(&ddr->debug[i]);
+ cpo_e = cpo >> 24;
+ cpo_o = (cpo >> 8) & 0xff;
+ tmp = min(cpo_e, cpo_o);
+ if (tmp < cpo_min)
+ cpo_min = tmp;
+ tmp = max(cpo_e, cpo_o);
+ if (tmp > cpo_max)
+ cpo_max = tmp;
+ }
+
+ if (has_ecc) {
+ cpo = ddr_in32(&ddr->debug[13]);
+ cpo = cpo >> 24;
+ if (cpo < cpo_min)
+ cpo_min = cpo;
+ if (cpo > cpo_max)
+ cpo_max = cpo;
+ }
+
+ cpo_target = ddr_in32(&ddr->debug[28]) & 0xff;
+ cpo_optimal = ((cpo_max + cpo_min) >> 1) + 0x27;
+ debug("cpo_optimal = 0x%x, cpo_target = 0x%x\n", cpo_optimal,
+ cpo_target);
+ debug("cpo_max = 0x%x, cpo_min = 0x%x\n", cpo_max, cpo_min);
+
+ ddr_type = (sdram_cfg & SDRAM_CFG_SDRAM_TYPE_MASK) >>
+ SDRAM_CFG_SDRAM_TYPE_SHIFT;
+ if (ddr_type == SDRAM_TYPE_DDR4)
+ update_cpo = (cpo_min + 0x3b) < cpo_target ? true : false;
+ else if (ddr_type == SDRAM_TYPE_DDR3)
+ update_cpo = (cpo_min + 0x3f) < cpo_target ? true : false;
+
+ if (update_cpo) {
+ printf("WARN: pls set popts->cpo_sample = 0x%x ", cpo_optimal);
+ printf("in <board>/ddr.c to optimize cpo\n");
+ }
+}
+
+void ls1046a_errata_post_ddr(void)
+{
+ erratum_a008850_post();
+ erratum_a009942_check_cpo();
+}
diff --git a/arch/arm/mach-layerscape/icid.c b/arch/arm/mach-layerscape/icid.c
new file mode 100644
index 0000000000..2326d7e67a
--- /dev/null
+++ b/arch/arm/mach-layerscape/icid.c
@@ -0,0 +1,243 @@
+#include <common.h>
+#include <io.h>
+#include <init.h>
+#include <soc/fsl/immap_lsch2.h>
+#include <soc/fsl/fsl_qbman.h>
+#include <soc/fsl/fsl_fman.h>
+
+/*
+ * Stream IDs on Chassis-2 (for example ls1043a, ls1046a, ls1012a) devices
+ * are not hardwired and are programmed by software. There are a limited
+ * number of stream IDs available, and their partitioning is scenario
+ * dependent. This file defines the partitioning between legacy, PCI,
+ * and DPAA1 devices.
+ *
+ * This partitioning can be customized in this file depending
+ * on the specific hardware config:
+ *
+ * -non-PCI legacy/platform devices (USB, SDHC, SATA, DMA, QE etc.)
+ * -all legacy devices get a unique stream ID assigned and programmed in
+ * their AMQR registers by the boot loader
+ *
+ * -PCIe
+ * -there is a range of stream IDs set aside for PCI in this
+ * file. The boot loader will scan the PCI bus and, for each device discovered:
+ * -allocate a streamID
+ * -set a PEXn LUT table entry mapping 'requester ID' to 'stream ID'
+ * -set a msi-map entry in the PEXn controller node in the
+ * device tree (see Documentation/devicetree/bindings/pci/pci-msi.txt
+ * for more info on the msi-map definition)
+ * -set a iommu-map entry in the PEXn controller node in the
+ * device tree (see Documentation/devicetree/bindings/pci/pci-iommu.txt
+ * for more info on the iommu-map definition)
+ *
+ * -DPAA1
+ * - Stream IDs for DPAA1 are reserved for future use.
+ *
+ */
+
+
+#define FSL_INVALID_STREAM_ID 0
+
+/* legacy devices */
+#define FSL_USB1_STREAM_ID 1
+#define FSL_USB2_STREAM_ID 2
+#define FSL_USB3_STREAM_ID 3
+#define FSL_SDHC_STREAM_ID 4
+#define FSL_SATA_STREAM_ID 5
+#define FSL_QE_STREAM_ID 6
+#define FSL_QDMA_STREAM_ID 7
+#define FSL_EDMA_STREAM_ID 8
+#define FSL_ETR_STREAM_ID 9
+#define FSL_DEBUG_STREAM_ID 10
+
+/* PCI - programmed in PEXn_LUT */
+#define FSL_PEX_STREAM_ID_START 11
+#define FSL_PEX_STREAM_ID_END 26
+
+/* DPAA1 - Stream-ID that can be programmed in DPAA1 h/w */
+#define FSL_DPAA1_STREAM_ID_START 27
+#define FSL_DPAA1_STREAM_ID_END 63
+
+struct icid_id_table {
+ const char *compat;
+ u32 id;
+ u32 reg;
+ phys_addr_t compat_addr;
+ phys_addr_t reg_addr;
+};
+
+struct fman_icid_id_table {
+ u32 port_id;
+ u32 icid;
+};
+
+#define SET_ICID_ENTRY(name, idA, regA, addr, compataddr) \
+ { \
+ .compat = name, \
+ .id = idA, \
+ .reg = regA, \
+ .compat_addr = compataddr, \
+ .reg_addr = addr, \
+ }
+
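+/*
+ * For the SCFG-managed blocks below, the stream ID is placed in bits 31:24
+ * of the respective *_icid register with bit 23 also set; bit 23 presumably
+ * acts as a valid/enable bit (an assumption, not taken from a reference
+ * manual).
+ */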
+#define SET_SCFG_ICID(compat, streamid, name, compataddr) \
+ SET_ICID_ENTRY(compat, streamid, (((streamid) << 24) | (1 << 23)), \
+ offsetof(struct ccsr_scfg, name) + LSCH2_SCFG_ADDR, \
+ compataddr)
+
+#define SET_USB_ICID(usb_num, compat, streamid) \
+ SET_SCFG_ICID(compat, streamid, usb##usb_num##_icid,\
+ LSCH2_XHCI_USB##usb_num##_ADDR)
+
+#define SET_SATA_ICID(compat, streamid) \
+ SET_SCFG_ICID(compat, streamid, sata_icid,\
+ LSCH2_HCI_BASE_ADDR)
+
+#define SET_SDHC_ICID(streamid) \
+ SET_SCFG_ICID("fsl,esdhc", streamid, sdhc_icid,\
+ LSCH2_ESDHC_ADDR)
+
+#define QMAN_CQSIDR_REG 0x20a80
+
+#define SET_QDMA_ICID(compat, streamid) \
+ SET_ICID_ENTRY(compat, streamid, (1 << 31) | (streamid), \
+ LSCH2_QDMA_BASE_ADDR + QMAN_CQSIDR_REG, \
+ LSCH2_QDMA_BASE_ADDR), \
+ SET_ICID_ENTRY(NULL, streamid, (1 << 31) | (streamid), \
+ LSCH2_QDMA_BASE_ADDR + QMAN_CQSIDR_REG + 4, \
+ LSCH2_QDMA_BASE_ADDR)
+
+#define SET_EDMA_ICID(streamid) \
+ SET_SCFG_ICID("fsl,vf610-edma", streamid, edma_icid,\
+ LSCH2_EDMA_BASE_ADDR)
+
+#define SET_ETR_ICID(streamid) \
+ SET_SCFG_ICID(NULL, streamid, etr_icid, 0)
+
+#define SET_DEBUG_ICID(streamid) \
+ SET_SCFG_ICID(NULL, streamid, debug_icid, 0)
+
+#define SET_QE_ICID(streamid) \
+ SET_SCFG_ICID("fsl,qe", streamid, qe_icid,\
+ LSCH2_QE_BASE_ADDR)
+
+#define SET_QMAN_ICID(streamid) \
+ SET_ICID_ENTRY("fsl,qman", streamid, streamid, \
+ offsetof(struct ccsr_qman, liodnr) + \
+ LSCH2_QMAN_ADDR, \
+ LSCH2_QMAN_ADDR)
+
+#define SET_BMAN_ICID(streamid) \
+ SET_ICID_ENTRY("fsl,bman", streamid, streamid, \
+ offsetof(struct ccsr_bman, liodnr) + \
+ LSCH2_BMAN_ADDR, \
+ LSCH2_BMAN_ADDR)
+
+#define SET_FMAN_ICID_ENTRY(_port_id, streamid) \
+ { .port_id = (_port_id), .icid = (streamid) }
+
+#define SET_SEC_QI_ICID(streamid) \
+ SET_ICID_ENTRY("fsl,sec-v4.0", streamid, \
+ 0, offsetof(ccsr_sec_t, qilcr_ls) + \
+ LSCH2_SEC_ADDR, \
+ LSCH2_SEC_ADDR)
+
+#define SET_SEC_JR_ICID_ENTRY(jr_num, streamid) \
+ SET_ICID_ENTRY( \
+ (CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT && \
+ (FSL_SEC_JR##jr_num##_OFFSET == \
+ SEC_JR3_OFFSET + CONFIG_SYS_FSL_SEC_OFFSET) \
+ ? NULL \
+ : "fsl,sec-v4.0-job-ring"), \
+ streamid, \
+ (((streamid) << 16) | (streamid)), \
+ offsetof(ccsr_sec_t, jrliodnr[jr_num].ls) + \
+ LSCH2_SEC_ADDR, \
+ FSL_SEC_JR##jr_num##_BASE_ADDR)
+
+#define SET_SEC_DECO_ICID_ENTRY(deco_num, streamid) \
+ SET_ICID_ENTRY(NULL, streamid, (((streamid) << 16) | (streamid)), \
+ offsetof(ccsr_sec_t, decoliodnr[deco_num].ls) + \
+ LSCH2_SEC_ADDR, 0)
+
+#define SET_SEC_RTIC_ICID_ENTRY(rtic_num, streamid) \
+ SET_ICID_ENTRY(NULL, streamid, (((streamid) << 16) | (streamid)), \
+ offsetof(ccsr_sec_t, rticliodnr[rtic_num].ls) + \
+ LSCH2_SEC_ADDR, 0)
+
+static struct icid_id_table icid_tbl_ls1046a[] = {
+ SET_QMAN_ICID(FSL_DPAA1_STREAM_ID_START),
+ SET_BMAN_ICID(FSL_DPAA1_STREAM_ID_START + 1),
+
+ SET_SDHC_ICID(FSL_SDHC_STREAM_ID),
+
+ SET_USB_ICID(1, "snps,dwc3", FSL_USB1_STREAM_ID),
+ SET_USB_ICID(2, "snps,dwc3", FSL_USB2_STREAM_ID),
+ SET_USB_ICID(3, "snps,dwc3", FSL_USB3_STREAM_ID),
+
+ SET_SATA_ICID("fsl,ls1046a-ahci", FSL_SATA_STREAM_ID),
+ SET_QDMA_ICID("fsl,ls1046a-qdma", FSL_QDMA_STREAM_ID),
+ SET_EDMA_ICID(FSL_EDMA_STREAM_ID),
+ SET_ETR_ICID(FSL_ETR_STREAM_ID),
+ SET_DEBUG_ICID(FSL_DEBUG_STREAM_ID),
+};
+
+static struct fman_icid_id_table fman_icid_tbl_ls1046a[] = {
+ /* port id, icid */
+ SET_FMAN_ICID_ENTRY(0x02, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x03, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x04, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x05, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x06, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x07, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x08, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x09, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x0a, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x0b, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x0c, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x0d, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x28, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x29, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x2a, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x2b, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x2c, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x2d, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x10, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x11, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x30, FSL_DPAA1_STREAM_ID_END),
+ SET_FMAN_ICID_ENTRY(0x31, FSL_DPAA1_STREAM_ID_END),
+};
+
+static void set_icid(struct icid_id_table *tbl, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ out_be32((u32 *)(tbl[i].reg_addr), tbl[i].reg);
+}
+
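+/*
+ * FMan port ICIDs are written into the BMI per-port partition ID registers
+ * (fmbm_ppid); the register array is indexed by hardware port ID, hence
+ * the "port_id - 1" below.
+ */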
+static void set_fman_icids(struct fman_icid_id_table *tbl, int size)
+{
+ int i;
+ struct ccsr_fman *fm = (void *)LSCH2_FM1_ADDR;
+
+ for (i = 0; i < size; i++) {
+ out_be32(&fm->fm_bmi_common.fmbm_ppid[tbl[i].port_id - 1],
+ tbl[i].icid);
+ }
+}
+
+static int set_icids(void)
+{
+ if (!of_machine_is_compatible("fsl,ls1046a"))
+ return 0;
+
+ /* setup general icid offsets */
+ set_icid(icid_tbl_ls1046a, ARRAY_SIZE(icid_tbl_ls1046a));
+
+ set_fman_icids(fman_icid_tbl_ls1046a, ARRAY_SIZE(fman_icid_tbl_ls1046a));
+
+ return 0;
+}
+postcore_initcall(set_icids); \ No newline at end of file
diff --git a/arch/arm/mach-layerscape/include/mach/debug_ll.h b/arch/arm/mach-layerscape/include/mach/debug_ll.h
new file mode 100644
index 0000000000..2658a4a7c9
--- /dev/null
+++ b/arch/arm/mach-layerscape/include/mach/debug_ll.h
@@ -0,0 +1,34 @@
+#ifndef __INCLUDE_ARCH_DEBUG_LL_H__
+#define __INCLUDE_ARCH_DEBUG_LL_H__
+
+#include <io.h>
+#include <soc/fsl/immap_lsch2.h>
+
+#define __LS_UART_BASE(num) LSCH2_NS16550_COM##num
+#define LS_UART_BASE(num) __LS_UART_BASE(num)
+
+static inline uint8_t debug_ll_read_reg(int reg)
+{
+ void __iomem *base = IOMEM(LS_UART_BASE(CONFIG_DEBUG_LAYERSCAPE_UART_PORT));
+
+ return readb(base + reg);
+}
+
+static inline void debug_ll_write_reg(int reg, uint8_t val)
+{
+ void __iomem *base = IOMEM(LS_UART_BASE(CONFIG_DEBUG_LAYERSCAPE_UART_PORT));
+
+ writeb(val, base + reg);
+}
+
+#include <debug_ll/ns16550.h>
+
+static inline void debug_ll_init(void)
+{
+ uint16_t divisor;
+
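+ /* assumes a 300 MHz input clock to the DUART */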
+ divisor = debug_ll_ns16550_calc_divisor(300000000);
+ debug_ll_ns16550_init(divisor);
+}
+
+#endif /* __INCLUDE_ARCH_DEBUG_LL_H__ */
diff --git a/arch/arm/mach-layerscape/include/mach/errata.h b/arch/arm/mach-layerscape/include/mach/errata.h
new file mode 100644
index 0000000000..bdefa22172
--- /dev/null
+++ b/arch/arm/mach-layerscape/include/mach/errata.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_ERRATA_H
+#define __MACH_ERRATA_H
+
+void ls1046a_errata(void);
+void ls1046a_errata_post_ddr(void);
+
+#endif /* __MACH_ERRATA_H */
diff --git a/arch/arm/mach-layerscape/include/mach/layerscape.h b/arch/arm/mach-layerscape/include/mach/layerscape.h
new file mode 100644
index 0000000000..55e0b7bc96
--- /dev/null
+++ b/arch/arm/mach-layerscape/include/mach/layerscape.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_LAYERSCAPE_H
+#define __MACH_LAYERSCAPE_H
+
+#define LS1046A_DDR_SDRAM_BASE 0x80000000
+#define LS1046A_DDR_FREQ 2100000000
+
+#endif /* __MACH_LAYERSCAPE_H */
diff --git a/arch/arm/mach-layerscape/include/mach/lowlevel.h b/arch/arm/mach-layerscape/include/mach/lowlevel.h
new file mode 100644
index 0000000000..0f5f0f3aad
--- /dev/null
+++ b/arch/arm/mach-layerscape/include/mach/lowlevel.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_LOWLEVEL_H
+#define __MACH_LOWLEVEL_H
+
+void ls1046a_init_lowlevel(void);
+void ls1046a_init_l2_latency(void);
+
+#endif /* __MACH_LOWLEVEL_H */
diff --git a/arch/arm/mach-layerscape/include/mach/xload.h b/arch/arm/mach-layerscape/include/mach/xload.h
new file mode 100644
index 0000000000..fedd36e020
--- /dev/null
+++ b/arch/arm/mach-layerscape/include/mach/xload.h
@@ -0,0 +1,6 @@
+#ifndef __MACH_XLOAD_H
+#define __MACH_XLOAD_H
+
+int ls1046a_esdhc_start_image(unsigned long r0, unsigned long r1, unsigned long r2);
+
+#endif /* __MACH_XLOAD_H */
diff --git a/arch/arm/mach-layerscape/lowlevel-ls1046a.c b/arch/arm/mach-layerscape/lowlevel-ls1046a.c
new file mode 100644
index 0000000000..32f825ec25
--- /dev/null
+++ b/arch/arm/mach-layerscape/lowlevel-ls1046a.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <common.h>
+#include <io.h>
+#include <asm/syscounter.h>
+#include <asm/system.h>
+#include <mach/errata.h>
+#include <mach/lowlevel.h>
+#include <soc/fsl/immap_lsch2.h>
+#include <soc/fsl/fsl_immap.h>
+
+enum csu_cslx_access {
+ CSU_NS_SUP_R = 0x08,
+ CSU_NS_SUP_W = 0x80,
+ CSU_NS_SUP_RW = 0x88,
+ CSU_NS_USER_R = 0x04,
+ CSU_NS_USER_W = 0x40,
+ CSU_NS_USER_RW = 0x44,
+ CSU_S_SUP_R = 0x02,
+ CSU_S_SUP_W = 0x20,
+ CSU_S_SUP_RW = 0x22,
+ CSU_S_USER_R = 0x01,
+ CSU_S_USER_W = 0x10,
+ CSU_S_USER_RW = 0x11,
+ CSU_ALL_RW = 0xff,
+};
+
+struct csu_ns_dev {
+ unsigned long ind;
+ uint32_t val;
+};
+
+enum csu_cslx_ind {
+ CSU_CSLX_PCIE2_IO = 0,
+ CSU_CSLX_PCIE1_IO,
+ CSU_CSLX_MG2TPR_IP,
+ CSU_CSLX_IFC_MEM,
+ CSU_CSLX_OCRAM,
+ CSU_CSLX_GIC,
+ CSU_CSLX_PCIE1,
+ CSU_CSLX_OCRAM2,
+ CSU_CSLX_QSPI_MEM,
+ CSU_CSLX_PCIE2,
+ CSU_CSLX_SATA,
+ CSU_CSLX_USB1,
+ CSU_CSLX_QM_BM_SWPORTAL,
+ CSU_CSLX_PCIE3 = 16,
+ CSU_CSLX_PCIE3_IO,
+ CSU_CSLX_USB3 = 20,
+ CSU_CSLX_USB2,
+ CSU_CSLX_PFE = 23,
+ CSU_CSLX_SERDES = 32,
+ CSU_CSLX_QDMA,
+ CSU_CSLX_LPUART2,
+ CSU_CSLX_LPUART1,
+ CSU_CSLX_LPUART4,
+ CSU_CSLX_LPUART3,
+ CSU_CSLX_LPUART6,
+ CSU_CSLX_LPUART5,
+ CSU_CSLX_DSPI1 = 41,
+ CSU_CSLX_QSPI,
+ CSU_CSLX_ESDHC,
+ CSU_CSLX_IFC = 45,
+ CSU_CSLX_I2C1,
+ CSU_CSLX_USB_2,
+ CSU_CSLX_I2C3 = 48,
+ CSU_CSLX_I2C2,
+ CSU_CSLX_DUART2 = 50,
+ CSU_CSLX_DUART1,
+ CSU_CSLX_WDT2,
+ CSU_CSLX_WDT1,
+ CSU_CSLX_EDMA,
+ CSU_CSLX_SYS_CNT,
+ CSU_CSLX_DMA_MUX2,
+ CSU_CSLX_DMA_MUX1,
+ CSU_CSLX_DDR,
+ CSU_CSLX_QUICC,
+ CSU_CSLX_DCFG_CCU_RCPM = 60,
+ CSU_CSLX_SECURE_BOOTROM,
+ CSU_CSLX_SFP,
+ CSU_CSLX_TMU,
+ CSU_CSLX_SECURE_MONITOR,
+ CSU_CSLX_SCFG,
+ CSU_CSLX_FM = 66,
+ CSU_CSLX_SEC5_5,
+ CSU_CSLX_BM,
+ CSU_CSLX_QM,
+ CSU_CSLX_GPIO2 = 70,
+ CSU_CSLX_GPIO1,
+ CSU_CSLX_GPIO4,
+ CSU_CSLX_GPIO3,
+ CSU_CSLX_PLATFORM_CONT,
+ CSU_CSLX_CSU,
+ CSU_CSLX_IIC4 = 77,
+ CSU_CSLX_WDT4,
+ CSU_CSLX_WDT3,
+ CSU_CSLX_ESDHC2 = 80,
+ CSU_CSLX_WDT5 = 81,
+ CSU_CSLX_SAI2,
+ CSU_CSLX_SAI1,
+ CSU_CSLX_SAI4,
+ CSU_CSLX_SAI3,
+ CSU_CSLX_FTM2 = 86,
+ CSU_CSLX_FTM1,
+ CSU_CSLX_FTM4,
+ CSU_CSLX_FTM3,
+ CSU_CSLX_FTM6 = 90,
+ CSU_CSLX_FTM5,
+ CSU_CSLX_FTM8,
+ CSU_CSLX_FTM7,
+ CSU_CSLX_DSCR = 121,
+};
+
+static struct csu_ns_dev ns_dev[] = {
+ {CSU_CSLX_PCIE2_IO, CSU_ALL_RW},
+ {CSU_CSLX_PCIE1_IO, CSU_ALL_RW},
+ {CSU_CSLX_MG2TPR_IP, CSU_ALL_RW},
+ {CSU_CSLX_IFC_MEM, CSU_ALL_RW},
+ {CSU_CSLX_OCRAM, CSU_ALL_RW},
+ {CSU_CSLX_GIC, CSU_ALL_RW},
+ {CSU_CSLX_PCIE1, CSU_ALL_RW},
+ {CSU_CSLX_OCRAM2, CSU_ALL_RW},
+ {CSU_CSLX_QSPI_MEM, CSU_ALL_RW},
+ {CSU_CSLX_PCIE2, CSU_ALL_RW},
+ {CSU_CSLX_SATA, CSU_ALL_RW},
+ {CSU_CSLX_USB1, CSU_ALL_RW},
+ {CSU_CSLX_QM_BM_SWPORTAL, CSU_ALL_RW},
+ {CSU_CSLX_PCIE3, CSU_ALL_RW},
+ {CSU_CSLX_PCIE3_IO, CSU_ALL_RW},
+ {CSU_CSLX_USB3, CSU_ALL_RW},
+ {CSU_CSLX_USB2, CSU_ALL_RW},
+ {CSU_CSLX_PFE, CSU_ALL_RW},
+ {CSU_CSLX_SERDES, CSU_ALL_RW},
+ {CSU_CSLX_QDMA, CSU_ALL_RW},
+ {CSU_CSLX_LPUART2, CSU_ALL_RW},
+ {CSU_CSLX_LPUART1, CSU_ALL_RW},
+ {CSU_CSLX_LPUART4, CSU_ALL_RW},
+ {CSU_CSLX_LPUART3, CSU_ALL_RW},
+ {CSU_CSLX_LPUART6, CSU_ALL_RW},
+ {CSU_CSLX_LPUART5, CSU_ALL_RW},
+ {CSU_CSLX_DSPI1, CSU_ALL_RW},
+ {CSU_CSLX_QSPI, CSU_ALL_RW},
+ {CSU_CSLX_ESDHC, CSU_ALL_RW},
+ {CSU_CSLX_IFC, CSU_ALL_RW},
+ {CSU_CSLX_I2C1, CSU_ALL_RW},
+ {CSU_CSLX_I2C3, CSU_ALL_RW},
+ {CSU_CSLX_I2C2, CSU_ALL_RW},
+ {CSU_CSLX_DUART2, CSU_ALL_RW},
+ {CSU_CSLX_DUART1, CSU_ALL_RW},
+ {CSU_CSLX_WDT2, CSU_ALL_RW},
+ {CSU_CSLX_WDT1, CSU_ALL_RW},
+ {CSU_CSLX_EDMA, CSU_ALL_RW},
+ {CSU_CSLX_SYS_CNT, CSU_ALL_RW},
+ {CSU_CSLX_DMA_MUX2, CSU_ALL_RW},
+ {CSU_CSLX_DMA_MUX1, CSU_ALL_RW},
+ {CSU_CSLX_DDR, CSU_ALL_RW},
+ {CSU_CSLX_QUICC, CSU_ALL_RW},
+ {CSU_CSLX_DCFG_CCU_RCPM, CSU_ALL_RW},
+ {CSU_CSLX_SECURE_BOOTROM, CSU_ALL_RW},
+ {CSU_CSLX_SFP, CSU_ALL_RW},
+ {CSU_CSLX_TMU, CSU_ALL_RW},
+ {CSU_CSLX_SECURE_MONITOR, CSU_ALL_RW},
+ {CSU_CSLX_SCFG, CSU_ALL_RW},
+ {CSU_CSLX_FM, CSU_ALL_RW},
+ {CSU_CSLX_SEC5_5, CSU_ALL_RW},
+ {CSU_CSLX_BM, CSU_ALL_RW},
+ {CSU_CSLX_QM, CSU_ALL_RW},
+ {CSU_CSLX_GPIO2, CSU_ALL_RW},
+ {CSU_CSLX_GPIO1, CSU_ALL_RW},
+ {CSU_CSLX_GPIO4, CSU_ALL_RW},
+ {CSU_CSLX_GPIO3, CSU_ALL_RW},
+ {CSU_CSLX_PLATFORM_CONT, CSU_ALL_RW},
+ {CSU_CSLX_CSU, CSU_ALL_RW},
+ {CSU_CSLX_IIC4, CSU_ALL_RW},
+ {CSU_CSLX_WDT4, CSU_ALL_RW},
+ {CSU_CSLX_WDT3, CSU_ALL_RW},
+ {CSU_CSLX_ESDHC2, CSU_ALL_RW},
+ {CSU_CSLX_WDT5, CSU_ALL_RW},
+ {CSU_CSLX_SAI2, CSU_ALL_RW},
+ {CSU_CSLX_SAI1, CSU_ALL_RW},
+ {CSU_CSLX_SAI4, CSU_ALL_RW},
+ {CSU_CSLX_SAI3, CSU_ALL_RW},
+ {CSU_CSLX_FTM2, CSU_ALL_RW},
+ {CSU_CSLX_FTM1, CSU_ALL_RW},
+ {CSU_CSLX_FTM4, CSU_ALL_RW},
+ {CSU_CSLX_FTM3, CSU_ALL_RW},
+ {CSU_CSLX_FTM6, CSU_ALL_RW},
+ {CSU_CSLX_FTM5, CSU_ALL_RW},
+ {CSU_CSLX_FTM8, CSU_ALL_RW},
+ {CSU_CSLX_FTM7, CSU_ALL_RW},
+ {CSU_CSLX_DSCR, CSU_ALL_RW},
+};
+
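+/*
+ * Each 32-bit CSU register packs two 16-bit CSLx access-control fields:
+ * even indices occupy the upper half-word, odd indices the lower one.
+ */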
+static void set_devices_ns_access(unsigned long index, u16 val)
+{
+ u32 *base = IOMEM(LSCH2_CSU_ADDR);
+ u32 *reg;
+ uint32_t tmp;
+
+ reg = base + index / 2;
+ tmp = in_be32(reg);
+ if (index % 2 == 0) {
+ tmp &= 0x0000ffff;
+ tmp |= val << 16;
+ } else {
+ tmp &= 0xffff0000;
+ tmp |= val;
+ }
+
+ out_be32(reg, tmp);
+}
+
+static void init_csu(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ns_dev); i++)
+ set_devices_ns_access(ns_dev[i].ind, ns_dev[i].val);
+}
+
+void ls1046a_init_lowlevel(void)
+{
+ struct ccsr_cci400 __iomem *cci = IOMEM(LSCH2_CCI400_ADDR);
+ struct ccsr_scfg *scfg = IOMEM(LSCH2_SCFG_ADDR);
+
+ init_csu();
+ ls1046a_init_l2_latency();
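+ /* the architected timers run from the 25 MHz system counter enabled below */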
+ set_cntfrq(25000000);
+ syscnt_enable(IOMEM(LSCH2_SYS_COUNTER_ADDR));
+
+ /* Make SEC reads and writes snoopable */
+ setbits_be32(&scfg->snpcnfgcr, SCFG_SNPCNFGCR_SECRDSNP |
+ SCFG_SNPCNFGCR_SECWRSNP |
+ SCFG_SNPCNFGCR_SATARDSNP |
+ SCFG_SNPCNFGCR_SATAWRSNP);
+
+ /*
+ * Enable snoop requests and DVM message requests for
+ * Slave interface S4 (A53 core cluster)
+ */
+ if (current_el() == 3) {
+ out_le32(&cci->slave[4].snoop_ctrl,
+ CCI400_DVM_MESSAGE_REQ_EN | CCI400_SNOOP_REQ_EN);
+ }
+
+ ls1046a_errata();
+}
diff --git a/arch/arm/mach-layerscape/lowlevel.S b/arch/arm/mach-layerscape/lowlevel.S
new file mode 100644
index 0000000000..adb3e54367
--- /dev/null
+++ b/arch/arm/mach-layerscape/lowlevel.S
@@ -0,0 +1,18 @@
+#include <linux/linkage.h>
+
+.section .text.ls1046a_init_l2_latency
+ENTRY(ls1046a_init_l2_latency)
+ /* Initialize the L2 RAM latency */
+ mrs x1, S3_1_c11_c0_2
+ mov x0, #0x1C7
+ /* Clear L2 Tag RAM latency and L2 Data RAM latency */
+ bic x1, x1, x0
+ /* Set L2 data ram latency bits [2:0] */
+ orr x1, x1, #0x2
+ /* set L2 tag ram latency bits [8:6] */
+ orr x1, x1, #0x80
+ msr S3_1_c11_c0_2, x1
+ isb
+
+ ret
+ENDPROC(ls1046a_init_l2_latency)
diff --git a/arch/mips/lib/pbl.lds.S b/arch/mips/lib/pbl.lds.S
index 1f0285dd6f..f1752ec720 100644
--- a/arch/mips/lib/pbl.lds.S
+++ b/arch/mips/lib/pbl.lds.S
@@ -38,6 +38,8 @@ SECTIONS
. = ALIGN(4);
.data : { *(.data*) }
+ pbl_code_size = . - HEAD_TEXT_BASE;
+
. = ALIGN(4);
__piggydata_start = .;
.piggydata : {
@@ -45,9 +47,12 @@ SECTIONS
}
__piggydata_end = .;
+ pbl_image_size = . - HEAD_TEXT_BASE;
+
. = ALIGN(4);
__bss_start = .;
.bss : { *(.bss*) }
__bss_stop = .;
+ pbl_memory_size = . - HEAD_TEXT_BASE;
_end = .;
}
diff --git a/common/Kconfig b/common/Kconfig
index 76632b324e..9c1b01debd 100644
--- a/common/Kconfig
+++ b/common/Kconfig
@@ -1238,6 +1238,14 @@ config DEBUG_SOCFPGA_UART_CLOCK
help
Choose UART root clock.
+config DEBUG_LAYERSCAPE_UART_PORT
+ int "Layerscape UART port selection"
+ depends on ARCH_LAYERSCAPE
+ default 1
+ help
+ Select the UART port number used for early debugging here. Port
+ numbers start counting from 1.
+
config DEBUG_INITCALLS
bool "Trace initcalls"
help
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d6fbcbfe16..f75da26982 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -40,5 +40,6 @@ source "drivers/crypto/Kconfig"
source "drivers/memory/Kconfig"
source "drivers/soc/imx/Kconfig"
source "drivers/nvme/Kconfig"
+source "drivers/ddr/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 65fd488ce9..fb7fcd3fc2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_AIODEV) += aiodev/
obj-y += memory/
obj-y += soc/imx/
obj-y += nvme/
+obj-y += ddr/
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 34c44fff9b..66c3591dad 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_SOC_QCA_AR9344) += clk-ar9344.o
obj-$(CONFIG_ARCH_IMX) += imx/
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
obj-$(CONFIG_MACH_VEXPRESS) += vexpress/
+obj-$(CONFIG_ARCH_LAYERSCAPE) += clk-qoric.o
diff --git a/drivers/clk/clk-qoric.c b/drivers/clk/clk-qoric.c
new file mode 100644
index 0000000000..c40c6e90d9
--- /dev/null
+++ b/drivers/clk/clk-qoric.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * clock driver for Freescale QorIQ SoCs.
+ */
+
+#define pr_fmt(fmt) "clk-qoric: " fmt
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <io.h>
+#include <linux/kernel.h>
+#include <of_address.h>
+#include <of.h>
+#include <asm-generic/div64.h>
+
+#define PLL_DIV1 0
+#define PLL_DIV2 1
+#define PLL_DIV3 2
+#define PLL_DIV4 3
+
+#define PLATFORM_PLL 0
+#define CGA_PLL1 1
+#define CGA_PLL2 2
+#define CGA_PLL3 3
+#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */
+#define CGB_PLL1 4
+#define CGB_PLL2 5
+
+struct clockgen_pll_div {
+ struct clk *clk;
+ char name[32];
+};
+
+struct clockgen_pll {
+ struct clockgen_pll_div div[8];
+};
+
+#define CLKSEL_VALID 1
+
+struct clockgen_sourceinfo {
+ u32 flags; /* CLKSEL_xxx */
+ int pll; /* CGx_PLLn */
+ int div; /* PLL_DIVn */
+};
+
+#define NUM_MUX_PARENTS 16
+
+struct clockgen_muxinfo {
+ struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
+};
+
+#define NUM_HWACCEL 5
+#define NUM_CMUX 8
+
+struct clockgen;
+
+#define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */
+#define CG_VER3 4 /* version 3 cg: reg layout different */
+#define CG_LITTLE_ENDIAN 8
+
+struct clockgen_chipinfo {
+ const char *compat;
+ const struct clockgen_muxinfo *cmux_groups[2];
+ const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
+ void (*init_periph)(struct clockgen *cg);
+ int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
+ u32 pll_mask; /* 1 << n bit set if PLL n is valid */
+ u32 flags; /* CG_xxx */
+};
+
+struct clockgen {
+ struct device_node *node;
+ void __iomem *regs;
+ struct clockgen_chipinfo info; /* mutable copy */
+ struct clk *sysclk, *coreclk;
+ struct clockgen_pll pll[6];
+ struct clk *cmux[NUM_CMUX];
+ struct clk *hwaccel[NUM_HWACCEL];
+ struct clk *fman[2];
+};
+
+static struct clockgen clockgen;
+
+static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
+{
+ if (cg->info.flags & CG_LITTLE_ENDIAN)
+ iowrite32(val, reg);
+ else
+ iowrite32be(val, reg);
+}
+
+static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
+{
+ u32 val;
+
+ if (cg->info.flags & CG_LITTLE_ENDIAN)
+ val = ioread32(reg);
+ else
+ val = ioread32be(reg);
+
+ return val;
+}
+
+static const struct clockgen_muxinfo t1023_cmux = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ }
+};
+
+static const struct clockgen_muxinfo t1040_cmux = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ }
+};
+
+static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
+ {
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+ },
+};
+
+static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
+ {
+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1043a_hwa1 = {
+ {
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1043a_hwa2 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1046a_hwa1 = {
+ {
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1046a_hwa2 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1012a_cmux = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ {},
+ [2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ }
+};
+
+static void __init t2080_init_periph(struct clockgen *cg)
+{
+ cg->fman[0] = cg->hwaccel[0];
+}
+
+static const struct clockgen_chipinfo chipinfo_ls1021a = {
+ .compat = "fsl,ls1021a-clockgen",
+ .cmux_groups = { &t1023_cmux },
+ .cmux_to_group = { 0, -1 },
+ .pll_mask = 0x03,
+};
+
+static const struct clockgen_chipinfo chipinfo_ls1043a = {
+ .compat = "fsl,ls1043a-clockgen",
+ .init_periph = t2080_init_periph,
+ .cmux_groups = { &t1040_cmux },
+ .hwaccel = { &ls1043a_hwa1, &ls1043a_hwa2 },
+ .cmux_to_group = { 0, -1 },
+ .pll_mask = 0x07,
+ .flags = CG_PLL_8BIT,
+};
+
+static const struct clockgen_chipinfo chipinfo_ls1046a = {
+ .compat = "fsl,ls1046a-clockgen",
+ .init_periph = t2080_init_periph,
+ .cmux_groups = { &t1040_cmux },
+ .hwaccel = { &ls1046a_hwa1, &ls1046a_hwa2 },
+ .cmux_to_group = { 0, -1 },
+ .pll_mask = 0x07,
+ .flags = CG_PLL_8BIT,
+};
+
+static const struct clockgen_chipinfo chipinfo_ls1088a = {
+ .compat = "fsl,ls1088a-clockgen",
+ .cmux_groups = { &clockgen2_cmux_cga12 },
+ .cmux_to_group = { 0, 0, -1 },
+ .pll_mask = 0x07,
+ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
+};
+
+static const struct clockgen_chipinfo chipinfo_ls1012a = {
+ .compat = "fsl,ls1012a-clockgen",
+ .cmux_groups = { &ls1012a_cmux },
+ .cmux_to_group = { 0, -1 },
+ .pll_mask = 0x03,
+};
+
+static const struct clockgen_chipinfo chipinfo_ls2080a = {
+ .compat = "fsl,ls2080a-clockgen",
+ .cmux_groups = { &clockgen2_cmux_cga12, &clockgen2_cmux_cgb },
+ .cmux_to_group = { 0, 0, 1, 1, -1 },
+ .pll_mask = 0x37,
+ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
+};
+
+struct mux_hwclock {
+ struct clk clk;
+ struct clockgen *cg;
+ const struct clockgen_muxinfo *info;
+ u32 __iomem *reg;
+ int num_parents;
+};
+
+#define to_mux_hwclock(p) container_of(p, struct mux_hwclock, clk)
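+/* the CLKSEL field occupies bits 30:27 of the mux control register */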
+#define CLKSEL_MASK 0x78000000
+#define CLKSEL_SHIFT 27
+
+static int mux_set_parent(struct clk *clk, u8 idx)
+{
+ struct mux_hwclock *hwc = to_mux_hwclock(clk);
+
+ if (idx >= hwc->num_parents)
+ return -EINVAL;
+
+ cg_out(hwc->cg, (idx << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);
+
+ return 0;
+}
+
+static int mux_get_parent(struct clk *clk)
+{
+ struct mux_hwclock *hwc = to_mux_hwclock(clk);
+
+ return (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
+}
+
+static const struct clk_ops cmux_ops = {
+ .get_parent = mux_get_parent,
+ .set_parent = mux_set_parent,
+};
+
+/*
+ * Don't allow setting for now, as the clock options haven't been
+ * sanitized for additional restrictions.
+ */
+static const struct clk_ops hwaccel_ops = {
+ .get_parent = mux_get_parent,
+};
+
+static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
+ struct mux_hwclock *hwc,
+ int idx)
+{
+ const struct clockgen_sourceinfo *clksel = &hwc->info->clksel[idx];
+ int pll, div;
+
+ if (!(clksel->flags & CLKSEL_VALID))
+ return NULL;
+
+ pll = clksel->pll;
+ div = clksel->div;
+
+ return &cg->pll[pll].div[div];
+}
+
+static struct clk * __init create_mux_common(struct clockgen *cg,
+ struct mux_hwclock *hwc,
+ const struct clk_ops *ops,
+ const char *fmt, int idx)
+{
+ struct clk *clk = &hwc->clk;
+ const struct clockgen_pll_div *div;
+ const char **parent_names;
+ int i, ret;
+
+ parent_names = xzalloc(sizeof(char *) * NUM_MUX_PARENTS);
+
+ for (i = 0; i < NUM_MUX_PARENTS; i++) {
+ div = get_pll_div(cg, hwc, i);
+ if (!div)
+ continue;
+
+ parent_names[i] = div->name;
+ }
+
+ clk->name = xasprintf(fmt, idx);
+ clk->ops = ops;
+ clk->parent_names = parent_names;
+ clk->num_parents = hwc->num_parents = i;
+ hwc->cg = cg;
+
+ ret = clk_register(clk);
+ if (ret) {
+ pr_err("%s: Couldn't register %s: %d\n", __func__, clk->name, ret);
+ kfree(hwc);
+ return NULL;
+ }
+
+ return clk;
+}
+
+static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
+{
+ struct mux_hwclock *hwc;
+ const struct clockgen_pll_div *div;
+ u32 clksel;
+
+ hwc = xzalloc(sizeof(*hwc));
+
+ if (cg->info.flags & CG_VER3)
+ hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
+ else
+ hwc->reg = cg->regs + 0x20 * idx;
+
+ hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];
+
+ /*
+ * Find the rate for the default clksel, and treat it as the
+ * maximum rated core frequency. If this is an incorrect
+ * assumption, certain clock options (possibly including the
+ * default clksel) may be inappropriately excluded on certain
+ * chips.
+ */
+ clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
+ div = get_pll_div(cg, hwc, clksel);
+ if (!div) {
+ kfree(hwc);
+ return NULL;
+ }
+
+ return create_mux_common(cg, hwc, &cmux_ops, "cg-cmux%d", idx);
+}
+
+static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
+{
+ struct mux_hwclock *hwc;
+
+ hwc = xzalloc(sizeof(*hwc));
+
+ hwc->reg = cg->regs + 0x20 * idx + 0x10;
+ hwc->info = cg->info.hwaccel[idx];
+
+ return create_mux_common(cg, hwc, &hwaccel_ops, "cg-hwaccel%d", idx);
+}
+
+static void __init create_muxes(struct clockgen *cg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
+ if (cg->info.cmux_to_group[i] < 0)
+ break;
+ if (cg->info.cmux_to_group[i] >=
+ ARRAY_SIZE(cg->info.cmux_groups)) {
+ continue;
+ }
+
+ cg->cmux[i] = create_one_cmux(cg, i);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
+ if (!cg->info.hwaccel[i])
+ continue;
+
+ cg->hwaccel[i] = create_one_hwaccel(cg, i);
+ }
+}
+
+#define PLL_KILL BIT(31)
+
+static void __init create_one_pll(struct clockgen *cg, int idx)
+{
+ u32 __iomem *reg;
+ u32 mult;
+ struct clockgen_pll *pll = &cg->pll[idx];
+ const char *input = cg->sysclk->name;
+ int i;
+
+ if (!(cg->info.pll_mask & (1 << idx)))
+ return;
+
+ if (cg->coreclk && idx != PLATFORM_PLL) {
+ if (IS_ERR(cg->coreclk))
+ return;
+
+ input = cg->coreclk->name;
+ }
+
+ if (cg->info.flags & CG_VER3) {
+ switch (idx) {
+ case PLATFORM_PLL:
+ reg = cg->regs + 0x60080;
+ break;
+ case CGA_PLL1:
+ reg = cg->regs + 0x80;
+ break;
+ case CGA_PLL2:
+ reg = cg->regs + 0xa0;
+ break;
+ case CGB_PLL1:
+ reg = cg->regs + 0x10080;
+ break;
+ case CGB_PLL2:
+ reg = cg->regs + 0x100a0;
+ break;
+ default:
+ pr_warn("invalid PLL index %d\n", idx);
+ return;
+ }
+ } else {
+ if (idx == PLATFORM_PLL)
+ reg = cg->regs + 0xc00;
+ else
+ reg = cg->regs + 0x800 + 0x20 * (idx - 1);
+ }
+
+ /* Get the multiple of PLL */
+ mult = cg_in(cg, reg);
+
+ /* Check if this PLL is disabled */
+ if (mult & PLL_KILL) {
+ pr_debug("%s(): pll %p disabled\n", __func__, reg);
+ return;
+ }
+
+ if ((cg->info.flags & CG_VER3) ||
+ ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
+ mult = (mult & GENMASK(8, 1)) >> 1;
+ else
+ mult = (mult & GENMASK(6, 1)) >> 1;
+
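+ /*
+ * Each divider output is a fixed factor of the PLL input clock; with
+ * hypothetical numbers, mult = 14 and a 100 MHz input would give
+ * cg-pllN-div1 = 1400 MHz, cg-pllN-div2 = 700 MHz, and so on.
+ */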
+ for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
+ struct clk *clk;
+
+ /*
+ * For platform PLL, there are 8 divider clocks.
+ * For core PLL, there are 4 divider clocks at most.
+ */
+ if (idx != PLATFORM_PLL && i >= 4)
+ break;
+
+ snprintf(pll->div[i].name, sizeof(pll->div[i].name),
+ "cg-pll%d-div%d", idx, i + 1);
+
+ clk = clk_fixed_factor(pll->div[i].name, input, mult, i + 1, 0);
+ if (IS_ERR(clk)) {
+ pr_err("%s: %s: register failed %ld\n",
+ __func__, pll->div[i].name, PTR_ERR(clk));
+ continue;
+ }
+
+ pll->div[i].clk = clk;
+ }
+}
+
+static void __init create_plls(struct clockgen *cg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
+ create_one_pll(cg, i);
+}
+
+static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct clockgen *cg = data;
+ struct clk *clk;
+ struct clockgen_pll *pll;
+ u32 type, idx;
+
+ if (clkspec->args_count < 2) {
+ pr_err("%s: insufficient phandle args\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = clkspec->args[0];
+ idx = clkspec->args[1];
+
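+ /*
+ * Phandle args are <type idx>: 0 = sysclk, 1 = core mux, 2 = hwaccel
+ * mux, 3 = FMan, 4 = platform PLL divider, 5 = coreclk input.
+ */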
+ switch (type) {
+ case 0:
+ if (idx != 0)
+ goto bad_args;
+ clk = cg->sysclk;
+ break;
+ case 1:
+ if (idx >= ARRAY_SIZE(cg->cmux))
+ goto bad_args;
+ clk = cg->cmux[idx];
+ break;
+ case 2:
+ if (idx >= ARRAY_SIZE(cg->hwaccel))
+ goto bad_args;
+ clk = cg->hwaccel[idx];
+ break;
+ case 3:
+ if (idx >= ARRAY_SIZE(cg->fman))
+ goto bad_args;
+ clk = cg->fman[idx];
+ break;
+ case 4:
+ pll = &cg->pll[PLATFORM_PLL];
+ if (idx >= ARRAY_SIZE(pll->div))
+ goto bad_args;
+ clk = pll->div[idx].clk;
+ break;
+ case 5:
+ if (idx != 0)
+ goto bad_args;
+ clk = cg->coreclk;
+ if (IS_ERR(clk))
+ clk = NULL;
+ break;
+ default:
+ goto bad_args;
+ }
+
+ if (!clk)
+ return ERR_PTR(-ENOENT);
+ return clk;
+
+bad_args:
+ pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
+ return ERR_PTR(-EINVAL);
+}
+
+static void __init clockgen_init(struct device_node *np,
+ const struct clockgen_chipinfo *chipinfo)
+{
+ int ret;
+
+ clockgen.node = np;
+ clockgen.regs = of_iomap(np, 0);
+ if (!clockgen.regs) {
+ pr_err("of_iomap failed for %s\n", np->full_name);
+ return;
+ }
+
+ clockgen.info = *chipinfo;
+
+ clockgen.sysclk = of_clk_get(clockgen.node, 0);
+ if (IS_ERR(clockgen.sysclk)) {
+ pr_err("sysclk not found: %s\n", strerrorp(clockgen.sysclk));
+ return;
+ }
+
+ clockgen.coreclk = of_clk_get(clockgen.node, 1);
+ if (IS_ERR(clockgen.coreclk))
+ clockgen.coreclk = NULL;
+
+ create_plls(&clockgen);
+ create_muxes(&clockgen);
+
+ if (clockgen.info.init_periph)
+ clockgen.info.init_periph(&clockgen);
+
+ ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
+ if (ret) {
+ pr_err("Couldn't register clk provider for node %s: %d\n",
+ np->full_name, ret);
+ }
+
+ return;
+}
+
+static void __maybe_unused clockgen_init_ls1012a(struct device_node *np)
+{
+ clockgen_init(np, &chipinfo_ls1012a);
+}
+
+static void __maybe_unused clockgen_init_ls1021a(struct device_node *np)
+{
+ clockgen_init(np, &chipinfo_ls1021a);
+}
+
+static void __maybe_unused clockgen_init_ls1043a(struct device_node *np)
+{
+ clockgen_init(np, &chipinfo_ls1043a);
+}
+
+static void __maybe_unused clockgen_init_ls1046a(struct device_node *np)
+{
+ clockgen_init(np, &chipinfo_ls1046a);
+}
+
+static void __maybe_unused clockgen_init_ls1088a(struct device_node *np)
+{
+ clockgen_init(np, &chipinfo_ls1088a);
+}
+
+static void __maybe_unused clockgen_init_ls2080a(struct device_node *np)
+{
+ clockgen_init(np, &chipinfo_ls2080a);
+}
+
+#ifdef CONFIG_ARCH_LS1012
+CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init_ls1012a);
+#endif
+#ifdef CONFIG_ARCH_LS1021
+CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init_ls1021a);
+#endif
+#ifdef CONFIG_ARCH_LS1043
+CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init_ls1043a);
+#endif
+#ifdef CONFIG_ARCH_LS1046
+CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init_ls1046a);
+#endif
+#ifdef CONFIG_ARCH_LS1088
+CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init_ls1088a);
+#endif
+#ifdef CONFIG_ARCH_LS2080
+CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init_ls2080a);
+#endif
diff --git a/drivers/ddr/Kconfig b/drivers/ddr/Kconfig
new file mode 100644
index 0000000000..4ea71598af
--- /dev/null
+++ b/drivers/ddr/Kconfig
@@ -0,0 +1 @@
+source "drivers/ddr/fsl/Kconfig"
diff --git a/drivers/ddr/Makefile b/drivers/ddr/Makefile
new file mode 100644
index 0000000000..faf2f9e1d6
--- /dev/null
+++ b/drivers/ddr/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DDR_FSL) += fsl/
diff --git a/drivers/ddr/fsl/Kconfig b/drivers/ddr/fsl/Kconfig
new file mode 100644
index 0000000000..9cae9028a2
--- /dev/null
+++ b/drivers/ddr/fsl/Kconfig
@@ -0,0 +1,16 @@
+config DDR_FSL
+ bool
+
+if DDR_FSL
+
+config DDR_FSL_DDR1
+ bool "Enable DDR1 support"
+config DDR_FSL_DDR2
+ bool "Enable DDR2 support"
+config DDR_FSL_DDR3
+ bool "Enable DDR3 support"
+config DDR_FSL_DDR4
+ bool "Enable DDR4 support"
+
+endif
+
diff --git a/drivers/ddr/fsl/Makefile b/drivers/ddr/fsl/Makefile
new file mode 100644
index 0000000000..86ac4b820a
--- /dev/null
+++ b/drivers/ddr/fsl/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2008-2014 Freescale Semiconductor, Inc.
+
+pbl-y += main.o util.o ctrl_regs.o options.o lc_common_dimm_params.o
+
+pbl-y += ddr1_dimm_params.o
+pbl-y += ddr2_dimm_params.o
+pbl-y += ddr3_dimm_params.o
+pbl-y += ddr4_dimm_params.o
+obj-y += arm_ddr_gen3.o
+pbl-y += fsl_ddr_gen4.o
diff --git a/drivers/ddr/fsl/arm_ddr_gen3.c b/drivers/ddr/fsl/arm_ddr_gen3.c
new file mode 100644
index 0000000000..c016917a3f
--- /dev/null
+++ b/drivers/ddr/fsl/arm_ddr_gen3.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Derived from mpc85xx_ddr_gen3.c, removed all workarounds
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <soc/fsl/fsl_immap.h>
+#include <soc/fsl/immap_lsch2.h>
+#include "fsl_ddr.h"
+
+/*
+ * c carries the register values to be programmed (c->fsl_ddr_config_reg)
+ * as well as the DDR controller instance to operate on.
+ * step: 0 goes through the initialization in one pass
+ * 1 sets registers and returns before enabling the controller
+ * 2 resumes from step 1 and continues the initialization
+ * The initialization is split into two steps so that the DDR reset signal
+ * can be deasserted in between, to comply with JEDEC specs for RDIMMs.
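+ *
+ * A typical RDIMM sequence would thus be (sketch): call with step = 1,
+ * deassert the DIMM reset signal at board level, then call again with
+ * step = 2; non-registered DIMMs can simply use step = 0.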
+ */
+void fsl_ddr_set_memctl_regs(struct fsl_ddr_controller *c, int step)
+{
+ struct ccsr_ddr __iomem *ddr = c->base;
+ const fsl_ddr_cfg_regs_t *regs = &c->fsl_ddr_config_reg;
+ unsigned int i, bus_width;
+ u32 temp_sdram_cfg;
+ u32 total_gb_size_per_controller;
+ int timeout;
+
+ if (step == 2)
+ goto step2;
+
+ if (regs->ddr_eor)
+ ddr_out32(&ddr->eor, regs->ddr_eor);
+ for (i = 0; i < c->chip_selects_per_ctrl; i++) {
+ if (i == 0) {
+ ddr_out32(&ddr->cs0_bnds, regs->cs[i].bnds);
+ ddr_out32(&ddr->cs0_config, regs->cs[i].config);
+ ddr_out32(&ddr->cs0_config_2, regs->cs[i].config_2);
+
+ } else if (i == 1) {
+ ddr_out32(&ddr->cs1_bnds, regs->cs[i].bnds);
+ ddr_out32(&ddr->cs1_config, regs->cs[i].config);
+ ddr_out32(&ddr->cs1_config_2, regs->cs[i].config_2);
+
+ } else if (i == 2) {
+ ddr_out32(&ddr->cs2_bnds, regs->cs[i].bnds);
+ ddr_out32(&ddr->cs2_config, regs->cs[i].config);
+ ddr_out32(&ddr->cs2_config_2, regs->cs[i].config_2);
+
+ } else if (i == 3) {
+ ddr_out32(&ddr->cs3_bnds, regs->cs[i].bnds);
+ ddr_out32(&ddr->cs3_config, regs->cs[i].config);
+ ddr_out32(&ddr->cs3_config_2, regs->cs[i].config_2);
+ }
+ }
+
+ ddr_out32(&ddr->timing_cfg_3, regs->timing_cfg_3);
+ ddr_out32(&ddr->timing_cfg_0, regs->timing_cfg_0);
+ ddr_out32(&ddr->timing_cfg_1, regs->timing_cfg_1);
+ ddr_out32(&ddr->timing_cfg_2, regs->timing_cfg_2);
+ ddr_out32(&ddr->sdram_mode, regs->ddr_sdram_mode);
+ ddr_out32(&ddr->sdram_mode_2, regs->ddr_sdram_mode_2);
+ ddr_out32(&ddr->sdram_mode_3, regs->ddr_sdram_mode_3);
+ ddr_out32(&ddr->sdram_mode_4, regs->ddr_sdram_mode_4);
+ ddr_out32(&ddr->sdram_mode_5, regs->ddr_sdram_mode_5);
+ ddr_out32(&ddr->sdram_mode_6, regs->ddr_sdram_mode_6);
+ ddr_out32(&ddr->sdram_mode_7, regs->ddr_sdram_mode_7);
+ ddr_out32(&ddr->sdram_mode_8, regs->ddr_sdram_mode_8);
+ ddr_out32(&ddr->sdram_md_cntl, regs->ddr_sdram_md_cntl);
+ ddr_out32(&ddr->sdram_interval, regs->ddr_sdram_interval);
+ ddr_out32(&ddr->sdram_data_init, regs->ddr_data_init);
+ ddr_out32(&ddr->sdram_clk_cntl, regs->ddr_sdram_clk_cntl);
+ ddr_out32(&ddr->timing_cfg_4, regs->timing_cfg_4);
+ ddr_out32(&ddr->timing_cfg_5, regs->timing_cfg_5);
+ ddr_out32(&ddr->ddr_zq_cntl, regs->ddr_zq_cntl);
+ ddr_out32(&ddr->ddr_wrlvl_cntl, regs->ddr_wrlvl_cntl);
+ if (regs->ddr_wrlvl_cntl_2)
+ ddr_out32(&ddr->ddr_wrlvl_cntl_2, regs->ddr_wrlvl_cntl_2);
+ if (regs->ddr_wrlvl_cntl_3)
+ ddr_out32(&ddr->ddr_wrlvl_cntl_3, regs->ddr_wrlvl_cntl_3);
+
+ ddr_out32(&ddr->ddr_sr_cntr, regs->ddr_sr_cntr);
+ ddr_out32(&ddr->ddr_sdram_rcw_1, regs->ddr_sdram_rcw_1);
+ ddr_out32(&ddr->ddr_sdram_rcw_2, regs->ddr_sdram_rcw_2);
+ ddr_out32(&ddr->ddr_cdr1, regs->ddr_cdr1);
+
+ if (is_warm_boot()) {
+ ddr_out32(&ddr->sdram_cfg_2,
+ regs->ddr_sdram_cfg_2 & ~SDRAM_CFG2_D_INIT);
+ ddr_out32(&ddr->init_addr, c->common_timing_params.base_address);
+ ddr_out32(&ddr->init_ext_addr, DDR_INIT_ADDR_EXT_UIA);
+
+ /* DRAM VRef will not be trained */
+ ddr_out32(&ddr->ddr_cdr2,
+ regs->ddr_cdr2 & ~DDR_CDR2_VREF_TRAIN_EN);
+ } else {
+ ddr_out32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2);
+ ddr_out32(&ddr->init_addr, regs->ddr_init_addr);
+ ddr_out32(&ddr->init_ext_addr, regs->ddr_init_ext_addr);
+ ddr_out32(&ddr->ddr_cdr2, regs->ddr_cdr2);
+ }
+ ddr_out32(&ddr->err_disable, regs->err_disable);
+ ddr_out32(&ddr->err_int_en, regs->err_int_en);
+ for (i = 0; i < 32; i++) {
+ if (regs->debug[i]) {
+ debug("Write to debug_%d as %08x\n", i + 1,
+ regs->debug[i]);
+ ddr_out32(&ddr->debug[i], regs->debug[i]);
+ }
+ }
+
+ /*
+ * For RDIMMs, JEDEC spec requires clocks to be stable before reset is
+ * deasserted. Clocks start when any chip select is enabled and clock
+ * control register is set. Because all DDR components are connected to
+ * one reset signal, this needs to be done in two steps. Step 1 is to
+ * get the clocks started. Step 2 resumes after reset signal is
+ * deasserted.
+ */
+ if (step == 1) {
+ udelay(200);
+ return;
+ }
+
+step2:
+ /* Set, but do not enable the memory */
+ temp_sdram_cfg = regs->ddr_sdram_cfg;
+ temp_sdram_cfg &= ~(SDRAM_CFG_MEM_EN);
+ ddr_out32(&ddr->sdram_cfg, temp_sdram_cfg);
+
+ /*
+ * 500 painful micro-seconds must elapse between
+ * the DDR clock setup and the DDR config enable.
+ * DDR2 needs 200 us and DDR3 needs 500 us per their specs;
+ * we use the maximum, 500 us, in all cases.
+ */
+ udelay(500);
+ asm volatile("dsb sy;isb");
+
+ if (is_warm_boot()) {
+ /* enter self-refresh */
+ temp_sdram_cfg = ddr_in32(&ddr->sdram_cfg_2);
+ temp_sdram_cfg |= SDRAM_CFG2_FRC_SR;
+ ddr_out32(&ddr->sdram_cfg_2, temp_sdram_cfg);
+
+ temp_sdram_cfg = (ddr_in32(&ddr->sdram_cfg) | SDRAM_CFG_BI);
+ } else {
+ temp_sdram_cfg = ddr_in32(&ddr->sdram_cfg) & ~SDRAM_CFG_BI;
+ }
+ /* Let the controller go */
+ ddr_out32(&ddr->sdram_cfg, temp_sdram_cfg | SDRAM_CFG_MEM_EN);
+ asm volatile("dsb sy;isb");
+
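+ /*
+ * Derive the amount of memory behind each enabled chip select from
+ * the CS_CONFIG row/column/bank fields and the bus width, counted
+ * in 64 MiB units (hence the "- 26" in the shift below).
+ */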
+ total_gb_size_per_controller = 0;
+ for (i = 0; i < c->chip_selects_per_ctrl; i++) {
+ if (!(regs->cs[i].config & 0x80000000))
+ continue;
+ total_gb_size_per_controller += 1 << (
+ ((regs->cs[i].config >> 14) & 0x3) + 2 +
+ ((regs->cs[i].config >> 8) & 0x7) + 12 +
+ ((regs->cs[i].config >> 0) & 0x7) + 8 +
+ 3 - ((regs->ddr_sdram_cfg >> 19) & 0x3) -
+ 26); /* minus 26 (count of 64M) */
+ }
+ if (regs->cs[0].config & 0x20000000) {
+ /* 2-way interleaving */
+ total_gb_size_per_controller <<= 1;
+ }
+ /*
+ * total memory / bus width = transactions needed
+ * transactions needed / data rate = seconds
+ * to add plenty of buffer, double the time
+ * For example, 2GB on 666MT/s 64-bit bus takes about 402ms
+ * Let's wait for 800ms
+ */
+ bus_width = 3 - ((ddr_in32(&ddr->sdram_cfg) & SDRAM_CFG_DBW_MASK)
+ >> SDRAM_CFG_DBW_SHIFT);
+ timeout = ((total_gb_size_per_controller << (6 - bus_width)) * 100 /
+ (c->ddr_freq >> 20)) << 1;
+ total_gb_size_per_controller >>= 4; /* shift down to gb size */
+ debug("total %d GB\n", total_gb_size_per_controller);
+ debug("Need to wait up to %d * 10ms\n", timeout);
+
+ /* Poll DDR_SDRAM_CFG_2[D_INIT] bit until auto-data init is done. */
+ while ((ddr_in32(&ddr->sdram_cfg_2) & SDRAM_CFG2_D_INIT) &&
+ (timeout >= 0)) {
+ udelay(10000); /* throttle polling rate */
+ timeout--;
+ }
+
+ if (timeout <= 0)
+ printf("Waiting for D_INIT timeout. Memory may not work.\n");
+
+ if (is_warm_boot()) {
+ /* exit self-refresh */
+ temp_sdram_cfg = ddr_in32(&ddr->sdram_cfg_2);
+ temp_sdram_cfg &= ~SDRAM_CFG2_FRC_SR;
+ ddr_out32(&ddr->sdram_cfg_2, temp_sdram_cfg);
+ }
+}
diff --git a/drivers/ddr/fsl/ctrl_regs.c b/drivers/ddr/fsl/ctrl_regs.c
new file mode 100644
index 0000000000..4957320d60
--- /dev/null
+++ b/drivers/ddr/fsl/ctrl_regs.c
@@ -0,0 +1,2539 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2008-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP Semiconductor
+ */
+
+/*
+ * Generic driver for Freescale DDR/DDR2/DDR3/DDR4 memory controller.
+ * Based on code from spd_sdram.c
+ * Author: James Yang [at freescale.com]
+ */
+#include <common.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <soc/fsl/fsl_immap.h>
+#include <io.h>
+#include <soc/fsl/immap_lsch2.h>
+#include <linux/log2.h>
+#include "fsl_ddr.h"
+
+/*
+ * Determine Rtt value.
+ *
+ * This should likely be either board or controller specific.
+ *
+ * Rtt(nominal) - DDR2:
+ * 0 = Rtt disabled
+ * 1 = 75 ohm
+ * 2 = 150 ohm
+ * 3 = 50 ohm
+ * Rtt(nominal) - DDR3:
+ * 0 = Rtt disabled
+ * 1 = 60 ohm
+ * 2 = 120 ohm
+ * 3 = 40 ohm
+ * 4 = 20 ohm
+ * 5 = 30 ohm
+ *
+ */
+static inline int fsl_ddr_get_rtt(const memctl_options_t *popts)
+{
+ if (is_ddr2(popts))
+ return 3;
+ else
+ return 0;
+}
+
+/*
+ * compute CAS write latency according to DDR4 spec
+ * CWL = 9 for <= 1600MT/s
+ * 10 for <= 1866MT/s
+ * 11 for <= 2133MT/s
+ * 12 for <= 2400MT/s
+ * 14 for <= 2667MT/s
+ * 16 for <= 2933MT/s
+ * 18 for higher
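+ *
+ * For example, a 2100 MT/s data rate corresponds to a tCK of roughly
+ * 952 ps, which falls into the ">= 935 ps" bucket below and therefore
+ * uses CWL = 11.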
+ */
+static inline unsigned int compute_cas_write_latency_ddr4(struct fsl_ddr_controller *c)
+{
+ unsigned int cwl;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(c);
+ if (mclk_ps >= 1250)
+ cwl = 9;
+ else if (mclk_ps >= 1070)
+ cwl = 10;
+ else if (mclk_ps >= 935)
+ cwl = 11;
+ else if (mclk_ps >= 833)
+ cwl = 12;
+ else if (mclk_ps >= 750)
+ cwl = 14;
+ else if (mclk_ps >= 681)
+ cwl = 16;
+ else
+ cwl = 18;
+
+ return cwl;
+}
+
+/*
+ * compute the CAS write latency according to DDR3 spec
+ * CWL = 5 if tCK >= 2.5ns
+ * 6 if 2.5ns > tCK >= 1.875ns
+ * 7 if 1.875ns > tCK >= 1.5ns
+ * 8 if 1.5ns > tCK >= 1.25ns
+ * 9 if 1.25ns > tCK >= 1.07ns
+ * 10 if 1.07ns > tCK >= 0.935ns
+ * 11 if 0.935ns > tCK >= 0.833ns
+ * 12 if 0.833ns > tCK >= 0.75ns
+ */
+static inline unsigned int compute_cas_write_latency_ddr3(struct fsl_ddr_controller *c)
+{
+ unsigned int cwl;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(c);
+
+ if (mclk_ps >= 2500)
+ cwl = 5;
+ else if (mclk_ps >= 1875)
+ cwl = 6;
+ else if (mclk_ps >= 1500)
+ cwl = 7;
+ else if (mclk_ps >= 1250)
+ cwl = 8;
+ else if (mclk_ps >= 1070)
+ cwl = 9;
+ else if (mclk_ps >= 935)
+ cwl = 10;
+ else if (mclk_ps >= 833)
+ cwl = 11;
+ else if (mclk_ps >= 750)
+ cwl = 12;
+ else {
+ cwl = 12;
+ printf("Warning: CWL is out of range\n");
+ }
+ return cwl;
+}
+
+/* Chip Select Configuration (CSn_CONFIG) */
+static void set_csn_config(int dimm_number, int i, fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const struct dimm_params *dimm_params)
+{
+ unsigned int cs_n_en = 0; /* Chip Select enable */
+ unsigned int intlv_en = 0; /* Memory controller interleave enable */
+ unsigned int intlv_ctl = 0; /* Interleaving control */
+ unsigned int ap_n_en = 0; /* Chip select n auto-precharge enable */
+ unsigned int odt_rd_cfg = 0; /* ODT for reads configuration */
+ unsigned int odt_wr_cfg = 0; /* ODT for writes configuration */
+ unsigned int ba_bits_cs_n = 0; /* Num of bank bits for SDRAM on CSn */
+ unsigned int row_bits_cs_n = 0; /* Num of row bits for SDRAM on CSn */
+ unsigned int col_bits_cs_n = 0; /* Num of col bits for SDRAM on CSn */
+ int go_config = 0;
+ unsigned int bg_bits_cs_n = 0; /* Num of bank group bits */
+ unsigned int n_banks_per_sdram_device;
+
+ /* Compute CS_CONFIG only for existing ranks of each DIMM. */
+ switch (i) {
+ case 0:
+ if (dimm_params[dimm_number].n_ranks > 0) {
+ go_config = 1;
+ /* These fields only available in CS0_CONFIG */
+ if (!popts->memctl_interleaving)
+ break;
+ switch (popts->memctl_interleaving_mode) {
+ case FSL_DDR_256B_INTERLEAVING:
+ case FSL_DDR_CACHE_LINE_INTERLEAVING:
+ case FSL_DDR_PAGE_INTERLEAVING:
+ case FSL_DDR_BANK_INTERLEAVING:
+ case FSL_DDR_SUPERBANK_INTERLEAVING:
+ intlv_en = popts->memctl_interleaving;
+ intlv_ctl = popts->memctl_interleaving_mode;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case 1:
+ if ((dimm_number == 0 && dimm_params[0].n_ranks > 1) || \
+ (dimm_number == 1 && dimm_params[1].n_ranks > 0))
+ go_config = 1;
+ break;
+ case 2:
+ if ((dimm_number == 0 && dimm_params[0].n_ranks > 2) || \
+ (dimm_number >= 1 && dimm_params[dimm_number].n_ranks > 0))
+ go_config = 1;
+ break;
+ case 3:
+ if ((dimm_number == 0 && dimm_params[0].n_ranks > 3) || \
+ (dimm_number == 1 && dimm_params[1].n_ranks > 1) || \
+ (dimm_number == 3 && dimm_params[3].n_ranks > 0))
+ go_config = 1;
+ break;
+ default:
+ break;
+ }
+ if (go_config) {
+ cs_n_en = 1;
+ ap_n_en = popts->cs_local_opts[i].auto_precharge;
+ odt_rd_cfg = popts->cs_local_opts[i].odt_rd_cfg;
+ odt_wr_cfg = popts->cs_local_opts[i].odt_wr_cfg;
+ if (is_ddr4(popts)) {
+ ba_bits_cs_n = dimm_params[dimm_number].bank_addr_bits;
+ bg_bits_cs_n = dimm_params[dimm_number].bank_group_bits;
+ } else {
+ n_banks_per_sdram_device
+ = dimm_params[dimm_number].n_banks_per_sdram_device;
+ ba_bits_cs_n = ilog2(n_banks_per_sdram_device) - 2;
+ }
+ row_bits_cs_n = dimm_params[dimm_number].n_row_addr - 12;
+ col_bits_cs_n = dimm_params[dimm_number].n_col_addr - 8;
+ }
+ ddr->cs[i].config = (0
+ | ((cs_n_en & 0x1) << 31)
+ | ((intlv_en & 0x3) << 29)
+ | ((intlv_ctl & 0xf) << 24)
+ | ((ap_n_en & 0x1) << 23)
+
+ /* XXX: some implementations have only 1 bit, starting at the left */
+ | ((odt_rd_cfg & 0x7) << 20)
+
+ /* XXX: some implementations have only 1 bit, starting at the left */
+ | ((odt_wr_cfg & 0x7) << 16)
+
+ | ((ba_bits_cs_n & 0x3) << 14)
+ | ((row_bits_cs_n & 0x7) << 8)
+ | ((bg_bits_cs_n & 0x3) << 4)
+ | ((col_bits_cs_n & 0x7) << 0)
+ );
+ debug("FSLDDR: cs[%d]_config = 0x%08x\n", i,ddr->cs[i].config);
+}
+
+/* Chip Select Configuration 2 (CSn_CONFIG_2) */
+/* FIXME: 8572 */
+static void set_csn_config_2(int i, fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned int pasr_cfg = 0; /* Partial array self refresh config */
+
+ ddr->cs[i].config_2 = ((pasr_cfg & 7) << 24);
+ debug("FSLDDR: cs[%d]_config_2 = 0x%08x\n", i, ddr->cs[i].config_2);
+}
+
+/* -3E = 667 CL5, -25 = CL6 800, -25E = CL5 800 */
+
+/*
+ * Check the DIMM configuration: return 2 for a quad-rank DIMM or two
+ * dual-rank DIMMs, 1 for other two-slot configurations, 0 for a single slot.
+ */
+static inline int avoid_odt_overlap(struct fsl_ddr_controller *c,
+ const struct dimm_params *dimm_params)
+{
+ if (c->dimm_slots_per_ctrl == 1)
+ if (dimm_params[0].n_ranks == 4)
+ return 2;
+
+ if (c->dimm_slots_per_ctrl == 2) {
+ if ((dimm_params[0].n_ranks == 2) &&
+ (dimm_params[1].n_ranks == 2))
+ return 2;
+
+ if ((dimm_params[0].n_ranks != 0) &&
+ (dimm_params[2].n_ranks != 0))
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * DDR SDRAM Timing Configuration 0 (TIMING_CFG_0)
+ *
+ * Avoid writing for DDR I. The new PQ38 DDR controller
+ * dreams up non-zero default values to be backwards compatible.
+ */
+static void set_timing_cfg_0(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct dimm_params *dimm_params = c->dimm_params;
+ unsigned char trwt_mclk = 0; /* Read-to-write turnaround */
+ unsigned char twrt_mclk = 0; /* Write-to-read turnaround */
+ /* 7.5 ns on -3E; 0 means WL - CL + BL/2 + 1 */
+ unsigned char trrt_mclk = 0; /* Read-to-read turnaround */
+ unsigned char twwt_mclk = 0; /* Write-to-write turnaround */
+
+ /* Active powerdown exit timing (tXARD and tXARDS). */
+ unsigned char act_pd_exit_mclk;
+ /* Precharge powerdown exit timing (tXP). */
+ unsigned char pre_pd_exit_mclk;
+ /* ODT powerdown exit timing (tAXPD). */
+ unsigned char taxpd_mclk = 0;
+ /* Mode register set cycle time (tMRD). */
+ unsigned char tmrd_mclk;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(c);
+
+ if (is_ddr4(popts)) {
+ /* tXP=max(4nCK, 6ns) */
+ int txp = max((int)mclk_ps * 4, 6000); /* unit=ps */
+ unsigned int data_rate = c->ddr_freq;
+
+ /* for faster clock, need more time for data setup */
+ trwt_mclk = (data_rate/1000000 > 1900) ? 3 : 2;
+
+ /*
+ * for single quad-rank DIMM and two-slot DIMMs
+ * to avoid ODT overlap
+ */
+ switch (avoid_odt_overlap(c, dimm_params)) {
+ case 2:
+ twrt_mclk = 2;
+ twwt_mclk = 2;
+ trrt_mclk = 2;
+ break;
+ default:
+ twrt_mclk = 1;
+ twwt_mclk = 1;
+ trrt_mclk = 0;
+ break;
+ }
+
+ act_pd_exit_mclk = picos_to_mclk(c, txp);
+ pre_pd_exit_mclk = act_pd_exit_mclk;
+ /*
+ * MRS_CYC = max(tMRD, tMOD)
+ * tMRD = 8nCK, tMOD = max(24nCK, 15ns)
+ */
+ tmrd_mclk = max(24U, picos_to_mclk(c, 15000));
+ } else if (is_ddr3(popts)) {
+ unsigned int data_rate = c->ddr_freq;
+ int txp;
+ unsigned int ip_rev;
+ int odt_overlap;
+ /*
+ * (tXARD and tXARDS). Empirical?
+ * The DDR3 spec does not define tXARD,
+ * so we use tXP instead.
+ * tXP = max(3nCK, 7.5ns) for DDR3-800, 1066
+ * max(3nCK, 6ns) for DDR3-1333, 1600, 1866, 2133
+ * The spec does not define tAXPD either; we use
+ * tAXPD = 1, which needs design confirmation.
+ */
+ txp = max((int)mclk_ps * 3, (mclk_ps > 1540 ? 7500 : 6000));
+
+ ip_rev = fsl_ddr_get_version(c);
+ if (ip_rev >= 0x40700) {
+ /*
+ * MRS_CYC = max(tMRD, tMOD)
+ * tMRD = 4nCK (8nCK for RDIMM)
+ * tMOD = max(12nCK, 15ns)
+ */
+ tmrd_mclk = max((unsigned int)12, picos_to_mclk(c, 15000));
+ } else {
+ /*
+ * MRS_CYC = tMRD
+ * tMRD = 4nCK (8nCK for RDIMM)
+ */
+ if (popts->registered_dimm_en)
+ tmrd_mclk = 8;
+ else
+ tmrd_mclk = 4;
+ }
+
+ /* set the turnaround time */
+
+ /*
+ * for single quad-rank DIMM and two-slot DIMMs
+ * to avoid ODT overlap
+ */
+ odt_overlap = avoid_odt_overlap(c, dimm_params);
+ switch (odt_overlap) {
+ case 2:
+ twwt_mclk = 2;
+ trrt_mclk = 1;
+ break;
+ case 1:
+ twwt_mclk = 1;
+ trrt_mclk = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* for faster clock, need more time for data setup */
+ trwt_mclk = (data_rate/1000000 > 1800) ? 2 : 1;
+
+ if ((data_rate/1000000 > 1150) || (popts->memctl_interleaving))
+ twrt_mclk = 1;
+
+ if (popts->dynamic_power == 0) { /* powerdown is not used */
+ act_pd_exit_mclk = 1;
+ pre_pd_exit_mclk = 1;
+ taxpd_mclk = 1;
+ } else {
+ /* act_pd_exit_mclk = tXARD, see above */
+ act_pd_exit_mclk = picos_to_mclk(c, txp);
+ /* Mode register MR0[A12] is '1' - fast exit */
+ pre_pd_exit_mclk = act_pd_exit_mclk;
+ taxpd_mclk = 1;
+ }
+ } else if (is_ddr2(popts)) {
+ /*
+ * (tXARD and tXARDS). Empirical?
+ * tXARD = 2 for DDR2
+ * tXP=2
+ * tAXPD=8
+ */
+ act_pd_exit_mclk = 2;
+ pre_pd_exit_mclk = 2;
+ taxpd_mclk = 8;
+ tmrd_mclk = 2;
+ } else {
+ return;
+ }
+
+ if (popts->trwt_override)
+ trwt_mclk = popts->trwt;
+
+ ddr->timing_cfg_0 = (0
+ | ((trwt_mclk & 0x3) << 30) /* RWT */
+ | ((twrt_mclk & 0x3) << 28) /* WRT */
+ | ((trrt_mclk & 0x3) << 26) /* RRT */
+ | ((twwt_mclk & 0x3) << 24) /* WWT */
+ | ((act_pd_exit_mclk & 0xf) << 20) /* ACT_PD_EXIT */
+ | ((pre_pd_exit_mclk & 0xF) << 16) /* PRE_PD_EXIT */
+ | ((taxpd_mclk & 0xf) << 8) /* ODT_PD_EXIT */
+ | ((tmrd_mclk & 0x1f) << 0) /* MRS_CYC */
+ );
+ debug("FSLDDR: timing_cfg_0 = 0x%08x\n", ddr->timing_cfg_0);
+}
+
+/* DDR SDRAM Timing Configuration 3 (TIMING_CFG_3) */
+static void set_timing_cfg_3(struct fsl_ddr_controller *c,
+ unsigned int cas_latency,
+ unsigned int additive_latency)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ /* Extended precharge to activate interval (tRP) */
+ unsigned int ext_pretoact = 0;
+ /* Extended Activate to precharge interval (tRAS) */
+ unsigned int ext_acttopre = 0;
+ /* Extended activate to read/write interval (tRCD) */
+ unsigned int ext_acttorw = 0;
+ /* Extended refresh recovery time (tRFC) */
+ unsigned int ext_refrec;
+ /* Extended MCAS latency from READ cmd */
+ unsigned int ext_caslat = 0;
+ /* Extended additive latency */
+ unsigned int ext_add_lat = 0;
+ /* Extended last data to precharge interval (tWR) */
+ unsigned int ext_wrrec = 0;
+ /* Control Adjust */
+ unsigned int cntl_adj = 0;
+
+ ext_pretoact = picos_to_mclk(c, common_dimm->trp_ps) >> 4;
+ ext_acttopre = picos_to_mclk(c, common_dimm->tras_ps) >> 4;
+ ext_acttorw = picos_to_mclk(c, common_dimm->trcd_ps) >> 4;
+ ext_caslat = (2 * cas_latency - 1) >> 4;
+ ext_add_lat = additive_latency >> 4;
+
+ if (is_ddr4(popts))
+ ext_refrec = (picos_to_mclk(c, common_dimm->trfc1_ps) - 8) >> 4;
+ else
+ ext_refrec = (picos_to_mclk(c, common_dimm->trfc_ps) - 8) >> 4;
+ /* ext_wrrec only deals with 16 clocks and above, or 14 with OTF */
+
+ ext_wrrec = (picos_to_mclk(c, common_dimm->twr_ps) +
+ (popts->otf_burst_chop_en ? 2 : 0)) >> 4;
+
+ ddr->timing_cfg_3 = (0
+ | ((ext_pretoact & 0x1) << 28)
+ | ((ext_acttopre & 0x3) << 24)
+ | ((ext_acttorw & 0x1) << 22)
+ | ((ext_refrec & 0x3F) << 16)
+ | ((ext_caslat & 0x3) << 12)
+ | ((ext_add_lat & 0x1) << 10)
+ | ((ext_wrrec & 0x1) << 8)
+ | ((cntl_adj & 0x7) << 0)
+ );
+ debug("FSLDDR: timing_cfg_3 = 0x%08x\n", ddr->timing_cfg_3);
+}
+
+/* DDR SDRAM Timing Configuration 1 (TIMING_CFG_1) */
+static void set_timing_cfg_1(struct fsl_ddr_controller *c, unsigned int cas_latency)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ /* Precharge-to-activate interval (tRP) */
+ unsigned char pretoact_mclk;
+ /* Activate to precharge interval (tRAS) */
+ unsigned char acttopre_mclk;
+ /* Activate to read/write interval (tRCD) */
+ unsigned char acttorw_mclk;
+ /* CASLAT */
+ unsigned char caslat_ctrl;
+ /* Refresh recovery time (tRFC) ; trfc_low */
+ unsigned char refrec_ctrl;
+ /* Last data to precharge minimum interval (tWR) */
+ unsigned char wrrec_mclk;
+ /* Activate-to-activate interval (tRRD) */
+ unsigned char acttoact_mclk;
+ /* Last write data pair to read command issue interval (tWTR) */
+ unsigned char wrtord_mclk;
+
+ pretoact_mclk = picos_to_mclk(c, common_dimm->trp_ps);
+ acttopre_mclk = picos_to_mclk(c, common_dimm->tras_ps);
+ acttorw_mclk = picos_to_mclk(c, common_dimm->trcd_ps);
+
+ /*
+ * Translate CAS Latency to a DDR controller field value:
+ *
+ * CAS Lat DDR I DDR II Ctrl
+ * Clocks SPD Bit SPD Bit Value
+ * ------- ------- ------- -----
+ * 1.0 0 0001
+ * 1.5 1 0010
+ * 2.0 2 2 0011
+ * 2.5 3 0100
+ * 3.0 4 3 0101
+ * 3.5 5 0110
+ * 4.0 4 0111
+ * 4.5 1000
+ * 5.0 5 1001
+ */
+ if (is_ddr1(popts)) {
+ caslat_ctrl = (cas_latency + 1) & 0x07;
+ } else if (is_ddr2(popts)) {
+ caslat_ctrl = 2 * cas_latency - 1;
+ } else {
+ /*
+ * If the CAS latency is more than 8 cycles,
+ * we need to set the extended bit for it in
+ * TIMING_CFG_3[EXT_CASLAT].
+ */
+ if (fsl_ddr_get_version(c) <= 0x40400)
+ caslat_ctrl = 2 * cas_latency - 1;
+ else
+ caslat_ctrl = (cas_latency - 1) << 1;
+ }
+
+ if (is_ddr4(popts)) {
+ /* DDR4 supports 10, 12, 14, 16, 18, 20, 24 */
+ static const u8 wrrec_table[] = {
+ 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10,
+ 12, 12, 14, 14, 16,
+ 16, 18, 18, 20, 20,
+ 24, 24, 24, 24
+ };
+
+ refrec_ctrl = picos_to_mclk(c, common_dimm->trfc1_ps) - 8;
+ wrrec_mclk = picos_to_mclk(c, common_dimm->twr_ps);
+ acttoact_mclk = max(picos_to_mclk(c, common_dimm->trrds_ps), 4U);
+ wrtord_mclk = max(2U, picos_to_mclk(c, 2500));
+ if ((wrrec_mclk < 1) || (wrrec_mclk > 24))
+ printf("Error: WRREC doesn't support %d clocks\n", wrrec_mclk);
+ else
+ wrrec_mclk = wrrec_table[wrrec_mclk - 1];
+ } else {
+ /* DDR_SDRAM_MODE doesn't support 9,11,13,15 */
+ static const u8 wrrec_table[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 10, 10, 12, 12, 14, 14, 0, 0
+ };
+
+ refrec_ctrl = picos_to_mclk(c, common_dimm->trfc_ps) - 8;
+ wrrec_mclk = picos_to_mclk(c, common_dimm->twr_ps);
+ acttoact_mclk = picos_to_mclk(c, common_dimm->trrd_ps);
+ wrtord_mclk = picos_to_mclk(c, common_dimm->twtr_ps);
+ if ((wrrec_mclk < 1) || (wrrec_mclk > 16))
+ printf("Error: WRREC doesn't support %d clocks\n", wrrec_mclk);
+ else
+ wrrec_mclk = wrrec_table[wrrec_mclk - 1];
+ }
+
+ if (popts->otf_burst_chop_en)
+ wrrec_mclk += 2;
+
+ /*
+ * JEDEC has min requirement for tRRD
+ */
+ if (is_ddr3(popts) && acttoact_mclk < 4)
+ acttoact_mclk = 4;
+
+ /*
+ * JEDEC has some min requirements for tWTR
+ */
+ if (is_ddr2(popts) && wrtord_mclk < 2)
+ wrtord_mclk = 2;
+
+ if (is_ddr3(popts) && wrtord_mclk < 4)
+ wrtord_mclk = 4;
+
+ if (popts->otf_burst_chop_en)
+ wrtord_mclk += 2;
+
+ ddr->timing_cfg_1 = (0
+ | ((pretoact_mclk & 0x0F) << 28)
+ | ((acttopre_mclk & 0x0F) << 24)
+ | ((acttorw_mclk & 0xF) << 20)
+ | ((caslat_ctrl & 0xF) << 16)
+ | ((refrec_ctrl & 0xF) << 12)
+ | ((wrrec_mclk & 0x0F) << 8)
+ | ((acttoact_mclk & 0x0F) << 4)
+ | ((wrtord_mclk & 0x0F) << 0)
+ );
+ debug("FSLDDR: timing_cfg_1 = 0x%08x\n", ddr->timing_cfg_1);
+}
+
+/* DDR SDRAM Timing Configuration 2 (TIMING_CFG_2) */
+static void set_timing_cfg_2(struct fsl_ddr_controller *c,
+ unsigned int cas_latency,
+ unsigned int additive_latency)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ /* Additive latency */
+ unsigned char add_lat_mclk;
+ /* CAS-to-preamble override */
+ unsigned short cpo;
+ /* Write latency */
+ unsigned char wr_lat;
+ /* Read to precharge (tRTP) */
+ unsigned char rd_to_pre;
+ /* Write command to write data strobe timing adjustment */
+ unsigned char wr_data_delay;
+ /* Minimum CKE pulse width (tCKE) */
+ unsigned char cke_pls;
+ /* Window for four activates (tFAW) */
+ unsigned short four_act;
+ unsigned int mclk_ps;
+
+ /* FIXME add check that this must be less than acttorw_mclk */
+ add_lat_mclk = additive_latency;
+ cpo = popts->cpo_override;
+
+ if (is_ddr1(popts)) {
+ /*
+ * This is a lie. It should really be 1, but if it is
+ * set to 1, bits overlap into the old controller's
+ * otherwise unused ACSM field. If we leave it 0, then
+ * the HW will magically treat it as 1 for DDR 1. Oh Yea.
+ */
+ wr_lat = 0;
+ } else if (is_ddr2(popts)) {
+ wr_lat = cas_latency - 1;
+ } else if (is_ddr3(popts)) {
+ wr_lat = compute_cas_write_latency_ddr3(c);
+ } else {
+ wr_lat = compute_cas_write_latency_ddr4(c);
+ }
+
+ if (is_ddr4(popts))
+ rd_to_pre = picos_to_mclk(c, 7500);
+ else
+ rd_to_pre = picos_to_mclk(c, common_dimm->trtp_ps);
+
+ /*
+ * JEDEC has some min requirements for tRTP
+ */
+ if (is_ddr2(popts) && rd_to_pre < 2)
+ rd_to_pre = 2;
+
+ if (is_ddr3_4(popts) && rd_to_pre < 4)
+ rd_to_pre = 4;
+
+ if (popts->otf_burst_chop_en)
+ rd_to_pre += 2; /* according to UM */
+
+ wr_data_delay = popts->write_data_delay;
+
+ if (is_ddr4(popts)) {
+ cpo = 0;
+ cke_pls = max(3U, picos_to_mclk(c, 5000));
+ } else if (is_ddr3(popts)) {
+ mclk_ps = get_memory_clk_period_ps(c);
+
+ /*
+ * cke pulse = max(3nCK, 7.5ns) for DDR3-800
+ * max(3nCK, 5.625ns) for DDR3-1066, 1333
+ * max(3nCK, 5ns) for DDR3-1600, 1866, 2133
+ */
+ cke_pls = max(3U, picos_to_mclk(c, mclk_ps > 1870 ? 7500 :
+ (mclk_ps > 1245 ? 5625 : 5000)));
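+ /* Example (illustrative, not in the original): DDR3-1600 with mclk_ps ~= 1250 selects 5625 ps, so cke_pls = max(3, picos_to_mclk(c, 5625)) = 5 clocks, assuming picos_to_mclk rounds up */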
+ } else if (is_ddr2(popts)) {
+ cke_pls = FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR2;
+ } else {
+ cke_pls = FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR1;
+ }
+
+ four_act = picos_to_mclk(c, popts->tfaw_window_four_activates_ps);
+
+ ddr->timing_cfg_2 = (0
+ | ((add_lat_mclk & 0xf) << 28)
+ | ((cpo & 0x1f) << 23)
+ | ((wr_lat & 0xf) << 19)
+ | (((wr_lat & 0x10) >> 4) << 18)
+ | ((rd_to_pre & RD_TO_PRE_MASK) << RD_TO_PRE_SHIFT)
+ | ((wr_data_delay & WR_DATA_DELAY_MASK) << WR_DATA_DELAY_SHIFT)
+ | ((cke_pls & 0x7) << 6)
+ | ((four_act & 0x3f) << 0)
+ );
+ debug("FSLDDR: timing_cfg_2 = 0x%08x\n", ddr->timing_cfg_2);
+}
+
+/* DDR SDRAM Register Control Word */
+static void set_ddr_sdram_rcw(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ unsigned int ddr_freq = c->ddr_freq / 1000000;
+ unsigned int rc0a, rc0f;
+
+ if (common_dimm->all_dimms_registered &&
+ !common_dimm->all_dimms_unbuffered) {
+ if (popts->rcw_override) {
+ ddr->ddr_sdram_rcw_1 = popts->rcw_1;
+ ddr->ddr_sdram_rcw_2 = popts->rcw_2;
+ ddr->ddr_sdram_rcw_3 = popts->rcw_3;
+ } else {
+ rc0a = ddr_freq > 3200 ? 0x7 :
+ (ddr_freq > 2933 ? 0x6 :
+ (ddr_freq > 2666 ? 0x5 :
+ (ddr_freq > 2400 ? 0x4 :
+ (ddr_freq > 2133 ? 0x3 :
+ (ddr_freq > 1866 ? 0x2 :
+ (ddr_freq > 1600 ? 1 : 0))))));
+ rc0f = ddr_freq > 3200 ? 0x3 :
+ (ddr_freq > 2400 ? 0x2 :
+ (ddr_freq > 2133 ? 0x1 : 0));
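+ /* Example (illustrative, not in the original): ddr_freq = 2400 MT/s gives rc0a = 0x3 (2400 is not > 2400, but is > 2133) and rc0f = 0x1 */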
+ ddr->ddr_sdram_rcw_1 =
+ common_dimm->rcw[0] << 28 | \
+ common_dimm->rcw[1] << 24 | \
+ common_dimm->rcw[2] << 20 | \
+ common_dimm->rcw[3] << 16 | \
+ common_dimm->rcw[4] << 12 | \
+ common_dimm->rcw[5] << 8 | \
+ common_dimm->rcw[6] << 4 | \
+ common_dimm->rcw[7];
+ ddr->ddr_sdram_rcw_2 =
+ common_dimm->rcw[8] << 28 | \
+ common_dimm->rcw[9] << 24 | \
+ rc0a << 20 | \
+ common_dimm->rcw[11] << 16 | \
+ common_dimm->rcw[12] << 12 | \
+ common_dimm->rcw[13] << 8 | \
+ common_dimm->rcw[14] << 4 | \
+ rc0f;
+ ddr->ddr_sdram_rcw_3 =
+ ((ddr_freq - 1260 + 19) / 20) << 8;
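+ /* Example (illustrative, not in the original): ddr_freq = 2133 gives (2133 - 1260 + 19) / 20 = 44, so ddr_sdram_rcw_3 = 44 << 8 = 0x2c00 */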
+ }
+ debug("FSLDDR: ddr_sdram_rcw_1 = 0x%08x\n",
+ ddr->ddr_sdram_rcw_1);
+ debug("FSLDDR: ddr_sdram_rcw_2 = 0x%08x\n",
+ ddr->ddr_sdram_rcw_2);
+ debug("FSLDDR: ddr_sdram_rcw_3 = 0x%08x\n",
+ ddr->ddr_sdram_rcw_3);
+ }
+}
+
+/* DDR SDRAM control configuration (DDR_SDRAM_CFG) */
+static void set_ddr_sdram_cfg(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ unsigned int mem_en; /* DDR SDRAM interface logic enable */
+ unsigned int sren; /* Self refresh enable (during sleep) */
+ unsigned int ecc_en = 0; /* ECC enable. */
+ unsigned int rd_en; /* Registered DIMM enable */
+ unsigned int sdram_type; /* Type of SDRAM */
+ unsigned int dyn_pwr; /* Dynamic power management mode */
+ unsigned int dbw; /* DRAM data bus width */
+ unsigned int eight_be = 0; /* 8-beat burst enable, DDR2 is zero */
+ unsigned int ncap = 0; /* Non-concurrent auto-precharge */
+ unsigned int threet_en; /* Enable 3T timing */
+ unsigned int twot_en; /* Enable 2T timing */
+ unsigned int ba_intlv_ctl; /* Bank (CS) interleaving control */
+ unsigned int x32_en = 0; /* x32 enable */
+ unsigned int pchb8 = 0; /* precharge bit 8 enable */
+ unsigned int hse; /* Global half strength override */
+ unsigned int acc_ecc_en = 0; /* Accumulated ECC enable */
+ unsigned int mem_halt = 0; /* memory controller halt */
+ unsigned int bi = 0; /* Bypass initialization */
+
+ mem_en = 1;
+ sren = popts->self_refresh_in_sleep;
+ if (common_dimm->all_dimms_ecc_capable)
+ ecc_en = 1;
+
+ if (common_dimm->all_dimms_registered &&
+ !common_dimm->all_dimms_unbuffered) {
+ rd_en = 1;
+ twot_en = 0;
+ } else {
+ rd_en = 0;
+ twot_en = popts->twot_en;
+ }
+
+ sdram_type = popts->ddrtype;
+
+ dyn_pwr = popts->dynamic_power;
+ dbw = popts->data_bus_width;
+ /* 8-beat burst enable, DDR3/DDR4 case:
+ * it must be cleared when using the on-the-fly burst mode,
+ * and set when using the 32-bit bus mode.
+ */
+ if (is_ddr3_4(popts)) {
+ if (popts->burst_length == DDR_BL8)
+ eight_be = 1;
+ if (popts->burst_length == DDR_OTF)
+ eight_be = 0;
+ if (dbw == 0x1)
+ eight_be = 1;
+ }
+
+ threet_en = popts->threet_en;
+ ba_intlv_ctl = popts->ba_intlv_ctl;
+ hse = popts->half_strength_driver_enable;
+
+ /* set when ddr bus width < 64 */
+ acc_ecc_en = (dbw != 0 && ecc_en == 1) ? 1 : 0;
+
+ ddr->ddr_sdram_cfg = (0
+ | ((mem_en & 0x1) << 31)
+ | ((sren & 0x1) << 30)
+ | ((ecc_en & 0x1) << 29)
+ | ((rd_en & 0x1) << 28)
+ | ((sdram_type & 0x7) << 24)
+ | ((dyn_pwr & 0x1) << 21)
+ | ((dbw & 0x3) << 19)
+ | ((eight_be & 0x1) << 18)
+ | ((ncap & 0x1) << 17)
+ | ((threet_en & 0x1) << 16)
+ | ((twot_en & 0x1) << 15)
+ | ((ba_intlv_ctl & 0x7F) << 8)
+ | ((x32_en & 0x1) << 5)
+ | ((pchb8 & 0x1) << 4)
+ | ((hse & 0x1) << 3)
+ | ((acc_ecc_en & 0x1) << 2)
+ | ((mem_halt & 0x1) << 1)
+ | ((bi & 0x1) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_cfg = 0x%08x\n", ddr->ddr_sdram_cfg);
+}
+
+/* DDR SDRAM control configuration 2 (DDR_SDRAM_CFG_2) */
+static void set_ddr_sdram_cfg_2(struct fsl_ddr_controller *c,
+ const unsigned int unq_mrs_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ unsigned int frc_sr = 0; /* Force self refresh */
+ unsigned int sr_ie = 0; /* Self-refresh interrupt enable */
+ unsigned int odt_cfg = 0; /* ODT configuration */
+ unsigned int num_pr; /* Number of posted refreshes */
+ unsigned int slow = 0; /* DDR runs at less than 1250 MT/s */
+ unsigned int x4_en = 0; /* x4 DRAM enable */
+ unsigned int obc_cfg; /* On-The-Fly Burst Chop Cfg */
+ unsigned int ap_en; /* Address Parity Enable */
+ unsigned int d_init; /* DRAM data initialization */
+ unsigned int rcw_en = 0; /* Register Control Word Enable */
+ unsigned int md_en = 0; /* Mirrored DIMM Enable */
+ unsigned int qd_en = 0; /* quad-rank DIMM Enable */
+ int i;
+ unsigned int dll_rst_dis; /* DLL reset disable */
+ unsigned int dqs_cfg; /* DQS configuration */
+
+ if (is_ddr4(popts)) {
+ dll_rst_dis = 0;
+ dqs_cfg = 0;
+ } else {
+ dqs_cfg = popts->dqs_config;
+ dll_rst_dis = 1;
+ }
+
+ for (i = 0; i < c->chip_selects_per_ctrl; i++) {
+ if (popts->cs_local_opts[i].odt_rd_cfg
+ || popts->cs_local_opts[i].odt_wr_cfg) {
+ odt_cfg = SDRAM_CFG2_ODT_ONLY_READ;
+ break;
+ }
+ }
+ sr_ie = popts->self_refresh_interrupt_en;
+ num_pr = popts->package_3ds + 1;
+
+ /*
+ * 8572 manual says
+ * {TIMING_CFG_1[PRETOACT]
+ * + [DDR_SDRAM_CFG_2[NUM_PR]
+ * * ({EXT_REFREC || REFREC} + 8 + 2)]}
+ * << DDR_SDRAM_INTERVAL[REFINT]
+ */
+ if (is_ddr3_4(popts))
+ obc_cfg = popts->otf_burst_chop_en;
+ else
+ obc_cfg = 0;
+
+ slow = c->ddr_freq < 1249000000;
+
+ if (popts->registered_dimm_en)
+ rcw_en = 1;
+
+ /* DDR4 can have address parity for UDIMM and discrete */
+ if (!is_ddr4(popts) && !popts->registered_dimm_en) {
+ ap_en = 0;
+ } else {
+ ap_en = popts->ap_en;
+ }
+
+ x4_en = popts->x4_en ? 1 : 0;
+
+ /* Use the DDR controller to auto initialize memory. */
+ d_init = common_dimm->all_dimms_ecc_capable ? 1 : 0;
+ ddr->ddr_data_init = 0xdeadbeef;
+
+ if (is_ddr3_4(popts))
+ md_en = popts->mirrored_dimm;
+
+ qd_en = popts->quad_rank_present ? 1 : 0;
+ ddr->ddr_sdram_cfg_2 = (0
+ | ((frc_sr & 0x1) << 31)
+ | ((sr_ie & 0x1) << 30)
+ | ((dll_rst_dis & 0x1) << 29)
+ | ((dqs_cfg & 0x3) << 26)
+ | ((odt_cfg & 0x3) << 21)
+ | ((num_pr & 0xf) << 12)
+ | ((slow & 1) << 11)
+ | (x4_en << 10)
+ | (qd_en << 9)
+ | (unq_mrs_en << 8)
+ | ((obc_cfg & 0x1) << 6)
+ | ((ap_en & 0x1) << 5)
+ | ((d_init & 0x1) << 4)
+ | ((rcw_en & 0x1) << 2)
+ | ((md_en & 0x1) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_cfg_2 = 0x%08x\n", ddr->ddr_sdram_cfg_2);
+}
+
+/* DDR SDRAM Mode configuration 2 (DDR_SDRAM_MODE_2) */
+static void set_ddr4_sdram_mode_2(struct fsl_ddr_controller *c,
+ const unsigned int unq_mrs_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ unsigned short esdmode2 = 0; /* Extended SDRAM mode 2 */
+ unsigned short esdmode3 = 0; /* Extended SDRAM mode 3 */
+ int i;
+ unsigned int wr_crc = 0; /* Disable */
+ unsigned int rtt_wr = 0; /* Rtt_WR - dynamic ODT off */
+ unsigned int srt = 0; /* self-refresh temperature, normal range */
+ unsigned int cwl = compute_cas_write_latency_ddr4(c) - 9;
+ unsigned int mpr = 0; /* serial */
+ unsigned int wc_lat;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(c);
+
+ if (popts->rtt_override)
+ rtt_wr = popts->rtt_wr_override_value;
+ else
+ rtt_wr = popts->cs_local_opts[0].odt_rtt_wr;
+
+ if (common_dimm->extended_op_srt)
+ srt = common_dimm->extended_op_srt;
+
+ esdmode2 = (0
+ | ((wr_crc & 0x1) << 12)
+ | ((rtt_wr & 0x3) << 9)
+ | ((srt & 0x3) << 6)
+ | ((cwl & 0x7) << 3));
+
+ if (mclk_ps >= 1250)
+ wc_lat = 0;
+ else if (mclk_ps >= 833)
+ wc_lat = 1;
+ else
+ wc_lat = 2;
+
+ esdmode3 = (0
+ | ((mpr & 0x3) << 11)
+ | ((wc_lat & 0x3) << 9));
+
+ ddr->ddr_sdram_mode_2 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_mode_2 = 0x%08x\n", ddr->ddr_sdram_mode_2);
+
+ if (unq_mrs_en) { /* unique mode registers are supported */
+ for (i = 1; i < c->chip_selects_per_ctrl; i++) {
+ if (popts->rtt_override)
+ rtt_wr = popts->rtt_wr_override_value;
+ else
+ rtt_wr = popts->cs_local_opts[i].odt_rtt_wr;
+
+ esdmode2 &= 0xF9FF; /* clear bit 10, 9 */
+ esdmode2 |= (rtt_wr & 0x3) << 9;
+ switch (i) {
+ case 1:
+ ddr->ddr_sdram_mode_4 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ break;
+ case 2:
+ ddr->ddr_sdram_mode_6 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ break;
+ case 3:
+ ddr->ddr_sdram_mode_8 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ break;
+ }
+ }
+ debug("FSLDDR: ddr_sdram_mode_4 = 0x%08x\n",
+ ddr->ddr_sdram_mode_4);
+ debug("FSLDDR: ddr_sdram_mode_6 = 0x%08x\n",
+ ddr->ddr_sdram_mode_6);
+ debug("FSLDDR: ddr_sdram_mode_8 = 0x%08x\n",
+ ddr->ddr_sdram_mode_8);
+ }
+}
+
+/* DDR SDRAM Mode configuration 2 (DDR_SDRAM_MODE_2) */
+static void set_ddr3_sdram_mode_2(struct fsl_ddr_controller *c,
+ const unsigned int unq_mrs_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ unsigned short esdmode2 = 0; /* Extended SDRAM mode 2 */
+ unsigned short esdmode3 = 0; /* Extended SDRAM mode 3 */
+ int i;
+ unsigned int rtt_wr = 0; /* Rtt_WR - dynamic ODT off */
+ unsigned int srt = 0; /* self-refresh temperature, normal range */
+ unsigned int asr = 0; /* auto self-refresh disable */
+ unsigned int cwl = compute_cas_write_latency_ddr3(c) - 5;
+ unsigned int pasr = 0; /* partial array self refresh disable */
+
+ if (popts->rtt_override)
+ rtt_wr = popts->rtt_wr_override_value;
+ else
+ rtt_wr = popts->cs_local_opts[0].odt_rtt_wr;
+
+ if (common_dimm->extended_op_srt)
+ srt = common_dimm->extended_op_srt;
+
+ esdmode2 = (0
+ | ((rtt_wr & 0x3) << 9)
+ | ((srt & 0x1) << 7)
+ | ((asr & 0x1) << 6)
+ | ((cwl & 0x7) << 3)
+ | ((pasr & 0x7) << 0));
+ ddr->ddr_sdram_mode_2 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_mode_2 = 0x%08x\n", ddr->ddr_sdram_mode_2);
+
+ if (unq_mrs_en) { /* unique mode registers are supported */
+ for (i = 1; i < c->chip_selects_per_ctrl; i++) {
+ if (popts->rtt_override)
+ rtt_wr = popts->rtt_wr_override_value;
+ else
+ rtt_wr = popts->cs_local_opts[i].odt_rtt_wr;
+
+ esdmode2 &= 0xF9FF; /* clear bit 10, 9 */
+ esdmode2 |= (rtt_wr & 0x3) << 9;
+ switch (i) {
+ case 1:
+ ddr->ddr_sdram_mode_4 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ break;
+ case 2:
+ ddr->ddr_sdram_mode_6 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ break;
+ case 3:
+ ddr->ddr_sdram_mode_8 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ break;
+ }
+ }
+ debug("FSLDDR: ddr_sdram_mode_4 = 0x%08x\n",
+ ddr->ddr_sdram_mode_4);
+ debug("FSLDDR: ddr_sdram_mode_6 = 0x%08x\n",
+ ddr->ddr_sdram_mode_6);
+ debug("FSLDDR: ddr_sdram_mode_8 = 0x%08x\n",
+ ddr->ddr_sdram_mode_8);
+ }
+}
+
+/* DDR SDRAM Mode configuration 2 (DDR_SDRAM_MODE_2) */
+static void set_ddr1_2_sdram_mode_2(struct fsl_ddr_controller *c,
+ const unsigned int unq_mrs_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ unsigned short esdmode2 = 0; /* Extended SDRAM mode 2 */
+ unsigned short esdmode3 = 0; /* Extended SDRAM mode 3 */
+
+ ddr->ddr_sdram_mode_2 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_mode_2 = 0x%08x\n", ddr->ddr_sdram_mode_2);
+}
+
+/* DDR SDRAM Mode configuration 9 (DDR_SDRAM_MODE_9) */
+static void set_ddr_sdram_mode_9(struct fsl_ddr_controller *c,
+ const unsigned int unq_mrs_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ int i;
+ unsigned short esdmode4 = 0; /* Extended SDRAM mode 4 */
+ unsigned short esdmode5; /* Extended SDRAM mode 5 */
+ int rtt_park = 0;
+ bool four_cs = false;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(c);
+
+ if ((ddr->cs[0].config & SDRAM_CS_CONFIG_EN) &&
+ (ddr->cs[1].config & SDRAM_CS_CONFIG_EN) &&
+ (ddr->cs[2].config & SDRAM_CS_CONFIG_EN) &&
+ (ddr->cs[3].config & SDRAM_CS_CONFIG_EN))
+ four_cs = true;
+
+ if (ddr->cs[0].config & SDRAM_CS_CONFIG_EN) {
+ esdmode5 = 0x00000500; /* Data mask enable, RTT_PARK CS0 */
+ rtt_park = four_cs ? 0 : 1;
+ } else {
+ esdmode5 = 0x00000400; /* Data mask enabled */
+ }
+
+ /*
+ * For DDR3, set C/A latency if address parity is enabled.
+ * For DDR4, set C/A latency for UDIMM only. For RDIMM the delay is
+ * handled by register chip and RCW settings.
+ */
+ if ((ddr->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN) &&
+ (!is_ddr4(popts) || !popts->registered_dimm_en)) {
+ if (mclk_ps >= 935) {
+ /* for DDR4-1600/1866/2133 */
+ esdmode5 |= DDR_MR5_CA_PARITY_LAT_4_CLK;
+ } else if (mclk_ps >= 833) {
+ /* for DDR4-2400 */
+ esdmode5 |= DDR_MR5_CA_PARITY_LAT_5_CLK;
+ } else {
+ printf("parity: mclk_ps = %d not supported\n", mclk_ps);
+ }
+ }
+
+ ddr->ddr_sdram_mode_9 = (0
+ | ((esdmode4 & 0xffff) << 16)
+ | ((esdmode5 & 0xffff) << 0)
+ );
+
+ /* Normally only the first enabled CS uses 0x500, the others use 0x400.
+ * But when all four chip selects are enabled, all mode registers
+ * need 0x500 to park.
+ */
+
+ debug("FSLDDR: ddr_sdram_mode_9 = 0x%08x\n", ddr->ddr_sdram_mode_9);
+ if (unq_mrs_en) { /* unique mode registers are supported */
+ for (i = 1; i < c->chip_selects_per_ctrl; i++) {
+ if (!rtt_park &&
+ (ddr->cs[i].config & SDRAM_CS_CONFIG_EN)) {
+ esdmode5 |= 0x00000500; /* RTT_PARK */
+ rtt_park = four_cs ? 0 : 1;
+ } else {
+ esdmode5 = 0x00000400;
+ }
+
+ if ((ddr->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN) &&
+ (!is_ddr4(popts) || !popts->registered_dimm_en)) {
+ if (mclk_ps >= 935) {
+ /* for DDR4-1600/1866/2133 */
+ esdmode5 |= DDR_MR5_CA_PARITY_LAT_4_CLK;
+ } else if (mclk_ps >= 833) {
+ /* for DDR4-2400 */
+ esdmode5 |= DDR_MR5_CA_PARITY_LAT_5_CLK;
+ } else {
+ printf("parity: mclk_ps = %d not supported\n",
+ mclk_ps);
+ }
+ }
+
+ switch (i) {
+ case 1:
+ ddr->ddr_sdram_mode_11 = (0
+ | ((esdmode4 & 0xFFFF) << 16)
+ | ((esdmode5 & 0xFFFF) << 0)
+ );
+ break;
+ case 2:
+ ddr->ddr_sdram_mode_13 = (0
+ | ((esdmode4 & 0xFFFF) << 16)
+ | ((esdmode5 & 0xFFFF) << 0)
+ );
+ break;
+ case 3:
+ ddr->ddr_sdram_mode_15 = (0
+ | ((esdmode4 & 0xFFFF) << 16)
+ | ((esdmode5 & 0xFFFF) << 0)
+ );
+ break;
+ }
+ }
+ debug("FSLDDR: ddr_sdram_mode_11 = 0x%08x\n",
+ ddr->ddr_sdram_mode_11);
+ debug("FSLDDR: ddr_sdram_mode_13 = 0x%08x\n",
+ ddr->ddr_sdram_mode_13);
+ debug("FSLDDR: ddr_sdram_mode_15 = 0x%08x\n",
+ ddr->ddr_sdram_mode_15);
+ }
+}
+
+/* DDR SDRAM Mode configuration 10 (DDR_SDRAM_MODE_10) */
+static void set_ddr_sdram_mode_10(struct fsl_ddr_controller *c,
+ const unsigned int unq_mrs_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ int i;
+ unsigned short esdmode6 = 0; /* Extended SDRAM mode 6 */
+ unsigned short esdmode7 = 0; /* Extended SDRAM mode 7 */
+ unsigned int tccdl_min = picos_to_mclk(c, common_dimm->tccdl_ps);
+
+ esdmode6 = ((tccdl_min - 4) & 0x7) << 10;
+
+ if (popts->ddr_cdr2 & DDR_CDR2_VREF_RANGE_2)
+ esdmode6 |= 1 << 6; /* Range 2 */
+
+ ddr->ddr_sdram_mode_10 = (0
+ | ((esdmode6 & 0xffff) << 16)
+ | ((esdmode7 & 0xffff) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_mode_10 = 0x%08x\n", ddr->ddr_sdram_mode_10);
+ if (unq_mrs_en) { /* unique mode registers are supported */
+ for (i = 1; i < c->chip_selects_per_ctrl; i++) {
+ switch (i) {
+ case 1:
+ ddr->ddr_sdram_mode_12 = (0
+ | ((esdmode6 & 0xFFFF) << 16)
+ | ((esdmode7 & 0xFFFF) << 0)
+ );
+ break;
+ case 2:
+ ddr->ddr_sdram_mode_14 = (0
+ | ((esdmode6 & 0xFFFF) << 16)
+ | ((esdmode7 & 0xFFFF) << 0)
+ );
+ break;
+ case 3:
+ ddr->ddr_sdram_mode_16 = (0
+ | ((esdmode6 & 0xFFFF) << 16)
+ | ((esdmode7 & 0xFFFF) << 0)
+ );
+ break;
+ }
+ }
+ debug("FSLDDR: ddr_sdram_mode_12 = 0x%08x\n",
+ ddr->ddr_sdram_mode_12);
+ debug("FSLDDR: ddr_sdram_mode_14 = 0x%08x\n",
+ ddr->ddr_sdram_mode_14);
+ debug("FSLDDR: ddr_sdram_mode_16 = 0x%08x\n",
+ ddr->ddr_sdram_mode_16);
+ }
+}
+
+/* DDR SDRAM Interval Configuration (DDR_SDRAM_INTERVAL) */
+static void set_ddr_sdram_interval(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ unsigned int refint; /* Refresh interval */
+ unsigned int bstopre; /* Precharge interval */
+
+ refint = picos_to_mclk(c, common_dimm->refresh_rate_ps);
+
+ bstopre = popts->bstopre;
+
+ /* refint field used 0x3FFF in earlier controllers */
+ ddr->ddr_sdram_interval = (0
+ | ((refint & 0xFFFF) << 16)
+ | ((bstopre & 0x3FFF) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_interval = 0x%08x\n", ddr->ddr_sdram_interval);
+}
+
+/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */
+static void set_ddr_sdram_mode_ddr4(struct fsl_ddr_controller *c,
+ unsigned int cas_latency,
+ unsigned int additive_latency,
+ const unsigned int unq_mrs_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ int i;
+ unsigned short esdmode; /* Extended SDRAM mode */
+ unsigned short sdmode; /* SDRAM mode */
+
+ /* Mode Register - MR1 */
+ unsigned int qoff = 0; /* Output buffer enable 0=yes, 1=no */
+ unsigned int tdqs_en = 0; /* TDQS Enable: 0=no, 1=yes */
+ unsigned int rtt;
+ unsigned int wrlvl_en = 0; /* Write level enable: 0=no, 1=yes */
+ unsigned int al = 0; /* Posted CAS# additive latency (AL) */
+ unsigned int dic = 0; /* Output driver impedance, 40ohm */
+ unsigned int dll_en = 1; /* DLL Enable 1=Enable (Normal),
+ 0=Disable (Test/Debug) */
+
+ /* Mode Register - MR0 */
+ unsigned int wr = 0; /* Write Recovery */
+ unsigned int dll_rst; /* DLL Reset */
+ unsigned int mode; /* Normal=0 or Test=1 */
+ unsigned int caslat = 4; /* CAS# latency, default set as 6 cycles */
+ /* BT: Burst Type (0=Nibble Sequential, 1=Interleaved) */
+ unsigned int bt;
+ unsigned int bl; /* BL: Burst Length */
+
+ unsigned int wr_mclk;
+ /* DDR4 support WR 10, 12, 14, 16, 18, 20, 24 */
+ static const u8 wr_table[] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 6};
+ /* DDR4 support CAS 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24 */
+ static const u8 cas_latency_table[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11};
+
+ if (popts->rtt_override)
+ rtt = popts->rtt_override_value;
+ else
+ rtt = popts->cs_local_opts[0].odt_rtt_norm;
+
+ if (additive_latency == (cas_latency - 1))
+ al = 1;
+ if (additive_latency == (cas_latency - 2))
+ al = 2;
+
+ if (popts->quad_rank_present)
+ dic = 1; /* output driver impedance 240/7 ohm */
+
+ /*
+ * The esdmode value will also be used for writing
+ * MR1 during write leveling for DDR3, although the
+ * bits specifically related to the write leveling
+ * scheme will be handled automatically by the DDR
+ * controller, so we set wrlvl_en = 0 here.
+ */
+ esdmode = (0
+ | ((qoff & 0x1) << 12)
+ | ((tdqs_en & 0x1) << 11)
+ | ((rtt & 0x7) << 8)
+ | ((wrlvl_en & 0x1) << 7)
+ | ((al & 0x3) << 3)
+ | ((dic & 0x3) << 1) /* DIC field is split */
+ | ((dll_en & 0x1) << 0)
+ );
+
+ /*
+ * DLL control for precharge PD
+ * 0=slow exit DLL off (tXPDLL)
+ * 1=fast exit DLL on (tXP)
+ */
+
+ wr_mclk = picos_to_mclk(c, common_dimm->twr_ps);
+ if (wr_mclk <= 24) {
+ wr = wr_table[wr_mclk - 10];
+ } else {
+ printf("Error: unsupported write recovery for mode register wr_mclk = %d\n",
+ wr_mclk);
+ }
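+ /* Example (illustrative, not in the original): assuming tWR = 15 ns and mclk_ps = 1250 (DDR4-1600), wr_mclk = 12 and wr = wr_table[2] = 1 */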
+
+ dll_rst = 0; /* dll no reset */
+ mode = 0; /* normal mode */
+
+ /* look up table to get the cas latency bits */
+ if (cas_latency >= 9 && cas_latency <= 24)
+ caslat = cas_latency_table[cas_latency - 9];
+ else
+ printf("Error: unsupported cas latency for mode register\n");
+
+ bt = 0; /* Nibble sequential */
+
+ switch (popts->burst_length) {
+ case DDR_BL8:
+ bl = 0;
+ break;
+ case DDR_OTF:
+ bl = 1;
+ break;
+ case DDR_BC4:
+ bl = 2;
+ break;
+ default:
+ printf("Error: invalid burst length of %u specified. ",
+ popts->burst_length);
+ printf("Defaulting to on-the-fly BC4 or BL8 beats.\n");
+ bl = 1;
+ break;
+ }
+
+ sdmode = (0
+ | ((wr & 0x7) << 9)
+ | ((dll_rst & 0x1) << 8)
+ | ((mode & 0x1) << 7)
+ | (((caslat >> 1) & 0x7) << 4)
+ | ((bt & 0x1) << 3)
+ | ((caslat & 1) << 2)
+ | ((bl & 0x3) << 0)
+ );
+
+ ddr->ddr_sdram_mode = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+
+ debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode);
+
+ if (unq_mrs_en) { /* unique mode registers are supported */
+ for (i = 1; i < c->chip_selects_per_ctrl; i++) {
+ if (popts->rtt_override)
+ rtt = popts->rtt_override_value;
+ else
+ rtt = popts->cs_local_opts[i].odt_rtt_norm;
+
+ esdmode &= 0xF8FF; /* clear bit 10,9,8 for rtt */
+ esdmode |= (rtt & 0x7) << 8;
+ switch (i) {
+ case 1:
+ ddr->ddr_sdram_mode_3 = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+ break;
+ case 2:
+ ddr->ddr_sdram_mode_5 = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+ break;
+ case 3:
+ ddr->ddr_sdram_mode_7 = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+ break;
+ }
+ }
+ debug("FSLDDR: ddr_sdram_mode_3 = 0x%08x\n",
+ ddr->ddr_sdram_mode_3);
+ debug("FSLDDR: ddr_sdram_mode_5 = 0x%08x\n",
+ ddr->ddr_sdram_mode_5);
+ debug("FSLDDR: ddr_sdram_mode_7 = 0x%08x\n",
+ ddr->ddr_sdram_mode_7);
+ }
+}
+
+/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */
+static void set_ddr_sdram_mode_ddr3(struct fsl_ddr_controller *c,
+ unsigned int cas_latency,
+ unsigned int additive_latency,
+ const unsigned int unq_mrs_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ int i;
+ unsigned short esdmode; /* Extended SDRAM mode */
+ unsigned short sdmode; /* SDRAM mode */
+
+ /* Mode Register - MR1 */
+ unsigned int qoff = 0; /* Output buffer enable 0=yes, 1=no */
+ unsigned int tdqs_en = 0; /* TDQS Enable: 0=no, 1=yes */
+ unsigned int rtt;
+ unsigned int wrlvl_en = 0; /* Write level enable: 0=no, 1=yes */
+ unsigned int al = 0; /* Posted CAS# additive latency (AL) */
+ unsigned int dic = 0; /* Output driver impedance, 40ohm */
+ unsigned int dll_en = 0; /* DLL Enable 0=Enable (Normal),
+ 1=Disable (Test/Debug) */
+
+ /* Mode Register - MR0 */
+ unsigned int dll_on; /* DLL control for precharge PD, 0=off, 1=on */
+ unsigned int wr = 0; /* Write Recovery */
+ unsigned int dll_rst; /* DLL Reset */
+ unsigned int mode; /* Normal=0 or Test=1 */
+ unsigned int caslat = 4; /* CAS# latency, default set as 6 cycles */
+ /* BT: Burst Type (0=Nibble Sequential, 1=Interleaved) */
+ unsigned int bt;
+ unsigned int bl; /* BL: Burst Length */
+
+ unsigned int wr_mclk;
+ /*
+ * DDR_SDRAM_MODE doesn't support 9, 11, 13, 15.
+ * Please refer to JEDEC Standard No. 79-3E, Mode Register MR0,
+ * for this table.
+ */
+ static const u8 wr_table[] = {1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
+
+ if (popts->rtt_override)
+ rtt = popts->rtt_override_value;
+ else
+ rtt = popts->cs_local_opts[0].odt_rtt_norm;
+
+ if (additive_latency == (cas_latency - 1))
+ al = 1;
+ if (additive_latency == (cas_latency - 2))
+ al = 2;
+
+ if (popts->quad_rank_present)
+ dic = 1; /* output driver impedance 240/7 ohm */
+
+ /*
+ * The esdmode value will also be used for writing
+ * MR1 during write leveling for DDR3, although the
+ * bits specifically related to the write leveling
+ * scheme will be handled automatically by the DDR
+ * controller, so we set wrlvl_en = 0 here.
+ */
+ esdmode = (0
+ | ((qoff & 0x1) << 12)
+ | ((tdqs_en & 0x1) << 11)
+ | ((rtt & 0x4) << 7) /* rtt field is split */
+ | ((wrlvl_en & 0x1) << 7)
+ | ((rtt & 0x2) << 5) /* rtt field is split */
+ | ((dic & 0x2) << 4) /* DIC field is split */
+ | ((al & 0x3) << 3)
+ | ((rtt & 0x1) << 2) /* rtt field is split */
+ | ((dic & 0x1) << 1) /* DIC field is split */
+ | ((dll_en & 0x1) << 0)
+ );
+
+ /*
+ * DLL control for precharge PD
+ * 0=slow exit DLL off (tXPDLL)
+ * 1=fast exit DLL on (tXP)
+ */
+ dll_on = 1;
+
+ wr_mclk = picos_to_mclk(c, common_dimm->twr_ps);
+ if (wr_mclk <= 16) {
+ wr = wr_table[wr_mclk - 5];
+ } else {
+ printf("Error: unsupported write recovery for mode register "
+ "wr_mclk = %d\n", wr_mclk);
+ }
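+ /* Example (illustrative, not in the original): assuming tWR = 15 ns and mclk_ps = 1250 (DDR3-1600), wr_mclk = 12 and wr = wr_table[7] = 6 */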
+
+ dll_rst = 0; /* dll no reset */
+ mode = 0; /* normal mode */
+
+ /* look up table to get the cas latency bits */
+ if (cas_latency >= 5 && cas_latency <= 16) {
+ unsigned char cas_latency_table[] = {
+ 0x2, /* 5 clocks */
+ 0x4, /* 6 clocks */
+ 0x6, /* 7 clocks */
+ 0x8, /* 8 clocks */
+ 0xa, /* 9 clocks */
+ 0xc, /* 10 clocks */
+ 0xe, /* 11 clocks */
+ 0x1, /* 12 clocks */
+ 0x3, /* 13 clocks */
+ 0x5, /* 14 clocks */
+ 0x7, /* 15 clocks */
+ 0x9, /* 16 clocks */
+ };
+ caslat = cas_latency_table[cas_latency - 5];
+ } else {
+ printf("Error: unsupported cas latency for mode register\n");
+ }
+
+ bt = 0; /* Nibble sequential */
+
+ switch (popts->burst_length) {
+ case DDR_BL8:
+ bl = 0;
+ break;
+ case DDR_OTF:
+ bl = 1;
+ break;
+ case DDR_BC4:
+ bl = 2;
+ break;
+ default:
+ printf("Error: invalid burst length of %u specified. "
+ " Defaulting to on-the-fly BC4 or BL8 beats.\n",
+ popts->burst_length);
+ bl = 1;
+ break;
+ }
+
+ sdmode = (0
+ | ((dll_on & 0x1) << 12)
+ | ((wr & 0x7) << 9)
+ | ((dll_rst & 0x1) << 8)
+ | ((mode & 0x1) << 7)
+ | (((caslat >> 1) & 0x7) << 4)
+ | ((bt & 0x1) << 3)
+ | ((caslat & 1) << 2)
+ | ((bl & 0x3) << 0)
+ );
+
+ ddr->ddr_sdram_mode = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+
+ debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode);
+
+ if (unq_mrs_en) { /* unique mode registers are supported */
+ for (i = 1; i < c->chip_selects_per_ctrl; i++) {
+ if (popts->rtt_override)
+ rtt = popts->rtt_override_value;
+ else
+ rtt = popts->cs_local_opts[i].odt_rtt_norm;
+
+ esdmode &= 0xFDBB; /* clear bit 9,6,2 */
+ esdmode |= (0
+ | ((rtt & 0x4) << 7) /* rtt field is split */
+ | ((rtt & 0x2) << 5) /* rtt field is split */
+ | ((rtt & 0x1) << 2) /* rtt field is split */
+ );
+ switch (i) {
+ case 1:
+ ddr->ddr_sdram_mode_3 = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+ break;
+ case 2:
+ ddr->ddr_sdram_mode_5 = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+ break;
+ case 3:
+ ddr->ddr_sdram_mode_7 = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+ break;
+ }
+ }
+ debug("FSLDDR: ddr_sdram_mode_3 = 0x%08x\n",
+ ddr->ddr_sdram_mode_3);
+ debug("FSLDDR: ddr_sdram_mode_5 = 0x%08x\n",
+ ddr->ddr_sdram_mode_5);
+ debug("FSLDDR: ddr_sdram_mode_7 = 0x%08x\n",
+ ddr->ddr_sdram_mode_7);
+ }
+}
+
+static void set_ddr_sdram_mode_ddr12(struct fsl_ddr_controller *c,
+ unsigned int cas_latency,
+ unsigned int additive_latency,
+ const unsigned int unq_mrs_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ unsigned short esdmode; /* Extended SDRAM mode */
+ unsigned short sdmode; /* SDRAM mode */
+
+ /*
+ * FIXME: This ought to be pre-calculated in a
+ * technology-specific routine,
+ * e.g. compute_DDR2_mode_register(), and then the
+ * sdmode and esdmode passed in as part of common_dimm.
+ */
+
+ /* Extended Mode Register */
+ unsigned int mrs = 0; /* Mode Register Set */
+ unsigned int outputs = 0; /* 0=Enabled, 1=Disabled */
+ unsigned int rdqs_en = 0; /* RDQS Enable: 0=no, 1=yes */
+ unsigned int dqs_en = 0; /* DQS# Enable: 0=enable, 1=disable */
+ unsigned int ocd = 0; /* 0x0=OCD not supported,
+ 0x7=OCD default state */
+ unsigned int rtt;
+ unsigned int al; /* Posted CAS# additive latency (AL) */
+ unsigned int ods = 0; /* Output Drive Strength:
+ 0 = Full strength (18ohm)
+ 1 = Reduced strength (4ohm) */
+ unsigned int dll_en = 0; /* DLL Enable 0=Enable (Normal),
+ 1=Disable (Test/Debug) */
+
+ /* Mode Register (MR) */
+ unsigned int mr; /* Mode Register Definition */
+ unsigned int pd; /* Power-Down Mode */
+ unsigned int wr; /* Write Recovery */
+ unsigned int dll_res; /* DLL Reset */
+ unsigned int mode; /* Normal=0 or Test=1 */
+ unsigned int caslat = 0;/* CAS# latency */
+ /* BT: Burst Type (0=Sequential, 1=Interleaved) */
+ unsigned int bt;
+ unsigned int bl; /* BL: Burst Length */
+
+ dqs_en = !popts->dqs_config;
+ rtt = fsl_ddr_get_rtt(popts);
+
+ al = additive_latency;
+
+ esdmode = (0
+ | ((mrs & 0x3) << 14)
+ | ((outputs & 0x1) << 12)
+ | ((rdqs_en & 0x1) << 11)
+ | ((dqs_en & 0x1) << 10)
+ | ((ocd & 0x7) << 7)
+ | ((rtt & 0x2) << 5) /* rtt field is split */
+ | ((al & 0x7) << 3)
+ | ((rtt & 0x1) << 2) /* rtt field is split */
+ | ((ods & 0x1) << 1)
+ | ((dll_en & 0x1) << 0)
+ );
+
+ mr = 0; /* FIXME: CHECKME */
+
+ /*
+ * 0 = Fast Exit (Normal)
+ * 1 = Slow Exit (Low Power)
+ */
+ pd = 0;
+
+ if (is_ddr1(popts))
+ wr = 0; /* Historical */
+ else
+ wr = picos_to_mclk(c, common_dimm->twr_ps);
+
+ dll_res = 0;
+ mode = 0;
+
+ if (is_ddr1(popts)) {
+ if (1 <= cas_latency && cas_latency <= 4) {
+ unsigned char mode_caslat_table[4] = {
+ 0x5, /* 1.5 clocks */
+ 0x2, /* 2.0 clocks */
+ 0x6, /* 2.5 clocks */
+ 0x3 /* 3.0 clocks */
+ };
+ caslat = mode_caslat_table[cas_latency - 1];
+ } else {
+ printf("Warning: unknown cas_latency %d\n", cas_latency);
+ }
+ } else if (is_ddr2(popts)) {
+ caslat = cas_latency;
+ }
+
+ bt = 0;
+
+ switch (popts->burst_length) {
+ case DDR_BL4:
+ bl = 2;
+ break;
+ case DDR_BL8:
+ bl = 3;
+ break;
+ default:
+ printf("Error: invalid burst length of %u specified. "
+ " Defaulting to 4 beats.\n",
+ popts->burst_length);
+ bl = 2;
+ break;
+ }
+
+ sdmode = (0
+ | ((mr & 0x3) << 14)
+ | ((pd & 0x1) << 12)
+ | ((wr & 0x7) << 9)
+ | ((dll_res & 0x1) << 8)
+ | ((mode & 0x1) << 7)
+ | ((caslat & 0x7) << 4)
+ | ((bt & 0x1) << 3)
+ | ((bl & 0x7) << 0)
+ );
+
+ ddr->ddr_sdram_mode = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode);
+}
+
+/*
+ * DDR SDRAM Clock Control (DDR_SDRAM_CLK_CNTL)
+ * The old controller on the 8540/60 doesn't have this register.
+ * Hope it's OK to set it (to 0) anyway.
+ */
+static void set_ddr_sdram_clk_cntl(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ unsigned int clk_adjust; /* Clock adjust */
+ unsigned int ss_en = 0; /* Source synchronous enable */
+
+ if (fsl_ddr_get_version(c) >= 0x40701) {
+ /* clk_adjust in 5-bits on T-series and LS-series */
+ clk_adjust = (popts->clk_adjust & 0x1F) << 22;
+ } else {
+ /* clk_adjust in 4-bits on earlier MPC85xx and P-series */
+ clk_adjust = (popts->clk_adjust & 0xF) << 23;
+ }
+
+ ddr->ddr_sdram_clk_cntl = (0
+ | ((ss_en & 0x1) << 31)
+ | clk_adjust
+ );
+ debug("FSLDDR: clk_cntl = 0x%08x\n", ddr->ddr_sdram_clk_cntl);
+}
+
+/* DDR Initialization Address (DDR_INIT_ADDR) */
+static void set_ddr_init_addr(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ unsigned int init_addr = 0; /* Initialization address */
+
+ ddr->ddr_init_addr = init_addr;
+}
+
+/* DDR Initialization Address (DDR_INIT_EXT_ADDR) */
+static void set_ddr_init_ext_addr(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ unsigned int uia = 0; /* Use initialization address */
+ unsigned int init_ext_addr = 0; /* Initialization address */
+
+ ddr->ddr_init_ext_addr = (0
+ | ((uia & 0x1) << 31)
+ | (init_ext_addr & 0xF)
+ );
+}
+
+/* DDR SDRAM Timing Configuration 4 (TIMING_CFG_4) */
+static void set_timing_cfg_4(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ unsigned int rwt = 0; /* Read-to-write turnaround for same CS */
+ unsigned int wrt = 0; /* Write-to-read turnaround for same CS */
+ unsigned int rrt = 0; /* Read-to-read turnaround for same CS */
+ unsigned int wwt = 0; /* Write-to-write turnaround for same CS */
+ unsigned int trwt_mclk = 0; /* ext_rwt */
+ unsigned int dll_lock = 0; /* DDR SDRAM DLL Lock Time */
+
+ if (is_ddr3_4(popts)) {
+ if (popts->burst_length == DDR_BL8) {
+ /* We set BL/2 for fixed BL8 */
+ rrt = 0; /* BL/2 clocks */
+ wwt = 0; /* BL/2 clocks */
+ } else {
+ /* We need to set BL/2 + 2 for BC4 and OTF */
+ rrt = 2; /* BL/2 + 2 clocks */
+ wwt = 2; /* BL/2 + 2 clocks */
+ }
+ }
+
+ if (is_ddr4(popts))
+ dll_lock = 2; /* tDLLK = 1024 clocks */
+ else if (is_ddr3(popts))
+ dll_lock = 1; /* tDLLK = 512 clocks from spec */
+
+ if (popts->trwt_override)
+ trwt_mclk = popts->trwt;
+
+ ddr->timing_cfg_4 = (0
+ | ((rwt & 0xf) << 28)
+ | ((wrt & 0xf) << 24)
+ | ((rrt & 0xf) << 20)
+ | ((wwt & 0xf) << 16)
+ | ((trwt_mclk & 0xc) << 12)
+ | (dll_lock & 0x3)
+ );
+ debug("FSLDDR: timing_cfg_4 = 0x%08x\n", ddr->timing_cfg_4);
+}
+
+/* DDR SDRAM Timing Configuration 5 (TIMING_CFG_5) */
+static void set_timing_cfg_5(struct fsl_ddr_controller *c, unsigned int cas_latency)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ unsigned int rodt_on = 0; /* Read to ODT on */
+ unsigned int rodt_off = 0; /* Read to ODT off */
+ unsigned int wodt_on = 0; /* Write to ODT on */
+ unsigned int wodt_off = 0; /* Write to ODT off */
+
+ if (is_ddr3_4(popts)) {
+ unsigned int wr_lat = ((ddr->timing_cfg_2 & 0x00780000) >> 19) +
+ ((ddr->timing_cfg_2 & 0x00040000) >> 14);
+ /* rodt_on = timing_cfg_1[caslat] - timing_cfg_2[wrlat] + 1 */
+ if (cas_latency >= wr_lat)
+ rodt_on = cas_latency - wr_lat + 1;
+ rodt_off = 4; /* 4 clocks */
+ wodt_on = 1; /* 1 clock */
+ wodt_off = 4; /* 4 clocks */
+ }
+
+ ddr->timing_cfg_5 = (0
+ | ((rodt_on & 0x1f) << 24)
+ | ((rodt_off & 0x7) << 20)
+ | ((wodt_on & 0x1f) << 12)
+ | ((wodt_off & 0x7) << 8)
+ );
+ debug("FSLDDR: timing_cfg_5 = 0x%08x\n", ddr->timing_cfg_5);
+}
+
+static void set_timing_cfg_6(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ unsigned int hs_caslat = 0;
+ unsigned int hs_wrlat = 0;
+ unsigned int hs_wrrec = 0;
+ unsigned int hs_clkadj = 0;
+ unsigned int hs_wrlvl_start = 0;
+
+ ddr->timing_cfg_6 = (0
+ | ((hs_caslat & 0x1f) << 24)
+ | ((hs_wrlat & 0x1f) << 19)
+ | ((hs_wrrec & 0x1f) << 12)
+ | ((hs_clkadj & 0x1f) << 6)
+ | ((hs_wrlvl_start & 0x1f) << 0)
+ );
+ debug("FSLDDR: timing_cfg_6 = 0x%08x\n", ddr->timing_cfg_6);
+}
+
+static void set_timing_cfg_7(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ unsigned int txpr, tcksre, tcksrx;
+ unsigned int cke_rst, cksre, cksrx, par_lat = 0, cs_to_cmd;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(c);
+
+ txpr = max(5U, picos_to_mclk(c, common_dimm->trfc1_ps + 10000));
+ tcksre = max(5U, picos_to_mclk(c, 10000));
+ tcksrx = max(5U, picos_to_mclk(c, 10000));
+
+ if (ddr->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN && is_ddr4(popts)) {
+ /* for DDR4 only */
+ par_lat = (ddr->ddr_sdram_rcw_2 & 0xf) + 1;
+ debug("PAR_LAT = %u for mclk_ps = %d\n", par_lat, mclk_ps);
+ }
+
+ cs_to_cmd = 0;
+
+ if (txpr <= 200)
+ cke_rst = 0;
+ else if (txpr <= 256)
+ cke_rst = 1;
+ else if (txpr <= 512)
+ cke_rst = 2;
+ else
+ cke_rst = 3;
+
+ if (tcksre <= 19)
+ cksre = tcksre - 5;
+ else
+ cksre = 15;
+
+ if (tcksrx <= 19)
+ cksrx = tcksrx - 5;
+ else
+ cksrx = 15;
+
+ ddr->timing_cfg_7 = (0
+ | ((cke_rst & 0x3) << 28)
+ | ((cksre & 0xf) << 24)
+ | ((cksrx & 0xf) << 20)
+ | ((par_lat & 0xf) << 16)
+ | ((cs_to_cmd & 0xf) << 4)
+ );
+ debug("FSLDDR: timing_cfg_7 = 0x%08x\n", ddr->timing_cfg_7);
+}
+
+static void set_timing_cfg_8(struct fsl_ddr_controller *c, unsigned int cas_latency)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ int rwt_bg, wrt_bg, rrt_bg, wwt_bg;
+ unsigned int acttoact_bg, wrtord_bg, pre_all_rec;
+ int tccdl = picos_to_mclk(c, common_dimm->tccdl_ps);
+ int wr_lat = ((ddr->timing_cfg_2 & 0x00780000) >> 19) +
+ ((ddr->timing_cfg_2 & 0x00040000) >> 14);
+
+ rwt_bg = cas_latency + 2 + 4 - wr_lat;
+ if (rwt_bg < tccdl)
+ rwt_bg = tccdl - rwt_bg;
+ else
+ rwt_bg = 0;
+
+ wrt_bg = wr_lat + 4 + 1 - cas_latency;
+ if (wrt_bg < tccdl)
+ wrt_bg = tccdl - wrt_bg;
+ else
+ wrt_bg = 0;
+
+ if (popts->burst_length == DDR_BL8) {
+ rrt_bg = tccdl - 4;
+ wwt_bg = tccdl - 4;
+ } else {
+ rrt_bg = tccdl - 2;
+ wwt_bg = tccdl - 2;
+ }
+
+ acttoact_bg = picos_to_mclk(c, common_dimm->trrdl_ps);
+ wrtord_bg = max(4U, picos_to_mclk(c, 7500));
+ if (popts->otf_burst_chop_en)
+ wrtord_bg += 2;
+
+ pre_all_rec = 0;
+
+ ddr->timing_cfg_8 = (0
+ | ((rwt_bg & 0xf) << 28)
+ | ((wrt_bg & 0xf) << 24)
+ | ((rrt_bg & 0xf) << 20)
+ | ((wwt_bg & 0xf) << 16)
+ | ((acttoact_bg & 0xf) << 12)
+ | ((wrtord_bg & 0xf) << 8)
+ | ((pre_all_rec & 0x1f) << 0)
+ );
+
+ debug("FSLDDR: timing_cfg_8 = 0x%08x\n", ddr->timing_cfg_8);
+}
+
+static void set_timing_cfg_9(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ unsigned int refrec_cid_mclk = 0;
+ unsigned int acttoact_cid_mclk = 0;
+
+ if (popts->package_3ds) {
+ refrec_cid_mclk =
+ picos_to_mclk(c, common_dimm->trfc_slr_ps);
+ acttoact_cid_mclk = 4U; /* tRRDS_slr */
+ }
+
+ ddr->timing_cfg_9 = (refrec_cid_mclk & 0x3ff) << 16 |
+ (acttoact_cid_mclk & 0xf) << 8;
+
+ debug("FSLDDR: timing_cfg_9 = 0x%08x\n", ddr->timing_cfg_9);
+}
+
+/* This function needs to be called after set_ddr_sdram_cfg() is called */
+static void set_ddr_dq_mapping(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const struct dimm_params *dimm_params = c->dimm_params;
+ unsigned int acc_ecc_en = (ddr->ddr_sdram_cfg >> 2) & 0x1;
+ int i;
+
+ for (i = 0; i < c->dimm_slots_per_ctrl; i++) {
+ if (dimm_params[i].n_ranks)
+ break;
+ }
+ if (i >= c->dimm_slots_per_ctrl) {
+ printf("DDR error: no DIMM found!\n");
+ return;
+ }
+
+ ddr->dq_map_0 = ((dimm_params[i].dq_mapping[0] & 0x3F) << 26) |
+ ((dimm_params[i].dq_mapping[1] & 0x3F) << 20) |
+ ((dimm_params[i].dq_mapping[2] & 0x3F) << 14) |
+ ((dimm_params[i].dq_mapping[3] & 0x3F) << 8) |
+ ((dimm_params[i].dq_mapping[4] & 0x3F) << 2);
+
+ ddr->dq_map_1 = ((dimm_params[i].dq_mapping[5] & 0x3F) << 26) |
+ ((dimm_params[i].dq_mapping[6] & 0x3F) << 20) |
+ ((dimm_params[i].dq_mapping[7] & 0x3F) << 14) |
+ ((dimm_params[i].dq_mapping[10] & 0x3F) << 8) |
+ ((dimm_params[i].dq_mapping[11] & 0x3F) << 2);
+
+ ddr->dq_map_2 = ((dimm_params[i].dq_mapping[12] & 0x3F) << 26) |
+ ((dimm_params[i].dq_mapping[13] & 0x3F) << 20) |
+ ((dimm_params[i].dq_mapping[14] & 0x3F) << 14) |
+ ((dimm_params[i].dq_mapping[15] & 0x3F) << 8) |
+ ((dimm_params[i].dq_mapping[16] & 0x3F) << 2);
+
+ /* dq_map for ECC[4:7] is set to 0 if accumulated ECC is enabled */
+ ddr->dq_map_3 = ((dimm_params[i].dq_mapping[17] & 0x3F) << 26) |
+ ((dimm_params[i].dq_mapping[8] & 0x3F) << 20) |
+ (acc_ecc_en ? 0 :
+ (dimm_params[i].dq_mapping[9] & 0x3F) << 14) |
+ dimm_params[i].dq_mapping_ors;
+
+ debug("FSLDDR: dq_map_0 = 0x%08x\n", ddr->dq_map_0);
+ debug("FSLDDR: dq_map_1 = 0x%08x\n", ddr->dq_map_1);
+ debug("FSLDDR: dq_map_2 = 0x%08x\n", ddr->dq_map_2);
+ debug("FSLDDR: dq_map_3 = 0x%08x\n", ddr->dq_map_3);
+}
+
+static void set_ddr_sdram_cfg_3(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ int rd_pre;
+
+ rd_pre = popts->quad_rank_present ? 1 : 0;
+
+ ddr->ddr_sdram_cfg_3 = (rd_pre & 0x1) << 16;
+ /* Disable MRS on parity error for RDIMMs */
+ ddr->ddr_sdram_cfg_3 |= popts->registered_dimm_en ? 1 : 0;
+
+ if (popts->package_3ds) { /* only 2,4,8 are supported */
+ if ((popts->package_3ds + 1) & 0x1) {
+ printf("Error: Unsupported 3DS DIMM with %d die\n",
+ popts->package_3ds + 1);
+ } else {
+ ddr->ddr_sdram_cfg_3 |= ((popts->package_3ds + 1) >> 1)
+ << 4;
+ }
+ }
+
+ debug("FSLDDR: ddr_sdram_cfg_3 = 0x%08x\n", ddr->ddr_sdram_cfg_3);
+}
+
+/* DDR ZQ Calibration Control (DDR_ZQ_CNTL) */
+static void set_ddr_zq_cntl(struct fsl_ddr_controller *c,
+ unsigned int zq_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+ unsigned int zqinit; /* POR ZQ Calibration Time (tZQinit) */
+ unsigned int zqoper; /* Normal Operation Full Calibration Time (tZQoper) */
+ unsigned int zqcs; /* Normal Operation Short Calibration Time (tZQCS) */
+ unsigned int zqcs_init;
+
+ if (!zq_en) {
+ ddr->ddr_zq_cntl = 0;
+ goto out;
+ }
+
+ if (is_ddr4(popts)) {
+ zqinit = 10; /* 1024 clocks */
+ zqoper = 9; /* 512 clocks */
+ zqcs = 7; /* 128 clocks */
+ zqcs_init = 5; /* 1024 refresh sequences */
+ } else {
+ zqinit = 9; /* 512 clocks */
+ zqoper = 8; /* 256 clocks */
+ zqcs = 6; /* 64 clocks */
+ zqcs_init = 0;
+ }
+
+ ddr->ddr_zq_cntl = ((zq_en & 0x1) << 31)
+ | ((zqinit & 0xF) << 24)
+ | ((zqoper & 0xF) << 16)
+ | ((zqcs & 0xF) << 8)
+ | (zqcs_init & 0xF);
+
+out:
+ debug("FSLDDR: zq_cntl = 0x%08x\n", ddr->ddr_zq_cntl);
+}
+
+/* DDR Write Leveling Control (DDR_WRLVL_CNTL) */
+static void set_ddr_wrlvl_cntl(struct fsl_ddr_controller *c, unsigned int wrlvl_en)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+
+ /*
+ * First DQS pulse rising edge after margining mode
+ * is programmed (tWL_MRD)
+ */
+ unsigned int wrlvl_mrd = 0;
+ /* ODT delay after margining mode is programmed (tWL_ODTEN) */
+ unsigned int wrlvl_odten = 0;
+ /* DQS/DQS_ delay after margining mode is programmed (tWL_DQSEN) */
+ unsigned int wrlvl_dqsen = 0;
+ /* WRLVL_SMPL: Write leveling sample time */
+ unsigned int wrlvl_smpl = 0;
+ /* WRLVL_WLR: Write leveling repetition time */
+ unsigned int wrlvl_wlr = 0;
+ /* WRLVL_START: Write leveling start time */
+ unsigned int wrlvl_start = 0;
+
+ /* Write leveling is suggested for DDR3 due to its fly-by topology */
+ if (wrlvl_en) {
+ /* tWL_MRD min = 40 nCK, we set it to 64 */
+ wrlvl_mrd = 0x6;
+ /* tWL_ODTEN 128 */
+ wrlvl_odten = 0x7;
+ /* tWL_DQSEN min = 25 nCK, we set it to 32 */
+ wrlvl_dqsen = 0x5;
+ /*
+ * The write leveling sample time needs to be at least 6 clocks
+ * higher than tWLO to allow enough time for propagation
+ * delay and sampling of the prime data bits.
+ */
+ wrlvl_smpl = 0xf;
+ /*
+ * Write leveling repetition time:
+ * at least tWLO + 6 clocks;
+ * we set it to 64.
+ */
+ wrlvl_wlr = 0x6;
+ /*
+ * Write leveling start time:
+ * the value used for DQS_ADJUST for the first sample
+ * when write leveling is enabled. It probably needs to be
+ * overridden per platform.
+ */
+ wrlvl_start = 0x8;
+ /*
+ * Override the write leveling sample and start time
+ * according to specific board
+ */
+ if (popts->wrlvl_override) {
+ wrlvl_smpl = popts->wrlvl_sample;
+ wrlvl_start = popts->wrlvl_start;
+ }
+ }
+
+ ddr->ddr_wrlvl_cntl = (0
+ | ((wrlvl_en & 0x1) << 31)
+ | ((wrlvl_mrd & 0x7) << 24)
+ | ((wrlvl_odten & 0x7) << 20)
+ | ((wrlvl_dqsen & 0x7) << 16)
+ | ((wrlvl_smpl & 0xf) << 12)
+ | ((wrlvl_wlr & 0x7) << 8)
+ | ((wrlvl_start & 0x1F) << 0)
+ );
+ debug("FSLDDR: wrlvl_cntl = 0x%08x\n", ddr->ddr_wrlvl_cntl);
+ ddr->ddr_wrlvl_cntl_2 = popts->wrlvl_ctl_2;
+ debug("FSLDDR: wrlvl_cntl_2 = 0x%08x\n", ddr->ddr_wrlvl_cntl_2);
+ ddr->ddr_wrlvl_cntl_3 = popts->wrlvl_ctl_3;
+ debug("FSLDDR: wrlvl_cntl_3 = 0x%08x\n", ddr->ddr_wrlvl_cntl_3);
+}
+
+/* DDR Self Refresh Counter (DDR_SR_CNTR) */
+static void set_ddr_sr_cntr(struct fsl_ddr_controller *c, unsigned int sr_it)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+
+ /* Self Refresh Idle Threshold */
+ ddr->ddr_sr_cntr = (sr_it & 0xF) << 16;
+}
+
+static void set_ddr_eor(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+
+ if (popts->addr_hash) {
+ ddr->ddr_eor = 0x40000000; /* address hash enable */
+ printf("Address hashing enabled.\n");
+ }
+}
+
+static void set_ddr_cdr1(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+
+ ddr->ddr_cdr1 = popts->ddr_cdr1;
+ debug("FSLDDR: ddr_cdr1 = 0x%08x\n", ddr->ddr_cdr1);
+}
+
+static void set_ddr_cdr2(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const memctl_options_t *popts = &c->memctl_opts;
+
+ ddr->ddr_cdr2 = popts->ddr_cdr2;
+ debug("FSLDDR: ddr_cdr2 = 0x%08x\n", ddr->ddr_cdr2);
+}
+
+static unsigned int
+check_fsl_memctl_config_regs(struct fsl_ddr_controller *c)
+{
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ unsigned int res = 0;
+
+ /*
+ * Check that DDR_SDRAM_CFG[RD_EN] and DDR_SDRAM_CFG[2T_EN] are
+ * not set at the same time.
+ */
+ if (ddr->ddr_sdram_cfg & 0x10000000
+ && ddr->ddr_sdram_cfg & 0x00008000) {
+ printf("Error: DDR_SDRAM_CFG[RD_EN] and DDR_SDRAM_CFG[2T_EN] "
+ "should not be set at the same time.\n");
+ res++;
+ }
+
+ return res;
+}
+
+unsigned int
+compute_fsl_memctl_config_regs(struct fsl_ddr_controller *c)
+{
+ const memctl_options_t *popts = &c->memctl_opts;
+ fsl_ddr_cfg_regs_t *ddr = &c->fsl_ddr_config_reg;
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ const struct dimm_params *dimm_params = c->dimm_params;
+ unsigned int i;
+ unsigned int cas_latency;
+ unsigned int additive_latency;
+ unsigned int sr_it;
+ unsigned int wrlvl_en;
+ unsigned int ip_rev = 0;
+ unsigned int unq_mrs_en = 0;
+ int cs_en = 1;
+ unsigned int ddr_freq;
+ struct ccsr_ddr __iomem *ddrc = c->base;
+
+ memset(ddr, 0, sizeof(fsl_ddr_cfg_regs_t));
+
+ if (common_dimm == NULL) {
+ printf("Error: subset DIMM params struct null pointer\n");
+ return 1;
+ }
+
+ /*
+ * Process overrides first.
+ *
+ * FIXME: somehow add derated caslat to this
+ */
+ cas_latency = (popts->cas_latency_override)
+ ? popts->cas_latency_override_value
+ : common_dimm->lowest_common_spd_caslat;
+
+ additive_latency = (popts->additive_latency_override)
+ ? popts->additive_latency_override_value
+ : common_dimm->additive_latency;
+
+ sr_it = (popts->auto_self_refresh_en)
+ ? popts->sr_it
+ : 0;
+ /* write leveling */
+ wrlvl_en = (popts->wrlvl_en) ? 1 : 0;
+
+ /* Chip Select Memory Bounds (CSn_BNDS) */
+ for (i = 0; i < c->chip_selects_per_ctrl; i++) {
+ unsigned long long ea, sa;
+ unsigned int cs_per_dimm
+ = c->chip_selects_per_ctrl / c->dimm_slots_per_ctrl;
+ unsigned int dimm_number
+ = i / cs_per_dimm;
+ unsigned long long rank_density
+ = dimm_params[dimm_number].rank_density >> c->dbw_capacity_adjust;
+
+ if (dimm_params[dimm_number].n_ranks == 0) {
+ debug("Skipping setup of CS%u "
+ "because n_ranks on DIMM %u is 0\n", i, dimm_number);
+ continue;
+ }
+ if (popts->memctl_interleaving) {
+ switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
+ case FSL_DDR_CS0_CS1_CS2_CS3:
+ break;
+ case FSL_DDR_CS0_CS1:
+ case FSL_DDR_CS0_CS1_AND_CS2_CS3:
+ if (i > 1)
+ cs_en = 0;
+ break;
+ case FSL_DDR_CS2_CS3:
+ default:
+ if (i > 0)
+ cs_en = 0;
+ break;
+ }
+ sa = common_dimm->base_address;
+ ea = sa + common_dimm->total_mem - 1;
+ } else if (!popts->memctl_interleaving) {
+ /*
+ * If memory interleaving between controllers is NOT
+ * enabled, the starting address for each memory
+ * controller is distinct. However, because rank
+ * interleaving is enabled, the starting and ending
+ * addresses of the total memory on that memory
+ * controller needs to be programmed into its
+ * respective CS0_BNDS.
+ */
+ switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
+ case FSL_DDR_CS0_CS1_CS2_CS3:
+ sa = common_dimm->base_address;
+ ea = sa + common_dimm->total_mem - 1;
+ break;
+ case FSL_DDR_CS0_CS1_AND_CS2_CS3:
+ if ((i >= 2) && (dimm_number == 0)) {
+ sa = dimm_params[dimm_number].base_address +
+ 2 * rank_density;
+ ea = sa + 2 * rank_density - 1;
+ } else {
+ sa = dimm_params[dimm_number].base_address;
+ ea = sa + 2 * rank_density - 1;
+ }
+ break;
+ case FSL_DDR_CS0_CS1:
+ if (dimm_params[dimm_number].n_ranks > (i % cs_per_dimm)) {
+ sa = dimm_params[dimm_number].base_address;
+ ea = sa + rank_density - 1;
+ if (i != 1)
+ sa += (i % cs_per_dimm) * rank_density;
+ ea += (i % cs_per_dimm) * rank_density;
+ } else {
+ sa = 0;
+ ea = 0;
+ }
+ if (i == 0)
+ ea += rank_density;
+ break;
+ case FSL_DDR_CS2_CS3:
+ if (dimm_params[dimm_number].n_ranks > (i % cs_per_dimm)) {
+ sa = dimm_params[dimm_number].base_address;
+ ea = sa + rank_density - 1;
+ if (i != 3)
+ sa += (i % cs_per_dimm) * rank_density;
+ ea += (i % cs_per_dimm) * rank_density;
+ } else {
+ sa = 0;
+ ea = 0;
+ }
+ if (i == 2)
+ ea += (rank_density >> c->dbw_capacity_adjust);
+ break;
+ default: /* No bank(chip-select) interleaving */
+ sa = dimm_params[dimm_number].base_address;
+ ea = sa + rank_density - 1;
+ if (dimm_params[dimm_number].n_ranks > (i % cs_per_dimm)) {
+ sa += (i % cs_per_dimm) * rank_density;
+ ea += (i % cs_per_dimm) * rank_density;
+ } else {
+ sa = 0;
+ ea = 0;
+ }
+ break;
+ }
+ }
+
+ sa >>= 24;
+ ea >>= 24;
+
+ if (cs_en) {
+ ddr->cs[i].bnds = (0
+ | ((sa & 0xffff) << 16) /* starting address */
+ | ((ea & 0xffff) << 0) /* ending address */
+ );
+ } else {
+ /* setting bnds to 0xffffffff for inactive CS */
+ ddr->cs[i].bnds = 0xffffffff;
+ }
+
+ debug("FSLDDR: cs[%d]_bnds = 0x%08x\n", i, ddr->cs[i].bnds);
+ set_csn_config(dimm_number, i, ddr, popts, dimm_params);
+ set_csn_config_2(i, ddr);
+ }
+
+ set_ddr_eor(c);
+
+ if (!is_ddr1(popts))
+ set_timing_cfg_0(c);
+
+ set_timing_cfg_3(c, cas_latency,
+ additive_latency);
+ set_timing_cfg_1(c, cas_latency);
+ set_timing_cfg_2(c, cas_latency, additive_latency);
+
+ set_ddr_cdr1(c);
+ set_ddr_cdr2(c);
+ set_ddr_sdram_cfg(c);
+ ip_rev = fsl_ddr_get_version(c);
+ if (ip_rev > 0x40400)
+ unq_mrs_en = 1;
+
+ if ((ip_rev > 0x40700) && (popts->cswl_override != 0))
+ ddr->debug[18] = popts->cswl_override;
+
+ set_ddr_sdram_cfg_2(c, unq_mrs_en);
+ if (is_ddr4(popts)) {
+ set_ddr_sdram_mode_ddr4(c, cas_latency, additive_latency, unq_mrs_en);
+ set_ddr4_sdram_mode_2(c, unq_mrs_en);
+ set_ddr_sdram_mode_9(c, unq_mrs_en);
+ set_ddr_sdram_mode_10(c, unq_mrs_en);
+ } else if (is_ddr3(popts)) {
+ set_ddr_sdram_mode_ddr3(c, cas_latency, additive_latency, unq_mrs_en);
+ set_ddr3_sdram_mode_2(c, unq_mrs_en);
+ } else {
+ set_ddr_sdram_mode_ddr12(c, cas_latency, additive_latency, unq_mrs_en);
+ set_ddr1_2_sdram_mode_2(c, unq_mrs_en);
+ }
+
+ set_ddr_sdram_rcw(c);
+
+ set_ddr_sdram_interval(c);
+
+ ddr->ddr_data_init = 0xdeadbeef;
+
+ set_ddr_sdram_clk_cntl(c);
+ set_ddr_init_addr(c);
+ set_ddr_init_ext_addr(c);
+ set_timing_cfg_4(c);
+ set_timing_cfg_5(c, cas_latency);
+
+ if (is_ddr4(popts)) {
+ set_ddr_sdram_cfg_3(c);
+ set_timing_cfg_6(c);
+ set_timing_cfg_7(c);
+ set_timing_cfg_8(c, cas_latency);
+ set_timing_cfg_9(c);
+ set_ddr_dq_mapping(c);
+ }
+
+ set_ddr_zq_cntl(c, popts->zq_en);
+ set_ddr_wrlvl_cntl(c, wrlvl_en);
+
+ set_ddr_sr_cntr(c, sr_it);
+
+ if (c->erratum_A004508 && ip_rev >= 0x40000 && ip_rev < 0x40400)
+ ddr->debug[2] |= 0x00000200; /* set bit 22 */
+
+ /* Erratum applies when accumulated ECC is used, or DBI is enabled */
+#define IS_ACC_ECC_EN(v) ((v) & 0x4)
+#define IS_DBI(v) ((((v) >> 12) & 0x3) == 0x2)
+ if (c->erratum_A008378) {
+ if (IS_ACC_ECC_EN(ddr->ddr_sdram_cfg) ||
+ IS_DBI(ddr->ddr_sdram_cfg_3)) {
+ ddr->debug[28] = ddr_in32(&ddrc->debug[28]);
+ ddr->debug[28] |= (0x9 << 20);
+ }
+ }
+
+ if (c->erratum_A009942) {
+ ddr_freq = c->ddr_freq / 1000000;
+ ddr->debug[28] |= ddr_in32(&ddrc->debug[28]);
+ ddr->debug[28] &= 0xff0fff00;
+ if (ddr_freq <= 1333)
+ ddr->debug[28] |= 0x0080006a;
+ else if (ddr_freq <= 1600)
+ ddr->debug[28] |= 0x0070006f;
+ else if (ddr_freq <= 1867)
+ ddr->debug[28] |= 0x00700076;
+ else if (ddr_freq <= 2133)
+ ddr->debug[28] |= 0x0060007b;
+ if (popts->cpo_sample)
+ ddr->debug[28] = (ddr->debug[28] & 0xffffff00) |
+ popts->cpo_sample;
+ }
+
+ return check_fsl_memctl_config_regs(c);
+}
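The CSn_BNDS programming above drops the low 24 bits of the start and end addresses (16 MiB granularity) and packs them into the upper and lower half-words of the register. A minimal standalone sketch of that packing, with an illustrative 2 GiB rank rather than values from any real board (the helper name is made up for the example):

#include <stdint.h>
#include <stdio.h>

/* bits 31:16 = start >> 24, bits 15:0 = end >> 24, as in the loop above */
static uint32_t pack_cs_bnds(unsigned long long sa, unsigned long long ea)
{
        return ((uint32_t)((sa >> 24) & 0xffff) << 16) |
               (uint32_t)((ea >> 24) & 0xffff);
}

int main(void)
{
        /* one 2 GiB rank based at 0: sa = 0x0, ea = 0x7fffffff */
        printf("cs0_bnds = 0x%08x\n",
               (unsigned int)pack_cs_bnds(0, 0x80000000ULL - 1));
        /* prints cs0_bnds = 0x0000007f */
        return 0;
}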
diff --git a/drivers/ddr/fsl/ddr1_dimm_params.c b/drivers/ddr/fsl/ddr1_dimm_params.c
new file mode 100644
index 0000000000..268bf5bde4
--- /dev/null
+++ b/drivers/ddr/fsl/ddr1_dimm_params.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ */
+#include <common.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <linux/log2.h>
+#include "fsl_ddr.h"
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * Study this table from Byte 31 of the JEDEC SPD spec.
+ *
+ * DDR I DDR II
+ * Bit Size Size
+ * --- ----- ------
+ * 7 high 512MB 512MB
+ * 6 256MB 256MB
+ * 5 128MB 128MB
+ * 4 64MB 16GB
+ * 3 32MB 8GB
+ * 2 16MB 4GB
+ * 1 2GB 2GB
+ * 0 low 1GB 1GB
+ *
+ * Reorder Table to be linear by stripping the bottom
+ * 2 or 5 bits off and shifting them up to the top.
+ */
+
+static unsigned long long
+compute_ranksize(unsigned int mem_type, unsigned char row_dens)
+{
+ unsigned long long bsize;
+
+ /* Bottom 2 bits up to the top. */
+ bsize = ((row_dens >> 2) | ((row_dens & 3) << 6));
+ bsize <<= 24ULL;
+ debug("DDR: DDR I rank density = 0x%16llx\n", bsize);
+
+ return bsize;
+}
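As a quick check of the reordering above: row_dens = 0x08 (bit 3, the 32MB row in the DDR I column) gives (0x08 >> 2) | ((0x08 & 3) << 6) = 0x02, and 0x02 << 24 = 32 MiB; row_dens = 0x01 (bit 0, the 1GB row) gives 0x40, and 0x40 << 24 = 1 GiB.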
+
+/*
+ * Convert a two-nibble BCD value into a cycle time.
+ * While the spec calls for nanoseconds, picoseconds are returned.
+ *
+ * This implements the tables for bytes 9, 23 and 25 for both
+ * DDR I and II. No allowance for distinguishing the invalid
+ * fields absent for DDR I yet present in DDR II is made.
+ * (That is, cycle times of .25, .33, .66 and .75 ns are
+ * allowed for both DDR II and I.)
+ */
+static unsigned int
+convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
+{
+ /* Table look up the lower nibble, allow DDR I & II. */
+ unsigned int tenths_ps[16] = {
+ 0,
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 250, /* This and the next 3 entries valid ... */
+ 330, /* ... only for tCK calculations. */
+ 660,
+ 750,
+ 0, /* undefined */
+ 0 /* undefined */
+ };
+
+ unsigned int whole_ns = (spd_val & 0xF0) >> 4;
+ unsigned int tenth_ns = spd_val & 0x0F;
+ unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];
+
+ return ps;
+}
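Worked example of the BCD conversion above: spd_val = 0x75 means 7 whole ns plus tenths_ps[5] = 500, i.e. 7500 ps (7.5 ns, a typical DDR-266 tCKmin); spd_val = 0x3D means 3 whole ns plus tenths_ps[0xD] = 750, i.e. 3750 ps (3.75 ns, a typical DDR2-533 tCKmin).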
+
+static unsigned int
+convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
+{
+ unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
+ unsigned int hundredth_ns = spd_val & 0x0F;
+ unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;
+
+ return ps;
+}
+
+static unsigned int byte40_table_ps[8] = {
+ 0,
+ 250,
+ 330,
+ 500,
+ 660,
+ 750,
+ 0, /* supposed to be RFC, but not sure what that means */
+ 0 /* Undefined */
+};
+
+static unsigned int
+compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
+{
+ return ((trctrfc_ext & 0x1) * 256 + trfc) * 1000
+ + byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
+}
+
+static unsigned int
+compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
+{
+ return trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
+}
+
+/*
+ * tCKmax from DDR I SPD Byte 43
+ *
+ * Bits 7:2 == whole ns
+ * Bits 1:0 == quarter ns
+ * 00 == 0.00 ns
+ * 01 == 0.25 ns
+ * 10 == 0.50 ns
+ * 11 == 0.75 ns
+ *
+ * Returns picoseconds.
+ */
+static unsigned int
+compute_tckmax_from_spd_ps(unsigned int byte43)
+{
+ return (byte43 >> 2) * 1000 + (byte43 & 0x3) * 250;
+}
+
+/*
+ * Determine Refresh Rate. Ignore self refresh bit on DDR I.
+ * Table from SPD Spec, Byte 12, converted to picoseconds and
+ * filled in with "default" normal values.
+ */
+static unsigned int
+determine_refresh_rate_ps(const unsigned int spd_refresh)
+{
+ unsigned int refresh_time_ps[8] = {
+ 15625000, /* 0 Normal 1.00x */
+ 3900000, /* 1 Reduced .25x */
+ 7800000, /* 2 Extended .50x */
+ 31300000, /* 3 Extended 2.00x */
+ 62500000, /* 4 Extended 4.00x */
+ 125000000, /* 5 Extended 8.00x */
+ 15625000, /* 6 Normal 1.00x filler */
+ 15625000, /* 7 Normal 1.00x filler */
+ };
+
+ return refresh_time_ps[spd_refresh & 0x7];
+}
+
+/*
+ * The purpose of this function is to compute a suitable
+ * CAS latency given the DRAM clock period. The SPD only
+ * defines at most 3 CAS latencies. Typically the slower in
+ * frequency the DIMM runs at, the shorter its CAS latency can be.
+ * If the DIMM is operating at a sufficiently low frequency,
+ * it may be able to run at a CAS latency shorter than the
+ * shortest SPD-defined CAS latency.
+ *
+ * If a CAS latency is not found, 0 is returned.
+ *
+ * Do this by finding in the standard speed bin table the longest
+ * tCKmin that doesn't exceed the value of mclk_ps (tCK).
+ *
+ * An assumption made is that the SDRAM device allows the
+ * CL to be programmed for a value that is lower than those
+ * advertised by the SPD. This is not always the case,
+ * as those modes not defined in the SPD are optional.
+ *
+ * CAS latency de-rating based upon values from JEDEC Standard No. 79-E,
+ * Table 11.
+ *
+ * ordinal 2, ddr1_speed_bins[1] contains tCK for CL=2
+ */
+ /* CL2.0 CL2.5 CL3.0 */
+unsigned short ddr1_speed_bins[] = { 0, 7500, 6000, 5000 };
+
+static unsigned int
+compute_derated_DDR1_CAS_latency(unsigned int mclk_ps)
+{
+ const unsigned int num_speed_bins = ARRAY_SIZE(ddr1_speed_bins);
+ unsigned int lowest_tCKmin_found = 0;
+ unsigned int lowest_tCKmin_CL = 0;
+ unsigned int i;
+
+ debug("mclk_ps = %u\n", mclk_ps);
+
+ for (i = 0; i < num_speed_bins; i++) {
+ unsigned int x = ddr1_speed_bins[i];
+ debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
+ i, x, lowest_tCKmin_found);
+ if (x && lowest_tCKmin_found <= x && x <= mclk_ps) {
+ lowest_tCKmin_found = x;
+ lowest_tCKmin_CL = i + 1;
+ }
+ }
+
+ debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);
+
+ return lowest_tCKmin_CL;
+}
+
+/*
+ * ddr1_compute_dimm_parameters for DDR1 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the struct dimm_params structure pointed to by pdimm.
+ *
+ * FIXME: use #define for the retvals
+ */
+unsigned int ddr1_compute_dimm_parameters(struct fsl_ddr_controller *c,
+ const struct ddr1_spd_eeprom *spd,
+ struct dimm_params *pdimm)
+{
+ int ret;
+
+ ret = ddr1_spd_check(spd);
+ if (ret) {
+ printf("DIMM: failed checksum\n");
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = spd->nrows;
+ pdimm->rank_density = compute_ranksize(spd->mem_type, spd->bank_dens);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->data_width = spd->dataw_lsb;
+ pdimm->primary_sdram_width = spd->primw;
+ pdimm->ec_sdram_width = spd->ecw;
+
+ /*
+ * FIXME: Need to determine registered_dimm status.
+ * 1 == registered/buffered
+ * 0 == unbuffered
+ */
+ pdimm->registered_dimm = 0; /* unbuffered */
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = spd->nrow_addr;
+ pdimm->n_col_addr = spd->ncol_addr;
+ pdimm->n_banks_per_sdram_device = spd->nbanks;
+ pdimm->edc_config = spd->config;
+ pdimm->burst_lengths_bitmask = spd->burstl;
+
+ /*
+ * Calculate the Maximum Data Rate based on the Minimum Cycle time.
+ * The SPD clk_cycle field (tCKmin) is measured in tenths of
+ * nanoseconds and represented as BCD.
+ */
+ pdimm->tckmin_x_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
+ pdimm->tckmin_x_minus_1_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
+ pdimm->tckmin_x_minus_2_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);
+
+ pdimm->tckmax_ps = compute_tckmax_from_spd_ps(spd->tckmax);
+
+ /*
+ * Compute CAS latencies defined by SPD
+ * The SPD caslat_x should have at least 1 and at most 3 bits set.
+ *
+ * If cas_lat after masking is 0, the __ilog2 function returns
+ * 255. This behavior is relied upon once.
+ */
+ pdimm->caslat_x = ilog2(spd->cas_lat);
+ pdimm->caslat_x_minus_1 = ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_x));
+ pdimm->caslat_x_minus_2 = ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_x)
+ & ~(1 << pdimm->caslat_x_minus_1));
+
+ /* Compute CAS latencies below that defined by SPD */
+ pdimm->caslat_lowest_derated = compute_derated_DDR1_CAS_latency(
+ get_memory_clk_period_ps(c));
+
+ /* Compute timing parameters */
+ pdimm->trcd_ps = spd->trcd * 250;
+ pdimm->trp_ps = spd->trp * 250;
+ pdimm->tras_ps = spd->tras * 1000;
+
+ pdimm->twr_ps = mclk_to_picos(c, 3);
+ pdimm->twtr_ps = mclk_to_picos(c, 1);
+ pdimm->trfc_ps = compute_trfc_ps_from_spd(0, spd->trfc);
+
+ pdimm->trrd_ps = spd->trrd * 250;
+ pdimm->trc_ps = compute_trc_ps_from_spd(0, spd->trc);
+
+ pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);
+
+ pdimm->tis_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
+ pdimm->tih_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
+ pdimm->tds_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
+ pdimm->tdh_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);
+
+ pdimm->trtp_ps = mclk_to_picos(c, 2); /* By the book. */
+ pdimm->tdqsq_max_ps = spd->tdqsq * 10;
+ pdimm->tqhs_ps = spd->tqhs * 10;
+
+ return 0;
+}
diff --git a/drivers/ddr/fsl/ddr2_dimm_params.c b/drivers/ddr/fsl/ddr2_dimm_params.c
new file mode 100644
index 0000000000..3f8b56330d
--- /dev/null
+++ b/drivers/ddr/fsl/ddr2_dimm_params.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ */
+
+#include <common.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <linux/log2.h>
+#include "fsl_ddr.h"
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * Study this table from Byte 31 of the JEDEC SPD spec.
+ *
+ * DDR I DDR II
+ * Bit Size Size
+ * --- ----- ------
+ * 7 high 512MB 512MB
+ * 6 256MB 256MB
+ * 5 128MB 128MB
+ * 4 64MB 16GB
+ * 3 32MB 8GB
+ * 2 16MB 4GB
+ * 1 2GB 2GB
+ * 0 low 1GB 1GB
+ *
+ * Reorder Table to be linear by stripping the bottom
+ * 2 or 5 bits off and shifting them up to the top.
+ *
+ */
+static unsigned long long
+compute_ranksize(unsigned int mem_type, unsigned char row_dens)
+{
+ unsigned long long bsize;
+
+ /* Bottom 5 bits up to the top. */
+ bsize = ((row_dens >> 5) | ((row_dens & 31) << 3));
+ bsize <<= 27ULL;
+ debug("DDR: DDR II rank density = 0x%16llx\n", bsize);
+
+ return bsize;
+}
+
+/*
+ * Convert a two-nibble BCD value into a cycle time.
+ * While the spec calls for nanoseconds, picoseconds are returned.
+ *
+ * This implements the tables for bytes 9, 23 and 25 for both
+ * DDR I and II. No allowance for distinguishing the invalid
+ * fields absent for DDR I yet present in DDR II is made.
+ * (That is, cycle times of .25, .33, .66 and .75 ns are
+ * allowed for both DDR II and I.)
+ */
+static unsigned int
+convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
+{
+ /* Table look up the lower nibble, allow DDR I & II. */
+ unsigned int tenths_ps[16] = {
+ 0,
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 250, /* This and the next 3 entries valid ... */
+ 330, /* ... only for tCK calculations. */
+ 660,
+ 750,
+ 0, /* undefined */
+ 0 /* undefined */
+ };
+
+ unsigned int whole_ns = (spd_val & 0xF0) >> 4;
+ unsigned int tenth_ns = spd_val & 0x0F;
+ unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];
+
+ return ps;
+}
+
+static unsigned int
+convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
+{
+ unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
+ unsigned int hundredth_ns = spd_val & 0x0F;
+ unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;
+
+ return ps;
+}
+
+static unsigned int byte40_table_ps[8] = {
+ 0,
+ 250,
+ 330,
+ 500,
+ 660,
+ 750,
+ 0, /* supposed to be RFC, but not sure what that means */
+ 0 /* Undefined */
+};
+
+static unsigned int
+compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
+{
+ return (((trctrfc_ext & 0x1) * 256) + trfc) * 1000
+ + byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
+}
+
+static unsigned int
+compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
+{
+ return trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
+}
+
+/*
+ * Determine Refresh Rate. Ignore self refresh bit on DDR I.
+ * Table from SPD Spec, Byte 12, converted to picoseconds and
+ * filled in with "default" normal values.
+ */
+static unsigned int
+determine_refresh_rate_ps(const unsigned int spd_refresh)
+{
+ unsigned int refresh_time_ps[8] = {
+ 15625000, /* 0 Normal 1.00x */
+ 3900000, /* 1 Reduced .25x */
+ 7800000, /* 2 Extended .50x */
+ 31300000, /* 3 Extended 2.00x */
+ 62500000, /* 4 Extended 4.00x */
+ 125000000, /* 5 Extended 8.00x */
+ 15625000, /* 6 Normal 1.00x filler */
+ 15625000, /* 7 Normal 1.00x filler */
+ };
+
+ return refresh_time_ps[spd_refresh & 0x7];
+}
+
+/*
+ * The purpose of this function is to compute a suitable
+ * CAS latency given the DRAM clock period. The SPD only
+ * defines at most 3 CAS latencies. Typically the slower in
+ * frequency the DIMM runs at, the shorter its CAS latency can
+ * be. If the DIMM is operating at a sufficiently low frequency,
+ * it may be able to run at a CAS latency shorter than the
+ * shortest SPD-defined CAS latency.
+ *
+ * If a CAS latency is not found, 0 is returned.
+ *
+ * Do this by finding in the standard speed bin table the longest
+ * tCKmin that doesn't exceed the value of mclk_ps (tCK).
+ *
+ * An assumption made is that the SDRAM device allows the
+ * CL to be programmed for a value that is lower than those
+ * advertised by the SPD. This is not always the case,
+ * as those modes not defined in the SPD are optional.
+ *
+ * CAS latency de-rating based upon values from JEDEC Standard No. 79-2C,
+ * Table 40, "DDR2 SDRAM standard speed bins and tCK, tRCD, tRP, tRAS,
+ * and tRC for corresponding bin"
+ *
+ * ordinal 2, ddr2_speed_bins[1] contains tCK for CL=3
+ * Not certain if any good value exists for CL=2
+ */
+ /* CL2 CL3 CL4 CL5 CL6 CL7*/
+unsigned short ddr2_speed_bins[] = { 0, 5000, 3750, 3000, 2500, 1875 };
+
+static unsigned int
+compute_derated_DDR2_CAS_latency(unsigned int mclk_ps)
+{
+ const unsigned int num_speed_bins = ARRAY_SIZE(ddr2_speed_bins);
+ unsigned int lowest_tCKmin_found = 0;
+ unsigned int lowest_tCKmin_CL = 0;
+ unsigned int i;
+
+ debug("mclk_ps = %u\n", mclk_ps);
+
+ for (i = 0; i < num_speed_bins; i++) {
+ unsigned int x = ddr2_speed_bins[i];
+ debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
+ i, x, lowest_tCKmin_found);
+ if (x && x <= mclk_ps && x >= lowest_tCKmin_found) {
+ lowest_tCKmin_found = x;
+ lowest_tCKmin_CL = i + 2;
+ }
+ }
+
+ debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);
+
+ return lowest_tCKmin_CL;
+}
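The loop above keeps the largest speed-bin tCKmin that still fits within the actual clock period and maps its array index back to a CAS latency (index 1 holds the CL3 bin, hence i + 2). A self-contained sketch of the same selection with an illustrative DDR2-667 clock period of 3000 ps (helper name chosen for the example only):

#include <stdio.h>

static const unsigned short bins_ps[] = { 0, 5000, 3750, 3000, 2500, 1875 };

/* mirrors compute_derated_DDR2_CAS_latency() above */
static unsigned int derated_cl(unsigned int mclk_ps)
{
        unsigned int best = 0, cl = 0, i;

        for (i = 0; i < sizeof(bins_ps) / sizeof(bins_ps[0]); i++) {
                if (bins_ps[i] && bins_ps[i] <= mclk_ps && bins_ps[i] >= best) {
                        best = bins_ps[i];
                        cl = i + 2;
                }
        }
        return cl;
}

int main(void)
{
        printf("derated CL = %u\n", derated_cl(3000)); /* prints derated CL = 5 */
        return 0;
}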
+
+/*
+ * ddr2_compute_dimm_parameters for DDR2 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the struct dimm_params structure pointed to by pdimm.
+ *
+ * FIXME: use #define for the retvals
+ */
+unsigned int ddr2_compute_dimm_parameters(struct fsl_ddr_controller *c,
+ const struct ddr2_spd_eeprom *spd,
+ struct dimm_params *pdimm)
+{
+ int ret;
+
+ ret = ddr2_spd_check(spd);
+ if (ret) {
+ printf("DIMM: failed checksum\n");
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = (spd->mod_ranks & 0x7) + 1;
+ pdimm->rank_density = compute_ranksize(spd->mem_type, spd->rank_dens);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->data_width = spd->dataw;
+ pdimm->primary_sdram_width = spd->primw;
+ pdimm->ec_sdram_width = spd->ecw;
+
+ /* These are all the types defined by the JEDEC DDR2 SPD 1.3 spec */
+ switch (spd->dimm_type) {
+ case DDR2_SPD_DIMMTYPE_RDIMM:
+ case DDR2_SPD_DIMMTYPE_72B_SO_RDIMM:
+ case DDR2_SPD_DIMMTYPE_MINI_RDIMM:
+ /* Registered/buffered DIMMs */
+ pdimm->registered_dimm = 1;
+ break;
+
+ case DDR2_SPD_DIMMTYPE_UDIMM:
+ case DDR2_SPD_DIMMTYPE_SO_DIMM:
+ case DDR2_SPD_DIMMTYPE_MICRO_DIMM:
+ case DDR2_SPD_DIMMTYPE_MINI_UDIMM:
+ /* Unbuffered DIMMs */
+ pdimm->registered_dimm = 0;
+ break;
+
+ case DDR2_SPD_DIMMTYPE_72B_SO_CDIMM:
+ default:
+ printf("unknown dimm_type 0x%02X\n", spd->dimm_type);
+ return 1;
+ }
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = spd->nrow_addr;
+ pdimm->n_col_addr = spd->ncol_addr;
+ pdimm->n_banks_per_sdram_device = spd->nbanks;
+ pdimm->edc_config = spd->config;
+ pdimm->burst_lengths_bitmask = spd->burstl;
+
+ /*
+ * Calculate the Maximum Data Rate based on the Minimum Cycle time.
+ * The SPD clk_cycle field (tCKmin) is measured in tenths of
+ * nanoseconds and represented as BCD.
+ */
+ pdimm->tckmin_x_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
+ pdimm->tckmin_x_minus_1_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
+ pdimm->tckmin_x_minus_2_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);
+
+ pdimm->tckmax_ps = convert_bcd_tenths_to_cycle_time_ps(spd->tckmax);
+
+ /*
+ * Compute CAS latencies defined by SPD
+ * The SPD caslat_x should have at least 1 and at most 3 bits set.
+ *
+ * If cas_lat after masking is 0, the __ilog2 function returns
+ * 255. This behavior is relied upon once.
+ */
+ pdimm->caslat_x = ilog2(spd->cas_lat);
+ pdimm->caslat_x_minus_1 = ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_x));
+ pdimm->caslat_x_minus_2 = ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_x)
+ & ~(1 << pdimm->caslat_x_minus_1));
+
+ /* Compute CAS latencies below that defined by SPD */
+ pdimm->caslat_lowest_derated = compute_derated_DDR2_CAS_latency(
+ get_memory_clk_period_ps(c));
+
+ /* Compute timing parameters */
+ pdimm->trcd_ps = spd->trcd * 250;
+ pdimm->trp_ps = spd->trp * 250;
+ pdimm->tras_ps = spd->tras * 1000;
+
+ pdimm->twr_ps = spd->twr * 250;
+ pdimm->twtr_ps = spd->twtr * 250;
+ pdimm->trfc_ps = compute_trfc_ps_from_spd(spd->trctrfc_ext, spd->trfc);
+
+ pdimm->trrd_ps = spd->trrd * 250;
+ pdimm->trc_ps = compute_trc_ps_from_spd(spd->trctrfc_ext, spd->trc);
+
+ pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);
+
+ pdimm->tis_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
+ pdimm->tih_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
+ pdimm->tds_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
+ pdimm->tdh_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);
+
+ pdimm->trtp_ps = spd->trtp * 250;
+ pdimm->tdqsq_max_ps = spd->tdqsq * 10;
+ pdimm->tqhs_ps = spd->tqhs * 10;
+
+ return 0;
+}
diff --git a/drivers/ddr/fsl/ddr3_dimm_params.c b/drivers/ddr/fsl/ddr3_dimm_params.c
new file mode 100644
index 0000000000..1665e792c3
--- /dev/null
+++ b/drivers/ddr/fsl/ddr3_dimm_params.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Dave Liu <daveliu@freescale.com>
+ *
+ * Calculate the organization and timing parameters
+ * from DDR3 SPD; please refer to the spec
+ * JEDEC standard No. 21-C 4_01_02_11R18.pdf
+ */
+
+#include <common.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include "fsl_ddr.h"
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * each rank size =
+ * sdram capacity(bit) / 8 * primary bus width / sdram width
+ *
+ * where: sdram capacity = spd byte4[3:0]
+ * primary bus width = spd byte8[2:0]
+ * sdram width = spd byte7[2:0]
+ *
+ * SPD byte4 - sdram density and banks
+ * bit[3:0] size(bit) size(byte)
+ * 0000 256Mb 32MB
+ * 0001 512Mb 64MB
+ * 0010 1Gb 128MB
+ * 0011 2Gb 256MB
+ * 0100 4Gb 512MB
+ * 0101 8Gb 1GB
+ * 0110 16Gb 2GB
+ *
+ * SPD byte8 - module memory bus width
+ * bit[2:0] primary bus width
+ * 000 8bits
+ * 001 16bits
+ * 010 32bits
+ * 011 64bits
+ *
+ * SPD byte7 - module organization
+ * bit[2:0] sdram device width
+ * 000 4bits
+ * 001 8bits
+ * 010 16bits
+ * 011 32bits
+ *
+ */
+static unsigned long long
+compute_ranksize(const struct ddr3_spd_eeprom *spd)
+{
+ unsigned long long bsize;
+
+ int nbit_sdram_cap_bsize = 0;
+ int nbit_primary_bus_width = 0;
+ int nbit_sdram_width = 0;
+
+ if ((spd->density_banks & 0xf) < 7)
+ nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
+ if ((spd->bus_width & 0x7) < 4)
+ nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
+ if ((spd->organization & 0x7) < 4)
+ nbit_sdram_width = (spd->organization & 0x7) + 2;
+
+ bsize = 1ULL << (nbit_sdram_cap_bsize - 3
+ + nbit_primary_bus_width - nbit_sdram_width);
+
+ debug("DDR: DDR III rank density = 0x%16llx\n", bsize);
+
+ return bsize;
+}
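Worked example of the exponent arithmetic above: density_banks = 0x3 (2 Gb devices, 3 + 28 = 31), bus_width = 0x3 (64-bit primary bus, 3 + 3 = 6) and organization = 0x1 (x8 devices, 1 + 2 = 3) yield 1 << (31 - 3 + 6 - 3) = 2 GiB per rank, which matches 2 Gb / 8 = 256 MiB per device times the 8 devices needed for a 64-bit bus.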
+
+/*
+ * ddr3_compute_dimm_parameters for DDR3 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the struct dimm_params structure pointed to by pdimm.
+ *
+ */
+unsigned int ddr3_compute_dimm_parameters(struct fsl_ddr_controller *c,
+ const struct ddr3_spd_eeprom *spd,
+ struct dimm_params *pdimm)
+{
+ int ret;
+ unsigned int mtb_ps;
+ int ftb_10th_ps;
+ int i;
+
+ ret = ddr3_spd_check(spd);
+ if (ret) {
+ printf("DIMM: failed checksum\n");
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ if ((spd->info_size_crc & 0xF) > 1)
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
+ pdimm->rank_density = compute_ranksize(spd);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
+ if ((spd->bus_width >> 3) & 0x3)
+ pdimm->ec_sdram_width = 8;
+ else
+ pdimm->ec_sdram_width = 0;
+ pdimm->data_width = pdimm->primary_sdram_width
+ + pdimm->ec_sdram_width;
+ pdimm->device_width = 1 << ((spd->organization & 0x7) + 2);
+
+ /* These are the types defined by the JEDEC DDR3 SPD spec */
+ pdimm->mirrored_dimm = 0;
+ pdimm->registered_dimm = 0;
+ switch (spd->module_type & DDR3_SPD_MODULETYPE_MASK) {
+ case DDR3_SPD_MODULETYPE_RDIMM:
+ case DDR3_SPD_MODULETYPE_MINI_RDIMM:
+ case DDR3_SPD_MODULETYPE_72B_SO_RDIMM:
+ /* Registered/buffered DIMMs */
+ pdimm->registered_dimm = 1;
+ for (i = 0; i < 16; i += 2) {
+ u8 rcw = spd->mod_section.registered.rcw[i/2];
+ pdimm->rcw[i] = (rcw >> 0) & 0x0F;
+ pdimm->rcw[i+1] = (rcw >> 4) & 0x0F;
+ }
+ break;
+
+ case DDR3_SPD_MODULETYPE_UDIMM:
+ case DDR3_SPD_MODULETYPE_SO_DIMM:
+ case DDR3_SPD_MODULETYPE_MICRO_DIMM:
+ case DDR3_SPD_MODULETYPE_MINI_UDIMM:
+ case DDR3_SPD_MODULETYPE_MINI_CDIMM:
+ case DDR3_SPD_MODULETYPE_72B_SO_UDIMM:
+ case DDR3_SPD_MODULETYPE_72B_SO_CDIMM:
+ case DDR3_SPD_MODULETYPE_LRDIMM:
+ case DDR3_SPD_MODULETYPE_16B_SO_DIMM:
+ case DDR3_SPD_MODULETYPE_32B_SO_DIMM:
+ /* Unbuffered DIMMs */
+ if (spd->mod_section.unbuffered.addr_mapping & 0x1)
+ pdimm->mirrored_dimm = 1;
+ break;
+
+ default:
+ printf("unknown module_type 0x%02X\n", spd->module_type);
+ return 1;
+ }
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
+ pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
+ pdimm->n_banks_per_sdram_device = 8 << ((spd->density_banks >> 4) & 0x7);
+
+ /*
+ * The SPD spec does not have an ECC bit; we consider the DIMM
+ * ECC-capable when the extension bus exists.
+ */
+ if (pdimm->ec_sdram_width)
+ pdimm->edc_config = 0x02;
+ else
+ pdimm->edc_config = 0x00;
+
+ /*
+ * The SPD spec does not have a burst length byte, but the
+ * DDR3 spec has native BL8 and BC4: BL8 - bit 3, BC4 - bit 2.
+ */
+ pdimm->burst_lengths_bitmask = 0x0c;
+
+ /* MTB - medium timebase
+ * The unit in the SPD spec is ns,
+ * We convert it to ps.
+ * eg: MTB = 0.125ns (125ps)
+ */
+ mtb_ps = (spd->mtb_dividend * 1000) / spd->mtb_divisor;
+ pdimm->mtb_ps = mtb_ps;
+
+ /*
+ * FTB - fine timebase
+ * use 1/10th of ps as our unit to avoid floating point
+ * eg, 10 for 1ps, 25 for 2.5ps, 50 for 5ps
+ */
+ ftb_10th_ps =
+ ((spd->ftb_div & 0xf0) >> 4) * 10 / (spd->ftb_div & 0x0f);
+ pdimm->ftb_10th_ps = ftb_10th_ps;
+ /*
+ * sdram minimum cycle time
+ * we assume the MTB is 0.125ns
+ * eg:
+ * tck_min=15 MTB (1.875ns) ->DDR3-1066
+ * =12 MTB (1.5ns) ->DDR3-1333
+ * =10 MTB (1.25ns) ->DDR3-1600
+ */
+ pdimm->tckmin_x_ps = spd->tck_min * mtb_ps +
+ (spd->fine_tck_min * ftb_10th_ps) / 10;
+
+ /*
+ * CAS latency supported
+ * bit4 - CL4
+ * bit5 - CL5
+ * bit18 - CL18
+ */
+ pdimm->caslat_x = ((spd->caslat_msb << 8) | spd->caslat_lsb) << 4;
+
+ /*
+ * min CAS latency time
+ * eg: taa_min =
+ * DDR3-800D 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25ns)
+ */
+ pdimm->taa_ps = spd->taa_min * mtb_ps +
+ (spd->fine_taa_min * ftb_10th_ps) / 10;
+
+ /*
+ * min write recovery time
+ * eg:
+ * twr_min = 120 MTB (15ns) -> all speed grades.
+ */
+ pdimm->twr_ps = spd->twr_min * mtb_ps;
+
+ /*
+ * min RAS to CAS delay time
+ * eg: trcd_min =
+ * DDR3-800 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25ns)
+ */
+ pdimm->trcd_ps = spd->trcd_min * mtb_ps +
+ (spd->fine_trcd_min * ftb_10th_ps) / 10;
+
+ /*
+ * min row active to row active delay time
+ * eg: trrd_min =
+ * DDR3-800(1KB page) 80 MTB (10ns)
+ * DDR3-1333(1KB page) 48 MTB (6ns)
+ */
+ pdimm->trrd_ps = spd->trrd_min * mtb_ps;
+
+ /*
+ * min row precharge delay time
+ * eg: trp_min =
+ * DDR3-800D 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25ns)
+ */
+ pdimm->trp_ps = spd->trp_min * mtb_ps +
+ (spd->fine_trp_min * ftb_10th_ps) / 10;
+
+ /* min active to precharge delay time
+ * eg: tRAS_min =
+ * DDR3-800D 300 MTB (37.5ns)
+ * DDR3-1066F 300 MTB (37.5ns)
+ * DDR3-1333H 288 MTB (36ns)
+ * DDR3-1600H 280 MTB (35ns)
+ */
+ pdimm->tras_ps = (((spd->tras_trc_ext & 0xf) << 8) | spd->tras_min_lsb)
+ * mtb_ps;
+ /*
+ * min active to active/refresh delay time
+ * eg: tRC_min =
+ * DDR3-800D 400 MTB (50ns)
+ * DDR3-1066F 405 MTB (50.625ns)
+ * DDR3-1333H 396 MTB (49.5ns)
+ * DDR3-1600H 370 MTB (46.25ns)
+ */
+ pdimm->trc_ps = (((spd->tras_trc_ext & 0xf0) << 4) | spd->trc_min_lsb)
+ * mtb_ps + (spd->fine_trc_min * ftb_10th_ps) / 10;
+ /*
+ * min refresh recovery delay time
+ * eg: tRFC_min =
+ * 512Mb 720 MTB (90ns)
+ * 1Gb 880 MTB (110ns)
+ * 2Gb 1280 MTB (160ns)
+ */
+ pdimm->trfc_ps = ((spd->trfc_min_msb << 8) | spd->trfc_min_lsb)
+ * mtb_ps;
+ /*
+ * min internal write to read command delay time
+ * eg: twtr_min = 40 MTB (7.5ns) - all speed bins.
+ * tWTR is at least 4 mclk independent of operating freq.
+ */
+ pdimm->twtr_ps = spd->twtr_min * mtb_ps;
+
+ /*
+ * min internal read to precharge command delay time
+ * eg: trtp_min = 40 MTB (7.5ns) - all speed bins.
+ * tRTP is at least 4 mclk independent of operating freq.
+ */
+ pdimm->trtp_ps = spd->trtp_min * mtb_ps;
+
+ /*
+ * Average periodic refresh interval
+ * tREFI = 7.8 us at normal temperature range
+ * = 3.9 us at ext temperature range
+ */
+ pdimm->refresh_rate_ps = 7800000;
+ if ((spd->therm_ref_opt & 0x1) && !(spd->therm_ref_opt & 0x2)) {
+ pdimm->refresh_rate_ps = 3900000;
+ pdimm->extended_op_srt = 1;
+ }
+
+ /*
+ * min four active window delay time
+ * eg: tfaw_min =
+ * DDR3-800(1KB page) 320 MTB (40ns)
+ * DDR3-1066(1KB page) 300 MTB (37.5ns)
+ * DDR3-1333(1KB page) 240 MTB (30ns)
+ * DDR3-1600(1KB page) 240 MTB (30ns)
+ */
+ pdimm->tfaw_ps = (((spd->tfaw_msb & 0xf) << 8) | spd->tfaw_min)
+ * mtb_ps;
+
+ return 0;
+}
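Putting the timebase arithmetic above together with illustrative SPD bytes: mtb_dividend = 1 and mtb_divisor = 8 give mtb_ps = (1 * 1000) / 8 = 125 ps; ftb_div = 0x11 gives ftb_10th_ps = (1 * 10) / 1 = 10, i.e. a 1.0 ps fine timebase; tck_min = 10 MTB with fine_tck_min = 0 then yields tckmin_x_ps = 10 * 125 = 1250 ps, the DDR3-1600 case listed in the comment above.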
diff --git a/drivers/ddr/fsl/ddr4_dimm_params.c b/drivers/ddr/fsl/ddr4_dimm_params.c
new file mode 100644
index 0000000000..f39b6e2853
--- /dev/null
+++ b/drivers/ddr/fsl/ddr4_dimm_params.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP Semiconductor
+ *
+ * Calculate the organization and timing parameters
+ * from DDR4 SPD; please refer to the spec
+ * JEDEC standard No. 21-C 4_01_02_12R23A.pdf
+ */
+
+#include <common.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include "fsl_ddr.h"
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * Total DIMM size =
+ * sdram capacity(bit) / 8 * primary bus width / sdram width
+ * * Logical Ranks per DIMM
+ *
+ * where: sdram capacity = spd byte4[3:0]
+ * primary bus width = spd byte13[2:0]
+ * sdram width = spd byte12[2:0]
+ * Logical Ranks per DIMM = spd byte12[5:3] for SDP, DDP, QDP
+ * spd byte12[5:3] * spd byte6[6:4] for 3DS
+ *
+ * To simplify each rank size = total DIMM size / Number of Package Ranks
+ * where Number of Package Ranks = spd byte12[5:3]
+ *
+ * SPD byte4 - sdram density and banks
+ * bit[3:0] size(bit) size(byte)
+ * 0000 256Mb 32MB
+ * 0001 512Mb 64MB
+ * 0010 1Gb 128MB
+ * 0011 2Gb 256MB
+ * 0100 4Gb 512MB
+ * 0101 8Gb 1GB
+ * 0110 16Gb 2GB
+ * 0111 32Gb 4GB
+ *
+ * SPD byte13 - module memory bus width
+ * bit[2:0] primary bus width
+ * 000 8bits
+ * 001 16bits
+ * 010 32bits
+ * 011 64bits
+ *
+ * SPD byte12 - module organization
+ * bit[2:0] sdram device width
+ * 000 4bits
+ * 001 8bits
+ * 010 16bits
+ * 011 32bits
+ *
+ * SPD byte12 - module organization
+ * bit[5:3] number of package ranks per DIMM
+ * 000 1
+ * 001 2
+ * 010 3
+ * 011 4
+ *
+ * SPD byte6 - SDRAM package type
+ * bit[6:4] Die count
+ * 000 1
+ * 001 2
+ * 010 3
+ * 011 4
+ * 100 5
+ * 101 6
+ * 110 7
+ * 111 8
+ *
+ * SPD byte6 - SDRAM package type
+ * bit[1:0] Signal loading
+ * 00 Not specified
+ * 01 Multi load stack
+ * 10 Single load stack (3DS)
+ * 11 Reserved
+ */
+static unsigned long long
+compute_ranksize(const struct ddr4_spd_eeprom *spd)
+{
+ unsigned long long bsize;
+
+ int nbit_sdram_cap_bsize = 0;
+ int nbit_primary_bus_width = 0;
+ int nbit_sdram_width = 0;
+ int die_count = 0;
+ bool package_3ds;
+
+ if ((spd->density_banks & 0xf) <= 7)
+ nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
+ if ((spd->bus_width & 0x7) < 4)
+ nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
+ if ((spd->organization & 0x7) < 4)
+ nbit_sdram_width = (spd->organization & 0x7) + 2;
+ package_3ds = (spd->package_type & 0x3) == 0x2;
+ if ((spd->package_type & 0x80) && !package_3ds) { /* other than 3DS */
+ printf("Warning: not supported SDRAM package type\n");
+ return 0;
+ }
+ if (package_3ds)
+ die_count = (spd->package_type >> 4) & 0x7;
+
+ bsize = 1ULL << (nbit_sdram_cap_bsize - 3 +
+ nbit_primary_bus_width - nbit_sdram_width +
+ die_count);
+
+ debug("DDR: DDR rank density = 0x%16llx\n", bsize);
+
+ return bsize;
+}
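Worked example of the rank-size exponent above: density_banks = 0x5 (8 Gb, 5 + 28 = 33), a 64-bit primary bus (bus_width = 0x3, 3 + 3 = 6) and x4 devices (organization & 0x7 = 0, 0 + 2 = 2) give 1 << (33 - 3 + 6 - 2) = 16 GiB per rank; for a 3DS package the die-count field from SPD byte 6 is additionally added to the exponent.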
+
+#define spd_to_ps(mtb, ftb) \
+ (mtb * pdimm->mtb_ps + (ftb * pdimm->ftb_10th_ps) / 10)
+/*
+ * ddr4_compute_dimm_parameters for DDR4 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the struct dimm_params structure pointed to by pdimm.
+ *
+ */
+unsigned int ddr4_compute_dimm_parameters(struct fsl_ddr_controller *c,
+ const struct ddr4_spd_eeprom *spd,
+ struct dimm_params *pdimm)
+{
+ int ret;
+ int i;
+ const u8 udimm_rc_e_dq[18] = {
+ 0x0c, 0x2c, 0x15, 0x35, 0x15, 0x35, 0x0b, 0x2c, 0x15,
+ 0x35, 0x0b, 0x35, 0x0b, 0x2c, 0x0b, 0x35, 0x15, 0x36
+ };
+ int spd_error = 0;
+ u8 *ptr;
+ u8 val;
+
+ ret = ddr4_spd_check(spd);
+ if (ret) {
+ printf("DIMM: failed checksum\n");
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ if ((spd->info_size_crc & 0xF) > 2)
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
+ pdimm->rank_density = compute_ranksize(spd);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->die_density = spd->density_banks & 0xf;
+ pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
+ if ((spd->bus_width >> 3) & 0x3)
+ pdimm->ec_sdram_width = 8;
+ else
+ pdimm->ec_sdram_width = 0;
+ pdimm->data_width = pdimm->primary_sdram_width
+ + pdimm->ec_sdram_width;
+ pdimm->device_width = 1 << ((spd->organization & 0x7) + 2);
+ pdimm->package_3ds = (spd->package_type & 0x3) == 0x2 ?
+ (spd->package_type >> 4) & 0x7 : 0;
+
+ /* These are the types defined by the JEDEC SPD spec */
+ pdimm->mirrored_dimm = 0;
+ pdimm->registered_dimm = 0;
+ switch (spd->module_type & DDR4_SPD_MODULETYPE_MASK) {
+ case DDR4_SPD_MODULETYPE_RDIMM:
+ /* Registered/buffered DIMMs */
+ pdimm->registered_dimm = 1;
+ if (spd->mod_section.registered.reg_map & 0x1)
+ pdimm->mirrored_dimm = 1;
+ val = spd->mod_section.registered.ca_stren;
+ pdimm->rcw[3] = val >> 4;
+ pdimm->rcw[4] = ((val & 0x3) << 2) | ((val & 0xc) >> 2);
+ val = spd->mod_section.registered.clk_stren;
+ pdimm->rcw[5] = ((val & 0x3) << 2) | ((val & 0xc) >> 2);
+ /* Not all in SPD. For convenience only. Boards may overwrite. */
+ pdimm->rcw[6] = 0xf;
+ /*
+ * A17 only used for 16Gb and above devices.
+ * C[2:0] only used for 3DS.
+ */
+ pdimm->rcw[8] = pdimm->die_density >= 0x6 ? 0x0 : 0x8 |
+ (pdimm->package_3ds > 0x3 ? 0x0 :
+ (pdimm->package_3ds > 0x1 ? 0x1 :
+ (pdimm->package_3ds > 0 ? 0x2 : 0x3)));
+ if (pdimm->package_3ds || pdimm->n_ranks != 4)
+ pdimm->rcw[13] = 0xc;
+ else
+ pdimm->rcw[13] = 0xd; /* Fix encoded by board */
+
+ break;
+
+ case DDR4_SPD_MODULETYPE_UDIMM:
+ case DDR4_SPD_MODULETYPE_SO_DIMM:
+ /* Unbuffered DIMMs */
+ if (spd->mod_section.unbuffered.addr_mapping & 0x1)
+ pdimm->mirrored_dimm = 1;
+ if ((spd->mod_section.unbuffered.mod_height & 0xe0) == 0 &&
+ (spd->mod_section.unbuffered.ref_raw_card == 0x04)) {
+ /* Fix SPD error found on DIMMs with raw card E0 */
+ for (i = 0; i < 18; i++) {
+ if (spd->mapping[i] == udimm_rc_e_dq[i])
+ continue;
+ spd_error = 1;
+ debug("SPD byte %d: 0x%x, should be 0x%x\n",
+ 60 + i, spd->mapping[i],
+ udimm_rc_e_dq[i]);
+ ptr = (u8 *)&spd->mapping[i];
+ *ptr = udimm_rc_e_dq[i];
+ }
+ if (spd_error)
+ printf("SPD DQ mapping error fixed\n");
+ }
+ break;
+
+ default:
+ printf("unknown module_type 0x%02X\n", spd->module_type);
+ return 1;
+ }
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
+ pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
+ pdimm->bank_addr_bits = (spd->density_banks >> 4) & 0x3;
+ pdimm->bank_group_bits = (spd->density_banks >> 6) & 0x3;
+
+ /*
+ * The SPD spec does not have an ECC bit; we consider the DIMM
+ * ECC-capable when the extension bus exists.
+ */
+ if (pdimm->ec_sdram_width)
+ pdimm->edc_config = 0x02;
+ else
+ pdimm->edc_config = 0x00;
+
+ /*
+ * The SPD spec does not have a burst length byte, but the
+ * DDR4 spec has native BL8 and BC4: BL8 - bit 3, BC4 - bit 2.
+ */
+ pdimm->burst_lengths_bitmask = 0x0c;
+
+ /* MTB - medium timebase
+ * The MTB in the SPD spec is 125ps,
+ *
+ * FTB - fine timebase
+ * use 1/10th of ps as our unit to avoid floating point
+ * eg, 10 for 1ps, 25 for 2.5ps, 50 for 5ps
+ */
+ if ((spd->timebases & 0xf) == 0x0) {
+ pdimm->mtb_ps = 125;
+ pdimm->ftb_10th_ps = 10;
+ } else {
+ printf("Unknown Timebases\n");
+ }
+
+ /* sdram minimum cycle time */
+ pdimm->tckmin_x_ps = spd_to_ps(spd->tck_min, spd->fine_tck_min);
+
+ /* sdram max cycle time */
+ pdimm->tckmax_ps = spd_to_ps(spd->tck_max, spd->fine_tck_max);
+
+ /*
+ * CAS latency supported
+ * bit0 - CL7
+ * bit4 - CL11
+ * bit8 - CL15
+ * bit12- CL19
+ * bit16- CL23
+ */
+ pdimm->caslat_x = (spd->caslat_b1 << 7) |
+ (spd->caslat_b2 << 15) |
+ (spd->caslat_b3 << 23);
+
+ BUG_ON(spd->caslat_b4 != 0);
+
+ /*
+ * min CAS latency time
+ */
+ pdimm->taa_ps = spd_to_ps(spd->taa_min, spd->fine_taa_min);
+
+ /*
+ * min RAS to CAS delay time
+ */
+ pdimm->trcd_ps = spd_to_ps(spd->trcd_min, spd->fine_trcd_min);
+
+ /*
+ * Min Row Precharge Delay Time
+ */
+ pdimm->trp_ps = spd_to_ps(spd->trp_min, spd->fine_trp_min);
+
+ /* min active to precharge delay time */
+ pdimm->tras_ps = (((spd->tras_trc_ext & 0xf) << 8) +
+ spd->tras_min_lsb) * pdimm->mtb_ps;
+
+ /* min active to active/refresh delay time */
+ pdimm->trc_ps = spd_to_ps((((spd->tras_trc_ext & 0xf0) << 4) +
+ spd->trc_min_lsb), spd->fine_trc_min);
+ /* Min Refresh Recovery Delay Time */
+ pdimm->trfc1_ps = ((spd->trfc1_min_msb << 8) | (spd->trfc1_min_lsb)) *
+ pdimm->mtb_ps;
+ pdimm->trfc2_ps = ((spd->trfc2_min_msb << 8) | (spd->trfc2_min_lsb)) *
+ pdimm->mtb_ps;
+ pdimm->trfc4_ps = ((spd->trfc4_min_msb << 8) | (spd->trfc4_min_lsb)) *
+ pdimm->mtb_ps;
+ /* min four active window delay time */
+ pdimm->tfaw_ps = (((spd->tfaw_msb & 0xf) << 8) | spd->tfaw_min) *
+ pdimm->mtb_ps;
+
+ /* min row active to row active delay time, different bank group */
+ pdimm->trrds_ps = spd_to_ps(spd->trrds_min, spd->fine_trrds_min);
+ /* min row active to row active delay time, same bank group */
+ pdimm->trrdl_ps = spd_to_ps(spd->trrdl_min, spd->fine_trrdl_min);
+ /* min CAS to CAS Delay Time (tCCD_Lmin), same bank group */
+ pdimm->tccdl_ps = spd_to_ps(spd->tccdl_min, spd->fine_tccdl_min);
+
+ if (pdimm->package_3ds) {
+ if (pdimm->die_density <= 0x4) {
+ pdimm->trfc_slr_ps = 260000;
+ } else if (pdimm->die_density <= 0x5) {
+ pdimm->trfc_slr_ps = 350000;
+ } else {
+ printf("WARN: Unsupported logical rank density 0x%x\n",
+ pdimm->die_density);
+ }
+ }
+
+ /*
+ * Average periodic refresh interval
+ * tREFI = 7.8 us at normal temperature range
+ */
+ pdimm->refresh_rate_ps = 7800000;
+
+ for (i = 0; i < 18; i++)
+ pdimm->dq_mapping[i] = spd->mapping[i];
+
+ pdimm->dq_mapping_ors = ((spd->mapping[0] >> 6) & 0x3) == 0 ? 1 : 0;
+
+ return 0;
+}
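The spd_to_ps() macro used throughout the function above adds the fine-timebase correction (a signed SPD byte, 1 ps per unit with the DDR4 timebases) to the medium-timebase count (125 ps per unit). A standalone sketch of that arithmetic with illustrative SPD values (the helper name is made up for the example):

#include <stdio.h>

/* same arithmetic as spd_to_ps() above, with mtb_ps = 125 and ftb_10th_ps = 10 */
static int spd_to_ps_example(int mtb, int ftb)
{
        return mtb * 125 + (ftb * 10) / 10;
}

int main(void)
{
        /* tCKmin = 10 MTB, no fine correction -> 1250 ps (DDR4-1600) */
        printf("%d ps\n", spd_to_ps_example(10, 0));
        /* tAAmin = 110 MTB with a -10 ps fine correction -> 13740 ps */
        printf("%d ps\n", spd_to_ps_example(110, -10));
        return 0;
}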
diff --git a/drivers/ddr/fsl/fsl_ddr.h b/drivers/ddr/fsl/fsl_ddr.h
new file mode 100644
index 0000000000..ee6069d812
--- /dev/null
+++ b/drivers/ddr/fsl/fsl_ddr.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2008-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP Semiconductor
+ */
+
+#ifndef FSL_DDR_H
+#define FSL_DDR_H
+
+#include <ddr_spd.h>
+#include <soc/fsl/fsl_immap.h>
+
+#define DDR_BL4 4 /* burst length 4 */
+#define DDR_BC4 DDR_BL4 /* burst chop for ddr3 */
+#define DDR_OTF 6 /* on-the-fly BC4 and BL8 */
+#define DDR_BL8 8 /* burst length 8 */
+
+#define DDR3_RTT_OFF 0
+#define DDR3_RTT_60_OHM 1 /* RTT_Nom = RZQ/4 */
+#define DDR3_RTT_120_OHM 2 /* RTT_Nom = RZQ/2 */
+#define DDR3_RTT_40_OHM 3 /* RTT_Nom = RZQ/6 */
+#define DDR3_RTT_20_OHM 4 /* RTT_Nom = RZQ/12 */
+#define DDR3_RTT_30_OHM 5 /* RTT_Nom = RZQ/8 */
+
+#define DDR4_RTT_OFF 0
+#define DDR4_RTT_60_OHM 1 /* RZQ/4 */
+#define DDR4_RTT_120_OHM 2 /* RZQ/2 */
+#define DDR4_RTT_40_OHM 3 /* RZQ/6 */
+#define DDR4_RTT_240_OHM 4 /* RZQ/1 */
+#define DDR4_RTT_48_OHM 5 /* RZQ/5 */
+#define DDR4_RTT_80_OHM 6 /* RZQ/3 */
+#define DDR4_RTT_34_OHM 7 /* RZQ/7 */
+
+#define DDR2_RTT_OFF 0
+#define DDR2_RTT_75_OHM 1
+#define DDR2_RTT_150_OHM 2
+#define DDR2_RTT_50_OHM 3
+
+#define FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR1 1
+#define FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR2 3
+
+#define FSL_DDR_ODT_NEVER 0x0
+#define FSL_DDR_ODT_CS 0x1
+#define FSL_DDR_ODT_ALL_OTHER_CS 0x2
+#define FSL_DDR_ODT_OTHER_DIMM 0x3
+#define FSL_DDR_ODT_ALL 0x4
+#define FSL_DDR_ODT_SAME_DIMM 0x5
+#define FSL_DDR_ODT_CS_AND_OTHER_DIMM 0x6
+#define FSL_DDR_ODT_OTHER_CS_ONSAMEDIMM 0x7
+
+/* define bank(chip select) interleaving mode */
+#define FSL_DDR_CS0_CS1 0x40
+#define FSL_DDR_CS2_CS3 0x20
+#define FSL_DDR_CS0_CS1_AND_CS2_CS3 (FSL_DDR_CS0_CS1 | FSL_DDR_CS2_CS3)
+#define FSL_DDR_CS0_CS1_CS2_CS3 (FSL_DDR_CS0_CS1_AND_CS2_CS3 | 0x04)
+
+/* define memory controller interleaving mode */
+#define FSL_DDR_CACHE_LINE_INTERLEAVING 0x0
+#define FSL_DDR_PAGE_INTERLEAVING 0x1
+#define FSL_DDR_BANK_INTERLEAVING 0x2
+#define FSL_DDR_SUPERBANK_INTERLEAVING 0x3
+#define FSL_DDR_256B_INTERLEAVING 0x8
+#define FSL_DDR_3WAY_1KB_INTERLEAVING 0xA
+#define FSL_DDR_3WAY_4KB_INTERLEAVING 0xC
+#define FSL_DDR_3WAY_8KB_INTERLEAVING 0xD
+/* placeholder for 4-way interleaving */
+#define FSL_DDR_4WAY_1KB_INTERLEAVING 0x1A
+#define FSL_DDR_4WAY_4KB_INTERLEAVING 0x1C
+#define FSL_DDR_4WAY_8KB_INTERLEAVING 0x1D
+
+#define SDRAM_CS_CONFIG_EN 0x80000000
+
+/* DDR_SDRAM_CFG - DDR SDRAM Control Configuration
+ */
+#define SDRAM_CFG_MEM_EN 0x80000000
+#define SDRAM_CFG_SREN 0x40000000
+#define SDRAM_CFG_ECC_EN 0x20000000
+#define SDRAM_CFG_RD_EN 0x10000000
+#define SDRAM_CFG_SDRAM_TYPE_DDR1 0x02000000
+#define SDRAM_CFG_SDRAM_TYPE_DDR2 0x03000000
+#define SDRAM_CFG_SDRAM_TYPE_MASK 0x07000000
+#define SDRAM_CFG_SDRAM_TYPE_SHIFT 24
+#define SDRAM_CFG_DYN_PWR 0x00200000
+#define SDRAM_CFG_DBW_MASK 0x00180000
+#define SDRAM_CFG_DBW_SHIFT 19
+#define SDRAM_CFG_32_BE 0x00080000
+#define SDRAM_CFG_16_BE 0x00100000
+#define SDRAM_CFG_8_BE 0x00040000
+#define SDRAM_CFG_NCAP 0x00020000
+#define SDRAM_CFG_2T_EN 0x00008000
+#define SDRAM_CFG_BI 0x00000001
+
+#define SDRAM_CFG2_FRC_SR 0x80000000
+#define SDRAM_CFG2_D_INIT 0x00000010
+#define SDRAM_CFG2_AP_EN 0x00000020
+#define SDRAM_CFG2_ODT_CFG_MASK 0x00600000
+#define SDRAM_CFG2_ODT_NEVER 0
+#define SDRAM_CFG2_ODT_ONLY_WRITE 1
+#define SDRAM_CFG2_ODT_ONLY_READ 2
+#define SDRAM_CFG2_ODT_ALWAYS 3
+
+#define SDRAM_INTERVAL_BSTOPRE 0x3FFF
+#define TIMING_CFG_2_CPO_MASK 0x0F800000
+
+#define RD_TO_PRE_MASK 0xf
+#define RD_TO_PRE_SHIFT 13
+#define WR_DATA_DELAY_MASK 0xf
+#define WR_DATA_DELAY_SHIFT 9
+
+/* DDR_EOR register */
+#define DDR_EOR_RD_REOD_DIS 0x07000000
+#define DDR_EOR_WD_REOD_DIS 0x00100000
+
+/* DDR_MD_CNTL */
+#define MD_CNTL_MD_EN 0x80000000
+#define MD_CNTL_CS_SEL_CS0 0x00000000
+#define MD_CNTL_CS_SEL_CS1 0x10000000
+#define MD_CNTL_CS_SEL_CS2 0x20000000
+#define MD_CNTL_CS_SEL_CS3 0x30000000
+#define MD_CNTL_CS_SEL_CS0_CS1 0x40000000
+#define MD_CNTL_CS_SEL_CS2_CS3 0x50000000
+#define MD_CNTL_MD_SEL_MR 0x00000000
+#define MD_CNTL_MD_SEL_EMR 0x01000000
+#define MD_CNTL_MD_SEL_EMR2 0x02000000
+#define MD_CNTL_MD_SEL_EMR3 0x03000000
+#define MD_CNTL_SET_REF 0x00800000
+#define MD_CNTL_SET_PRE 0x00400000
+#define MD_CNTL_CKE_CNTL_LOW 0x00100000
+#define MD_CNTL_CKE_CNTL_HIGH 0x00200000
+#define MD_CNTL_WRCW 0x00080000
+#define MD_CNTL_MD_VALUE(x) (x & 0x0000FFFF)
+#define MD_CNTL_CS_SEL(x) (((x) & 0x7) << 28)
+#define MD_CNTL_MD_SEL(x) (((x) & 0xf) << 24)
+
+/* DDR_CDR1 */
+#define DDR_CDR1_DHC_EN 0x80000000
+#define DDR_CDR1_V0PT9_EN 0x40000000
+#define DDR_CDR1_ODT_SHIFT 17
+#define DDR_CDR1_ODT_MASK 0x6
+#define DDR_CDR2_ODT_MASK 0x1
+#define DDR_CDR1_ODT(x) ((x & DDR_CDR1_ODT_MASK) << DDR_CDR1_ODT_SHIFT)
+#define DDR_CDR2_ODT(x) (x & DDR_CDR2_ODT_MASK)
+#define DDR_CDR2_VREF_OVRD(x) (0x00008080 | ((((x) - 37) & 0x3F) << 8))
+#define DDR_CDR2_VREF_TRAIN_EN 0x00000080
+#define DDR_CDR2_VREF_RANGE_2 0x00000040
+
+/* DDR ERR_DISABLE */
+#define DDR_ERR_DISABLE_APED (1 << 8) /* Address parity error disable */
+
+/* Mode Registers */
+#define DDR_MR5_CA_PARITY_LAT_4_CLK 0x1 /* for DDR4-1600/1866/2133 */
+#define DDR_MR5_CA_PARITY_LAT_5_CLK 0x2 /* for DDR4-2400 */
+
+/* DEBUG_26 register */
+#define DDR_CAS_TO_PRE_SUB_MASK 0x0000f000 /* CAS to preamble subtract value */
+#define DDR_CAS_TO_PRE_SUB_SHIFT 12
+
+/* DEBUG_29 register */
+#define DDR_TX_BD_DIS (1 << 10) /* Transmit Bit Deskew Disable */
+
+static inline int is_ddr1(const memctl_options_t *popts)
+{
+ return IS_ENABLED(CONFIG_DDR_FSL_DDR1) &&
+ popts->ddrtype == SDRAM_TYPE_DDR1;
+}
+
+static inline int is_ddr2(const memctl_options_t *popts)
+{
+ return IS_ENABLED(CONFIG_DDR_FSL_DDR2) &&
+ popts->ddrtype == SDRAM_TYPE_DDR2;
+}
+
+static inline int is_ddr3(const memctl_options_t *popts)
+{
+ return IS_ENABLED(CONFIG_DDR_FSL_DDR3) &&
+ popts->ddrtype == SDRAM_TYPE_DDR3;
+}
+
+static inline int is_ddr4(const memctl_options_t *popts)
+{
+ return IS_ENABLED(CONFIG_DDR_FSL_DDR4) &&
+ popts->ddrtype == SDRAM_TYPE_DDR4;
+}
+
+static inline int is_ddr3_4(const memctl_options_t *popts)
+{
+ return is_ddr3(popts) || is_ddr4(popts);
+}
+
+struct fsl_ddr_info;
+
+phys_size_t fsl_ddr_sdram(struct fsl_ddr_info *pinfo);
+u32 fsl_ddr_get_intl3r(void);
+
+void board_mem_sleep_setup(void);
+static inline bool is_warm_boot(void)
+{
+ return false;
+}
+
+int fsl_dp_resume(void);
+
+struct fsl_ddr_controller;
+
+u32 fsl_ddr_get_version(struct fsl_ddr_controller *c);
+
+unsigned int ddr1_compute_dimm_parameters(struct fsl_ddr_controller *c,
+ const struct ddr1_spd_eeprom *spd,
+ struct dimm_params *pdimm);
+unsigned int ddr2_compute_dimm_parameters(struct fsl_ddr_controller *c,
+ const struct ddr2_spd_eeprom *spd,
+ struct dimm_params *pdimm);
+unsigned int ddr3_compute_dimm_parameters(struct fsl_ddr_controller *c,
+ const struct ddr3_spd_eeprom *spd,
+ struct dimm_params *pdimm);
+unsigned int ddr4_compute_dimm_parameters(struct fsl_ddr_controller *c,
+ const struct ddr4_spd_eeprom *spd,
+ struct dimm_params *pdimm);
+void fsl_ddr_set_intl3r(const unsigned int granule_size);
+
+unsigned int compute_fsl_memctl_config_regs(struct fsl_ddr_controller *c);
+unsigned int compute_lowest_common_dimm_parameters(struct fsl_ddr_controller *c);
+unsigned int populate_memctl_options(struct fsl_ddr_controller *c);
+void check_interleaving_options(struct fsl_ddr_info *pinfo);
+
+unsigned int mclk_to_picos(struct fsl_ddr_controller *c, unsigned int mclk);
+unsigned int get_memory_clk_period_ps(struct fsl_ddr_controller *c);
+unsigned int picos_to_mclk(struct fsl_ddr_controller *c, unsigned int picos);
+
+void fsl_ddr_set_memctl_regs(struct fsl_ddr_controller *c, int step);
+
+void erratum_a009942_check_cpo(void);
+
+#endif
diff --git a/drivers/ddr/fsl/fsl_ddr_gen4.c b/drivers/ddr/fsl/fsl_ddr_gen4.c
new file mode 100644
index 0000000000..ac68e4ff03
--- /dev/null
+++ b/drivers/ddr/fsl/fsl_ddr_gen4.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <soc/fsl/fsl_immap.h>
+#include <soc/fsl/immap_lsch2.h>
+#include <asm/system.h>
+#include "fsl_ddr.h"
+
+#define CTLR_INTLV_MASK 0x20000000
+
+static void set_wait_for_bits_clear(void *ptr, u32 value, u32 bits)
+{
+ int timeout = 1000;
+
+ ddr_out32(ptr, value);
+
+ while (ddr_in32(ptr) & bits) {
+ udelay(100);
+ timeout--;
+ }
+ if (timeout <= 0)
+ printf("Error: wait for clear timeout.\n");
+}
+
+/*
+ * regs has the to-be-set values for DDR controller registers
+ * c is the DDR controller to program
+ * step: 0 goes through the initialization in one pass
+ * 1 sets registers and returns before enabling controller
+ * 2 resumes from step 1 and continues to initialize
+ * The initialization is split into two steps so the DDR reset signal
+ * can be deasserted in compliance with JEDEC specs for RDIMMs.
+ */
+void fsl_ddr_set_memctl_regs(struct fsl_ddr_controller *c, int step)
+{
+ struct ccsr_ddr __iomem *ddr = c->base;
+ const fsl_ddr_cfg_regs_t *regs = &c->fsl_ddr_config_reg;
+ unsigned int i, bus_width;
+ u32 temp32;
+ u32 total_gb_size_per_controller;
+ int timeout;
+ int mod_bnds = 0;
+ u32 mr6;
+ u32 vref_seq1[3] = {0x80, 0x96, 0x16}; /* for range 1 */
+ u32 vref_seq2[3] = {0xc0, 0xf0, 0x70}; /* for range 2 */
+ u32 *vref_seq = vref_seq1;
+ u32 mtcr, err_detect, err_sbe;
+ u32 cs0_bnds, cs1_bnds, cs2_bnds, cs3_bnds, cs0_config;
+ mod_bnds = regs->cs[0].config & CTLR_INTLV_MASK;
+
+ if (step == 2)
+ goto step2;
+
+ /* Set cdr1 first in case 0.9v VDD is enabled for some SoCs */
+ ddr_out32(&ddr->ddr_cdr1, regs->ddr_cdr1);
+
+ if (regs->ddr_eor)
+ ddr_out32(&ddr->eor, regs->ddr_eor);
+
+ ddr_out32(&ddr->sdram_clk_cntl, regs->ddr_sdram_clk_cntl);
+ for (i = 0; i < c->chip_selects_per_ctrl; i++) {
+ if (i == 0) {
+ if (mod_bnds) {
+ debug("modified bnds\n");
+ ddr_out32(&ddr->cs0_bnds,
+ (regs->cs[i].bnds & 0xfffefffe) >> 1);
+ ddr_out32(&ddr->cs0_config,
+ (regs->cs[i].config &
+ ~CTLR_INTLV_MASK));
+ } else {
+ ddr_out32(&ddr->cs0_bnds, regs->cs[i].bnds);
+ ddr_out32(&ddr->cs0_config, regs->cs[i].config);
+ }
+ ddr_out32(&ddr->cs0_config_2, regs->cs[i].config_2);
+
+ } else if (i == 1) {
+ if (mod_bnds) {
+ ddr_out32(&ddr->cs1_bnds,
+ (regs->cs[i].bnds & 0xfffefffe) >> 1);
+ } else {
+ ddr_out32(&ddr->cs1_bnds, regs->cs[i].bnds);
+ }
+ ddr_out32(&ddr->cs1_config, regs->cs[i].config);
+ ddr_out32(&ddr->cs1_config_2, regs->cs[i].config_2);
+
+ } else if (i == 2) {
+ if (mod_bnds) {
+ ddr_out32(&ddr->cs2_bnds,
+ (regs->cs[i].bnds & 0xfffefffe) >> 1);
+ } else {
+ ddr_out32(&ddr->cs2_bnds, regs->cs[i].bnds);
+ }
+ ddr_out32(&ddr->cs2_config, regs->cs[i].config);
+ ddr_out32(&ddr->cs2_config_2, regs->cs[i].config_2);
+
+ } else if (i == 3) {
+ if (mod_bnds) {
+ ddr_out32(&ddr->cs3_bnds,
+ (regs->cs[i].bnds & 0xfffefffe) >> 1);
+ } else {
+ ddr_out32(&ddr->cs3_bnds, regs->cs[i].bnds);
+ }
+ ddr_out32(&ddr->cs3_config, regs->cs[i].config);
+ ddr_out32(&ddr->cs3_config_2, regs->cs[i].config_2);
+ }
+ }
+
+ ddr_out32(&ddr->timing_cfg_3, regs->timing_cfg_3);
+ ddr_out32(&ddr->timing_cfg_0, regs->timing_cfg_0);
+ ddr_out32(&ddr->timing_cfg_1, regs->timing_cfg_1);
+ ddr_out32(&ddr->timing_cfg_2, regs->timing_cfg_2);
+ ddr_out32(&ddr->timing_cfg_4, regs->timing_cfg_4);
+ ddr_out32(&ddr->timing_cfg_5, regs->timing_cfg_5);
+ ddr_out32(&ddr->timing_cfg_6, regs->timing_cfg_6);
+ ddr_out32(&ddr->timing_cfg_7, regs->timing_cfg_7);
+ ddr_out32(&ddr->timing_cfg_8, regs->timing_cfg_8);
+ ddr_out32(&ddr->timing_cfg_9, regs->timing_cfg_9);
+ ddr_out32(&ddr->ddr_zq_cntl, regs->ddr_zq_cntl);
+ ddr_out32(&ddr->dq_map_0, regs->dq_map_0);
+ ddr_out32(&ddr->dq_map_1, regs->dq_map_1);
+ ddr_out32(&ddr->dq_map_2, regs->dq_map_2);
+ ddr_out32(&ddr->dq_map_3, regs->dq_map_3);
+ ddr_out32(&ddr->sdram_cfg_3, regs->ddr_sdram_cfg_3);
+ ddr_out32(&ddr->sdram_mode, regs->ddr_sdram_mode);
+ ddr_out32(&ddr->sdram_mode_2, regs->ddr_sdram_mode_2);
+ ddr_out32(&ddr->sdram_mode_3, regs->ddr_sdram_mode_3);
+ ddr_out32(&ddr->sdram_mode_4, regs->ddr_sdram_mode_4);
+ ddr_out32(&ddr->sdram_mode_5, regs->ddr_sdram_mode_5);
+ ddr_out32(&ddr->sdram_mode_6, regs->ddr_sdram_mode_6);
+ ddr_out32(&ddr->sdram_mode_7, regs->ddr_sdram_mode_7);
+ ddr_out32(&ddr->sdram_mode_8, regs->ddr_sdram_mode_8);
+ ddr_out32(&ddr->sdram_mode_9, regs->ddr_sdram_mode_9);
+ ddr_out32(&ddr->sdram_mode_10, regs->ddr_sdram_mode_10);
+ ddr_out32(&ddr->sdram_mode_11, regs->ddr_sdram_mode_11);
+ ddr_out32(&ddr->sdram_mode_12, regs->ddr_sdram_mode_12);
+ ddr_out32(&ddr->sdram_mode_13, regs->ddr_sdram_mode_13);
+ ddr_out32(&ddr->sdram_mode_14, regs->ddr_sdram_mode_14);
+ ddr_out32(&ddr->sdram_mode_15, regs->ddr_sdram_mode_15);
+ ddr_out32(&ddr->sdram_mode_16, regs->ddr_sdram_mode_16);
+ ddr_out32(&ddr->sdram_md_cntl, regs->ddr_sdram_md_cntl);
+
+ if (c->erratum_A009663)
+ ddr_out32(&ddr->sdram_interval,
+ regs->ddr_sdram_interval & ~SDRAM_INTERVAL_BSTOPRE);
+ else
+ ddr_out32(&ddr->sdram_interval, regs->ddr_sdram_interval);
+
+ ddr_out32(&ddr->sdram_data_init, regs->ddr_data_init);
+ ddr_out32(&ddr->ddr_wrlvl_cntl, regs->ddr_wrlvl_cntl);
+ if (regs->ddr_wrlvl_cntl_2)
+ ddr_out32(&ddr->ddr_wrlvl_cntl_2, regs->ddr_wrlvl_cntl_2);
+ if (regs->ddr_wrlvl_cntl_3)
+ ddr_out32(&ddr->ddr_wrlvl_cntl_3, regs->ddr_wrlvl_cntl_3);
+
+ ddr_out32(&ddr->ddr_sr_cntr, regs->ddr_sr_cntr);
+ ddr_out32(&ddr->ddr_sdram_rcw_1, regs->ddr_sdram_rcw_1);
+ ddr_out32(&ddr->ddr_sdram_rcw_2, regs->ddr_sdram_rcw_2);
+ ddr_out32(&ddr->ddr_sdram_rcw_3, regs->ddr_sdram_rcw_3);
+ ddr_out32(&ddr->ddr_sdram_rcw_4, regs->ddr_sdram_rcw_4);
+ ddr_out32(&ddr->ddr_sdram_rcw_5, regs->ddr_sdram_rcw_5);
+ ddr_out32(&ddr->ddr_sdram_rcw_6, regs->ddr_sdram_rcw_6);
+
+ if (is_warm_boot()) {
+ ddr_out32(&ddr->sdram_cfg_2,
+ regs->ddr_sdram_cfg_2 & ~SDRAM_CFG2_D_INIT);
+ ddr_out32(&ddr->init_addr, 0x80000000); /* FIXME */
+ ddr_out32(&ddr->init_ext_addr, DDR_INIT_ADDR_EXT_UIA);
+
+ /* DRAM VRef will not be trained */
+ ddr_out32(&ddr->ddr_cdr2,
+ regs->ddr_cdr2 & ~DDR_CDR2_VREF_TRAIN_EN);
+ } else {
+ ddr_out32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2);
+ ddr_out32(&ddr->init_addr, regs->ddr_init_addr);
+ ddr_out32(&ddr->init_ext_addr, regs->ddr_init_ext_addr);
+ ddr_out32(&ddr->ddr_cdr2, regs->ddr_cdr2);
+ }
+
+ /* part 1 of 2 */
+ if (c->erratum_A009803) {
+ if (regs->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN) {
+ if (regs->ddr_sdram_cfg & SDRAM_CFG_RD_EN) { /* for RDIMM */
+ ddr_out32(&ddr->ddr_sdram_rcw_2,
+ regs->ddr_sdram_rcw_2 & ~0xf0);
+ }
+ ddr_out32(&ddr->err_disable, regs->err_disable |
+ DDR_ERR_DISABLE_APED);
+ }
+ } else {
+ ddr_out32(&ddr->err_disable, regs->err_disable);
+ }
+ ddr_out32(&ddr->err_int_en, regs->err_int_en);
+ for (i = 0; i < 64; i++) {
+ if (regs->debug[i]) {
+ debug("Write to debug_%d as %08x\n",
+ i+1, regs->debug[i]);
+ ddr_out32(&ddr->debug[i], regs->debug[i]);
+ }
+ }
+
+ if (c->erratum_A008511) {
+ /* Part 1 of 2 */
+ if (fsl_ddr_get_version(c) == 0x50200) {
+ /* Disable DRAM VRef training */
+ ddr_out32(&ddr->ddr_cdr2,
+ regs->ddr_cdr2 & ~DDR_CDR2_VREF_TRAIN_EN);
+ /* disable transmit bit deskew */
+ temp32 = ddr_in32(&ddr->debug[28]);
+ temp32 |= DDR_TX_BD_DIS;
+ ddr_out32(&ddr->debug[28], temp32);
+ ddr_out32(&ddr->debug[25], 0x9000);
+ } else if (fsl_ddr_get_version(c) == 0x50201) {
+ /* Output enable forced off */
+ ddr_out32(&ddr->debug[37], 1 << 31);
+ /* Enable Vref training */
+ ddr_out32(&ddr->ddr_cdr2,
+ regs->ddr_cdr2 | DDR_CDR2_VREF_TRAIN_EN);
+ } else {
+ debug("Erratum A008511 doesn't apply.\n");
+ }
+ }
+
+ if (c->erratum_A009803 || c->erratum_A008511)
+ /* Disable D_INIT */
+ ddr_out32(&ddr->sdram_cfg_2,
+ regs->ddr_sdram_cfg_2 & ~SDRAM_CFG2_D_INIT);
+
+ if (c->erratum_A009801) {
+ temp32 = ddr_in32(&ddr->debug[25]);
+ temp32 &= ~DDR_CAS_TO_PRE_SUB_MASK;
+ temp32 |= 9 << DDR_CAS_TO_PRE_SUB_SHIFT;
+ ddr_out32(&ddr->debug[25], temp32);
+ }
+
+ if (c->erratum_A010165) {
+ temp32 = c->ddr_freq / 1000000;
+ if ((temp32 > 1900) && (temp32 < 2300)) {
+ temp32 = ddr_in32(&ddr->debug[28]);
+ ddr_out32(&ddr->debug[28], temp32 | 0x000a0000);
+ }
+ }
+
+ /*
+ * For RDIMMs, JEDEC spec requires clocks to be stable before reset is
+ * deasserted. Clocks start when any chip select is enabled and clock
+ * control register is set. Because all DDR components are connected to
+ * one reset signal, this needs to be done in two steps. Step 1 is to
+ * get the clocks started. Step 2 resumes after reset signal is
+ * deasserted.
+ */
+ if (step == 1) {
+ udelay(200);
+ return;
+ }
+
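+	/*
+	 * When the board code deasserts reset it calls this function again
+	 * with step == 2 (see fsl_ddr_sdram() in main.c), and execution is
+	 * expected to resume at the step2 label below.
+	 */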
+step2:
+ /* Set, but do not enable the memory */
+ temp32 = regs->ddr_sdram_cfg;
+ temp32 &= ~(SDRAM_CFG_MEM_EN);
+ ddr_out32(&ddr->sdram_cfg, temp32);
+
+ /*
+	 * 500 microseconds must elapse between the DDR clock setup and the
+	 * DDR config enable. The spec requires 200 us for DDR2 and 500 us
+	 * for DDR3, so use the larger value (500 us) in all cases.
+ */
+ udelay(500);
+ dsb();
+ isb();
+
+ if (is_warm_boot()) {
+ /* enter self-refresh */
+ temp32 = ddr_in32(&ddr->sdram_cfg_2);
+ temp32 |= SDRAM_CFG2_FRC_SR;
+ ddr_out32(&ddr->sdram_cfg_2, temp32);
+ /* do board specific memory setup */
+ board_mem_sleep_setup();
+
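+		/* BI: bypass the DRAM initialization to preserve contents */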
+ temp32 = (ddr_in32(&ddr->sdram_cfg) | SDRAM_CFG_BI);
+ } else {
+ temp32 = ddr_in32(&ddr->sdram_cfg) & ~SDRAM_CFG_BI;
+ }
+
+ /* Let the controller go */
+ ddr_out32(&ddr->sdram_cfg, temp32 | SDRAM_CFG_MEM_EN);
+ dsb();
+ isb();
+
+ if (c->erratum_A008511 || c->erratum_A009803) {
+ /* Part 2 of 2 */
+ timeout = 40;
+		/* Wait for idle; D_INIT must already be cleared or this will time out */
+ while (!(ddr_in32(&ddr->debug[1]) & 0x2) && timeout > 0) {
+ udelay(1000);
+ timeout--;
+ }
+ if (timeout <= 0) {
+			printf("Controller %d timeout, debug_2 = %x\n",
+ c->num, ddr_in32(&ddr->debug[1]));
+ }
+ }
+
+ if (c->erratum_A008511) {
+		/* This erratum only applies to version 5.2.0 */
+ if (fsl_ddr_get_version(c) == 0x50200) {
+ /* The vref setting sequence is different for range 2 */
+ if (regs->ddr_cdr2 & DDR_CDR2_VREF_RANGE_2)
+ vref_seq = vref_seq2;
+
+ /* Set VREF */
+ for (i = 0; i < c->chip_selects_per_ctrl; i++) {
+ if (!(regs->cs[i].config & SDRAM_CS_CONFIG_EN))
+ continue;
+
+ mr6 = (regs->ddr_sdram_mode_10 >> 16) |
+ MD_CNTL_MD_EN |
+ MD_CNTL_CS_SEL(i) |
+ MD_CNTL_MD_SEL(6) |
+ 0x00200000;
+ temp32 = mr6 | vref_seq[0];
+ set_wait_for_bits_clear(&ddr->sdram_md_cntl,
+ temp32, MD_CNTL_MD_EN);
+ udelay(1);
+ debug("MR6 = 0x%08x\n", temp32);
+ temp32 = mr6 | vref_seq[1];
+ set_wait_for_bits_clear(&ddr->sdram_md_cntl,
+ temp32, MD_CNTL_MD_EN);
+ udelay(1);
+ debug("MR6 = 0x%08x\n", temp32);
+ temp32 = mr6 | vref_seq[2];
+ set_wait_for_bits_clear(&ddr->sdram_md_cntl,
+ temp32, MD_CNTL_MD_EN);
+ udelay(1);
+ debug("MR6 = 0x%08x\n", temp32);
+ }
+ ddr_out32(&ddr->sdram_md_cntl, 0);
+ temp32 = ddr_in32(&ddr->debug[28]);
+ temp32 &= ~DDR_TX_BD_DIS; /* Enable deskew */
+ ddr_out32(&ddr->debug[28], temp32);
+ ddr_out32(&ddr->debug[1], 0x400); /* restart deskew */
+ /* wait for idle */
+ timeout = 40;
+ while (!(ddr_in32(&ddr->debug[1]) & 0x2) && timeout > 0) {
+ udelay(1000);
+ timeout--;
+ }
+ if (timeout <= 0) {
+				printf("Controller %d timeout, debug_2 = %x\n",
+ c->num, ddr_in32(&ddr->debug[1]));
+ }
+ }
+ }
+
+ if (c->erratum_A009803 && regs->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN) {
+		/* if it's an RDIMM */
+ if (regs->ddr_sdram_cfg & SDRAM_CFG_RD_EN) {
+ for (i = 0; i < c->chip_selects_per_ctrl; i++) {
+ if (!(regs->cs[i].config & SDRAM_CS_CONFIG_EN))
+ continue;
+ set_wait_for_bits_clear(&ddr->sdram_md_cntl,
+ MD_CNTL_MD_EN |
+ MD_CNTL_CS_SEL(i) |
+ 0x070000ed,
+ MD_CNTL_MD_EN);
+ udelay(1);
+ }
+ }
+
+ ddr_out32(&ddr->err_disable,
+ regs->err_disable & ~DDR_ERR_DISABLE_APED);
+ }
+
+ /* Restore D_INIT */
+ ddr_out32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2);
+
+ total_gb_size_per_controller = 0;
+ for (i = 0; i < c->chip_selects_per_ctrl; i++) {
+ if (!(regs->cs[i].config & 0x80000000))
+ continue;
+ total_gb_size_per_controller += 1 << (
+ ((regs->cs[i].config >> 14) & 0x3) + 2 +
+ ((regs->cs[i].config >> 8) & 0x7) + 12 +
+ ((regs->cs[i].config >> 4) & 0x3) + 0 +
+ ((regs->cs[i].config >> 0) & 0x7) + 8 +
+ ((regs->ddr_sdram_cfg_3 >> 4) & 0x3) +
+ 3 - ((regs->ddr_sdram_cfg >> 19) & 0x3) -
+ 26); /* minus 26 (count of 64M) */
+ }
+ /*
+ * total memory / bus width = transactions needed
+ * transactions needed / data rate = seconds
+ * to add plenty of buffer, double the time
+ * For example, 2GB on 666MT/s 64-bit bus takes about 402ms
+ * Let's wait for 800ms
+ */
+ bus_width = 3 - ((ddr_in32(&ddr->sdram_cfg) & SDRAM_CFG_DBW_MASK)
+ >> SDRAM_CFG_DBW_SHIFT);
+ timeout = ((total_gb_size_per_controller << (6 - bus_width)) * 100 /
+ (c->ddr_freq >> 20)) << 2;
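+	/*
+	 * Note: timeout is counted in 10 ms units (the polling loop below
+	 * sleeps 10 ms per iteration), and total_gb_size_per_controller is
+	 * still in 64 MiB units here; the shift below converts it to GiB.
+	 */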
+ total_gb_size_per_controller >>= 4; /* shift down to gb size */
+ debug("total %d GB\n", total_gb_size_per_controller);
+ debug("Need to wait up to %d * 10ms\n", timeout);
+
+ /* Poll DDR_SDRAM_CFG_2[D_INIT] bit until auto-data init is done. */
+ while ((ddr_in32(&ddr->sdram_cfg_2) & SDRAM_CFG2_D_INIT) &&
+ (timeout >= 0)) {
+ udelay(10000); /* throttle polling rate */
+ timeout--;
+ }
+
+ if (timeout <= 0)
+ printf("Waiting for D_INIT timeout. Memory may not work.\n");
+
+ if (mod_bnds) {
+ debug("Reset to original bnds\n");
+ ddr_out32(&ddr->cs0_bnds, regs->cs[0].bnds);
+ ddr_out32(&ddr->cs1_bnds, regs->cs[1].bnds);
+ ddr_out32(&ddr->cs2_bnds, regs->cs[2].bnds);
+ ddr_out32(&ddr->cs3_bnds, regs->cs[3].bnds);
+ ddr_out32(&ddr->cs0_config, regs->cs[0].config);
+ }
+
+ if (c->erratum_A009663)
+ ddr_out32(&ddr->sdram_interval, regs->ddr_sdram_interval);
+
+ if (is_warm_boot()) {
+ /* exit self-refresh */
+ temp32 = ddr_in32(&ddr->sdram_cfg_2);
+ temp32 &= ~SDRAM_CFG2_FRC_SR;
+ ddr_out32(&ddr->sdram_cfg_2, temp32);
+ }
+
+#define BIST_PATTERN1 0xFFFFFFFF
+#define BIST_PATTERN2 0x0
+#define BIST_CR 0x80010000
+#define BIST_CR_EN 0x80000000
+#define BIST_CR_STAT 0x00000001
+	/* Perform the built-in self test (BIST) on memory. Three-way
+	 * interleaving is not yet supported by this code. */
+ if (0) {
+ printf("Running BIST test. This will take a while...");
+ cs0_config = ddr_in32(&ddr->cs0_config);
+ cs0_bnds = ddr_in32(&ddr->cs0_bnds);
+ cs1_bnds = ddr_in32(&ddr->cs1_bnds);
+ cs2_bnds = ddr_in32(&ddr->cs2_bnds);
+ cs3_bnds = ddr_in32(&ddr->cs3_bnds);
+ if (cs0_config & CTLR_INTLV_MASK) {
+ /* set bnds to non-interleaving */
+ ddr_out32(&ddr->cs0_bnds, (cs0_bnds & 0xfffefffe) >> 1);
+ ddr_out32(&ddr->cs1_bnds, (cs1_bnds & 0xfffefffe) >> 1);
+ ddr_out32(&ddr->cs2_bnds, (cs2_bnds & 0xfffefffe) >> 1);
+ ddr_out32(&ddr->cs3_bnds, (cs3_bnds & 0xfffefffe) >> 1);
+ }
+ ddr_out32(&ddr->mtp1, BIST_PATTERN1);
+ ddr_out32(&ddr->mtp2, BIST_PATTERN1);
+ ddr_out32(&ddr->mtp3, BIST_PATTERN2);
+ ddr_out32(&ddr->mtp4, BIST_PATTERN2);
+ ddr_out32(&ddr->mtp5, BIST_PATTERN1);
+ ddr_out32(&ddr->mtp6, BIST_PATTERN1);
+ ddr_out32(&ddr->mtp7, BIST_PATTERN2);
+ ddr_out32(&ddr->mtp8, BIST_PATTERN2);
+ ddr_out32(&ddr->mtp9, BIST_PATTERN1);
+ ddr_out32(&ddr->mtp10, BIST_PATTERN2);
+ mtcr = BIST_CR;
+ ddr_out32(&ddr->mtcr, mtcr);
+ timeout = 100;
+ while (timeout > 0 && (mtcr & BIST_CR_EN)) {
+ mdelay(1000);
+ timeout--;
+ mtcr = ddr_in32(&ddr->mtcr);
+ }
+ if (timeout <= 0)
+ printf("Timeout\n");
+ else
+ printf("Done\n");
+ err_detect = ddr_in32(&ddr->err_detect);
+ err_sbe = ddr_in32(&ddr->err_sbe);
+ if (mtcr & BIST_CR_STAT) {
+ printf("BIST test failed on controller %d.\n",
+ c->num);
+ }
+ if (err_detect || (err_sbe & 0xffff)) {
+ printf("ECC error detected on controller %d.\n",
+ c->num);
+ }
+
+ if (cs0_config & CTLR_INTLV_MASK) {
+ /* restore bnds registers */
+ ddr_out32(&ddr->cs0_bnds, cs0_bnds);
+ ddr_out32(&ddr->cs1_bnds, cs1_bnds);
+ ddr_out32(&ddr->cs2_bnds, cs2_bnds);
+ ddr_out32(&ddr->cs3_bnds, cs3_bnds);
+ }
+ }
+}
diff --git a/drivers/ddr/fsl/lc_common_dimm_params.c b/drivers/ddr/fsl/lc_common_dimm_params.c
new file mode 100644
index 0000000000..2de4cca9cc
--- /dev/null
+++ b/drivers/ddr/fsl/lc_common_dimm_params.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2008-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP Semiconductor
+ */
+
+#include <common.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <linux/log2.h>
+#include "fsl_ddr.h"
+
+static unsigned int
+compute_cas_latency_ddr34(memctl_options_t *popts,
+ struct fsl_ddr_controller *c,
+ const struct dimm_params *dimm_params,
+ struct common_timing_params *outpdimm,
+ unsigned int number_of_dimms)
+{
+ unsigned int i;
+ unsigned int common_caslat;
+ unsigned int caslat_actual;
+ unsigned int retry = 16;
+ unsigned int tmp = ~0;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(c);
+ unsigned int taamax;
+
+ if (is_ddr3(popts))
+ taamax = 20000;
+ else
+ taamax = 18000;
+
+ /* compute the common CAS latency supported between slots */
+ for (i = 0; i < number_of_dimms; i++) {
+ if (dimm_params[i].n_ranks)
+ tmp &= dimm_params[i].caslat_x;
+ }
+ common_caslat = tmp;
+
+	/* check that the memory clock is within the range the DIMMs support */
+ if (mclk_ps < outpdimm->tckmin_x_ps) {
+ printf("DDR clock (MCLK cycle %u ps) is faster than "
+ "the slowest DIMM(s) (tCKmin %u ps) can support.\n",
+ mclk_ps, outpdimm->tckmin_x_ps);
+ }
+
+ if (is_ddr4(popts) && mclk_ps > outpdimm->tckmax_ps) {
+ printf("DDR clock (MCLK cycle %u ps) is slower than DIMM(s) (tCKmax %u ps) can support.\n",
+ mclk_ps, outpdimm->tckmax_ps);
+ }
+
+	/* determine the actual CAS latency */
+ caslat_actual = (outpdimm->taamin_ps + mclk_ps - 1) / mclk_ps;
+ /* check if the dimms support the CAS latency */
+ while (!(common_caslat & (1 << caslat_actual)) && retry > 0) {
+ caslat_actual++;
+ retry--;
+ }
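+	/*
+	 * Illustrative example: with taamin_ps = 13750 and mclk_ps = 1250
+	 * (DDR3-1600), the starting value is (13750 + 1250 - 1) / 1250 = 11,
+	 * which is then bumped up until the common caslat mask supports it.
+	 */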
+	/* Once the calculation of caslat_actual is completed,
+	 * we must verify that this CAS latency value does not
+	 * exceed tAAmax, which is 20 ns for all DDR3 speed grades
+	 * and 18 ns for all DDR4 speed grades.
+	 */
+ if (caslat_actual * mclk_ps > taamax) {
+ printf("The chosen cas latency %d is too large\n",
+ caslat_actual);
+ }
+ outpdimm->lowest_common_spd_caslat = caslat_actual;
+ debug("lowest_common_spd_caslat is 0x%x\n", caslat_actual);
+
+ return 0;
+}
+
+static unsigned int
+compute_cas_latency_ddr12(memctl_options_t *popts,
+ struct fsl_ddr_controller *c,
+ const struct dimm_params *dimm_params,
+ struct common_timing_params *outpdimm,
+ unsigned int number_of_dimms)
+{
+ int i;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(c);
+ unsigned int lowest_good_caslat;
+ unsigned int not_ok;
+ unsigned int temp1, temp2;
+
+ debug("using mclk_ps = %u\n", mclk_ps);
+ if (mclk_ps > outpdimm->tckmax_ps) {
+ printf("Warning: DDR clock (%u ps) is slower than DIMM(s) (tCKmax %u ps)\n",
+ mclk_ps, outpdimm->tckmax_ps);
+ }
+
+ /*
+ * Compute a CAS latency suitable for all DIMMs
+ *
+ * Strategy for SPD-defined latencies: compute only
+ * CAS latency defined by all DIMMs.
+ */
+
+ /*
+ * Step 1: find CAS latency common to all DIMMs using bitwise
+ * operation.
+ */
+ temp1 = 0xFF;
+ for (i = 0; i < number_of_dimms; i++) {
+ if (dimm_params[i].n_ranks) {
+ temp2 = 0;
+ temp2 |= 1 << dimm_params[i].caslat_x;
+ temp2 |= 1 << dimm_params[i].caslat_x_minus_1;
+ temp2 |= 1 << dimm_params[i].caslat_x_minus_2;
+ /*
+ * If there was no entry for X-2 (X-1) in
+ * the SPD, then caslat_x_minus_2
+ * (caslat_x_minus_1) contains either 255 or
+ * 0xFFFFFFFF because that's what the glorious
+ * __ilog2 function returns for an input of 0.
+ * On 32-bit PowerPC, left shift counts with bit
+ * 26 set (that the value of 255 or 0xFFFFFFFF
+ * will have), cause the destination register to
+ * be 0. That is why this works.
+ */
+ temp1 &= temp2;
+ }
+ }
+
+ /*
+ * Step 2: check each common CAS latency against tCK of each
+ * DIMM's SPD.
+ */
+ lowest_good_caslat = 0;
+ temp2 = 0;
+ while (temp1) {
+ not_ok = 0;
+ temp2 = ilog2(temp1);
+ debug("checking common caslat = %u\n", temp2);
+
+ /* Check if this CAS latency will work on all DIMMs at tCK. */
+ for (i = 0; i < number_of_dimms; i++) {
+ if (!dimm_params[i].n_ranks)
+ continue;
+
+ if (dimm_params[i].caslat_x == temp2) {
+ if (mclk_ps >= dimm_params[i].tckmin_x_ps) {
+ debug("CL = %u ok on DIMM %u at tCK=%u ps with tCKmin_X_ps of %u\n",
+ temp2, i, mclk_ps,
+ dimm_params[i].tckmin_x_ps);
+ continue;
+ } else {
+ not_ok++;
+ }
+ }
+
+ if (dimm_params[i].caslat_x_minus_1 == temp2) {
+ unsigned int tckmin_x_minus_1_ps
+ = dimm_params[i].tckmin_x_minus_1_ps;
+ if (mclk_ps >= tckmin_x_minus_1_ps) {
+ debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_1_ps of %u\n",
+ temp2, i, mclk_ps,
+ tckmin_x_minus_1_ps);
+ continue;
+ } else {
+ not_ok++;
+ }
+ }
+
+ if (dimm_params[i].caslat_x_minus_2 == temp2) {
+ unsigned int tckmin_x_minus_2_ps
+ = dimm_params[i].tckmin_x_minus_2_ps;
+ if (mclk_ps >= tckmin_x_minus_2_ps) {
+ debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_2_ps of %u\n",
+ temp2, i, mclk_ps,
+ tckmin_x_minus_2_ps);
+ continue;
+ } else {
+ not_ok++;
+ }
+ }
+ }
+
+ if (!not_ok)
+ lowest_good_caslat = temp2;
+
+ temp1 &= ~(1 << temp2);
+ }
+
+ debug("lowest common SPD-defined CAS latency = %u\n",
+ lowest_good_caslat);
+ outpdimm->lowest_common_spd_caslat = lowest_good_caslat;
+
+
+ /*
+ * Compute a common 'de-rated' CAS latency.
+ *
+	 * The strategy here is to find the *highest* derated CAS latency,
+	 * with the assumption that all of the DIMMs will support a derated
+	 * CAS latency higher than or equal to their lowest derated value.
+ */
+ temp1 = 0;
+ for (i = 0; i < number_of_dimms; i++)
+ temp1 = max(temp1, dimm_params[i].caslat_lowest_derated);
+
+ outpdimm->highest_common_derated_caslat = temp1;
+	debug("highest common derated CAS latency = %u\n", temp1);
+
+ return 0;
+}
+
+/*
+ * compute_lowest_common_dimm_parameters()
+ *
+ * Determine the worst-case DIMM timing parameters from the set of DIMMs
+ * whose parameters have been computed into the array pointed to
+ * by dimm_params.
+ */
+unsigned int
+compute_lowest_common_dimm_parameters(struct fsl_ddr_controller *c)
+{
+ int number_of_dimms = c->dimm_slots_per_ctrl;
+ memctl_options_t *popts = &c->memctl_opts;
+ const struct dimm_params *dimm_params = c->dimm_params;
+ struct common_timing_params *outpdimm = &c->common_timing_params;
+ unsigned int i, j;
+
+ unsigned int tckmin_x_ps = 0;
+ unsigned int tckmax_ps = 0xFFFFFFFF;
+ unsigned int trcd_ps = 0;
+ unsigned int trp_ps = 0;
+ unsigned int tras_ps = 0;
+ unsigned int taamin_ps = 0;
+ unsigned int twr_ps = 0;
+ unsigned int trfc1_ps = 0;
+ unsigned int trfc2_ps = 0;
+ unsigned int trfc4_ps = 0;
+ unsigned int trrds_ps = 0;
+ unsigned int trrdl_ps = 0;
+ unsigned int tccdl_ps = 0;
+ unsigned int trfc_slr_ps = 0;
+ unsigned int twtr_ps = 0;
+ unsigned int trfc_ps = 0;
+ unsigned int trrd_ps = 0;
+ unsigned int trtp_ps = 0;
+ unsigned int trc_ps = 0;
+ unsigned int refresh_rate_ps = 0;
+ unsigned int extended_op_srt = 1;
+ unsigned int tis_ps = 0;
+ unsigned int tih_ps = 0;
+ unsigned int tds_ps = 0;
+ unsigned int tdh_ps = 0;
+ unsigned int tdqsq_max_ps = 0;
+ unsigned int tqhs_ps = 0;
+ unsigned int temp1, temp2;
+ unsigned int additive_latency = 0;
+
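+	/* temp1 counts DIMM slots that are empty or cannot be used */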
+ temp1 = 0;
+ for (i = 0; i < number_of_dimms; i++) {
+ /*
+ * If there are no ranks on this DIMM,
+ * it probably doesn't exist, so skip it.
+ */
+ if (dimm_params[i].n_ranks == 0) {
+ temp1++;
+ continue;
+ }
+ if (dimm_params[i].n_ranks == 4 && i != 0) {
+ printf("Found Quad-rank DIMM in wrong bank, ignored."
+ " Software may not run as expected.\n");
+ temp1++;
+ continue;
+ }
+
+		/*
+		 * Check whether a quad-rank DIMM is plugged in while the
+		 * controller cannot provide four chip selects per DIMM slot.
+		 * Only boards with a suitable design can support quad-rank
+		 * DIMMs.
+		 */
+ if (dimm_params[i].n_ranks == 4 && \
+ c->chip_selects_per_ctrl / c->dimm_slots_per_ctrl < 4) {
+			printf("Found quad-rank DIMM, unable to support it.\n");
+ temp1++;
+ continue;
+ }
+
+ /*
+		 * Find the minimum tckmax_ps across the DIMMs; this bounds
+		 * the slowest speed the whole system can run at.
+ */
+ outpdimm->tckmax_ps = min(tckmax_ps,
+ (unsigned int)dimm_params[i].tckmax_ps);
+ if (is_ddr3_4(popts))
+ outpdimm->taamin_ps = max(taamin_ps,
+ (unsigned int)dimm_params[i].taa_ps);
+ outpdimm->tckmin_x_ps = max(tckmin_x_ps,
+ (unsigned int)dimm_params[i].tckmin_x_ps);
+ outpdimm->trcd_ps = max(trcd_ps, (unsigned int)dimm_params[i].trcd_ps);
+ outpdimm->trp_ps = max(trp_ps, (unsigned int)dimm_params[i].trp_ps);
+ outpdimm->tras_ps = max(tras_ps, (unsigned int)dimm_params[i].tras_ps);
+
+ if (is_ddr4(popts)) {
+ outpdimm->twr_ps = 15000;
+ outpdimm->trfc1_ps = max(trfc1_ps,
+ (unsigned int)dimm_params[i].trfc1_ps);
+ outpdimm->trfc2_ps = max(trfc2_ps,
+ (unsigned int)dimm_params[i].trfc2_ps);
+ outpdimm->trfc4_ps = max(trfc4_ps,
+ (unsigned int)dimm_params[i].trfc4_ps);
+ outpdimm->trrds_ps = max(trrds_ps,
+ (unsigned int)dimm_params[i].trrds_ps);
+ outpdimm->trrdl_ps = max(trrdl_ps,
+ (unsigned int)dimm_params[i].trrdl_ps);
+ outpdimm->tccdl_ps = max(tccdl_ps,
+ (unsigned int)dimm_params[i].tccdl_ps);
+ outpdimm->trfc_slr_ps = max(trfc_slr_ps,
+ (unsigned int)dimm_params[i].trfc_slr_ps);
+ } else {
+ twr_ps = max(twr_ps, (unsigned int)dimm_params[i].twr_ps);
+ outpdimm->twtr_ps = max(twtr_ps, (unsigned int)dimm_params[i].twtr_ps);
+ outpdimm->trfc_ps = max(trfc_ps, (unsigned int)dimm_params[i].trfc_ps);
+ outpdimm->trrd_ps = max(trrd_ps, (unsigned int)dimm_params[i].trrd_ps);
+ outpdimm->trtp_ps = max(trtp_ps, (unsigned int)dimm_params[i].trtp_ps);
+ }
+ outpdimm->trc_ps = max(trc_ps, (unsigned int)dimm_params[i].trc_ps);
+ if (is_ddr1(popts) || is_ddr2(popts)) {
+ outpdimm->tis_ps = max(tis_ps, (unsigned int)dimm_params[i].tis_ps);
+ outpdimm->tih_ps = max(tih_ps, (unsigned int)dimm_params[i].tih_ps);
+ outpdimm->tds_ps = max(tds_ps, (unsigned int)dimm_params[i].tds_ps);
+ outpdimm->tdh_ps = max(tdh_ps, (unsigned int)dimm_params[i].tdh_ps);
+ outpdimm->tqhs_ps = max(tqhs_ps, (unsigned int)dimm_params[i].tqhs_ps);
+ /*
+ * Find maximum tdqsq_max_ps to find slowest.
+ *
+ * FIXME: is finding the slowest value the correct
+ * strategy for this parameter?
+ */
+ outpdimm->tdqsq_max_ps = max(tdqsq_max_ps,
+ (unsigned int)dimm_params[i].tdqsq_max_ps);
+ }
+ outpdimm->refresh_rate_ps = max(refresh_rate_ps,
+ (unsigned int)dimm_params[i].refresh_rate_ps);
+ /* extended_op_srt is either 0 or 1, 0 having priority */
+ outpdimm->extended_op_srt = min(extended_op_srt,
+ (unsigned int)dimm_params[i].extended_op_srt);
+ }
+
+ outpdimm->ndimms_present = number_of_dimms - temp1;
+
+ if (temp1 == number_of_dimms) {
+		debug("no DIMMs on this memory controller\n");
+ return 0;
+ }
+
+ /* Determine common burst length for all DIMMs. */
+ temp1 = 0xff;
+ for (i = 0; i < number_of_dimms; i++) {
+ if (dimm_params[i].n_ranks) {
+ temp1 &= dimm_params[i].burst_lengths_bitmask;
+ }
+ }
+ outpdimm->all_dimms_burst_lengths_bitmask = temp1;
+
+	/* Determine whether all DIMMs are registered (buffered). */
+ temp1 = temp2 = 0;
+ for (i = 0; i < number_of_dimms; i++) {
+ if (dimm_params[i].n_ranks) {
+ if (dimm_params[i].registered_dimm) {
+ temp1 = 1;
+ printf("Detected RDIMM %s\n",
+ dimm_params[i].mpart);
+ } else {
+ temp2 = 1;
+ printf("Detected UDIMM %s\n",
+ dimm_params[i].mpart);
+ }
+ }
+ }
+
+ outpdimm->all_dimms_registered = 0;
+ outpdimm->all_dimms_unbuffered = 0;
+ if (temp1 && !temp2) {
+ outpdimm->all_dimms_registered = 1;
+ } else if (!temp1 && temp2) {
+ outpdimm->all_dimms_unbuffered = 1;
+ } else {
+		printf("ERROR: Mix of registered (buffered) and unbuffered "
+ "DIMMs detected!\n");
+ }
+
+ temp1 = 0;
+ if (outpdimm->all_dimms_registered)
+ for (j = 0; j < 16; j++) {
+ outpdimm->rcw[j] = dimm_params[0].rcw[j];
+ for (i = 1; i < number_of_dimms; i++) {
+ if (!dimm_params[i].n_ranks)
+ continue;
+ if (dimm_params[i].rcw[j] != dimm_params[0].rcw[j]) {
+ temp1 = 1;
+ break;
+ }
+ }
+ }
+
+ if (temp1 != 0)
+		printf("ERROR: Mix of different RDIMMs detected!\n");
+
+ /* calculate cas latency for all DDR types */
+ if (is_ddr3_4(popts)) {
+ if (compute_cas_latency_ddr34(popts, c, dimm_params,
+ outpdimm, number_of_dimms))
+ return 1;
+ } else {
+ if (compute_cas_latency_ddr12(popts, c, dimm_params,
+ outpdimm, number_of_dimms))
+ return 1;
+ }
+
+ /* Determine if all DIMMs ECC capable. */
+ temp1 = 1;
+ for (i = 0; i < number_of_dimms; i++) {
+ if (dimm_params[i].n_ranks &&
+ !(dimm_params[i].edc_config & EDC_ECC)) {
+ temp1 = 0;
+ break;
+ }
+ }
+ if (temp1) {
+ debug("all DIMMs ECC capable\n");
+ } else {
+		debug("Warning: not all DIMMs ECC capable, can't enable ECC\n");
+ }
+ outpdimm->all_dimms_ecc_capable = temp1;
+
+ /*
+ * Compute additive latency.
+ *
+ * For DDR1, additive latency should be 0.
+ *
+ * For DDR2, with ODT enabled, use "a value" less than ACTTORW,
+ * which comes from Trcd, and also note that:
+ * add_lat + caslat must be >= 4
+ *
+ * For DDR3, we use the AL=0
+ *
+ * When to use additive latency for DDR2:
+ *
+ * I. Because you are using CL=3 and need to do ODT on writes and
+ * want functionality.
+ * 1. Are you going to use ODT? (Does your board not have
+ * additional termination circuitry for DQ, DQS, DQS_,
+ * DM, RDQS, RDQS_ for x4/x8 configs?)
+ * 2. If so, is your lowest supported CL going to be 3?
+ * 3. If so, then you must set AL=1 because
+ *
+ * WL >= 3 for ODT on writes
+ * RL = AL + CL
+ * WL = RL - 1
+ * ->
+ * WL = AL + CL - 1
+ * AL + CL - 1 >= 3
+ * AL + CL >= 4
+ * QED
+ *
+ * RL >= 3 for ODT on reads
+ * RL = AL + CL
+ *
+ * Since CL aren't usually less than 2, AL=0 is a minimum,
+ * so the WL-derived AL should be the -- FIXME?
+ *
+ * II. Because you are using auto-precharge globally and want to
+ * use additive latency (posted CAS) to get more bandwidth.
+ * 1. Are you going to use auto-precharge mode globally?
+ *
+	 *	Use additive latency and compute AL to be 1 cycle less than
+	 *	tRCD, i.e. the READ or WRITE command is in the cycle
+	 *	immediately following the ACTIVATE command.
+ *
+ * III. Because you feel like it or want to do some sort of
+ * degraded-performance experiment.
+ * 1. Do you just want to use additive latency because you feel
+ * like it?
+ *
+ * Validation: AL is less than tRCD, and within the other
+ * read-to-precharge constraints.
+ */
+
+ additive_latency = 0;
+
+ if (is_ddr2(popts) && outpdimm->lowest_common_spd_caslat < 4 &&
+	    picos_to_mclk(c, trcd_ps) > outpdimm->lowest_common_spd_caslat) {
+ additive_latency = picos_to_mclk(c, trcd_ps) -
+ outpdimm->lowest_common_spd_caslat;
+ if (mclk_to_picos(c, additive_latency) > trcd_ps) {
+ additive_latency = picos_to_mclk(c, trcd_ps);
+			debug("setting additive_latency to %u because it was "
+			"greater than tRCD_ps\n", additive_latency);
+ }
+ }
+
+ /*
+ * Validate additive latency
+ *
+ * AL <= tRCD(min)
+ */
+ if (mclk_to_picos(c, additive_latency) > trcd_ps) {
+ printf("Error: invalid additive latency exceeds tRCD(min).\n");
+ return 1;
+ }
+
+ /*
+ * RL = CL + AL; RL >= 3 for ODT_RD_CFG to be enabled
+ * WL = RL - 1; WL >= 3 for ODT_WL_CFG to be enabled
+ * ADD_LAT (the register) must be set to a value less
+ * than ACTTORW if WL = 1, then AL must be set to 1
+ * RD_TO_PRE (the register) must be set to a minimum
+ * tRTP + AL if AL is nonzero
+ */
+
+ /*
+ * Additive latency will be applied only if the memctl option to
+ * use it.
+	 * use it is set.
+ outpdimm->additive_latency = additive_latency;
+
+ debug("tCKmin_ps = %u\n", outpdimm->tckmin_x_ps);
+ debug("trcd_ps = %u\n", outpdimm->trcd_ps);
+ debug("trp_ps = %u\n", outpdimm->trp_ps);
+ debug("tras_ps = %u\n", outpdimm->tras_ps);
+ if (is_ddr4(popts)) {
+ debug("trfc1_ps = %u\n", trfc1_ps);
+ debug("trfc2_ps = %u\n", trfc2_ps);
+ debug("trfc4_ps = %u\n", trfc4_ps);
+ debug("trrds_ps = %u\n", trrds_ps);
+ debug("trrdl_ps = %u\n", trrdl_ps);
+ debug("tccdl_ps = %u\n", tccdl_ps);
+ debug("trfc_slr_ps = %u\n", trfc_slr_ps);
+ } else {
+ debug("twtr_ps = %u\n", outpdimm->twtr_ps);
+ debug("trfc_ps = %u\n", outpdimm->trfc_ps);
+ debug("trrd_ps = %u\n", outpdimm->trrd_ps);
+ }
+ debug("twr_ps = %u\n", outpdimm->twr_ps);
+ debug("trc_ps = %u\n", outpdimm->trc_ps);
+
+ return 0;
+}
diff --git a/drivers/ddr/fsl/main.c b/drivers/ddr/fsl/main.c
new file mode 100644
index 0000000000..b0df34c933
--- /dev/null
+++ b/drivers/ddr/fsl/main.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2008-2014 Freescale Semiconductor, Inc.
+ */
+
+/*
+ * Generic driver for Freescale DDR/DDR2/DDR3 memory controller.
+ * Based on code from spd_sdram.c
+ * Author: James Yang [at freescale.com]
+ */
+#include <common.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <linux/log2.h>
+#include "fsl_ddr.h"
+
+/*
+ * ASSUMPTIONS:
+ * - Same number of CONFIG_DIMM_SLOTS_PER_CTLR on each controller
+ * - Same memory data bus width on all controllers
+ *
+ * NOTES:
+ *
+ * The memory controller and associated documentation use confusing
+ * terminology when referring to the organization of DRAM.
+ *
+ * Here is a terminology translation table:
+ *
+ * memory controller/documentation |industry |this code |signals
+ * -------------------------------|-----------|-----------|-----------------
+ * physical bank/bank |rank |rank |chip select (CS)
+ * logical bank/sub-bank |bank |bank |bank address (BA)
+ * page/row |row |page |row address
+ * ??? |column |column |column address
+ *
+ * The naming confusion is further exacerbated by the descriptions of the
+ * memory controller interleaving feature, where accesses are interleaved
+ * _BETWEEN_ two separate memory controllers. This is configured only in
+ * CS0_CONFIG[INTLV_CTL] of each memory controller.
+ *
+ * memory controller documentation | number of chip selects
+ * | per memory controller supported
+ * --------------------------------|-----------------------------------------
+ * cache line interleaving | 1 (CS0 only)
+ * page interleaving | 1 (CS0 only)
+ * bank interleaving | 1 (CS0 only)
+ * superbank interleaving         | depends on bank (chip select)
+ *                                |  interleaving [rank interleaving]
+ * | mode used on every memory controller
+ *
+ * Even further confusing is the existence of the interleaving feature
+ * _WITHIN_ each memory controller. The feature is referred to in
+ * documentation as chip select interleaving or bank interleaving,
+ * although it is configured in the DDR_SDRAM_CFG field.
+ *
+ * Name of field | documentation name | this code
+ * -----------------------------|-----------------------|------------------
+ * DDR_SDRAM_CFG[BA_INTLV_CTL] | Bank (chip select) | rank interleaving
+ * | interleaving
+ */
+
+static unsigned long long step_assign_addresses_linear(struct fsl_ddr_info *pinfo,
+ unsigned long long current_mem_base)
+{
+ int i, j;
+ unsigned long long total_mem = 0;
+
+ /*
+ * Simple linear assignment if memory
+ * controllers are not interleaved.
+ */
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+ unsigned long long total_ctlr_mem = 0;
+
+ c->common_timing_params.base_address = current_mem_base;
+
+ for (j = 0; j < c->dimm_slots_per_ctrl; j++) {
+ /* Compute DIMM base addresses. */
+ unsigned long long cap = c->dimm_params[j].capacity >>
+ pinfo->c[i].dbw_capacity_adjust;
+
+ c->dimm_params[j].base_address = current_mem_base;
+ debug("ctrl %d dimm %d base 0x%llx\n", i, j, current_mem_base);
+ current_mem_base += cap;
+ total_ctlr_mem += cap;
+ }
+ debug("ctrl %d total 0x%llx\n", i, total_ctlr_mem);
+ c->common_timing_params.total_mem = total_ctlr_mem;
+ total_mem += total_ctlr_mem;
+ }
+
+ return total_mem;
+}
+
+static unsigned long long step_assign_addresses_interleaved(struct fsl_ddr_info *pinfo,
+ unsigned long long current_mem_base)
+{
+	unsigned long long total_mem = 0, total_ctlr_mem;
+ unsigned long long rank_density, ctlr_density = 0;
+ int i;
+
+ rank_density = pinfo->c[0].dimm_params[0].rank_density >>
+ pinfo->c[0].dbw_capacity_adjust;
+
+ switch (pinfo->c[0].memctl_opts.ba_intlv_ctl &
+ FSL_DDR_CS0_CS1_CS2_CS3) {
+ case FSL_DDR_CS0_CS1_CS2_CS3:
+ ctlr_density = 4 * rank_density;
+ break;
+ case FSL_DDR_CS0_CS1:
+ case FSL_DDR_CS0_CS1_AND_CS2_CS3:
+ ctlr_density = 2 * rank_density;
+ break;
+ case FSL_DDR_CS2_CS3:
+ default:
+ ctlr_density = rank_density;
+ break;
+ }
+
+ debug("rank density is 0x%llx, ctlr density is 0x%llx\n",
+ rank_density, ctlr_density);
+
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+
+ if (c->memctl_opts.memctl_interleaving) {
+ switch (c->memctl_opts.memctl_interleaving_mode) {
+ case FSL_DDR_256B_INTERLEAVING:
+ case FSL_DDR_CACHE_LINE_INTERLEAVING:
+ case FSL_DDR_PAGE_INTERLEAVING:
+ case FSL_DDR_BANK_INTERLEAVING:
+ case FSL_DDR_SUPERBANK_INTERLEAVING:
+ total_ctlr_mem = 2 * ctlr_density;
+ break;
+ case FSL_DDR_3WAY_1KB_INTERLEAVING:
+ case FSL_DDR_3WAY_4KB_INTERLEAVING:
+ case FSL_DDR_3WAY_8KB_INTERLEAVING:
+ total_ctlr_mem = 3 * ctlr_density;
+ break;
+ case FSL_DDR_4WAY_1KB_INTERLEAVING:
+ case FSL_DDR_4WAY_4KB_INTERLEAVING:
+ case FSL_DDR_4WAY_8KB_INTERLEAVING:
+ total_ctlr_mem = 4 * ctlr_density;
+ break;
+ default:
+ panic("Unknown interleaving mode");
+ }
+ c->common_timing_params.base_address = current_mem_base;
+ c->common_timing_params.total_mem = total_ctlr_mem;
+ total_mem = current_mem_base + total_ctlr_mem;
+ debug("ctrl %d base 0x%llx\n", i, current_mem_base);
+ debug("ctrl %d total 0x%llx\n", i, total_ctlr_mem);
+ } else {
+ total_mem += step_assign_addresses_linear(pinfo, current_mem_base);
+ }
+ }
+
+ return total_mem;
+}
+
+static unsigned long long step_assign_addresses(struct fsl_ddr_info *pinfo)
+{
+ unsigned int i, j;
+ unsigned long long total_mem;
+
+ /*
+ * If a reduced data width is requested, but the SPD
+ * specifies a physically wider device, adjust the
+ * computed dimm capacities accordingly before
+ * assigning addresses.
+ */
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+ unsigned int found = 0;
+
+ switch (c->memctl_opts.data_bus_width) {
+ case 2:
+ /* 16-bit */
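+			/*
+			 * A 64/72-bit wide DIMM on a 16-bit bus contributes
+			 * only a quarter of its capacity (adjust by 2), a
+			 * 32/40-bit wide DIMM only half (adjust by 1); the
+			 * capacity is shifted right by this adjustment when
+			 * addresses are assigned.
+			 */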
+ for (j = 0; j < c->dimm_slots_per_ctrl; j++) {
+ unsigned int dw;
+ if (!c->dimm_params[j].n_ranks)
+ continue;
+ dw = c->dimm_params[j].primary_sdram_width;
+ if ((dw == 72 || dw == 64)) {
+ pinfo->c[i].dbw_capacity_adjust = 2;
+ break;
+ } else if ((dw == 40 || dw == 32)) {
+ pinfo->c[i].dbw_capacity_adjust = 1;
+ break;
+ }
+ }
+ break;
+
+ case 1:
+ /* 32-bit */
+ for (j = 0; j < c->dimm_slots_per_ctrl; j++) {
+ unsigned int dw;
+ dw = c->dimm_params[j].data_width;
+ if (c->dimm_params[j].n_ranks
+ && (dw == 72 || dw == 64)) {
+ /*
+ * FIXME: can't really do it
+ * like this because this just
+ * further reduces the memory
+ */
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ pinfo->c[i].dbw_capacity_adjust = 1;
+
+ break;
+
+ case 0:
+ /* 64-bit */
+ break;
+
+ default:
+ printf("unexpected data bus width "
+				"specified for controller %u\n", i);
+ return 1;
+ }
+ debug("dbw_cap_adj[%d]=%d\n", i, pinfo->c[i].dbw_capacity_adjust);
+ }
+
+ if (pinfo->c[0].memctl_opts.memctl_interleaving)
+ total_mem = step_assign_addresses_interleaved(pinfo, pinfo->mem_base);
+ else
+ total_mem = step_assign_addresses_linear(pinfo, pinfo->mem_base);
+
+ debug("Total mem by %s is 0x%llx\n", __func__, total_mem);
+
+ return total_mem;
+}
+
+static int compute_dimm_parameters(struct fsl_ddr_controller *c,
+ struct spd_eeprom *spd,
+ struct dimm_params *pdimm)
+{
+ const memctl_options_t *popts = &c->memctl_opts;
+ int ret = -EINVAL;
+
+ memset(pdimm, 0, sizeof(*pdimm));
+
+ if (is_ddr1(popts))
+ ret = ddr1_compute_dimm_parameters(c, (void *)spd, pdimm);
+ else if (is_ddr2(popts))
+ ret = ddr2_compute_dimm_parameters(c, (void *)spd, pdimm);
+ else if (is_ddr3(popts))
+ ret = ddr3_compute_dimm_parameters(c, (void *)spd, pdimm);
+ else if (is_ddr4(popts))
+ ret = ddr4_compute_dimm_parameters(c, (void *)spd, pdimm);
+
+ return ret;
+}
+
+static unsigned long long fsl_ddr_compute(struct fsl_ddr_info *pinfo)
+{
+ unsigned int i, j;
+ unsigned long long total_mem = 0;
+ int assert_reset = 0;
+ int retval;
+ unsigned int max_end = 0;
+
+ /* STEP 2: Compute DIMM parameters from SPD data */
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+
+ if (!c->spd_installed_dimms)
+ continue;
+
+ for (j = 0; j < c->dimm_slots_per_ctrl; j++) {
+ struct spd_eeprom *spd = &c->spd_installed_dimms[j];
+ struct dimm_params *pdimm = &c->dimm_params[j];
+
+ retval = compute_dimm_parameters(c, spd, pdimm);
+ if (retval == 2) {
+ printf("Error: compute_dimm_parameters"
+				" returned a fatal non-zero value "
+ "for memctl=%u dimm=%u\n", i, j);
+ return 0;
+ }
+ if (retval) {
+ debug("Warning: compute_dimm_parameters"
+				" returned a non-zero value for memctl=%u "
+ "dimm=%u\n", i, j);
+ }
+ }
+ }
+
+ /*
+ * STEP 3: Compute a common set of timing parameters
+ * suitable for all of the DIMMs on each memory controller
+ */
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+
+ debug("Computing lowest common DIMM parameters for memctl=%u\n",
+ i);
+ compute_lowest_common_dimm_parameters(c);
+ }
+
+ /* STEP 4: Gather configuration requirements from user */
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+
+ debug("Reloading memory controller "
+ "configuration options for memctl=%u\n", i);
+ /*
+ * This "reloads" the memory controller options
+ * to defaults. If the user "edits" an option,
+ * next_step points to the step after this,
+ * which is currently STEP_ASSIGN_ADDRESSES.
+ */
+ populate_memctl_options(c);
+ /*
+ * For RDIMMs, JEDEC spec requires clocks to be stable
+ * before reset signal is deasserted. For the boards
+ * using fixed parameters, this function should be
+ * be called from board init file.
+		 * called from the board init file.
+ if (c->common_timing_params.all_dimms_registered)
+ assert_reset = 1;
+ }
+
+ /* STEP 5: Assign addresses to chip selects */
+ check_interleaving_options(pinfo);
+ total_mem = step_assign_addresses(pinfo);
+ debug("Total mem %llu assigned\n", total_mem);
+
+ /* STEP 6: compute controller register values */
+ debug("FSL Memory ctrl register computation\n");
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+
+ if (c->common_timing_params.ndimms_present == 0) {
+ memset(&c->fsl_ddr_config_reg, 0,
+ sizeof(fsl_ddr_cfg_regs_t));
+ continue;
+ }
+
+ compute_fsl_memctl_config_regs(c);
+ }
+
+ /*
+ * Compute the amount of memory available just by
+ * looking for the highest valid CSn_BNDS value.
+ * This allows us to also experiment with using
+ * only CS0 when using dual-rank DIMMs.
+ */
+
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+
+ for (j = 0; j < c->chip_selects_per_ctrl; j++) {
+ fsl_ddr_cfg_regs_t *reg = &c->fsl_ddr_config_reg;
+ if (reg->cs[j].config & 0x80000000) {
+ unsigned int end;
+ /*
+				 * 0xffffffff is a special value we put
+ * for unused bnds
+ */
+ if (reg->cs[j].bnds == 0xffffffff)
+ continue;
+ end = reg->cs[j].bnds & 0xffff;
+ if (end > max_end) {
+ max_end = end;
+ }
+ }
+ }
+ }
+
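+	/*
+	 * The CSn_BNDS ending address field extracted above is in 16 MiB
+	 * (1 << 24) units; rebuild the byte address of the last valid byte
+	 * and subtract the memory base to get the total size.
+	 */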
+ total_mem = 1 + (((unsigned long long)max_end << 24ULL) |
+ 0xFFFFFFULL) - pinfo->mem_base;
+
+ return total_mem;
+}
+
+phys_size_t fsl_ddr_sdram(struct fsl_ddr_info *pinfo)
+{
+ unsigned int i;
+ unsigned long long total_memory;
+ int deassert_reset = 0;
+
+ total_memory = fsl_ddr_compute(pinfo);
+
+ /* setup 3-way interleaving before enabling DDRC */
+ switch (pinfo->c[0].memctl_opts.memctl_interleaving_mode) {
+ case FSL_DDR_3WAY_1KB_INTERLEAVING:
+ case FSL_DDR_3WAY_4KB_INTERLEAVING:
+ case FSL_DDR_3WAY_8KB_INTERLEAVING:
+		fsl_ddr_set_intl3r(
+			pinfo->c[0].memctl_opts.memctl_interleaving_mode);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Program configuration registers.
+	 * The JEDEC spec requires clocks to be stable before deasserting reset
+ * for RDIMMs. Clocks start after chip select is enabled and clock
+ * control register is set. During step 1, all controllers have their
+ * registers set but not enabled. Step 2 proceeds after deasserting
+ * reset through board FPGA or GPIO.
+ * For non-registered DIMMs, initialization can go through but it is
+ * also OK to follow the same flow.
+ */
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+
+ if (c->common_timing_params.all_dimms_registered)
+ deassert_reset = 1;
+ }
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+
+ debug("Programming controller %u\n", i);
+ if (c->common_timing_params.ndimms_present == 0) {
+ debug("No dimms present on controller %u; "
+ "skipping programming\n", i);
+ continue;
+ }
+ /*
+ * The following call with step = 1 returns before enabling
+ * the controller. It has to finish with step = 2 later.
+ */
+ fsl_ddr_set_memctl_regs(c, deassert_reset ? 1 : 0);
+ }
+ if (deassert_reset) {
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ struct fsl_ddr_controller *c = &pinfo->c[i];
+
+ /* Call with step = 2 to continue initialization */
+ fsl_ddr_set_memctl_regs(c, 2);
+ }
+ }
+
+ debug("total_memory by %s = %llu\n", __func__, total_memory);
+
+ return total_memory;
+}
diff --git a/drivers/ddr/fsl/options.c b/drivers/ddr/fsl/options.c
new file mode 100644
index 0000000000..73e9ab044e
--- /dev/null
+++ b/drivers/ddr/fsl/options.c
@@ -0,0 +1,1133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP Semiconductor
+ */
+
+#include <common.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include "fsl_ddr.h"
+
+struct dynamic_odt {
+ unsigned int odt_rd_cfg;
+ unsigned int odt_wr_cfg;
+ unsigned int odt_rtt_norm;
+ unsigned int odt_rtt_wr;
+};
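+
+/*
+ * Naming convention for the ODT tables below (as inferred from their use in
+ * populate_memctl_options()): the prefix gives the number of DIMM slots and
+ * each following letter describes one slot: Q = quad-rank, D = dual-rank,
+ * S = single-rank, 0 = empty.
+ */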
+
+/* Quad rank is not verified yet due availability.
+ * Replacing 20 OHM with 34 OHM since DDR4 doesn't have 20 OHM option
+ */
+static const struct dynamic_odt single_Q_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS_AND_OTHER_DIMM,
+ DDR4_RTT_34_OHM, /* unverified */
+ DDR4_RTT_120_OHM
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR4_RTT_OFF,
+ DDR4_RTT_120_OHM
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS_AND_OTHER_DIMM,
+ DDR4_RTT_34_OHM,
+ DDR4_RTT_120_OHM
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER, /* tied high */
+ DDR4_RTT_OFF,
+ DDR4_RTT_120_OHM
+ }
+};
+
+static const struct dynamic_odt single_D_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_ALL,
+ DDR4_RTT_40_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR4_RTT_OFF,
+ DDR4_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt single_S_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_ALL,
+ DDR4_RTT_40_OHM,
+ DDR4_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+};
+
+static const struct dynamic_odt dual_DD_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR4_RTT_120_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR4_RTT_34_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR4_RTT_120_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR4_RTT_34_OHM,
+ DDR4_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt dual_DS_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR4_RTT_120_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR4_RTT_34_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_ALL,
+ DDR4_RTT_34_OHM,
+ DDR4_RTT_120_OHM
+ },
+ {0, 0, 0, 0}
+};
+static const struct dynamic_odt dual_SD_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_ALL,
+ DDR4_RTT_34_OHM,
+ DDR4_RTT_120_OHM
+ },
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR4_RTT_120_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR4_RTT_34_OHM,
+ DDR4_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt dual_SS_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_ALL,
+ DDR4_RTT_34_OHM,
+ DDR4_RTT_120_OHM
+ },
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_ALL,
+ DDR4_RTT_34_OHM,
+ DDR4_RTT_120_OHM
+ },
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt dual_D0_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR4_RTT_40_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR4_RTT_OFF,
+ DDR4_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt dual_0D_ddr4[4] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR4_RTT_40_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR4_RTT_OFF,
+ DDR4_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt dual_S0_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR4_RTT_40_OHM,
+ DDR4_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+
+};
+
+static const struct dynamic_odt dual_0S_ddr4[4] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR4_RTT_40_OHM,
+ DDR4_RTT_OFF
+ },
+ {0, 0, 0, 0}
+
+};
+
+static const struct dynamic_odt odt_unknown_ddr4[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR4_RTT_120_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR4_RTT_120_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR4_RTT_120_OHM,
+ DDR4_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR4_RTT_120_OHM,
+ DDR4_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt single_Q_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS_AND_OTHER_DIMM,
+ DDR3_RTT_20_OHM,
+ DDR3_RTT_120_OHM
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER, /* tied high */
+ DDR3_RTT_OFF,
+ DDR3_RTT_120_OHM
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS_AND_OTHER_DIMM,
+ DDR3_RTT_20_OHM,
+ DDR3_RTT_120_OHM
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER, /* tied high */
+ DDR3_RTT_OFF,
+ DDR3_RTT_120_OHM
+ }
+};
+
+static const struct dynamic_odt single_D_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_ALL,
+ DDR3_RTT_40_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR3_RTT_OFF,
+ DDR3_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt single_S_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_ALL,
+ DDR3_RTT_40_OHM,
+ DDR3_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+};
+
+static const struct dynamic_odt dual_DD_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR3_RTT_120_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR3_RTT_30_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR3_RTT_120_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR3_RTT_30_OHM,
+ DDR3_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt dual_DS_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR3_RTT_120_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR3_RTT_30_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_ALL,
+ DDR3_RTT_20_OHM,
+ DDR3_RTT_120_OHM
+ },
+ {0, 0, 0, 0}
+};
+static const struct dynamic_odt dual_SD_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_ALL,
+ DDR3_RTT_20_OHM,
+ DDR3_RTT_120_OHM
+ },
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR3_RTT_120_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR3_RTT_20_OHM,
+ DDR3_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt dual_SS_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_ALL,
+ DDR3_RTT_30_OHM,
+ DDR3_RTT_120_OHM
+ },
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_ALL,
+ DDR3_RTT_30_OHM,
+ DDR3_RTT_120_OHM
+ },
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt dual_D0_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR3_RTT_40_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR3_RTT_OFF,
+ DDR3_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt dual_0D_ddr3[4] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_SAME_DIMM,
+ DDR3_RTT_40_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR3_RTT_OFF,
+ DDR3_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt dual_S0_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR3_RTT_40_OHM,
+ DDR3_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+
+};
+
+static const struct dynamic_odt dual_0S_ddr3[4] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR3_RTT_40_OHM,
+ DDR3_RTT_OFF
+ },
+ {0, 0, 0, 0}
+
+};
+
+static const struct dynamic_odt odt_unknown_ddr3[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR3_RTT_120_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR3_RTT_120_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR3_RTT_120_OHM,
+ DDR3_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR3_RTT_120_OHM,
+ DDR3_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt single_Q_ddr12[4] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt single_D_ddr12[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_ALL,
+ DDR2_RTT_150_OHM,
+ DDR2_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR2_RTT_OFF,
+ DDR2_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt single_S_ddr12[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_ALL,
+ DDR2_RTT_150_OHM,
+ DDR2_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+};
+
+static const struct dynamic_odt dual_DD_ddr12[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR2_RTT_OFF,
+ DDR2_RTT_OFF
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR2_RTT_OFF,
+ DDR2_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt dual_DS_ddr12[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR2_RTT_OFF,
+ DDR2_RTT_OFF
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt dual_SD_ddr12[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR2_RTT_OFF,
+ DDR2_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt dual_SS_ddr12[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_OTHER_DIMM,
+ FSL_DDR_ODT_OTHER_DIMM,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt dual_D0_ddr12[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_ALL,
+ DDR2_RTT_150_OHM,
+ DDR2_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR2_RTT_OFF,
+ DDR2_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+static const struct dynamic_odt dual_0D_ddr12[4] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_ALL,
+ DDR2_RTT_150_OHM,
+ DDR2_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR2_RTT_OFF,
+ DDR2_RTT_OFF
+ }
+};
+
+static const struct dynamic_odt dual_S0_ddr12[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR2_RTT_150_OHM,
+ DDR2_RTT_OFF
+ },
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+
+};
+
+static const struct dynamic_odt dual_0S_ddr12[4] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR2_RTT_150_OHM,
+ DDR2_RTT_OFF
+ },
+ {0, 0, 0, 0}
+
+};
+
+static const struct dynamic_odt odt_unknown_ddr12[4] = {
+ { /* cs0 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ { /* cs1 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR2_RTT_OFF,
+ DDR2_RTT_OFF
+ },
+ { /* cs2 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_CS,
+ DDR2_RTT_75_OHM,
+ DDR2_RTT_OFF
+ },
+ { /* cs3 */
+ FSL_DDR_ODT_NEVER,
+ FSL_DDR_ODT_NEVER,
+ DDR2_RTT_OFF,
+ DDR2_RTT_OFF
+ }
+};
+
+/*
+ * Automatically select the bank interleaving mode based on DIMMs,
+ * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
+ * This function only deals with one or two slots per controller.
+ */
+static inline unsigned int auto_bank_intlv(struct fsl_ddr_controller *c,
+ struct dimm_params *pdimm)
+{
+ if (c->dimm_slots_per_ctrl == 1) {
+ if (pdimm[0].n_ranks == 4)
+ return FSL_DDR_CS0_CS1_CS2_CS3;
+ else if (pdimm[0].n_ranks == 2)
+ return FSL_DDR_CS0_CS1;
+ }
+
+ if (c->dimm_slots_per_ctrl == 2) {
+ if (pdimm[0].n_ranks == 2) {
+ if (pdimm[1].n_ranks == 2)
+ return FSL_DDR_CS0_CS1_CS2_CS3;
+ else
+ return FSL_DDR_CS0_CS1;
+ }
+ }
+
+ return 0;
+}
+
+unsigned int populate_memctl_options(struct fsl_ddr_controller *c)
+{
+ const struct common_timing_params *common_dimm = &c->common_timing_params;
+ memctl_options_t *popts = &c->memctl_opts;
+ struct dimm_params *pdimm = c->dimm_params;
+ unsigned int i;
+ const struct dynamic_odt *pdodt;
+ const struct dynamic_odt *single_Q, *single_S, *single_D;
+ const struct dynamic_odt *dual_DD, *dual_DS, *dual_0S;
+ const struct dynamic_odt *dual_D0, *dual_SD, *dual_SS, *dual_S0, *dual_0D;
+
+ if (is_ddr1(popts) || is_ddr2(popts)) {
+ pdodt = odt_unknown_ddr12;
+ single_Q = single_Q_ddr12;
+ single_D = single_D_ddr12;
+ single_S = single_S_ddr12;
+ dual_DD = dual_DD_ddr12;
+ dual_DS = dual_DS_ddr12;
+ dual_0S = dual_0S_ddr12;
+ dual_D0 = dual_D0_ddr12;
+ dual_SD = dual_SD_ddr12;
+ dual_SS = dual_SS_ddr12;
+ dual_S0 = dual_S0_ddr12;
+ dual_0D = dual_0D_ddr12;
+ } else if (is_ddr3(popts)) {
+ pdodt = odt_unknown_ddr3;
+ single_Q = single_Q_ddr3;
+ single_D = single_D_ddr3;
+ single_S = single_S_ddr3;
+ dual_DD = dual_DD_ddr3;
+ dual_DS = dual_DS_ddr3;
+ dual_0S = dual_0S_ddr3;
+ dual_D0 = dual_D0_ddr3;
+ dual_SD = dual_SD_ddr3;
+ dual_SS = dual_SS_ddr3;
+ dual_S0 = dual_S0_ddr3;
+ dual_0D = dual_0D_ddr3;
+ } else if (is_ddr4(popts)) {
+ pdodt = odt_unknown_ddr4;
+ single_Q = single_Q_ddr4;
+ single_D = single_D_ddr4;
+ single_S = single_S_ddr4;
+ dual_DD = dual_DD_ddr4;
+ dual_DS = dual_DS_ddr4;
+ dual_0S = dual_0S_ddr4;
+ dual_D0 = dual_D0_ddr4;
+ dual_SD = dual_SD_ddr4;
+ dual_SS = dual_SS_ddr4;
+ dual_S0 = dual_S0_ddr4;
+ dual_0D = dual_0D_ddr4;
+ } else {
+ return -EINVAL;
+ }
+
+ if (!is_ddr1(popts)) {
+ /* Chip select options. */
+ if (c->dimm_slots_per_ctrl == 1) {
+ switch (pdimm[0].n_ranks) {
+ case 1:
+ pdodt = single_S;
+ break;
+ case 2:
+ pdodt = single_D;
+ break;
+ case 4:
+ pdodt = single_Q;
+ break;
+ }
+ } else if (c->dimm_slots_per_ctrl == 2) {
+ switch (pdimm[0].n_ranks) {
+ case 4:
+ pdodt = single_Q;
+ if (pdimm[1].n_ranks)
+ printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
+ break;
+ case 2:
+ switch (pdimm[1].n_ranks) {
+ case 2:
+ pdodt = dual_DD;
+ break;
+ case 1:
+ pdodt = dual_DS;
+ break;
+ case 0:
+ pdodt = dual_D0;
+ break;
+ }
+ break;
+ case 1:
+ switch (pdimm[1].n_ranks) {
+ case 2:
+ pdodt = dual_SD;
+ break;
+ case 1:
+ pdodt = dual_SS;
+ break;
+ case 0:
+ pdodt = dual_S0;
+ break;
+ }
+ break;
+ case 0:
+ switch (pdimm[1].n_ranks) {
+ case 2:
+ pdodt = dual_0D;
+ break;
+ case 1:
+ pdodt = dual_0S;
+ break;
+ }
+ break;
+ }
+ }
+ }
+
+ /* Pick chip-select local options. */
+ for (i = 0; i < c->chip_selects_per_ctrl; i++) {
+ if (is_ddr1(popts)) {
+ popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
+ popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
+ } else {
+ popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
+ popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
+ popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
+ popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
+ }
+ popts->cs_local_opts[i].auto_precharge = 0;
+ }
+
+ /* Pick interleaving mode. */
+
+ /*
+ * 0 = no interleaving
+ * 1 = interleaving between 2 controllers
+ */
+ popts->memctl_interleaving = 0;
+
+ /*
+ * 0 = cacheline
+ * 1 = page
+ * 2 = (logical) bank
+ * 3 = superbank (only if CS interleaving is enabled)
+ */
+ popts->memctl_interleaving_mode = 0;
+
+ /*
+ * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
+ * 1: page: bit to the left of the column bits selects the memctl
+ * 2: bank: bit to the left of the bank bits selects the memctl
+ * 3: superbank: bit to the left of the chip select selects the memctl
+ *
+ * NOTE: ba_intlv (rank interleaving) is independent of memory
+ * controller interleaving; it is only within a memory controller.
+ * Must use superbank interleaving if rank interleaving is used and
+ * memory controller interleaving is enabled.
+ */
+
+ /*
+ * 0 = no
+ * 0x40 = CS0,CS1
+ * 0x20 = CS2,CS3
+ * 0x60 = CS0,CS1 + CS2,CS3
+ * 0x04 = CS0,CS1,CS2,CS3
+ */
+ popts->ba_intlv_ctl = 0;
+
+ /* Memory Organization Parameters */
+ popts->registered_dimm_en = common_dimm->all_dimms_registered;
+
+ /*
+ * Choose DQS config
+ * 0 for DDR1
+ * 1 for DDR2
+ */
+ if (is_ddr2(popts) || is_ddr3(popts))
+ popts->dqs_config = 1;
+
+ /* Choose self-refresh during sleep. */
+ popts->self_refresh_in_sleep = 1;
+
+ /* Choose dynamic power management mode. */
+ popts->dynamic_power = 0;
+
+ popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
+
+	/* Choose data bus width, burst length and (DDR3/DDR4) address mirroring */
+ if (is_ddr3_4(popts)) {
+ if (pdimm[0].n_ranks != 0) {
+ if (pdimm[0].primary_sdram_width == 64)
+ popts->data_bus_width = 0;
+ else if (pdimm[0].primary_sdram_width == 32)
+ popts->data_bus_width = 1;
+ else if (pdimm[0].primary_sdram_width == 16)
+ popts->data_bus_width = 2;
+ else {
+ panic("Error: primary sdram width %u is invalid!\n",
+ pdimm[0].primary_sdram_width);
+ }
+ }
+
+ if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
+ /* 32-bit or 16-bit bus */
+ popts->otf_burst_chop_en = 0;
+ popts->burst_length = DDR_BL8;
+ } else {
+ popts->otf_burst_chop_en = 1; /* on-the-fly burst chop */
+ popts->burst_length = DDR_OTF; /* on-the-fly BC4 and BL8 */
+ }
+
+ for (i = 0; i < c->dimm_slots_per_ctrl; i++) {
+ if (pdimm[i].n_ranks) {
+ popts->mirrored_dimm = pdimm[i].mirrored_dimm;
+ break;
+ }
+ }
+ } else {
+ if (pdimm[0].n_ranks != 0) {
+ if ((pdimm[0].data_width >= 64) && \
+ (pdimm[0].data_width <= 72))
+ popts->data_bus_width = 0;
+ else if ((pdimm[0].data_width >= 32) && \
+ (pdimm[0].data_width <= 40))
+ popts->data_bus_width = 1;
+ else {
+ panic("Error: data width %u is invalid!\n",
+ pdimm[0].data_width);
+ }
+ }
+
+ popts->burst_length = DDR_BL4; /* has to be 4 for DDR2 */
+ }
+
+
+ /* Global Timing Parameters. */
+ debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(c));
+
+ /* Pick a caslat override. */
+ popts->cas_latency_override = 0;
+ popts->cas_latency_override_value = 3;
+ if (popts->cas_latency_override) {
+ debug("using caslat override value = %u\n",
+ popts->cas_latency_override_value);
+ }
+
+ /* Decide whether to use the computed derated latency */
+ popts->use_derated_caslat = 0;
+
+ /* Choose an additive latency. */
+ popts->additive_latency_override = 0;
+ popts->additive_latency_override_value = 3;
+ if (popts->additive_latency_override) {
+ debug("using additive latency override value = %u\n",
+ popts->additive_latency_override_value);
+ }
+
+ /*
+ * 2T_EN setting
+ *
+ * Factors to consider for 2T_EN:
+ * - number of DIMMs installed
+ * - number of components, number of active ranks
+ * - how much time you want to spend playing around
+ */
+ popts->twot_en = 0;
+ popts->threet_en = 0;
+
+ /* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
+ if (popts->registered_dimm_en)
+ popts->ap_en = 1; /* 0 = disable, 1 = enable */
+ else
+ popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */
+
+ /*
+ * BSTTOPRE precharge interval
+ *
+ * Set this to 0 for global auto precharge.
+ * A value of 0x100 has traditionally been used for DDR1, DDR2 and
+ * DDR3 and is not wrong; any value works, but the best choice
+ * depends on the application. A reasonable default is 1/4 of the
+ * refresh interval.
+ */
+ popts->bstopre = picos_to_mclk(c, common_dimm->refresh_rate_ps)
+ >> 2;
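A quick sanity check of the 1/4-refint choice above (illustrative numbers, not taken from a specific DIMM):

	/*
	 * With refresh_rate_ps = 7800000 (7.8 us) at a 2100 MT/s data rate,
	 * picos_to_mclk() returns 8190 clocks, so bstopre = 8190 >> 2 = 2047,
	 * i.e. roughly a quarter of the refresh interval.
	 */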
+
+ /*
+ * Window for four activates -- tFAW
+ *
+ * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
+ * FIXME: varies depending upon number of column addresses or data
+ * FIXME: width, was considering looking at pdimm->primary_sdram_width
+ */
+ if (is_ddr1(popts))
+ popts->tfaw_window_four_activates_ps = mclk_to_picos(c, 1);
+ else if (is_ddr2(popts))
+ /*
+ * x4/x8; some datasheets have 35000
+ * x16 wide columns only? Use 50000?
+ */
+ popts->tfaw_window_four_activates_ps = 37500;
+ else
+ popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
+
+ popts->zq_en = 0;
+ popts->wrlvl_en = 0;
+
+ if (is_ddr3_4(popts)) {
+ /*
+ * Because DDR3/DDR4 DIMMs use a fly-by topology, enable
+ * write leveling so that tQDSS is met under different
+ * loading conditions.
+ */
+ popts->wrlvl_en = 1;
+ popts->zq_en = 1;
+ popts->wrlvl_override = 0;
+ }
+
+ if (pdimm[0].n_ranks == 4)
+ popts->quad_rank_present = 1;
+
+ popts->package_3ds = pdimm->package_3ds;
+
+ if (!is_ddr4(popts)) {
+ ulong ddr_freq = c->ddr_freq / 1000000;
+ if (popts->registered_dimm_en) {
+ popts->rcw_override = 1;
+ popts->rcw_1 = 0x000a5a00;
+ if (ddr_freq <= 800)
+ popts->rcw_2 = 0x00000000;
+ else if (ddr_freq <= 1066)
+ popts->rcw_2 = 0x00100000;
+ else if (ddr_freq <= 1333)
+ popts->rcw_2 = 0x00200000;
+ else
+ popts->rcw_2 = 0x00300000;
+ }
+ }
+
+ if (c->board_options)
+ c->board_options(popts, pdimm, c);
+
+ return 0;
+}
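Boards hook into this via c->board_options(), called last so they can override any generic choice made above. A minimal sketch of such a hook (the struct name for popts is assumed here; the boards added by this series may do more):

static void myboard_ddr_options(struct memctl_options *popts,
				struct dimm_params *pdimm,
				struct fsl_ddr_controller *c)
{
	/* soldered-down memory with short, matched traces: 1T is fine */
	popts->twot_en = 0;
	/* prefer global auto precharge over the 1/4-refint default */
	popts->bstopre = 0;
}

The board code would assign such a function to c->board_options before running the DDR setup.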
+
+void check_interleaving_options(struct fsl_ddr_info *pinfo)
+{
+ int i, j, k, check_n_ranks, intlv_invalid = 0;
+ unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
+ unsigned long long check_rank_density;
+ struct dimm_params *dimm;
+
+ /*
+ * Check if all controllers are configured for memory
+ * controller interleaving. Identical dimms are recommended. At least
+ * the size, row and col address should be checked.
+ */
+ j = 0;
+ check_n_ranks = pinfo->c[0].dimm_params[0].n_ranks;
+ check_rank_density = pinfo->c[0].dimm_params[0].rank_density;
+ check_n_row_addr = pinfo->c[0].dimm_params[0].n_row_addr;
+ check_n_col_addr = pinfo->c[0].dimm_params[0].n_col_addr;
+ check_intlv = pinfo->c[0].memctl_opts.memctl_interleaving_mode;
+ for (i = 0; i < pinfo->num_ctrls; i++) {
+ dimm = &pinfo->c[i].dimm_params[0];
+ if (!pinfo->c[i].memctl_opts.memctl_interleaving) {
+ continue;
+ } else if (((check_rank_density != dimm->rank_density) ||
+ (check_n_ranks != dimm->n_ranks) ||
+ (check_n_row_addr != dimm->n_row_addr) ||
+ (check_n_col_addr != dimm->n_col_addr) ||
+ (check_intlv !=
+ pinfo->c[i].memctl_opts.memctl_interleaving_mode))){
+ intlv_invalid = 1;
+ break;
+ } else {
+ j++;
+ }
+
+ }
+ if (intlv_invalid) {
+ for (i = 0; i < pinfo->num_ctrls; i++)
+ pinfo->c[i].memctl_opts.memctl_interleaving = 0;
+ printf("Not all DIMMs are identical. "
+ "Memory controller interleaving disabled.\n");
+ } else {
+ switch (check_intlv) {
+ case FSL_DDR_256B_INTERLEAVING:
+ case FSL_DDR_CACHE_LINE_INTERLEAVING:
+ case FSL_DDR_PAGE_INTERLEAVING:
+ case FSL_DDR_BANK_INTERLEAVING:
+ case FSL_DDR_SUPERBANK_INTERLEAVING:
+ if (pinfo->num_ctrls == 3)
+ k = 2;
+ else
+ k = pinfo->num_ctrls;
+ break;
+ case FSL_DDR_3WAY_1KB_INTERLEAVING:
+ case FSL_DDR_3WAY_4KB_INTERLEAVING:
+ case FSL_DDR_3WAY_8KB_INTERLEAVING:
+ case FSL_DDR_4WAY_1KB_INTERLEAVING:
+ case FSL_DDR_4WAY_4KB_INTERLEAVING:
+ case FSL_DDR_4WAY_8KB_INTERLEAVING:
+ default:
+ k = pinfo->num_ctrls;
+ break;
+ }
+ debug("%d of %d controllers are interleaving.\n", j, k);
+ if (j && (j != k)) {
+ for (i = 0; i < pinfo->num_ctrls; i++)
+ pinfo->c[i].memctl_opts.memctl_interleaving = 0;
+ if (pinfo->num_ctrls > 1)
+ printf("Not all controllers have compatible interleaving mode. All disabled.\n");
+ }
+ }
+ debug("Checking interleaving options completed\n");
+}
diff --git a/drivers/ddr/fsl/util.c b/drivers/ddr/fsl/util.c
new file mode 100644
index 0000000000..977d22dcaa
--- /dev/null
+++ b/drivers/ddr/fsl/util.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2008-2014 Freescale Semiconductor, Inc.
+ */
+
+#include <common.h>
+#include <soc/fsl/fsl_ddr_sdram.h>
+#include <soc/fsl/fsl_immap.h>
+#include <io.h>
+#include <soc/fsl/immap_lsch2.h>
+#include <asm-generic/div64.h>
+#include "fsl_ddr.h"
+
+/* To avoid 64-bit full-divides, we factor this here */
+#define ULL_2E12 2000000000000ULL
+#define UL_5POW12 244140625UL
+#define UL_2POW13 (1UL << 13)
+
+#define ULL_8FS 0xFFFFFFFFULL
+
+u32 fsl_ddr_get_version(struct fsl_ddr_controller *c)
+{
+ struct ccsr_ddr __iomem *ddr = c->base;
+ u32 ver_major_minor_errata;
+
+ ver_major_minor_errata = (ddr_in32(&ddr->ip_rev1) & 0xFFFF) << 8;
+ ver_major_minor_errata |= (ddr_in32(&ddr->ip_rev2) & 0xFF00) >> 8;
+
+ return ver_major_minor_errata;
+}
+
+/*
+ * Round mclk_ps up to the nearest 1 ps if the error is
+ * 0.5 ps or more.
+ *
+ * If rounding error propagation makes an imprecise data rate appear
+ * too high, a suitably rounded mclk_ps still yields a working memory
+ * controller configuration.
+ */
+unsigned int get_memory_clk_period_ps(struct fsl_ddr_controller *c)
+{
+ unsigned int data_rate = c->ddr_freq;
+ unsigned int result;
+
+ /* Round to nearest 1 ps, being careful about 64-bit multiply/divide */
+ unsigned long long rem, mclk_ps = ULL_2E12;
+
+ /* Now perform the big divide, the result fits in 32-bits */
+ rem = do_div(mclk_ps, data_rate);
+ result = (rem >= (data_rate >> 1)) ? mclk_ps + 1 : mclk_ps;
+
+ return result;
+}
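Two illustrative data rates (do_div() leaves the quotient in mclk_ps and returns the remainder):

	/*
	 *   data_rate = 1600000000: 2e12 / 1.6e9 = 1250, remainder 0  -> 1250 ps
	 *   data_rate = 2133333333: quotient 937, remainder > rate/2  ->  938 ps
	 * i.e. the ~937.5 ps period of a 2133 MT/s part rounds up to 938 ps.
	 */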
+
+/* Convert picoseconds into DRAM clock cycles (rounding up if needed). */
+unsigned int picos_to_mclk(struct fsl_ddr_controller *c, unsigned int picos)
+{
+ unsigned long long clks, clks_rem;
+ unsigned int data_rate = c->ddr_freq;
+
+ /* Short circuit for zero picos */
+ if (!picos)
+ return 0;
+
+ /* First multiply the time by the data rate (32x32 => 64) */
+ clks = picos * (unsigned long long)data_rate;
+ /*
+ * Now divide by 5^12 and track the 32-bit remainder, then divide
+ * by 2*(2^12) using shifts (and updating the remainder).
+ */
+ clks_rem = do_div(clks, UL_5POW12);
+ clks_rem += (clks & (UL_2POW13-1)) * UL_5POW12;
+ clks >>= 13;
+
+ /* If we had a remainder greater than the 1ps error, then round up */
+ if (clks_rem > data_rate)
+ clks++;
+
+ /* Clamp to the maximum representable value */
+ if (clks > ULL_8FS)
+ clks = ULL_8FS;
+ return (unsigned int) clks;
+}
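The constants at the top of the file work because 2 * 10^12 = 5^12 * 2^13, so one 32-bit divide plus a shift replaces a full 64-by-64 division. A worked example with hypothetical values:

	/*
	 *   picos = 350000 (a 350 ns tRFC), data_rate = 2100000000:
	 *   clks            = 350000 * 2100000000 = 735e12
	 *   clks / 5^12     = 3010560, remainder 0
	 *   low 13 bits     = 4096 -> clks_rem = 4096 * 5^12 = 1e12
	 *   clks >> 13      = 367
	 *   clks_rem > data_rate -> round up to 368 clocks
	 * matching 350000 ps / ~952.4 ps = 367.5, rounded up.
	 */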
+
+unsigned int mclk_to_picos(struct fsl_ddr_controller *c, unsigned int mclk)
+{
+ return get_memory_clk_period_ps(c) * mclk;
+}
+
+void fsl_ddr_set_intl3r(const unsigned int granule_size)
+{
+}
+
+u32 fsl_ddr_get_intl3r(void)
+{
+ u32 val = 0;
+ return val;
+}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a25a871809..6d874357b7 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -18,7 +18,7 @@ config I2C_AT91
config I2C_IMX
bool "MPC85xx/MPC5200/i.MX I2C Master driver"
- depends on (ARCH_IMX && !ARCH_IMX1) || ARCH_MPC85XX || ARCH_MPC5200
+ depends on (ARCH_IMX && !ARCH_IMX1) || ARCH_MPC85XX || ARCH_MPC5200 || ARCH_LAYERSCAPE
help
If you say yes to this option, support will be included for many
built-in I2C master controllers found in Freescale SoCs. This is true
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 4c7346063c..6911f803b2 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -47,7 +47,6 @@
#include <io.h>
#include <i2c/i2c.h>
-#include <mach/clock.h>
#include "i2c-imx.h"
@@ -276,6 +275,9 @@ static void i2c_fsl_stop(struct i2c_adapter *adapter)
}
#ifdef CONFIG_PPC
+
+#include <mach/clock.h>
+
static void i2c_fsl_set_clk(struct fsl_i2c_struct *i2c_fsl,
unsigned int rate)
{
diff --git a/drivers/mci/imx-esdhc-pbl.c b/drivers/mci/imx-esdhc-pbl.c
index 8a03c5fcf4..f7f8c3348d 100644
--- a/drivers/mci/imx-esdhc-pbl.c
+++ b/drivers/mci/imx-esdhc-pbl.c
@@ -15,11 +15,12 @@
#include <io.h>
#include <mci.h>
#include <linux/sizes.h>
+#include <asm-generic/sections.h>
+#include <mach/xload.h>
#ifdef CONFIG_ARCH_IMX
#include <mach/atf.h>
#include <mach/imx6-regs.h>
#include <mach/imx8mq-regs.h>
-#include <mach/xload.h>
#include <mach/imx-header.h>
#endif
#include "sdhci.h"
@@ -406,3 +407,60 @@ int imx8_esdhc_start_image(int instance)
MX8MQ_ATF_BL33_BASE_ADDR, SZ_32K);
}
#endif
+
+#ifdef CONFIG_ARCH_LS1046
+
+/*
+ * The image on the SD card starts at 0x1000. We reserved 128KiB for the PBL,
+ * so the 2nd stage image starts here:
+ */
+#define LS1046A_SD_IMAGE_OFFSET (SZ_4K + SZ_128K)
+
+/**
+ * ls1046a_esdhc_start_image - Load and start a 2nd stage from the ESDHC controller
+ *
+ * This loads a second stage barebox from an SD card and starts it. We
+ * assume the image has been generated with scripts/pblimage.c, which puts
+ * the second stage at an offset of 128KiB in the image.
+ *
+ * Return: If successful, this function does not return. A negative error
+ * code is returned when this function fails.
+ */
+int ls1046a_esdhc_start_image(unsigned long r0, unsigned long r1, unsigned long r2)
+{
+ int ret;
+ uint32_t val;
+ struct esdhc esdhc = {
+ .regs = IOMEM(0x01560000),
+ .is_be = true,
+ };
+ unsigned long sdram = 0x80000000;
+ void (*barebox)(unsigned long, unsigned long, unsigned long) =
+ (void *)(sdram + LS1046A_SD_IMAGE_OFFSET);
+
+ /*
+ * The ROM leaves us here with a clock frequency of around 400kHz. Speed
+ * this up a bit. FIXME: The resulting frequency has not yet been verified
+ * to work on all cards.
+ */
+ val = esdhc_read32(&esdhc, SDHCI_CLOCK_CONTROL__TIMEOUT_CONTROL__SOFTWARE_RESET);
+ val &= ~0x0000fff0;
+ val |= (2 << 8) | (6 << 4);
+ esdhc_write32(&esdhc, SDHCI_CLOCK_CONTROL__TIMEOUT_CONTROL__SOFTWARE_RESET, val);
+
+ esdhc_write32(&esdhc, ESDHC_DMA_SYSCTL, ESDHC_SYSCTL_DMA_SNOOP);
+
+ ret = esdhc_read_blocks(&esdhc, (void *)sdram,
+ ALIGN(barebox_image_size + LS1046A_SD_IMAGE_OFFSET, 512));
+ if (ret) {
+ pr_err("%s: reading blocks failed with: %d\n", __func__, ret);
+ return ret;
+ }
+
+ printf("Starting barebox\n");
+
+ barebox(r0, r1, r2);
+
+ return -EINVAL;
+}
+#endif
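For illustration only (the real entry code in the ls1046ardb/tqmls1046a lowlevel files may differ): once DRAM is up, an SD-booted PBL would chain-load the full image roughly like this, relying on the call never returning on success:

	ret = ls1046a_esdhc_start_image(0, 0, 0);
	pr_err("booting 2nd stage from SD/eMMC failed: %d\n", ret);
	hang();		/* barebox helper; only reached on error */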
diff --git a/drivers/mci/imx-esdhc.c b/drivers/mci/imx-esdhc.c
index 2fa0974742..f71ca539ed 100644
--- a/drivers/mci/imx-esdhc.c
+++ b/drivers/mci/imx-esdhc.c
@@ -741,7 +741,8 @@ static struct esdhc_soc_data usdhc_imx6sx_data = {
};
static struct esdhc_soc_data esdhc_ls_data = {
- .flags = ESDHC_FLAG_MULTIBLK_NO_INT | ESDHC_FLAG_BIGENDIAN,
+ .flags = ESDHC_FLAG_MULTIBLK_NO_INT | ESDHC_FLAG_BIGENDIAN |
+ ESDHC_FLAG_CACHE_SNOOPING,
};
static __maybe_unused struct of_device_id fsl_esdhc_compatible[] = {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1aa096f005..3e3de5a975 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -137,6 +137,14 @@ config DRIVER_NET_FEC_IMX
depends on ARCH_HAS_FEC_IMX
select PHYLIB
+config DRIVER_NET_FSL_FMAN
+ bool "Freescale fman ethernet driver"
+ select PHYLIB
+ select FSL_QE_FIRMWARE
+ help
+ This option enables support for the Freescale fman core found
+ on Layerscape SoCs.
+
config DRIVER_NET_GIANFAR
bool "Gianfar Ethernet"
depends on ARCH_MPC85XX
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 304bbba02d..6ccd22cc10 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_DRIVER_NET_ENC28J60) += enc28j60.o
obj-$(CONFIG_DRIVER_NET_EP93XX) += ep93xx.o
obj-$(CONFIG_DRIVER_NET_ETHOC) += ethoc.o
obj-$(CONFIG_DRIVER_NET_FEC_IMX) += fec_imx.o
+obj-$(CONFIG_DRIVER_NET_FSL_FMAN) += fsl-fman.o
obj-$(CONFIG_DRIVER_NET_GIANFAR) += gianfar.o
obj-$(CONFIG_DRIVER_NET_KS8851_MLL) += ks8851_mll.o
obj-$(CONFIG_DRIVER_NET_MACB) += macb.o
diff --git a/drivers/net/fsl-fman.c b/drivers/net/fsl-fman.c
new file mode 100644
index 0000000000..1a11ca4926
--- /dev/null
+++ b/drivers/net/fsl-fman.c
@@ -0,0 +1,1333 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2009-2012 Freescale Semiconductor, Inc.
+ * Dave Liu <daveliu@freescale.com>
+ */
+
+#include <common.h>
+#include <io.h>
+#include <malloc.h>
+#include <net.h>
+#include <init.h>
+#include <of_net.h>
+#include <dma.h>
+#include <firmware.h>
+#include <soc/fsl/qe.h>
+#include <soc/fsl/fsl_fman.h>
+#include <soc/fsl/fsl_memac.h>
+
+/* Port ID */
+#define OH_PORT_ID_BASE 0x01
+#define MAX_NUM_OH_PORT 7
+#define RX_PORT_1G_BASE 0x08
+#define MAX_NUM_RX_PORT_1G 8
+#define RX_PORT_10G_BASE 0x10
+#define RX_PORT_10G_BASE2 0x08
+#define TX_PORT_1G_BASE 0x28
+#define MAX_NUM_TX_PORT_1G 8
+#define TX_PORT_10G_BASE 0x30
+#define TX_PORT_10G_BASE2 0x28
+#define MIIM_TIMEOUT 0xFFFF
+
+struct fm_muram {
+ void *base;
+ void *top;
+ size_t size;
+ void *alloc;
+};
+#define FM_MURAM_RES_SIZE 0x01000
+
+/* Rx/Tx buffer descriptor */
+struct fm_port_bd {
+ u16 status;
+ u16 len;
+ u32 res0;
+ u16 res1;
+ u16 buf_ptr_hi;
+ u32 buf_ptr_lo;
+};
+
+/* Common BD flags */
+#define BD_LAST 0x0800
+
+/* Rx BD status flags */
+#define RxBD_EMPTY 0x8000
+#define RxBD_LAST BD_LAST
+#define RxBD_FIRST 0x0400
+#define RxBD_PHYS_ERR 0x0008
+#define RxBD_SIZE_ERR 0x0004
+#define RxBD_ERROR (RxBD_PHYS_ERR | RxBD_SIZE_ERR)
+
+/* Tx BD status flags */
+#define TxBD_READY 0x8000
+#define TxBD_LAST BD_LAST
+
+/* Rx/Tx queue descriptor */
+struct fm_port_qd {
+ u16 gen;
+ u16 bd_ring_base_hi;
+ u32 bd_ring_base_lo;
+ u16 bd_ring_size;
+ u16 offset_in;
+ u16 offset_out;
+ u16 res0;
+ u32 res1[0x4];
+};
+
+/* IM global parameter RAM */
+struct fm_port_global_pram {
+ u32 mode; /* independent mode register */
+ u32 rxqd_ptr; /* Rx queue descriptor pointer */
+ u32 txqd_ptr; /* Tx queue descriptor pointer */
+ u16 mrblr; /* max Rx buffer length */
+ u16 rxqd_bsy_cnt; /* RxQD busy counter, should be cleared */
+ u32 res0[0x4];
+ struct fm_port_qd rxqd; /* Rx queue descriptor */
+ struct fm_port_qd txqd; /* Tx queue descriptor */
+ u32 res1[0x28];
+};
+
+#define FM_PRAM_SIZE sizeof(struct fm_port_global_pram)
+#define FM_PRAM_ALIGN 256
+#define PRAM_MODE_GLOBAL 0x20000000
+#define PRAM_MODE_GRACEFUL_STOP 0x00800000
+
+#define FM_FREE_POOL_SIZE 0x20000 /* 128K bytes */
+#define FM_FREE_POOL_ALIGN 256
+
+/* Fman ethernet private struct */
+struct fm_eth {
+ struct fm_bmi_tx_port *tx_port;
+ struct fm_bmi_rx_port *rx_port;
+ phy_interface_t enet_if;
+ struct eth_device edev;
+ struct device_d *dev;
+ struct fm_port_global_pram *rx_pram; /* Rx parameter table */
+ struct fm_port_global_pram *tx_pram; /* Tx parameter table */
+ void *rx_bd_ring; /* Rx BD ring base */
+ void *cur_rxbd; /* current Rx BD */
+ void *rx_buf; /* Rx buffer base */
+ void *tx_bd_ring; /* Tx BD ring base */
+ void *cur_txbd; /* current Tx BD */
+ struct memac *regs;
+};
+
+#define RX_BD_RING_SIZE 8
+#define TX_BD_RING_SIZE 8
+#define MAX_RXBUF_LOG2 11
+#define MAX_RXBUF_LEN (1 << MAX_RXBUF_LOG2)
+
+struct fsl_fman_mdio {
+ struct mii_bus bus;
+ struct memac_mdio_controller *regs;
+};
+
+enum fman_port_type {
+ FMAN_PORT_TYPE_RX = 0, /* RX Port */
+ FMAN_PORT_TYPE_TX, /* TX Port */
+};
+
+struct fsl_fman_port {
+ void *regs;
+ enum fman_port_type type;
+ struct fm_bmi_rx_port *rxport;
+ struct fm_bmi_tx_port *txport;
+};
+
+static struct fm_eth *to_fm_eth(struct eth_device *edev)
+{
+ return container_of(edev, struct fm_eth, edev);
+}
+
+static struct fm_muram muram;
+
+static void *fm_muram_base(void)
+{
+ return muram.base;
+}
+
+static void *fm_muram_alloc(size_t size, unsigned long align)
+{
+ void *ret;
+ unsigned long align_mask;
+ size_t off;
+ void *save;
+
+ align_mask = align - 1;
+ save = muram.alloc;
+
+ off = (unsigned long)save & align_mask;
+ if (off != 0)
+ muram.alloc += (align - off);
+ off = size & align_mask;
+ if (off != 0)
+ size += (align - off);
+ if ((muram.alloc + size) >= muram.top) {
+ muram.alloc = save;
+ printf("%s: run out of ram.\n", __func__);
+ return NULL;
+ }
+
+ ret = muram.alloc;
+ muram.alloc += size;
+
+ return ret;
+}
+
+/*
+ * fm_upload_ucode - Fman microcode upload worker function
+ *
+ * This function does the actual uploading of an Fman microcode
+ * to an Fman.
+ */
+static int fm_upload_ucode(struct fm_imem *imem,
+ u32 *ucode, unsigned int size)
+{
+ unsigned int i;
+ unsigned int timeout = 1000000;
+
+ /* enable address auto increase */
+ out_be32(&imem->iadd, IRAM_IADD_AIE);
+ /* write microcode to IRAM */
+ for (i = 0; i < size / 4; i++)
+ out_be32(&imem->idata, (be32_to_cpu(ucode[i])));
+
+ /* verify if the writing is over */
+ out_be32(&imem->iadd, 0);
+ while ((in_be32(&imem->idata) != be32_to_cpu(ucode[0])) && --timeout)
+ ;
+ if (!timeout) {
+ printf("microcode upload timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* enable microcode from IRAM */
+ out_be32(&imem->iready, IRAM_READY);
+
+ return 0;
+}
+
+static int fman_upload_firmware(struct device_d *dev, struct fm_imem *fm_imem)
+{
+ int i, size, ret;
+ const struct qe_firmware *firmware;
+
+ get_builtin_firmware(fsl_fman_ucode_ls1046_r1_0_106_4_18_bin, &firmware, &size);
+
+ ret = qe_validate_firmware(firmware, size);
+ if (ret)
+ return ret;
+
+ if (firmware->count != 1) {
+ dev_err(dev, "Invalid data in firmware header\n");
+ return -EINVAL;
+ }
+
+ /* Loop through each microcode. */
+ for (i = 0; i < firmware->count; i++) {
+ const struct qe_microcode *ucode = &firmware->microcode[i];
+
+ /* Upload a microcode if it's present */
+ if (be32_to_cpu(ucode->code_offset)) {
+ u32 ucode_size;
+ u32 *code;
+ dev_info(dev, "Uploading microcode version %u.%u.%u\n",
+ ucode->major, ucode->minor,
+ ucode->revision);
+ code = (void *)firmware +
+ be32_to_cpu(ucode->code_offset);
+ ucode_size = sizeof(u32) * be32_to_cpu(ucode->count);
+ ret = fm_upload_ucode(fm_imem, code, ucode_size);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static u32 fm_assign_risc(int port_id)
+{
+ u32 risc_sel, val;
+ risc_sel = (port_id & 0x1) ? FMFPPRC_RISC2 : FMFPPRC_RISC1;
+ val = (port_id << FMFPPRC_PORTID_SHIFT) & FMFPPRC_PORTID_MASK;
+ val |= ((risc_sel << FMFPPRC_ORA_SHIFT) | risc_sel);
+
+ return val;
+}
+
+static void fm_init_fpm(struct fm_fpm *fpm)
+{
+ int i, port_id;
+ u32 val;
+
+ setbits_be32(&fpm->fmfpee, FMFPEE_EHM | FMFPEE_UEC |
+ FMFPEE_CER | FMFPEE_DER);
+
+ /* IM mode, each even port ID to RISC#1, each odd port ID to RISC#2 */
+
+ /* offline/parser port */
+ for (i = 0; i < MAX_NUM_OH_PORT; i++) {
+ port_id = OH_PORT_ID_BASE + i;
+ val = fm_assign_risc(port_id);
+ out_be32(&fpm->fpmprc, val);
+ }
+ /* Rx 1G port */
+ for (i = 0; i < MAX_NUM_RX_PORT_1G; i++) {
+ port_id = RX_PORT_1G_BASE + i;
+ val = fm_assign_risc(port_id);
+ out_be32(&fpm->fpmprc, val);
+ }
+ /* Tx 1G port */
+ for (i = 0; i < MAX_NUM_TX_PORT_1G; i++) {
+ port_id = TX_PORT_1G_BASE + i;
+ val = fm_assign_risc(port_id);
+ out_be32(&fpm->fpmprc, val);
+ }
+ /* Rx 10G port */
+ port_id = RX_PORT_10G_BASE;
+ val = fm_assign_risc(port_id);
+ out_be32(&fpm->fpmprc, val);
+ /* Tx 10G port */
+ port_id = TX_PORT_10G_BASE;
+ val = fm_assign_risc(port_id);
+ out_be32(&fpm->fpmprc, val);
+
+ /* disable the dispatch limit in IM case */
+ out_be32(&fpm->fpmflc, FMFP_FLC_DISP_LIM_NONE);
+ /* clear events */
+ out_be32(&fpm->fmfpee, FMFPEE_CLEAR_EVENT);
+
+ /* clear risc events */
+ for (i = 0; i < 4; i++)
+ out_be32(&fpm->fpmcev[i], 0xffffffff);
+
+ /* clear error */
+ out_be32(&fpm->fpmrcr, FMFP_RCR_MDEC | FMFP_RCR_IDEC);
+}
+
+static int fm_init_bmi(struct fm_bmi_common *bmi)
+{
+ int blk, i, port_id;
+ u32 val;
+ size_t offset;
+ void *base;
+
+ /* alloc free buffer pool in MURAM */
+ base = fm_muram_alloc(FM_FREE_POOL_SIZE, FM_FREE_POOL_ALIGN);
+ if (!base) {
+ printf("%s: no muram for free buffer pool\n", __func__);
+ return -ENOMEM;
+ }
+ offset = base - fm_muram_base();
+
+ /* Need 128KB total free buffer pool size */
+ val = offset / 256;
+ blk = FM_FREE_POOL_SIZE / 256;
+ /* in IM, we must not begin from offset 0 in MURAM */
+ val |= ((blk - 1) << FMBM_CFG1_FBPS_SHIFT);
+ out_be32(&bmi->fmbm_cfg1, val);
+
+ /* disable all BMI interrupts */
+ out_be32(&bmi->fmbm_ier, FMBM_IER_DISABLE_ALL);
+
+ /* clear all events */
+ out_be32(&bmi->fmbm_ievr, FMBM_IEVR_CLEAR_ALL);
+
+ /*
+ * set port parameters - FMBM_PP_x
+ * max tasks: 10G Rx/Tx = 12, 1G Rx/Tx = 4, others = 1
+ * max dma: 10G Rx/Tx = 3, others = 1
+ * set port FIFO size - FMBM_PFS_x
+ * 4KB for all Rx and Tx ports
+ */
+ /* offline/parser port */
+ for (i = 0; i < MAX_NUM_OH_PORT; i++) {
+ port_id = OH_PORT_ID_BASE + i - 1;
+ /* max tasks=1, max dma=1, no extra */
+ out_be32(&bmi->fmbm_pp[port_id], 0);
+ /* port FIFO size - 256 bytes, no extra */
+ out_be32(&bmi->fmbm_pfs[port_id], 0);
+ }
+ /* Rx 1G port */
+ for (i = 0; i < MAX_NUM_RX_PORT_1G; i++) {
+ port_id = RX_PORT_1G_BASE + i - 1;
+ /* max tasks=4, max dma=1, no extra */
+ out_be32(&bmi->fmbm_pp[port_id], FMBM_PP_MXT(4));
+ /* FIFO size - 4KB, no extra */
+ out_be32(&bmi->fmbm_pfs[port_id], FMBM_PFS_IFSZ(0xf));
+ }
+ /* Tx 1G port FIFO size - 4KB, no extra */
+ for (i = 0; i < MAX_NUM_TX_PORT_1G; i++) {
+ port_id = TX_PORT_1G_BASE + i - 1;
+ /* max tasks=4, max dma=1, no extra */
+ out_be32(&bmi->fmbm_pp[port_id], FMBM_PP_MXT(4));
+ /* FIFO size - 4KB, no extra */
+ out_be32(&bmi->fmbm_pfs[port_id], FMBM_PFS_IFSZ(0xf));
+ }
+ /* Rx 10G port */
+ port_id = RX_PORT_10G_BASE - 1;
+ /* max tasks=12, max dma=3, no extra */
+ out_be32(&bmi->fmbm_pp[port_id], FMBM_PP_MXT(12) | FMBM_PP_MXD(3));
+ /* FIFO size - 4KB, no extra */
+ out_be32(&bmi->fmbm_pfs[port_id], FMBM_PFS_IFSZ(0xf));
+
+ /* Tx 10G port */
+ port_id = TX_PORT_10G_BASE - 1;
+ /* max tasks=12, max dma=3, no extra */
+ out_be32(&bmi->fmbm_pp[port_id], FMBM_PP_MXT(12) | FMBM_PP_MXD(3));
+ /* FIFO size - 4KB, no extra */
+ out_be32(&bmi->fmbm_pfs[port_id], FMBM_PFS_IFSZ(0xf));
+
+ /* initialize internal buffers data base (linked list) */
+ out_be32(&bmi->fmbm_init, FMBM_INIT_START);
+
+ return 0;
+}
+
+static void fm_init_qmi(struct fm_qmi_common *qmi)
+{
+ /* disable all error interrupts */
+ out_be32(&qmi->fmqm_eien, FMQM_EIEN_DISABLE_ALL);
+ /* clear all error events */
+ out_be32(&qmi->fmqm_eie, FMQM_EIE_CLEAR_ALL);
+
+ /* disable all interrupts */
+ out_be32(&qmi->fmqm_ien, FMQM_IEN_DISABLE_ALL);
+ /* clear all interrupts */
+ out_be32(&qmi->fmqm_ie, FMQM_IE_CLEAR_ALL);
+}
+
+static int fm_init_common(struct device_d *dev, struct ccsr_fman *reg)
+{
+ int ret;
+
+ /* Upload the Fman microcode if it's present */
+ ret = fman_upload_firmware(dev, &reg->fm_imem);
+ if (ret)
+ return ret;
+
+ fm_init_qmi(&reg->fm_qmi_common);
+ fm_init_fpm(&reg->fm_fpm);
+
+ /* clear DMA status */
+ setbits_be32(&reg->fm_dma.fmdmsr, FMDMSR_CLEAR_ALL);
+
+ /* set DMA mode */
+ setbits_be32(&reg->fm_dma.fmdmmr, FMDMMR_SBER);
+
+ return fm_init_bmi(&reg->fm_bmi_common);
+}
+
+#define memac_out_32(a, v) out_be32(a, v)
+#define memac_in_32(a) in_be32(a)
+#define memac_clrbits_32(a, v) clrbits_be32(a, v)
+#define memac_setbits_32(a, v) setbits_be32(a, v)
+
+static int memac_mdio_write(struct mii_bus *bus, int port_addr, int regnum, u16 value)
+{
+ struct fsl_fman_mdio *priv = container_of(bus, struct fsl_fman_mdio, bus);
+ struct memac_mdio_controller *regs = priv->regs;
+ u32 mdio_ctl;
+
+ memac_clrbits_32(&regs->mdio_stat, MDIO_STAT_ENC);
+
+ /* Wait till the bus is free */
+ while ((memac_in_32(&regs->mdio_stat)) & MDIO_STAT_BSY)
+ ;
+
+ /* Set the port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(port_addr) | MDIO_CTL_DEV_ADDR(regnum);
+ memac_out_32(&regs->mdio_ctl, mdio_ctl);
+
+ /* Wait till the bus is free */
+ while ((memac_in_32(&regs->mdio_stat)) & MDIO_STAT_BSY)
+ ;
+
+ /* Write the value to the register */
+ memac_out_32(&regs->mdio_data, MDIO_DATA(value));
+
+ /* Wait till the MDIO write is complete */
+ while ((memac_in_32(&regs->mdio_data)) & MDIO_DATA_BSY)
+ ;
+
+ return 0;
+}
+
+static int memac_mdio_read(struct mii_bus *bus, int port_addr, int regnum)
+{
+ struct fsl_fman_mdio *priv = container_of(bus, struct fsl_fman_mdio, bus);
+ struct memac_mdio_controller *regs = priv->regs;
+ u32 mdio_ctl;
+
+ memac_clrbits_32(&regs->mdio_stat, MDIO_STAT_ENC);
+
+ /* Wait till the bus is free */
+ while ((memac_in_32(&regs->mdio_stat)) & MDIO_STAT_BSY)
+ ;
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(port_addr) | MDIO_CTL_DEV_ADDR(regnum);
+ memac_out_32(&regs->mdio_ctl, mdio_ctl);
+
+ /* Wait till the bus is free */
+ while ((memac_in_32(&regs->mdio_stat)) & MDIO_STAT_BSY)
+ ;
+
+ /* Initiate the read */
+ mdio_ctl |= MDIO_CTL_READ;
+ memac_out_32(&regs->mdio_ctl, mdio_ctl);
+
+ /* Wait till the MDIO write is complete */
+ while ((memac_in_32(&regs->mdio_data)) & MDIO_DATA_BSY)
+ ;
+
+ /* Return all Fs if nothing was there */
+ if (memac_in_32(&regs->mdio_stat) & MDIO_STAT_RD_ER)
+ return 0xffff;
+
+ return memac_in_32(&regs->mdio_data) & 0xffff;
+}
+
+static u16 muram_readw(u16 *addr)
+{
+ unsigned long base = (unsigned long)addr & ~0x3UL;
+ u32 val32 = in_be32((void *)base);
+ int byte_pos;
+ u16 ret;
+
+ byte_pos = (unsigned long)addr & 0x3UL;
+ if (byte_pos)
+ ret = (u16)(val32 & 0x0000ffff);
+ else
+ ret = (u16)((val32 & 0xffff0000) >> 16);
+
+ return ret;
+}
+
+static void muram_writew(u16 *addr, u16 val)
+{
+ unsigned long base = (unsigned long)addr & ~0x3UL;
+ u32 org32 = in_be32((void *)base);
+ u32 val32;
+ int byte_pos;
+
+ byte_pos = (unsigned long)addr & 0x3UL;
+ if (byte_pos)
+ val32 = (org32 & 0xffff0000) | val;
+ else
+ val32 = (org32 & 0x0000ffff) | ((u32)val << 16);
+
+ out_be32((void *)base, val32);
+}
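The two helpers above exist because MURAM is only safely accessed as 32-bit big-endian words; a 16-bit field is extracted from, or read-modify-written into, its containing word. For a word whose four bytes are AA BB CC DD (illustrative values):

	/*
	 *   muram_readw(base + 0) == 0xAABB   (upper half of the BE word)
	 *   muram_readw(base + 2) == 0xCCDD   (lower half)
	 * muram_writew() updates one half and preserves the other.
	 */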
+
+static void bmi_rx_port_disable(struct fm_bmi_rx_port *rx_port)
+{
+ int timeout = 1000000;
+
+ clrbits_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_EN);
+
+ /* wait until the rx port is not busy */
+ while ((in_be32(&rx_port->fmbm_rst) & FMBM_RST_BSY) && timeout--)
+ ;
+}
+
+static void bmi_rx_port_init(struct fm_bmi_rx_port *rx_port)
+{
+ /* set BMI to independent mode, Rx port disable */
+ out_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_IM);
+ /* clear FOF in IM case */
+ out_be32(&rx_port->fmbm_rim, 0);
+ /* Rx frame next engine -RISC */
+ out_be32(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX);
+ /* Rx command attribute - no order, MR[3] = 1 */
+ clrbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK);
+ setbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4));
+ /* enable Rx statistic counters */
+ out_be32(&rx_port->fmbm_rstc, FMBM_RSTC_EN);
+ /* disable Rx performance counters */
+ out_be32(&rx_port->fmbm_rpc, 0);
+}
+
+static void bmi_tx_port_disable(struct fm_bmi_tx_port *tx_port)
+{
+ int timeout = 1000000;
+
+ clrbits_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_EN);
+
+ /* wait until the tx port is not busy */
+ while ((in_be32(&tx_port->fmbm_tst) & FMBM_TST_BSY) && timeout--)
+ ;
+}
+
+static void bmi_tx_port_init(struct fm_bmi_tx_port *tx_port)
+{
+ /* set BMI to independent mode, Tx port disable */
+ out_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_IM);
+ /* Tx frame next engine -RISC */
+ out_be32(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
+ out_be32(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
+ /* Tx command attribute - no order, MR[3] = 1 */
+ clrbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK);
+ setbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4));
+ /* enable Tx statistic counters */
+ out_be32(&tx_port->fmbm_tstc, FMBM_TSTC_EN);
+ /* disable Tx performance counters */
+ out_be32(&tx_port->fmbm_tpc, 0);
+}
+
+static int fm_eth_rx_port_parameter_init(struct fm_eth *fm_eth)
+{
+ struct fm_port_global_pram *pram;
+ u32 pram_page_offset;
+ void *rx_bd_ring_base;
+ void *rx_buf_pool;
+ u32 bd_ring_base_lo, bd_ring_base_hi;
+ u32 buf_lo, buf_hi;
+ struct fm_port_bd *rxbd;
+ struct fm_port_qd *rxqd;
+ struct fm_bmi_rx_port *bmi_rx_port = fm_eth->rx_port;
+ int i;
+
+ /* alloc global parameter ram at MURAM */
+ pram = fm_muram_alloc(FM_PRAM_SIZE, FM_PRAM_ALIGN);
+ if (!pram) {
+ printf("%s: No muram for Rx global parameter\n", __func__);
+ return -ENOMEM;
+ }
+
+ fm_eth->rx_pram = pram;
+
+ /* parameter page offset to MURAM */
+ pram_page_offset = (void *)pram - fm_muram_base();
+
+ /* enable global mode - snooping of data buffers and BDs */
+ out_be32(&pram->mode, PRAM_MODE_GLOBAL);
+
+ /* init the Rx queue descriptor pointer */
+ out_be32(&pram->rxqd_ptr, pram_page_offset + 0x20);
+
+ /* set the max receive buffer length, power of 2 */
+ muram_writew(&pram->mrblr, MAX_RXBUF_LOG2);
+
+ /* alloc Rx buffer descriptors from main memory */
+ rx_bd_ring_base = dma_alloc_coherent(sizeof(struct fm_port_bd)
+ * RX_BD_RING_SIZE, DMA_ADDRESS_BROKEN);
+ if (!rx_bd_ring_base)
+ return -ENOMEM;
+
+ memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd)
+ * RX_BD_RING_SIZE);
+
+ /* alloc Rx buffer from main memory */
+ rx_buf_pool = malloc(MAX_RXBUF_LEN * RX_BD_RING_SIZE);
+ if (!rx_buf_pool)
+ return -ENOMEM;
+
+ memset(rx_buf_pool, 0, MAX_RXBUF_LEN * RX_BD_RING_SIZE);
+
+ /* save them to fm_eth */
+ fm_eth->rx_bd_ring = rx_bd_ring_base;
+ fm_eth->cur_rxbd = rx_bd_ring_base;
+ fm_eth->rx_buf = rx_buf_pool;
+
+ /* init Rx BDs ring */
+ rxbd = rx_bd_ring_base;
+ for (i = 0; i < RX_BD_RING_SIZE; i++) {
+ muram_writew(&rxbd->status, RxBD_EMPTY);
+ muram_writew(&rxbd->len, 0);
+ buf_hi = upper_32_bits(virt_to_phys(rx_buf_pool +
+ i * MAX_RXBUF_LEN));
+ buf_lo = lower_32_bits(virt_to_phys(rx_buf_pool +
+ i * MAX_RXBUF_LEN));
+ muram_writew(&rxbd->buf_ptr_hi, (u16)buf_hi);
+ out_be32(&rxbd->buf_ptr_lo, buf_lo);
+ rxbd++;
+ }
+
+ /* set the Rx queue descriptor */
+ rxqd = &pram->rxqd;
+ muram_writew(&rxqd->gen, 0);
+ bd_ring_base_hi = upper_32_bits(virt_to_phys(rx_bd_ring_base));
+ bd_ring_base_lo = lower_32_bits(virt_to_phys(rx_bd_ring_base));
+ muram_writew(&rxqd->bd_ring_base_hi, (u16)bd_ring_base_hi);
+ out_be32(&rxqd->bd_ring_base_lo, bd_ring_base_lo);
+ muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd)
+ * RX_BD_RING_SIZE);
+ muram_writew(&rxqd->offset_in, 0);
+ muram_writew(&rxqd->offset_out, 0);
+
+ /* set IM parameter ram pointer to Rx Frame Queue ID */
+ out_be32(&bmi_rx_port->fmbm_rfqid, pram_page_offset);
+
+ return 0;
+}
+
+static int fm_eth_tx_port_parameter_init(struct fm_eth *fm_eth)
+{
+ struct fm_port_global_pram *pram;
+ u32 pram_page_offset;
+ void *tx_bd_ring_base;
+ u32 bd_ring_base_lo, bd_ring_base_hi;
+ struct fm_port_bd *txbd;
+ struct fm_port_qd *txqd;
+ struct fm_bmi_tx_port *bmi_tx_port = fm_eth->tx_port;
+ int i;
+
+ /* alloc global parameter ram at MURAM */
+ pram = fm_muram_alloc(FM_PRAM_SIZE, FM_PRAM_ALIGN);
+ if (!pram)
+ return -ENOMEM;
+
+ fm_eth->tx_pram = pram;
+
+ /* parameter page offset to MURAM */
+ pram_page_offset = (void *)pram - fm_muram_base();
+
+ /* enable global mode - snooping of data buffers and BDs */
+ out_be32(&pram->mode, PRAM_MODE_GLOBAL);
+
+ /* init the Tx queue descriptor pointer */
+ out_be32(&pram->txqd_ptr, pram_page_offset + 0x40);
+
+ /* alloc Tx buffer descriptors from main memory */
+ tx_bd_ring_base = dma_alloc_coherent(sizeof(struct fm_port_bd)
+ * TX_BD_RING_SIZE, DMA_ADDRESS_BROKEN);
+ if (!tx_bd_ring_base)
+ return -ENOMEM;
+
+ memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd)
+ * TX_BD_RING_SIZE);
+ /* save it to fm_eth */
+ fm_eth->tx_bd_ring = tx_bd_ring_base;
+ fm_eth->cur_txbd = tx_bd_ring_base;
+
+ /* init Tx BDs ring */
+ txbd = tx_bd_ring_base;
+ for (i = 0; i < TX_BD_RING_SIZE; i++) {
+ muram_writew(&txbd->status, TxBD_LAST);
+ muram_writew(&txbd->len, 0);
+ muram_writew(&txbd->buf_ptr_hi, 0);
+ out_be32(&txbd->buf_ptr_lo, 0);
+ txbd++;
+ }
+
+ /* set the Tx queue descriptor */
+ txqd = &pram->txqd;
+ bd_ring_base_hi = upper_32_bits(virt_to_phys(tx_bd_ring_base));
+ bd_ring_base_lo = lower_32_bits(virt_to_phys(tx_bd_ring_base));
+ muram_writew(&txqd->bd_ring_base_hi, (u16)bd_ring_base_hi);
+ out_be32(&txqd->bd_ring_base_lo, bd_ring_base_lo);
+ muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd)
+ * TX_BD_RING_SIZE);
+ muram_writew(&txqd->offset_in, 0);
+ muram_writew(&txqd->offset_out, 0);
+
+ /* set IM parameter ram pointer to Tx Confirmation Frame Queue ID */
+ out_be32(&bmi_tx_port->fmbm_tcfqid, pram_page_offset);
+
+ return 0;
+}
+
+static void fmc_tx_port_graceful_stop_enable(struct fm_eth *fm_eth)
+{
+ struct fm_port_global_pram *pram;
+
+ pram = fm_eth->tx_pram;
+ /* graceful stop transmission of frames */
+ setbits_be32(&pram->mode, PRAM_MODE_GRACEFUL_STOP);
+}
+
+static void fmc_tx_port_graceful_stop_disable(struct fm_eth *fm_eth)
+{
+ struct fm_port_global_pram *pram;
+
+ pram = fm_eth->tx_pram;
+ /* re-enable transmission of frames */
+ clrbits_be32(&pram->mode, PRAM_MODE_GRACEFUL_STOP);
+}
+
+static void memac_adjust_link_speed(struct eth_device *edev)
+{
+ struct fm_eth *fm_eth = to_fm_eth(edev);
+ struct memac *regs = fm_eth->regs;
+ int speed = edev->phydev->speed;
+ u32 if_mode;
+ phy_interface_t type = fm_eth->enet_if;
+
+ if_mode = in_be32(&regs->if_mode);
+
+ if (type == PHY_INTERFACE_MODE_RGMII ||
+ type == PHY_INTERFACE_MODE_RGMII_ID ||
+ type == PHY_INTERFACE_MODE_RGMII_TXID) {
+ if_mode &= ~IF_MODE_EN_AUTO;
+ if_mode &= ~IF_MODE_SETSP_MASK;
+
+ switch (speed) {
+ case SPEED_1000:
+ if_mode |= IF_MODE_SETSP_1000M;
+ break;
+ case SPEED_100:
+ if_mode |= IF_MODE_SETSP_100M;
+ break;
+ case SPEED_10:
+ if_mode |= IF_MODE_SETSP_10M;
+ break;
+ default:
+ break;
+ }
+ }
+
+ out_be32(&regs->if_mode, if_mode);
+
+ return;
+}
+
+static int fm_eth_open(struct eth_device *edev)
+{
+ struct fm_eth *fm_eth = to_fm_eth(edev);
+ struct memac *regs = fm_eth->regs;
+ int ret;
+
+ ret = phy_device_connect(edev, NULL, -1, memac_adjust_link_speed, 0,
+ fm_eth->enet_if);
+ if (ret)
+ return ret;
+
+ /* enable bmi Rx port */
+ setbits_be32(&fm_eth->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
+ /* enable MAC rx/tx port */
+ setbits_be32(&regs->command_config,
+ MEMAC_CMD_CFG_RXTX_EN | MEMAC_CMD_CFG_NO_LEN_CHK);
+
+ /* enable bmi Tx port */
+ setbits_be32(&fm_eth->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
+ /* re-enable transmission of frame */
+ fmc_tx_port_graceful_stop_disable(fm_eth);
+
+ return 0;
+}
+
+static void memac_disable_mac(struct fm_eth *fm_eth)
+{
+ struct memac *regs = fm_eth->regs;
+
+ clrbits_be32(&regs->command_config, MEMAC_CMD_CFG_RXTX_EN);
+}
+
+static void fm_eth_halt(struct eth_device *edev)
+{
+ struct fm_eth *fm_eth = to_fm_eth(edev);
+
+ /* graceful stop the transmission of frames */
+ fmc_tx_port_graceful_stop_enable(fm_eth);
+ /* disable bmi Tx port */
+ bmi_tx_port_disable(fm_eth->tx_port);
+ /* disable MAC rx/tx port */
+ memac_disable_mac(fm_eth);
+ /* disable bmi Rx port */
+ bmi_rx_port_disable(fm_eth->rx_port);
+}
+
+static int fm_eth_send(struct eth_device *edev, void *buf, int len)
+{
+ struct fm_eth *fm_eth = to_fm_eth(edev);
+ struct fm_port_global_pram *pram;
+ struct fm_port_bd *txbd, *txbd_base;
+ u16 offset_in;
+ int i;
+ dma_addr_t dma;
+
+ pram = fm_eth->tx_pram;
+ txbd = fm_eth->cur_txbd;
+
+ /* find one empty TxBD */
+ for (i = 0; muram_readw(&txbd->status) & TxBD_READY; i++) {
+ udelay(100);
+ if (i > 0x1000) {
+ dev_err(&edev->dev, "Tx buffer not ready, txbd->status = 0x%x\n",
+ muram_readw(&txbd->status));
+ return -EIO;
+ }
+ }
+
+ dma = dma_map_single(fm_eth->dev, buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(fm_eth->dev, dma))
+ return -EFAULT;
+
+ /* setup TxBD */
+ muram_writew(&txbd->buf_ptr_hi, (u16)upper_32_bits(dma));
+ out_be32(&txbd->buf_ptr_lo, lower_32_bits(dma));
+ muram_writew(&txbd->len, len);
+ muram_writew(&txbd->status, TxBD_READY | TxBD_LAST);
+
+ /* update TxQD, let the RISC send the packet */
+ offset_in = muram_readw(&pram->txqd.offset_in);
+ offset_in += sizeof(struct fm_port_bd);
+ if (offset_in >= muram_readw(&pram->txqd.bd_ring_size))
+ offset_in = 0;
+ muram_writew(&pram->txqd.offset_in, offset_in);
+
+ /* wait for buffer to be transmitted */
+ for (i = 0; muram_readw(&txbd->status) & TxBD_READY; i++) {
+ udelay(10);
+ if (i > 0x10000) {
+ dev_err(&edev->dev, "Tx error, txbd->status = 0x%x\n",
+ muram_readw(&txbd->status));
+ return -EIO;
+ }
+ }
+
+ dma_unmap_single(fm_eth->dev, dma, len, DMA_TO_DEVICE);
+
+ /* advance the TxBD */
+ txbd++;
+ txbd_base = fm_eth->tx_bd_ring;
+ if (txbd >= (txbd_base + TX_BD_RING_SIZE))
+ txbd = txbd_base;
+ /* update current txbd */
+ fm_eth->cur_txbd = (void *)txbd;
+
+ return 0;
+}
+
+static int fm_eth_recv(struct eth_device *edev)
+{
+ struct fm_eth *fm_eth = to_fm_eth(edev);
+ struct fm_port_global_pram *pram;
+ struct fm_port_bd *rxbd, *rxbd_base;
+ u16 status, len;
+ u32 buf_lo, buf_hi;
+ u8 *data;
+ u16 offset_out;
+ int ret = 1;
+
+ pram = fm_eth->rx_pram;
+ rxbd = fm_eth->cur_rxbd;
+ status = muram_readw(&rxbd->status);
+
+ while (!(status & RxBD_EMPTY)) {
+ if (!(status & RxBD_ERROR)) {
+ buf_hi = muram_readw(&rxbd->buf_ptr_hi);
+ buf_lo = in_be32(&rxbd->buf_ptr_lo);
+ data = (u8 *)((unsigned long)(buf_hi << 16) << 16 | buf_lo);
+ len = muram_readw(&rxbd->len);
+
+ dma_sync_single_for_cpu((unsigned long)data,
+ len,
+ DMA_FROM_DEVICE);
+
+ net_receive(edev, data, len);
+
+ dma_sync_single_for_device((unsigned long)data,
+ len,
+ DMA_FROM_DEVICE);
+ } else {
+ dev_err(&edev->dev, "Rx error\n");
+ ret = 0;
+ }
+
+ /* clear the RxBDs */
+ muram_writew(&rxbd->status, RxBD_EMPTY);
+ muram_writew(&rxbd->len, 0);
+
+ /* advance RxBD */
+ rxbd++;
+ rxbd_base = fm_eth->rx_bd_ring;
+ if (rxbd >= (rxbd_base + RX_BD_RING_SIZE))
+ rxbd = rxbd_base;
+ /* read next status */
+ status = muram_readw(&rxbd->status);
+
+ /* update RxQD */
+ offset_out = muram_readw(&pram->rxqd.offset_out);
+ offset_out += sizeof(struct fm_port_bd);
+ if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size))
+ offset_out = 0;
+ muram_writew(&pram->rxqd.offset_out, offset_out);
+ }
+ fm_eth->cur_rxbd = rxbd;
+
+ return ret;
+}
+
+static void memac_init_mac(struct fm_eth *fm_eth)
+{
+ struct memac *regs = fm_eth->regs;
+
+ /* mask all interrupts */
+ out_be32(&regs->imask, IMASK_MASK_ALL);
+
+ /* clear all events */
+ out_be32(&regs->ievent, IEVENT_CLEAR_ALL);
+
+ /* set the max receive length */
+ out_be32(&regs->maxfrm, MAX_RXBUF_LEN);
+
+ /* disable multicast frame reception via the hash table */
+ out_be32(&regs->hashtable_ctrl, 0);
+}
+
+static int memac_set_ethaddr(struct eth_device *edev, const unsigned char *adr)
+{
+ struct fm_eth *fm_eth = to_fm_eth(edev);
+ struct memac *regs = fm_eth->regs;
+ u32 mac_addr0, mac_addr1;
+
+ /*
+ * For a station address of 0x12345678ABCD, write 0x78563412 to
+ * MAC_ADDR0 and 0x0000CDAB to MAC_ADDR1.
+ */
+ mac_addr0 = (adr[3] << 24) | (adr[2] << 16) | \
+ (adr[1] << 8) | (adr[0]);
+ out_be32(&regs->mac_addr_0, mac_addr0);
+
+ mac_addr1 = ((adr[5] << 8) | adr[4]) & 0x0000ffff;
+ out_be32(&regs->mac_addr_1, mac_addr1);
+
+ return 0;
+}
+
+static int memac_get_ethaddr(struct eth_device *edev, unsigned char *adr)
+{
+ struct fm_eth *fm_eth = to_fm_eth(edev);
+ struct memac *regs = fm_eth->regs;
+ u32 mac_addr0, mac_addr1;
+
+ mac_addr0 = in_be32(&regs->mac_addr_0);
+ mac_addr1 = in_be32(&regs->mac_addr_1);
+
+ adr[0] = mac_addr0 & 0xff;
+ adr[1] = (mac_addr0 >> 8) & 0xff;
+ adr[2] = (mac_addr0 >> 16) & 0xff;
+ adr[3] = (mac_addr0 >> 24) & 0xff;
+ adr[4] = mac_addr1 & 0xff;
+ adr[5] = (mac_addr1 >> 8) & 0xff;
+
+ return 0;
+}
+
+static void memac_set_interface_mode(struct fm_eth *fm_eth,
+ phy_interface_t type)
+{
+ struct memac *regs = fm_eth->regs;
+ u32 if_mode;
+
+ /* clear all bits related to the interface mode */
+ if_mode = in_be32(&regs->if_mode) & ~IF_MODE_MASK;
+
+ /* set interface mode */
+ switch (type) {
+ case PHY_INTERFACE_MODE_GMII:
+ if_mode |= IF_MODE_GMII | IF_MODE_EN_AUTO;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if_mode |= IF_MODE_GMII | IF_MODE_RG | IF_MODE_EN_AUTO;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ if_mode |= IF_MODE_GMII | IF_MODE_EN_AUTO;
+ break;
+ case PHY_INTERFACE_MODE_XGMII:
+ if_mode |= IF_MODE_XGMII;
+ break;
+ default:
+ break;
+ }
+
+ out_be32(&regs->if_mode, if_mode);
+}
+
+static int fm_eth_startup(struct fm_eth *fm_eth)
+{
+ int ret;
+
+ ret = fm_eth_rx_port_parameter_init(fm_eth);
+ if (ret)
+ return ret;
+
+ ret = fm_eth_tx_port_parameter_init(fm_eth);
+ if (ret)
+ return ret;
+
+ /* setup the MAC controller */
+ memac_init_mac(fm_eth);
+
+ /* init bmi rx port, IM mode and disable */
+ bmi_rx_port_init(fm_eth->rx_port);
+
+ /* init bmi tx port, IM mode and disable */
+ bmi_tx_port_init(fm_eth->tx_port);
+
+ memac_set_interface_mode(fm_eth, fm_eth->enet_if);
+
+ return 0;
+}
+
+static int fsl_fman_mdio_probe(struct device_d *dev)
+{
+ struct resource *iores;
+ int ret;
+ struct fsl_fman_mdio *priv;
+
+ dev_dbg(dev, "probe\n");
+
+ iores = dev_request_mem_resource(dev, 0);
+ if (IS_ERR(iores))
+ return PTR_ERR(iores);
+
+ priv = xzalloc(sizeof(*priv));
+
+ priv->bus.read = memac_mdio_read;
+ priv->bus.write = memac_mdio_write;
+ priv->bus.parent = dev;
+ priv->regs = IOMEM(iores->start);
+
+ memac_setbits_32(&priv->regs->mdio_stat,
+ MDIO_STAT_CLKDIV(258) | MDIO_STAT_NEG);
+
+ ret = mdiobus_register(&priv->bus);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int fsl_fman_port_probe(struct device_d *dev)
+{
+ struct resource *iores;
+ int ret;
+ struct fsl_fman_port *port;
+ unsigned long type;
+
+ dev_dbg(dev, "probe\n");
+
+ ret = dev_get_drvdata(dev, (const void **)&type);
+ if (ret)
+ return ret;
+
+ iores = dev_request_mem_resource(dev, 0);
+ if (IS_ERR(iores))
+ return PTR_ERR(iores);
+
+ port = xzalloc(sizeof(*port));
+
+ port->regs = IOMEM(iores->start);
+ port->type = type;
+
+ if (type == FMAN_PORT_TYPE_RX)
+ port->rxport = port->regs;
+ else
+ port->txport = port->regs;
+
+ dev->priv = port;
+
+ return 0;
+}
+
+static int fsl_fman_memac_port_bind(struct fm_eth *fm_eth, enum fman_port_type type)
+{
+ struct device_node *macnp = fm_eth->dev->device_node;
+ struct device_node *portnp;
+ struct device_d *portdev;
+ struct fsl_fman_port *port;
+
+ portnp = of_parse_phandle(macnp, "fsl,fman-ports", type);
+ if (!portnp) {
+ dev_err(fm_eth->dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n",
+ macnp->full_name);
+ return -EINVAL;
+ }
+
+ portdev = of_find_device_by_node(portnp);
+ if (!portdev)
+ return -ENOENT;
+
+ port = portdev->priv;
+ if (!port)
+ return -EINVAL;
+
+ if (type == FMAN_PORT_TYPE_TX)
+ fm_eth->tx_port = port->txport;
+ else
+ fm_eth->rx_port = port->rxport;
+
+ return 0;
+}
+
+static int fsl_fman_memac_probe(struct device_d *dev)
+{
+ struct resource *iores;
+ struct fm_eth *fm_eth;
+ struct eth_device *edev;
+ int ret;
+ int phy_mode;
+
+ dev_dbg(dev, "probe\n");
+
+ iores = dev_request_mem_resource(dev, 0);
+ if (IS_ERR(iores))
+ return PTR_ERR(iores);
+
+ /* alloc the FMan ethernet private struct */
+ fm_eth = xzalloc(sizeof(*fm_eth));
+
+ fm_eth->dev = dev;
+
+ ret = fsl_fman_memac_port_bind(fm_eth, FMAN_PORT_TYPE_TX);
+ if (ret)
+ return ret;
+
+ ret = fsl_fman_memac_port_bind(fm_eth, FMAN_PORT_TYPE_RX);
+ if (ret)
+ return ret;
+
+ phy_mode = of_get_phy_mode(dev->device_node);
+ if (phy_mode < 0)
+ return phy_mode;
+
+ fm_eth->enet_if = phy_mode;
+
+ fm_eth->regs = IOMEM(iores->start);
+
+ dev->priv = fm_eth;
+
+ edev = &fm_eth->edev;
+ edev->open = fm_eth_open;
+ edev->halt = fm_eth_halt;
+ edev->send = fm_eth_send;
+ edev->recv = fm_eth_recv;
+ edev->get_ethaddr = memac_get_ethaddr;
+ edev->set_ethaddr = memac_set_ethaddr;
+ edev->parent = dev;
+
+ /* start up the FMan independent-mode (IM) interface */
+ ret = fm_eth_startup(fm_eth);
+ if (ret)
+ return ret;
+
+ ret = eth_register(edev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int fsl_fman_muram_probe(struct device_d *dev)
+{
+ struct resource *iores;
+
+ dev_dbg(dev, "probe\n");
+
+ iores = dev_request_mem_resource(dev, 0);
+ if (IS_ERR(iores))
+ return PTR_ERR(iores);
+
+ muram.base = IOMEM(iores->start);
+ muram.size = resource_size(iores);
+ muram.alloc = muram.base + FM_MURAM_RES_SIZE;
+ muram.top = muram.base + muram.size;
+
+ return 0;
+}
+
+static struct of_device_id fsl_fman_mdio_dt_ids[] = {
+ {
+ .compatible = "fsl,fman-memac-mdio",
+ }, {
+ }
+};
+
+static struct driver_d fman_mdio_driver = {
+ .name = "fsl-fman-mdio",
+ .probe = fsl_fman_mdio_probe,
+ .of_compatible = DRV_OF_COMPAT(fsl_fman_mdio_dt_ids),
+};
+
+static struct of_device_id fsl_fman_port_dt_ids[] = {
+ {
+ .compatible = "fsl,fman-v3-port-rx",
+ .data = (void *)FMAN_PORT_TYPE_RX,
+ }, {
+ .compatible = "fsl,fman-v3-port-tx",
+ .data = (void *)FMAN_PORT_TYPE_TX,
+ }, {
+ }
+};
+
+static struct driver_d fman_port_driver = {
+ .name = "fsl-fman-port",
+ .probe = fsl_fman_port_probe,
+ .of_compatible = DRV_OF_COMPAT(fsl_fman_port_dt_ids),
+};
+
+static struct of_device_id fsl_fman_memac_dt_ids[] = {
+ {
+ .compatible = "fsl,fman-memac",
+ }, {
+ }
+};
+
+static struct driver_d fman_memac_driver = {
+ .name = "fsl-fman-memac",
+ .probe = fsl_fman_memac_probe,
+ .of_compatible = DRV_OF_COMPAT(fsl_fman_memac_dt_ids),
+};
+
+static struct of_device_id fsl_fman_muram_dt_ids[] = {
+ {
+ .compatible = "fsl,fman-muram",
+ }, {
+ }
+};
+
+static struct driver_d fman_muram_driver = {
+ .name = "fsl-fman-muram",
+ .probe = fsl_fman_muram_probe,
+ .of_compatible = DRV_OF_COMPAT(fsl_fman_muram_dt_ids),
+};
+
+static int fsl_fman_probe(struct device_d *dev)
+{
+ struct resource *iores;
+ struct ccsr_fman *reg;
+ int ret;
+
+ dev_dbg(dev, "----------------> probe\n");
+
+ iores = dev_get_resource(dev, IORESOURCE_MEM, 0);
+ if (IS_ERR(iores))
+ return PTR_ERR(iores);
+
+ reg = IOMEM(iores->start);
+
+ ret = of_platform_populate(dev->device_node, NULL, dev);
+ if (ret)
+ return ret;
+
+ platform_driver_register(&fman_muram_driver);
+ platform_driver_register(&fman_mdio_driver);
+ platform_driver_register(&fman_port_driver);
+ platform_driver_register(&fman_memac_driver);
+
+ ret = fm_init_common(dev, reg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct of_device_id fsl_fman_dt_ids[] = {
+ {
+ .compatible = "fsl,fman",
+ }, {
+ }
+};
+
+static struct driver_d fman_driver = {
+ .name = "fsl-fman",
+ .probe = fsl_fman_probe,
+ .of_compatible = DRV_OF_COMPAT(fsl_fman_dt_ids),
+};
+device_platform_driver(fman_driver);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 2793ee93d9..04efb1a3c8 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -48,7 +48,7 @@ config WATCHDOG_MXS28
config WATCHDOG_IMX
bool "i.MX watchdog"
- depends on ARCH_IMX
+ depends on ARCH_IMX || ARCH_LAYERSCAPE
help
Add support for watchdog found on Freescale i.MX SoCs.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 69189ba1f3..6c8d36c8b8 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_WATCHDOG_MXS28) += im28wd.o
obj-$(CONFIG_WATCHDOG_DW) += dw_wdt.o
obj-$(CONFIG_WATCHDOG_JZ4740) += jz4740.o
obj-$(CONFIG_WATCHDOG_IMX_RESET_SOURCE) += imxwd.o
+obj-$(CONFIG_WATCHDOG_IMX) += imxwd.o
obj-$(CONFIG_WATCHDOG_ORION) += orion_wdt.o
obj-$(CONFIG_ARCH_BCM283X) += bcm2835_wdt.o
obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o
diff --git a/drivers/watchdog/imxwd.c b/drivers/watchdog/imxwd.c
index 8dba662392..77a3bd76ce 100644
--- a/drivers/watchdog/imxwd.c
+++ b/drivers/watchdog/imxwd.c
@@ -38,6 +38,7 @@ struct imx_wd {
const struct imx_wd_ops *ops;
struct restart_handler restart;
bool ext_reset;
+ bool bigendian;
};
#define to_imx_wd(h) container_of(h, struct imx_wd, wd)
@@ -66,6 +67,22 @@ struct imx_wd {
/* valid for i.MX27, i.MX31, always '0' on i.MX25, i.MX35, i.MX51 */
#define WSTR_COLDSTART (1 << 4)
+static void imxwd_write(struct imx_wd *priv, int reg, uint16_t val)
+{
+ if (priv->bigendian)
+ out_be16(priv->base + reg, val);
+ else
+ writew(val, priv->base + reg);
+}
+
+static uint16_t imxwd_read(struct imx_wd *priv, int reg)
+{
+ if (priv->bigendian)
+ return in_be16(priv->base + reg);
+ else
+ return readw(priv->base + reg);
+}
+
static int imx1_watchdog_set_timeout(struct imx_wd *priv, unsigned timeout)
{
u16 val;
@@ -73,18 +90,18 @@ static int imx1_watchdog_set_timeout(struct imx_wd *priv, unsigned timeout)
dev_dbg(priv->dev, "%s: %d\n", __func__, timeout);
if (!timeout) {
- writew(IMX1_WDOG_WCR_WHALT, priv->base + IMX1_WDOG_WCR);
+ imxwd_write(priv, IMX1_WDOG_WCR, IMX1_WDOG_WCR_WHALT);
return 0;
}
val = (timeout * 2 - 1) << 8;
- writew(val, priv->base + IMX1_WDOG_WCR);
- writew(IMX1_WDOG_WCR_WDE | val, priv->base + IMX1_WDOG_WCR);
+ imxwd_write(priv, IMX1_WDOG_WCR, val);
+ imxwd_write(priv, IMX1_WDOG_WCR, IMX1_WDOG_WCR_WDE | val);
/* Write Service Sequence */
- writew(0x5555, priv->base + IMX1_WDOG_WSR);
- writew(0xaaaa, priv->base + IMX1_WDOG_WSR);
+ imxwd_write(priv, IMX1_WDOG_WSR, 0x5555);
+ imxwd_write(priv, IMX1_WDOG_WSR, 0xaaaa);
return 0;
}
@@ -113,13 +130,13 @@ static int imx21_watchdog_set_timeout(struct imx_wd *priv, unsigned timeout)
* set time and some write once bits first prior enabling the
* watchdog according to the datasheet
*/
- writew(val, priv->base + IMX21_WDOG_WCR);
+ imxwd_write(priv, IMX21_WDOG_WCR, val);
- writew(IMX21_WDOG_WCR_WDE | val, priv->base + IMX21_WDOG_WCR);
+ imxwd_write(priv, IMX21_WDOG_WCR, IMX21_WDOG_WCR_WDE | val);
/* Write Service Sequence */
- writew(0x5555, priv->base + IMX21_WDOG_WSR);
- writew(0xaaaa, priv->base + IMX21_WDOG_WSR);
+ imxwd_write(priv, IMX21_WDOG_WSR, 0x5555);
+ imxwd_write(priv, IMX21_WDOG_WSR, 0xaaaa);
return 0;
}
@@ -134,11 +151,11 @@ static void imx21_soc_reset(struct imx_wd *priv)
else
val |= IMX21_WDOG_WCR_WDA; /* do not assert ext-reset */
- writew(val, priv->base + IMX21_WDOG_WCR);
+ imxwd_write(priv, IMX21_WDOG_WCR, val);
/* Two additional writes due to errata ERR004346 */
- writew(val, priv->base + IMX21_WDOG_WCR);
- writew(val, priv->base + IMX21_WDOG_WCR);
+ imxwd_write(priv, IMX21_WDOG_WCR, val);
+ imxwd_write(priv, IMX21_WDOG_WCR, val);
}
static int imx_watchdog_set_timeout(struct watchdog *wd, unsigned timeout)
@@ -161,7 +178,7 @@ static void __noreturn imxwd_force_soc_reset(struct restart_handler *rst)
static void imx_watchdog_detect_reset_source(struct imx_wd *priv)
{
- u16 val = readw(priv->base + IMX21_WDOG_WSTR);
+ u16 val = imxwd_read(priv, IMX21_WDOG_WSTR);
int priority = RESET_SOURCE_DEFAULT_PRIORITY;
if (reset_source_get() == RESET_WDG)
@@ -192,7 +209,7 @@ static int imx21_wd_init(struct imx_wd *priv)
/*
* Disable watchdog powerdown counter
*/
- writew(0x0, priv->base + IMX21_WDOG_WMCR);
+ imxwd_write(priv, IMX21_WDOG_WMCR, 0x0);
return 0;
}
@@ -220,6 +237,7 @@ static int imx_wd_probe(struct device_d *dev)
priv->wd.timeout_max = priv->ops->timeout_max;
priv->wd.hwdev = dev;
priv->dev = dev;
+ priv->bigendian = of_device_is_big_endian(dev->device_node);
priv->ext_reset = of_property_read_bool(dev->device_node,
"fsl,ext-reset-output");
diff --git a/firmware/Makefile b/firmware/Makefile
index f238ce2538..306c006e23 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -11,6 +11,8 @@ firmware-$(CONFIG_FIRMWARE_IMX_LPDDR4_PMU_TRAIN) += \
firmware-$(CONFIG_FIRMWARE_IMX8MQ_ATF) += imx8mq-bl31.bin
+firmware-$(CONFIG_DRIVER_NET_FSL_FMAN) += fsl_fman_ucode_ls1046_r1.0_106_4_18.bin
+
# Create $(fwabs) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a
# leading /, it's relative to $(srctree).
fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR))
diff --git a/images/Makefile b/images/Makefile
index 59b81f9b6d..542cfdb22c 100644
--- a/images/Makefile
+++ b/images/Makefile
@@ -68,6 +68,38 @@ $(obj)/%.pblb: $(obj)/%.pbl FORCE
$(call if_changed,objcopy_bin,$(*F))
$(call cmd,check_file_size,$@,$(CONFIG_BAREBOX_MAX_IMAGE_SIZE))
+#
+# For each start symbol create three variables containing the
+# relevant sizes in the image:
+# PBL_CODE_SIZE_$(symbol) - contains the pure code size of the image
+# PBL_MEMORY_SIZE_$(symbol) - contains the code size + bss size of the
+# image
+# PBL_IMAGE_SIZE_$(symbol) - contains the full image size including the
+# compressed payload
+#
+ $(eval PBL_CODE_SIZE_$* = \
+ $(shell $(srctree)/scripts/extract_symbol_offset pbl_code_size $^))
+ $(eval PBL_MEMORY_SIZE_$*= \
+ $(shell $(srctree)/scripts/extract_symbol_offset pbl_memory_size $^))
+ $(eval PBL_IMAGE_SIZE_$*= \
+ $(shell $(srctree)/scripts/extract_symbol_offset pbl_image_size $^))
+
+#
+# if MAX_PBL_xxx_SIZE_$(symbol) is defined it contains the maximum size the
+# code/memory/image for this PBL may get. Check these values.
+#
+ $(if $(MAX_PBL_CODE_SIZE_$*), \
+ $(call cmd,check_size, $(PBL_CODE_SIZE_$*), $(MAX_PBL_CODE_SIZE_$*)) \
+ )
+
+ $(if $(MAX_PBL_MEMORY_SIZE_$*), \
+ $(call cmd,check_size, $(PBL_MEMORY_SIZE_$*), $(MAX_PBL_MEMORY_SIZE_$*)) \
+ )
+
+ $(if $(MAX_PBL_IMAGE_SIZE_$*), \
+ $(call cmd,check_size, $(PBL_IMAGE_SIZE_$*), $(MAX_PBL_IMAGE_SIZE_$*)) \
+ )
+
$(obj)/%.s: $(obj)/% FORCE
$(call if_changed,disasm)
@@ -113,6 +145,7 @@ include $(srctree)/images/Makefile.vexpress
include $(srctree)/images/Makefile.xburst
include $(srctree)/images/Makefile.at91
include $(srctree)/images/Makefile.zynqmp
+include $(srctree)/images/Makefile.layerscape
ifneq ($(pblx-y)$(pblx-),)
$(error pblx- has been removed. Please use pblb- instead.)
@@ -147,5 +180,5 @@ $(flash-list): $(image-y-path)
clean-files := *.pbl *.pblb *.map start_*.imximg *.img barebox.z start_*.kwbimg \
start_*.kwbuartimg *.socfpgaimg *.mlo *.t20img *.t20img.cfg *.t30img \
*.t30img.cfg *.t124img *.t124img.cfg *.mlospi *.mlo *.mxsbs *.mxssd \
- start_*.simximg start_*.usimximg *.imx-sram-img
+ start_*.simximg start_*.usimximg *.imx-sram-img *.image
clean-files += pbl.lds
diff --git a/images/Makefile.imx b/images/Makefile.imx
index c83f14aa51..e6f741cdb4 100644
--- a/images/Makefile.imx
+++ b/images/Makefile.imx
@@ -554,15 +554,18 @@ image-$(CONFIG_MACH_ZII_IMX7D_RPU2) += barebox-zii-imx7d-rpu2.img
# ----------------------- i.MX8mq based boards --------------------------
pblb-$(CONFIG_MACH_NXP_IMX8MQ_EVK) += start_nxp_imx8mq_evk
CFG_start_nxp_imx8mq_evk.pblb.imximg = $(board)/nxp-imx8mq-evk/flash-header-imx8mq-evk.imxcfg
+MAX_PBL_MEMORY_SIZE_start_nxp_imx8mq_evk = 0x3f000
FILE_barebox-nxp-imx8mq-evk.img = start_nxp_imx8mq_evk.pblb.imximg
image-$(CONFIG_MACH_NXP_IMX8MQ_EVK) += barebox-nxp-imx8mq-evk.img
pblb-$(CONFIG_MACH_ZII_IMX8MQ_DEV) += start_zii_imx8mq_dev
CFG_start_zii_imx8mq_dev.pblb.imximg = $(board)/zii-imx8mq-dev/flash-header-zii-imx8mq-dev.imxcfg
+MAX_PBL_MEMORY_SIZE_start_zii_imx8mq_dev = 0x3f000
FILE_barebox-zii-imx8mq-dev.img = start_zii_imx8mq_dev.pblb.imximg
image-$(CONFIG_MACH_ZII_IMX8MQ_DEV) += barebox-zii-imx8mq-dev.img
pblb-$(CONFIG_MACH_PHYTEC_SOM_IMX8MQ) += start_phytec_phycore_imx8mq
CFG_start_phytec_phycore_imx8mq.pblb.imximg = $(board)/phytec-som-imx8mq/flash-header-phycore-imx8mq.imxcfg
+MAX_PBL_MEMORY_SIZE_start_phytec_phycore_imx8mq = 0x3f000
FILE_barebox-phytec-phycore-imx8mq.img = start_phytec_phycore_imx8mq.pblb.imximg
image-$(CONFIG_MACH_PHYTEC_SOM_IMX8MQ) += barebox-phytec-phycore-imx8mq.img
diff --git a/images/Makefile.layerscape b/images/Makefile.layerscape
new file mode 100644
index 0000000000..47df3777f0
--- /dev/null
+++ b/images/Makefile.layerscape
@@ -0,0 +1,59 @@
+#
+# barebox image generation Makefile for NXP Layerscape images
+#
+
+lspbl_cfg_cpp_flags = -Wp,-MD,$(depfile) -nostdinc -x assembler-with-cpp \
+ -I $(srctree)/include -include include/generated/autoconf.h
+
+lspbl-rcw-tmp = $(subst $(comma),_,$(dot-target).lspbl_rcw.tmp)
+lspbl-pbi-tmp = $(subst $(comma),_,$(dot-target).lspbl_pbi.tmp)
+
+quiet_cmd_lspbl_image = LSPBL-IMG $@
+ cmd_lspbl_image = $(CPP) $(lspbl_cfg_cpp_flags) -o $(lspbl-rcw-tmp) $(word 2,$^) ; \
+ $(CPP) $(lspbl_cfg_cpp_flags) -o $(lspbl-pbi-tmp) $(word 3,$^) ; \
+ $(objtree)/scripts/pblimage -o $@ -r $(lspbl-rcw-tmp) \
+ -m $($(patsubst $(obj)/%.pblb,PBL_CODE_SIZE_%,$<)) -p $(lspbl-pbi-tmp) -i $<
+
+pbl-$(CONFIG_MACH_LS1046ARDB) += start_ls1046ardb.pbl
+$(obj)/barebox-ls1046ardb-2nd.image: $(obj)/start_ls1046ardb.pblb
+ $(call if_changed,shipped)
+
+$(obj)/barebox-ls1046ardb-sd.image: $(obj)/start_ls1046ardb.pblb \
+ $(board)/ls1046ardb/ls1046ardb_rcw_sd.cfg \
+ $(board)/ls1046ardb/ls1046ardb_pbi.cfg
+ $(call if_changed,lspbl_image)
+
+$(obj)/barebox-ls1046ardb-emmc.image: $(obj)/start_ls1046ardb.pblb \
+ $(board)/ls1046ardb/ls1046ardb_rcw_emmc.cfg \
+ $(board)/ls1046ardb/ls1046ardb_pbi.cfg
+ $(call if_changed,lspbl_image)
+
+$(obj)/barebox-ls1046ardb-qspi.image: $(obj)/start_ls1046ardb.pblb \
+ $(board)/ls1046ardb/ls1046ardb_rcw_qspi.cfg \
+ $(board)/ls1046ardb/ls1046ardb_pbi.cfg
+ $(call if_changed,lspbl_image)
+
+image-$(CONFIG_MACH_LS1046ARDB) += barebox-ls1046ardb-sd.image barebox-ls1046ardb-qspi.image \
+ barebox-ls1046ardb-emmc.image barebox-ls1046ardb-2nd.image
+
+pbl-$(CONFIG_MACH_TQMLS1046A) += start_tqmls1046a.pbl
+$(obj)/barebox-tqmls1046a-2nd.image: $(obj)/start_tqmls1046a.pblb
+ $(call if_changed,shipped)
+
+$(obj)/barebox-tqmls1046a-sd.image: $(obj)/start_tqmls1046a.pblb \
+ $(board)/tqmls1046a/tqmls1046a_rcw_sd_3333_5559.cfg \
+ $(board)/tqmls1046a/tqmls1046a_pbi_sd.cfg
+ $(call if_changed,lspbl_image)
+
+$(obj)/barebox-tqmls1046a-emmc.image: $(obj)/start_tqmls1046a.pblb \
+ $(board)/tqmls1046a/tqmls1046a_rcw_emmc_3333_5559.cfg \
+ $(board)/tqmls1046a/tqmls1046a_pbi_sd.cfg
+ $(call if_changed,lspbl_image)
+
+$(obj)/barebox-tqmls1046a-qspi.image: $(obj)/start_tqmls1046a.pblb \
+ $(board)/tqmls1046a/tqmls1046a_rcw_qspi_3333_5559.cfg \
+ $(board)/tqmls1046a/tqmls1046a_pbi_qspi.cfg
+ $(call if_changed,lspbl_image)
+
+image-$(CONFIG_MACH_TQMLS1046A) += barebox-tqmls1046a-sd.image barebox-tqmls1046a-emmc.image \
+ barebox-tqmls1046a-qspi.image barebox-tqmls1046a-2nd.image
diff --git a/include/soc/fsl/fsl_ddr_sdram.h b/include/soc/fsl/fsl_ddr_sdram.h
new file mode 100644
index 0000000000..07d0af96fc
--- /dev/null
+++ b/include/soc/fsl/fsl_ddr_sdram.h
@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2008-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP Semiconductor
+ */
+
+#ifndef FSL_DDR_MEMCTL_H
+#define FSL_DDR_MEMCTL_H
+
+#include <ddr_spd.h>
+#include <soc/fsl/fsl_immap.h>
+
+struct common_timing_params {
+ /* parameters to constrict */
+
+ unsigned int tckmin_x_ps;
+ unsigned int tckmax_ps;
+ unsigned int trcd_ps;
+ unsigned int trp_ps;
+ unsigned int tras_ps;
+ unsigned int taamin_ps; /* ddr3, ddr4 */
+
+ unsigned int trfc1_ps; /* ddr4 */
+ unsigned int trfc2_ps; /* ddr4 */
+ unsigned int trfc4_ps; /* ddr4 */
+ unsigned int trrds_ps; /* ddr4 */
+ unsigned int trrdl_ps; /* ddr4 */
+ unsigned int tccdl_ps; /* ddr4 */
+ unsigned int trfc_slr_ps; /* ddr4 */
+ unsigned int twtr_ps; /* !ddr4, maximum = 63750 ps */
+ unsigned int trfc_ps; /* !ddr4, maximum = 255 ns + 256 ns + .75 ns
+ = 511750 ps */
+
+ unsigned int trrd_ps; /* !ddr4, maximum = 63750 ps */
+ unsigned int trtp_ps; /* !ddr4, byte 38, spd->trtp */
+ unsigned int twr_ps; /* maximum = 63750 ps */
+ unsigned int trc_ps; /* maximum = 254 ns + .75 ns = 254750 ps */
+
+ unsigned int refresh_rate_ps;
+ unsigned int extended_op_srt;
+
+ unsigned int tis_ps; /* ddr1, ddr2, byte 32, spd->ca_setup */
+ unsigned int tih_ps; /* ddr1, ddr2, byte 33, spd->ca_hold */
+ unsigned int tds_ps; /* ddr1, ddr2, byte 34, spd->data_setup */
+ unsigned int tdh_ps; /* ddr1, ddr2, byte 35, spd->data_hold */
+ unsigned int tdqsq_max_ps; /* ddr1, ddr2, byte 44, spd->tdqsq */
+ unsigned int tqhs_ps; /* ddr1, ddr2, byte 45, spd->tqhs */
+
+ unsigned int ndimms_present;
+ unsigned int lowest_common_spd_caslat;
+ unsigned int highest_common_derated_caslat;
+ unsigned int additive_latency;
+ unsigned int all_dimms_burst_lengths_bitmask;
+ unsigned int all_dimms_registered;
+ unsigned int all_dimms_unbuffered;
+ unsigned int all_dimms_ecc_capable;
+
+ unsigned long long total_mem;
+ unsigned long long base_address;
+
+ /* DDR3 RDIMM */
+ unsigned char rcw[16]; /* Register Control Word 0-15 */
+};
+
+enum sdram_type {
+ SDRAM_TYPE_DDR1 = 2,
+ SDRAM_TYPE_DDR2 = 3,
+ SDRAM_TYPE_LPDDR1 = 6,
+ SDRAM_TYPE_DDR3 = 7,
+ SDRAM_TYPE_DDR4 = 5,
+};
+
+#define DDR_BL4 4 /* burst length 4 */
+#define DDR_BC4 DDR_BL4 /* burst chop for ddr3 */
+#define DDR_OTF 6 /* on-the-fly BC4 and BL8 */
+#define DDR_BL8 8 /* burst length 8 */
+
+#define DDR3_RTT_OFF 0
+#define DDR3_RTT_60_OHM 1 /* RTT_Nom = RZQ/4 */
+#define DDR3_RTT_120_OHM 2 /* RTT_Nom = RZQ/2 */
+#define DDR3_RTT_40_OHM 3 /* RTT_Nom = RZQ/6 */
+#define DDR3_RTT_20_OHM 4 /* RTT_Nom = RZQ/12 */
+#define DDR3_RTT_30_OHM 5 /* RTT_Nom = RZQ/8 */
+
+#define DDR4_RTT_OFF 0
+#define DDR4_RTT_60_OHM 1 /* RZQ/4 */
+#define DDR4_RTT_120_OHM 2 /* RZQ/2 */
+#define DDR4_RTT_40_OHM 3 /* RZQ/6 */
+#define DDR4_RTT_240_OHM 4 /* RZQ/1 */
+#define DDR4_RTT_48_OHM 5 /* RZQ/5 */
+#define DDR4_RTT_80_OHM 6 /* RZQ/3 */
+#define DDR4_RTT_34_OHM 7 /* RZQ/7 */
+
+#define DDR2_RTT_OFF 0
+#define DDR2_RTT_75_OHM 1
+#define DDR2_RTT_150_OHM 2
+#define DDR2_RTT_50_OHM 3
+
+#define FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR1 1
+#define FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR2 3
+
+#define FSL_DDR_ODT_NEVER 0x0
+#define FSL_DDR_ODT_CS 0x1
+#define FSL_DDR_ODT_ALL_OTHER_CS 0x2
+#define FSL_DDR_ODT_OTHER_DIMM 0x3
+#define FSL_DDR_ODT_ALL 0x4
+#define FSL_DDR_ODT_SAME_DIMM 0x5
+#define FSL_DDR_ODT_CS_AND_OTHER_DIMM 0x6
+#define FSL_DDR_ODT_OTHER_CS_ONSAMEDIMM 0x7
+
+/* define bank(chip select) interleaving mode */
+#define FSL_DDR_CS0_CS1 0x40
+#define FSL_DDR_CS2_CS3 0x20
+#define FSL_DDR_CS0_CS1_AND_CS2_CS3 (FSL_DDR_CS0_CS1 | FSL_DDR_CS2_CS3)
+#define FSL_DDR_CS0_CS1_CS2_CS3 (FSL_DDR_CS0_CS1_AND_CS2_CS3 | 0x04)
+
+/* define memory controller interleaving mode */
+#define FSL_DDR_CACHE_LINE_INTERLEAVING 0x0
+#define FSL_DDR_PAGE_INTERLEAVING 0x1
+#define FSL_DDR_BANK_INTERLEAVING 0x2
+#define FSL_DDR_SUPERBANK_INTERLEAVING 0x3
+#define FSL_DDR_256B_INTERLEAVING 0x8
+#define FSL_DDR_3WAY_1KB_INTERLEAVING 0xA
+#define FSL_DDR_3WAY_4KB_INTERLEAVING 0xC
+#define FSL_DDR_3WAY_8KB_INTERLEAVING 0xD
+/* placeholder for 4-way interleaving */
+#define FSL_DDR_4WAY_1KB_INTERLEAVING 0x1A
+#define FSL_DDR_4WAY_4KB_INTERLEAVING 0x1C
+#define FSL_DDR_4WAY_8KB_INTERLEAVING 0x1D
+
+#define SDRAM_CS_CONFIG_EN 0x80000000
+
+/* DDR_SDRAM_CFG - DDR SDRAM Control Configuration
+ */
+#define SDRAM_CFG_MEM_EN 0x80000000
+#define SDRAM_CFG_SREN 0x40000000
+#define SDRAM_CFG_ECC_EN 0x20000000
+#define SDRAM_CFG_RD_EN 0x10000000
+#define SDRAM_CFG_SDRAM_TYPE_DDR1 0x02000000
+#define SDRAM_CFG_SDRAM_TYPE_DDR2 0x03000000
+#define SDRAM_CFG_SDRAM_TYPE_MASK 0x07000000
+#define SDRAM_CFG_SDRAM_TYPE_SHIFT 24
+#define SDRAM_CFG_DYN_PWR 0x00200000
+#define SDRAM_CFG_DBW_MASK 0x00180000
+#define SDRAM_CFG_DBW_SHIFT 19
+#define SDRAM_CFG_32_BE 0x00080000
+#define SDRAM_CFG_16_BE 0x00100000
+#define SDRAM_CFG_8_BE 0x00040000
+#define SDRAM_CFG_NCAP 0x00020000
+#define SDRAM_CFG_2T_EN 0x00008000
+#define SDRAM_CFG_BI 0x00000001
+
+#define SDRAM_CFG2_FRC_SR 0x80000000
+#define SDRAM_CFG2_D_INIT 0x00000010
+#define SDRAM_CFG2_AP_EN 0x00000020
+#define SDRAM_CFG2_ODT_CFG_MASK 0x00600000
+#define SDRAM_CFG2_ODT_NEVER 0
+#define SDRAM_CFG2_ODT_ONLY_WRITE 1
+#define SDRAM_CFG2_ODT_ONLY_READ 2
+#define SDRAM_CFG2_ODT_ALWAYS 3
+
+#define SDRAM_INTERVAL_BSTOPRE 0x3FFF
+#define TIMING_CFG_2_CPO_MASK 0x0F800000
+
+#define RD_TO_PRE_MASK 0xf
+#define RD_TO_PRE_SHIFT 13
+#define WR_DATA_DELAY_MASK 0xf
+#define WR_DATA_DELAY_SHIFT 9
+
+/* DDR_EOR register */
+#define DDR_EOR_RD_REOD_DIS 0x07000000
+#define DDR_EOR_WD_REOD_DIS 0x00100000
+
+/* DDR_MD_CNTL */
+#define MD_CNTL_MD_EN 0x80000000
+#define MD_CNTL_CS_SEL_CS0 0x00000000
+#define MD_CNTL_CS_SEL_CS1 0x10000000
+#define MD_CNTL_CS_SEL_CS2 0x20000000
+#define MD_CNTL_CS_SEL_CS3 0x30000000
+#define MD_CNTL_CS_SEL_CS0_CS1 0x40000000
+#define MD_CNTL_CS_SEL_CS2_CS3 0x50000000
+#define MD_CNTL_MD_SEL_MR 0x00000000
+#define MD_CNTL_MD_SEL_EMR 0x01000000
+#define MD_CNTL_MD_SEL_EMR2 0x02000000
+#define MD_CNTL_MD_SEL_EMR3 0x03000000
+#define MD_CNTL_SET_REF 0x00800000
+#define MD_CNTL_SET_PRE 0x00400000
+#define MD_CNTL_CKE_CNTL_LOW 0x00100000
+#define MD_CNTL_CKE_CNTL_HIGH 0x00200000
+#define MD_CNTL_WRCW 0x00080000
+#define MD_CNTL_MD_VALUE(x) (x & 0x0000FFFF)
+#define MD_CNTL_CS_SEL(x) (((x) & 0x7) << 28)
+#define MD_CNTL_MD_SEL(x) (((x) & 0xf) << 24)
+
+/* DDR_CDR1 */
+#define DDR_CDR1_DHC_EN 0x80000000
+#define DDR_CDR1_V0PT9_EN 0x40000000
+#define DDR_CDR1_ODT_SHIFT 17
+#define DDR_CDR1_ODT_MASK 0x6
+#define DDR_CDR2_ODT_MASK 0x1
+#define DDR_CDR1_ODT(x) ((x & DDR_CDR1_ODT_MASK) << DDR_CDR1_ODT_SHIFT)
+#define DDR_CDR2_ODT(x) (x & DDR_CDR2_ODT_MASK)
+#define DDR_CDR2_VREF_OVRD(x) (0x00008080 | ((((x) - 37) & 0x3F) << 8))
+#define DDR_CDR2_VREF_TRAIN_EN 0x00000080
+#define DDR_CDR2_VREF_RANGE_2 0x00000040
+
+/* DDR ERR_DISABLE */
+#define DDR_ERR_DISABLE_APED (1 << 8) /* Address parity error disable */
+
+/* Mode Registers */
+#define DDR_MR5_CA_PARITY_LAT_4_CLK 0x1 /* for DDR4-1600/1866/2133 */
+#define DDR_MR5_CA_PARITY_LAT_5_CLK 0x2 /* for DDR4-2400 */
+
+/* DEBUG_26 register */
+#define DDR_CAS_TO_PRE_SUB_MASK 0x0000f000 /* CAS to preamble subtract value */
+#define DDR_CAS_TO_PRE_SUB_SHIFT 12
+
+/* DEBUG_29 register */
+#define DDR_TX_BD_DIS (1 << 10) /* Transmit Bit Deskew Disable */
+
+
+#define DDR4_CDR_ODT_OFF 0x0
+#define DDR4_CDR_ODT_100ohm 0x1
+#define DDR4_CDR_ODT_120ohm 0x2
+#define DDR4_CDR_ODT_80ohm 0x3
+#define DDR4_CDR_ODT_60ohm 0x4
+#define DDR4_CDR_ODT_40ohm 0x5
+#define DDR4_CDR_ODT_50ohm 0x6
+#define DDR4_CDR_ODT_30ohm 0x7
+
+#define DDR123_CDR_ODT_OFF 0x0
+#define DDR123_CDR_ODT_120ohm 0x1
+#define DDR123_CDR_ODT_180ohm 0x2
+#define DDR123_CDR_ODT_75ohm 0x3
+#define DDR123_CDR_ODT_110ohm 0x4
+#define DDR123_CDR_ODT_60ohm	0x5
+#define DDR123_CDR_ODT_70ohm 0x6
+#define DDR123_CDR_ODT_47ohm 0x7
+
+#define DDR_INIT_ADDR_EXT_UIA (1 << 31)
+
+#define MAX_CHIP_SELECTS_PER_CTRL 4
+#define MAX_DIMM_SLOTS_PER_CTRL 2
+
+/* Record of register values computed */
+typedef struct fsl_ddr_cfg_regs_s {
+ struct {
+ unsigned int bnds;
+ unsigned int config;
+ unsigned int config_2;
+ } cs[MAX_CHIP_SELECTS_PER_CTRL];
+ unsigned int timing_cfg_3;
+ unsigned int timing_cfg_0;
+ unsigned int timing_cfg_1;
+ unsigned int timing_cfg_2;
+ unsigned int ddr_sdram_cfg;
+ unsigned int ddr_sdram_cfg_2;
+ unsigned int ddr_sdram_cfg_3;
+ unsigned int ddr_sdram_mode;
+ unsigned int ddr_sdram_mode_2;
+ unsigned int ddr_sdram_mode_3;
+ unsigned int ddr_sdram_mode_4;
+ unsigned int ddr_sdram_mode_5;
+ unsigned int ddr_sdram_mode_6;
+ unsigned int ddr_sdram_mode_7;
+ unsigned int ddr_sdram_mode_8;
+ unsigned int ddr_sdram_mode_9;
+ unsigned int ddr_sdram_mode_10;
+ unsigned int ddr_sdram_mode_11;
+ unsigned int ddr_sdram_mode_12;
+ unsigned int ddr_sdram_mode_13;
+ unsigned int ddr_sdram_mode_14;
+ unsigned int ddr_sdram_mode_15;
+ unsigned int ddr_sdram_mode_16;
+ unsigned int ddr_sdram_md_cntl;
+ unsigned int ddr_sdram_interval;
+ unsigned int ddr_data_init;
+ unsigned int ddr_sdram_clk_cntl;
+ unsigned int ddr_init_addr;
+ unsigned int ddr_init_ext_addr;
+ unsigned int timing_cfg_4;
+ unsigned int timing_cfg_5;
+ unsigned int timing_cfg_6;
+ unsigned int timing_cfg_7;
+ unsigned int timing_cfg_8;
+ unsigned int timing_cfg_9;
+ unsigned int ddr_zq_cntl;
+ unsigned int ddr_wrlvl_cntl;
+ unsigned int ddr_wrlvl_cntl_2;
+ unsigned int ddr_wrlvl_cntl_3;
+ unsigned int ddr_sr_cntr;
+ unsigned int ddr_sdram_rcw_1;
+ unsigned int ddr_sdram_rcw_2;
+ unsigned int ddr_sdram_rcw_3;
+ unsigned int ddr_sdram_rcw_4;
+ unsigned int ddr_sdram_rcw_5;
+ unsigned int ddr_sdram_rcw_6;
+ unsigned int dq_map_0;
+ unsigned int dq_map_1;
+ unsigned int dq_map_2;
+ unsigned int dq_map_3;
+ unsigned int ddr_eor;
+ unsigned int ddr_cdr1;
+ unsigned int ddr_cdr2;
+ unsigned int err_disable;
+ unsigned int err_int_en;
+ unsigned int debug[64];
+} fsl_ddr_cfg_regs_t;
+
+#define DDR_DATA_BUS_WIDTH_64 0
+#define DDR_DATA_BUS_WIDTH_32 1
+#define DDR_DATA_BUS_WIDTH_16 2
+#define DDR_CSWL_CS0 0x04000001
+/*
+ * Generalized parameters for memory controller configuration,
+ * might be a little specific to the FSL memory controller
+ */
+typedef struct memctl_options_s {
+ enum sdram_type ddrtype;
+
+ /*
+ * Memory organization parameters
+ *
+ * if DIMM is present in the system
+ * where DIMMs are with respect to chip select
+ * where chip selects are with respect to memory boundaries
+ */
+ unsigned int registered_dimm_en; /* use registered DIMM support */
+
+ /* Options local to a Chip Select */
+ struct cs_local_opts_s {
+ unsigned int auto_precharge;
+ unsigned int odt_rd_cfg;
+ unsigned int odt_wr_cfg;
+ unsigned int odt_rtt_norm;
+ unsigned int odt_rtt_wr;
+ } cs_local_opts[MAX_CHIP_SELECTS_PER_CTRL];
+
+ /* Special configurations for chip select */
+ unsigned int memctl_interleaving;
+ unsigned int memctl_interleaving_mode;
+ unsigned int ba_intlv_ctl;
+ unsigned int addr_hash;
+
+ /* Operational mode parameters */
+ unsigned int ecc_mode; /* Use ECC? */
+ /* Initialize ECC using memory controller? */
+ unsigned int ecc_init_using_memctl;
+ unsigned int dqs_config; /* Use DQS? maybe only with DDR2? */
+ /* SREN - self-refresh during sleep */
+ unsigned int self_refresh_in_sleep;
+ /* SR_IE - Self-refresh interrupt enable */
+ unsigned int self_refresh_interrupt_en;
+ unsigned int dynamic_power; /* DYN_PWR */
+ /* memory data width to use (16-bit, 32-bit, 64-bit) */
+ unsigned int data_bus_width;
+ unsigned int burst_length; /* BL4, OTF and BL8 */
+ /* On-The-Fly Burst Chop enable */
+ unsigned int otf_burst_chop_en;
+	/* mirror DIMMs for DDR3 */
+ unsigned int mirrored_dimm;
+ unsigned int quad_rank_present;
+ unsigned int ap_en; /* address parity enable for RDIMM/DDR4-UDIMM */
+ unsigned int x4_en; /* enable x4 devices */
+ unsigned int package_3ds;
+
+ /* Global Timing Parameters */
+ unsigned int cas_latency_override;
+ unsigned int cas_latency_override_value;
+ unsigned int use_derated_caslat;
+ unsigned int additive_latency_override;
+ unsigned int additive_latency_override_value;
+
+ unsigned int clk_adjust; /* */
+ unsigned int cpo_override; /* override timing_cfg_2[CPO]*/
+ unsigned int cpo_sample; /* optimize debug_29[24:31] */
+ unsigned int write_data_delay; /* DQS adjust */
+
+ unsigned int cswl_override;
+ unsigned int wrlvl_override;
+ unsigned int wrlvl_sample; /* Write leveling */
+ unsigned int wrlvl_start;
+ unsigned int wrlvl_ctl_2;
+ unsigned int wrlvl_ctl_3;
+
+ unsigned int half_strength_driver_enable;
+ unsigned int twot_en;
+ unsigned int threet_en;
+ unsigned int bstopre;
+ unsigned int tfaw_window_four_activates_ps; /* tFAW -- FOUR_ACT */
+
+ /* Rtt impedance */
+ unsigned int rtt_override; /* rtt_override enable */
+ unsigned int rtt_override_value; /* that is Rtt_Nom for DDR3 */
+ unsigned int rtt_wr_override_value; /* this is Rtt_WR for DDR3 */
+
+ /* Automatic self refresh */
+ unsigned int auto_self_refresh_en;
+ unsigned int sr_it;
+ /* ZQ calibration */
+ unsigned int zq_en;
+ /* Write leveling */
+ unsigned int wrlvl_en;
+ /* RCW override for RDIMM */
+ unsigned int rcw_override;
+ unsigned int rcw_1;
+ unsigned int rcw_2;
+ unsigned int rcw_3;
+ /* control register 1 */
+ unsigned int ddr_cdr1;
+ unsigned int ddr_cdr2;
+
+ unsigned int trwt_override;
+ unsigned int trwt; /* read-to-write turnaround */
+} memctl_options_t;
+
+#define EDC_DATA_PARITY 1
+#define EDC_ECC 2
+#define EDC_AC_PARITY 4
+
+/* Parameters for a DDR dimm computed from the SPD */
+struct dimm_params {
+
+ /* DIMM organization parameters */
+ char mpart[19]; /* guaranteed null terminated */
+
+ unsigned int n_ranks;
+ unsigned int die_density;
+ unsigned long long rank_density;
+ unsigned long long capacity;
+ unsigned int data_width;
+ unsigned int primary_sdram_width;
+ unsigned int ec_sdram_width;
+ unsigned int registered_dimm;
+ unsigned int package_3ds; /* number of dies in 3DS DIMM */
+ unsigned int device_width; /* x4, x8, x16 components */
+
+ /* SDRAM device parameters */
+ unsigned int n_row_addr;
+ unsigned int n_col_addr;
+ unsigned int edc_config; /* 0 = none, 1 = parity, 2 = ECC */
+ unsigned int bank_addr_bits; /* DDR4 */
+ unsigned int bank_group_bits; /* DDR4 */
+ unsigned int n_banks_per_sdram_device; /* !DDR4 */
+ unsigned int burst_lengths_bitmask; /* BL=4 bit 2, BL=8 = bit 3 */
+
+ /* used in computing base address of DIMMs */
+ unsigned long long base_address;
+ /* mirrored DIMMs */
+ unsigned int mirrored_dimm; /* only for ddr3 */
+
+ /* DIMM timing parameters */
+
+ int mtb_ps; /* medium timebase ps */
+ int ftb_10th_ps; /* fine timebase, in 1/10 ps */
+ int taa_ps; /* minimum CAS latency time */
+ int tfaw_ps; /* four active window delay */
+
+ /*
+ * SDRAM clock periods
+ * The range for these are 1000-10000 so a short should be sufficient
+ */
+ int tckmin_x_ps;
+ int tckmin_x_minus_1_ps;
+ int tckmin_x_minus_2_ps;
+ int tckmax_ps;
+
+ /* SPD-defined CAS latencies */
+ unsigned int caslat_x;
+ unsigned int caslat_x_minus_1;
+ unsigned int caslat_x_minus_2;
+
+ unsigned int caslat_lowest_derated; /* Derated CAS latency */
+
+ /* basic timing parameters */
+ int trcd_ps;
+ int trp_ps;
+ int tras_ps;
+
+ int trfc1_ps; /* DDR4 */
+ int trfc2_ps; /* DDR4 */
+ int trfc4_ps; /* DDR4 */
+ int trrds_ps; /* DDR4 */
+ int trrdl_ps; /* DDR4 */
+ int tccdl_ps; /* DDR4 */
+ int trfc_slr_ps; /* DDR4 */
+ int twr_ps; /* !DDR4, maximum = 63750 ps */
+ int trfc_ps; /* max = 255 ns + 256 ns + .75 ns
+ = 511750 ps */
+ int trrd_ps; /* !DDR4, maximum = 63750 ps */
+ int twtr_ps; /* !DDR4, maximum = 63750 ps */
+ int trtp_ps; /* !DDR4, byte 38, spd->trtp */
+
+ int trc_ps; /* maximum = 254 ns + .75 ns = 254750 ps */
+
+ int refresh_rate_ps;
+ int extended_op_srt;
+
+ int tis_ps; /* DDR1, DDR2, byte 32, spd->ca_setup */
+ int tih_ps; /* DDR1, DDR2, byte 33, spd->ca_hold */
+ int tds_ps; /* DDR1, DDR2, byte 34, spd->data_setup */
+ int tdh_ps; /* DDR1, DDR2, byte 35, spd->data_hold */
+ int tdqsq_max_ps; /* DDR1, DDR2, byte 44, spd->tdqsq */
+ int tqhs_ps; /* DDR1, DDR2, byte 45, spd->tqhs */
+
+ /* DDR3 & DDR4 RDIMM */
+ unsigned char rcw[16]; /* Register Control Word 0-15 */
+ unsigned int dq_mapping[18]; /* DDR4 */
+ unsigned int dq_mapping_ors; /* DDR4 */
+};
+
+struct fsl_ddr_controller {
+ int num;
+ unsigned long ddr_freq;
+ struct ccsr_ddr __iomem *base;
+ struct spd_eeprom *spd_installed_dimms;
+ struct dimm_params *dimm_params;
+ memctl_options_t memctl_opts;
+ struct common_timing_params common_timing_params;
+ fsl_ddr_cfg_regs_t fsl_ddr_config_reg;
+ unsigned int dbw_capacity_adjust;
+ int chip_selects_per_ctrl;
+ int dimm_slots_per_ctrl;
+ bool erratum_A009663;
+ bool erratum_A008511;
+ bool erratum_A009803;
+ bool erratum_A010165;
+ bool erratum_A009801;
+ bool erratum_A004508;
+ bool erratum_A008378;
+ bool erratum_A009942;
+ void (*board_options)(memctl_options_t *popts, struct dimm_params *pdimm,
+ struct fsl_ddr_controller *c);
+};
+
+struct fsl_ddr_info {
+ struct fsl_ddr_controller *c;
+ unsigned int num_ctrls;
+ unsigned long long mem_base;
+};
+
+phys_size_t fsl_ddr_sdram(struct fsl_ddr_info *pinfo);
+
+#ifdef CONFIG_SYS_FSL_DDR_LE
+#define ddr_in32(a) in_le32(a)
+#define ddr_out32(a, v) out_le32(a, v)
+#define ddr_setbits32(a, v) setbits_le32(a, v)
+#define ddr_clrbits32(a, v) clrbits_le32(a, v)
+#define ddr_clrsetbits32(a, clear, set) clrsetbits_le32(a, clear, set)
+#else
+#define ddr_in32(a) in_be32(a)
+#define ddr_out32(a, v) out_be32(a, v)
+#define ddr_setbits32(a, v) setbits_be32(a, v)
+#define ddr_clrbits32(a, v) clrbits_be32(a, v)
+#define ddr_clrsetbits32(a, clear, set) clrsetbits_be32(a, clear, set)
+#endif
+
+#endif
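
Not part of the patch itself, but as a minimal sketch: the ddr_in32()/ddr_out32() wrappers above pick the byte order at compile time, so controller code stays endianness-agnostic. The helper name and the check below are illustrative assumptions, combining this header with struct ccsr_ddr from fsl_immap.h:

    #include <linux/types.h>
    #include <soc/fsl/fsl_ddr_sdram.h>
    #include <soc/fsl/fsl_immap.h>

    /* Illustrative helper: has fsl_ddr_sdram() already enabled the controller,
     * i.e. is sdram_cfg[MEM_EN] set? */
    static bool fsl_ddr_is_enabled(struct ccsr_ddr __iomem *ddr)
    {
            return !!(ddr_in32(&ddr->sdram_cfg) & SDRAM_CFG_MEM_EN);
    }
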
diff --git a/include/soc/fsl/fsl_fman.h b/include/soc/fsl/fsl_fman.h
new file mode 100644
index 0000000000..96d61298ef
--- /dev/null
+++ b/include/soc/fsl/fsl_fman.h
@@ -0,0 +1,439 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * MPC85xx Internal Memory Map
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __FSL_FMAN_H__
+#define __FSL_FMAN_H__
+
+#include <linux/types.h>
+
+struct fm_bmi_common {
+ u32 fmbm_init; /* BMI initialization */
+ u32 fmbm_cfg1; /* BMI configuration1 */
+ u32 fmbm_cfg2; /* BMI configuration2 */
+ u32 res0[0x5];
+ u32 fmbm_ievr; /* interrupt event register */
+ u32 fmbm_ier; /* interrupt enable register */
+ u32 fmbm_ifr; /* interrupt force register */
+ u32 res1[0x5];
+ u32 fmbm_arb[0x8]; /* BMI arbitration */
+ u32 res2[0x28];
+ u32 fmbm_gde; /* global debug enable */
+ u32 fmbm_pp[0x3f]; /* BMI port parameters */
+ u32 res3;
+ u32 fmbm_pfs[0x3f]; /* BMI port FIFO size */
+ u32 res4;
+ u32 fmbm_ppid[0x3f];/* port partition ID */
+};
+
+struct fm_qmi_common {
+ u32 fmqm_gc; /* general configuration register */
+ u32 res0;
+ u32 fmqm_eie; /* error interrupt event register */
+ u32 fmqm_eien; /* error interrupt enable register */
+ u32 fmqm_eif; /* error interrupt force register */
+ u32 fmqm_ie; /* interrupt event register */
+ u32 fmqm_ien; /* interrupt enable register */
+ u32 fmqm_if; /* interrupt force register */
+ u32 fmqm_gs; /* global status register */
+ u32 fmqm_ts; /* task status register */
+ u32 fmqm_etfc; /* enqueue total frame counter */
+ u32 fmqm_dtfc; /* dequeue total frame counter */
+ u32 fmqm_dc0; /* dequeue counter 0 */
+ u32 fmqm_dc1; /* dequeue counter 1 */
+ u32 fmqm_dc2; /* dequeue counter 2 */
+ u32 fmqm_dc3; /* dequeue counter 3 */
+ u32 fmqm_dfnoc; /* dequeue FQID not override counter */
+ u32 fmqm_dfcc; /* dequeue FQID from context counter */
+ u32 fmqm_dffc; /* dequeue FQID from FD counter */
+ u32 fmqm_dcc; /* dequeue confirm counter */
+ u32 res1[0xc];
+ u32 fmqm_dtrc; /* debug trap configuration register */
+ u32 fmqm_efddd; /* enqueue frame descriptor dynamic debug */
+ u32 res3[0x2];
+ u32 res4[0xdc]; /* missing debug regs */
+};
+
+struct fm_bmi {
+ u8 res[1024];
+};
+
+struct fm_qmi {
+ u8 res[1024];
+};
+
+struct fm_bmi_rx_port {
+ u32 fmbm_rcfg; /* Rx configuration */
+ u32 fmbm_rst; /* Rx status */
+ u32 fmbm_rda; /* Rx DMA attributes */
+ u32 fmbm_rfp; /* Rx FIFO parameters */
+ u32 fmbm_rfed; /* Rx frame end data */
+ u32 fmbm_ricp; /* Rx internal context parameters */
+ u32 fmbm_rim; /* Rx internal margins */
+ u32 fmbm_rebm; /* Rx external buffer margins */
+ u32 fmbm_rfne; /* Rx frame next engine */
+ u32 fmbm_rfca; /* Rx frame command attributes */
+ u32 fmbm_rfpne; /* Rx frame parser next engine */
+ u32 fmbm_rpso; /* Rx parse start offset */
+ u32 fmbm_rpp; /* Rx policer profile */
+ u32 fmbm_rccb; /* Rx coarse classification base */
+ u32 res1[0x2];
+ u32 fmbm_rprai[0x8]; /* Rx parse results array Initialization */
+ u32 fmbm_rfqid; /* Rx frame queue ID */
+ u32 fmbm_refqid; /* Rx error frame queue ID */
+ u32 fmbm_rfsdm; /* Rx frame status discard mask */
+ u32 fmbm_rfsem; /* Rx frame status error mask */
+ u32 fmbm_rfene; /* Rx frame enqueue next engine */
+ u32 res2[0x23];
+ u32 fmbm_ebmpi[0x8]; /* buffer manager pool information */
+ u32 fmbm_acnt[0x8]; /* allocate counter */
+ u32 res3[0x8];
+ u32 fmbm_cgm[0x8]; /* congestion group map */
+ u32 fmbm_mpd; /* BMan pool depletion */
+ u32 res4[0x1F];
+ u32 fmbm_rstc; /* Rx statistics counters */
+ u32 fmbm_rfrc; /* Rx frame counters */
+ u32 fmbm_rfbc; /* Rx bad frames counter */
+ u32 fmbm_rlfc; /* Rx large frames counter */
+ u32 fmbm_rffc; /* Rx filter frames counter */
+ u32 fmbm_rfdc; /* Rx frame discard counter */
+ u32 fmbm_rfldec; /* Rx frames list DMA error counter */
+ u32 fmbm_rodc; /* Rx out of buffers discard counter */
+ u32 fmbm_rbdc; /* Rx buffers deallocate counter */
+ u32 res5[0x17];
+ u32 fmbm_rpc; /* Rx performance counters */
+ u32 fmbm_rpcp; /* Rx performance count parameters */
+ u32 fmbm_rccn; /* Rx cycle counter */
+ u32 fmbm_rtuc; /* Rx tasks utilization counter */
+ u32 fmbm_rrquc; /* Rx receive queue utilization counter */
+ u32 fmbm_rduc; /* Rx DMA utilization counter */
+ u32 fmbm_rfuc; /* Rx FIFO utilization counter */
+ u32 fmbm_rpac; /* Rx pause activation counter */
+ u32 res6[0x18];
+ u32 fmbm_rdbg; /* Rx debug configuration */
+};
+
+/* FMBM_RCFG - Rx configuration */
+#define FMBM_RCFG_EN 0x80000000 /* port is enabled to receive data */
+#define FMBM_RCFG_FDOVR 0x02000000 /* frame discard override */
+#define FMBM_RCFG_IM 0x01000000 /* independent mode */
+
+/* FMBM_RST - Rx status */
+#define FMBM_RST_BSY 0x80000000 /* Rx port is busy */
+
+/* FMBM_RFCA - Rx frame command attributes */
+#define FMBM_RFCA_ORDER 0x80000000
+#define FMBM_RFCA_MR_MASK 0x003f0000
+#define FMBM_RFCA_MR(x) ((x << 16) & FMBM_RFCA_MR_MASK)
+
+/* FMBM_RSTC - Rx statistics */
+#define FMBM_RSTC_EN 0x80000000 /* statistics counters enable */
+
+struct fm_bmi_tx_port {
+ u32 fmbm_tcfg; /* Tx configuration */
+ u32 fmbm_tst; /* Tx status */
+ u32 fmbm_tda; /* Tx DMA attributes */
+ u32 fmbm_tfp; /* Tx FIFO parameters */
+ u32 fmbm_tfed; /* Tx frame end data */
+ u32 fmbm_ticp; /* Tx internal context parameters */
+ u32 fmbm_tfne; /* Tx frame next engine */
+ u32 fmbm_tfca; /* Tx frame command attributes */
+ u32 fmbm_tcfqid;/* Tx confirmation frame queue ID */
+ u32 fmbm_tfeqid;/* Tx error frame queue ID */
+ u32 fmbm_tfene; /* Tx frame enqueue next engine */
+ u32 fmbm_trlmts;/* Tx rate limiter scale */
+ u32 fmbm_trlmt; /* Tx rate limiter */
+ u32 res0[0x73];
+ u32 fmbm_tstc; /* Tx statistics counters */
+ u32 fmbm_tfrc; /* Tx frame counter */
+ u32 fmbm_tfdc; /* Tx frames discard counter */
+ u32 fmbm_tfledc;/* Tx frame length error discard counter */
+ u32 fmbm_tfufdc;/* Tx frame unsupported format discard counter */
+ u32 fmbm_tbdc; /* Tx buffers deallocate counter */
+ u32 res1[0x1a];
+ u32 fmbm_tpc; /* Tx performance counters */
+ u32 fmbm_tpcp; /* Tx performance count parameters */
+ u32 fmbm_tccn; /* Tx cycle counter */
+ u32 fmbm_ttuc; /* Tx tasks utilization counter */
+ u32 fmbm_ttcquc;/* Tx transmit confirm queue utilization counter */
+ u32 fmbm_tduc; /* Tx DMA utilization counter */
+ u32 fmbm_tfuc; /* Tx FIFO utilization counter */
+ u32 res2[0x19];
+ u32 fmbm_tdcfg; /* Tx debug configuration */
+};
+
+/* FMBM_TCFG - Tx configuration */
+#define FMBM_TCFG_EN 0x80000000 /* port is enabled to transmit data */
+#define FMBM_TCFG_IM 0x01000000 /* independent mode enable */
+
+/* FMBM_TST - Tx status */
+#define FMBM_TST_BSY 0x80000000 /* Tx port is busy */
+
+/* FMBM_TFCA - Tx frame command attributes */
+#define FMBM_TFCA_ORDER 0x80000000
+#define FMBM_TFCA_MR_MASK 0x003f0000
+#define FMBM_TFCA_MR(x) ((x << 16) & FMBM_TFCA_MR_MASK)
+
+/* FMBM_TSTC - Tx statistics counters */
+#define FMBM_TSTC_EN 0x80000000
+
+/* FMBM_INIT - BMI initialization register */
+#define FMBM_INIT_START 0x80000000 /* init internal buffers */
+
+/* FMBM_CFG1 - BMI configuration 1 */
+#define FMBM_CFG1_FBPS_MASK 0x03ff0000 /* Free buffer pool size */
+#define FMBM_CFG1_FBPS_SHIFT 16
+#define FMBM_CFG1_FBPO_MASK 0x000003ff /* Free buffer pool offset */
+
+/* FMBM_IEVR - interrupt event */
+#define FMBM_IEVR_PEC 0x80000000 /* pipeline table ECC err detected */
+#define FMBM_IEVR_LEC 0x40000000 /* linked list RAM ECC error */
+#define FMBM_IEVR_SEC 0x20000000 /* statistics count RAM ECC error */
+#define FMBM_IEVR_CLEAR_ALL (FMBM_IEVR_PEC | FMBM_IEVR_LEC | FMBM_IEVR_SEC)
+
+/* FMBM_IER - interrupt enable */
+#define FMBM_IER_PECE 0x80000000 /* PEC interrupt enable */
+#define FMBM_IER_LECE 0x40000000 /* LEC interrupt enable */
+#define FMBM_IER_SECE 0x20000000 /* SEC interrupt enable */
+
+#define FMBM_IER_DISABLE_ALL 0x00000000
+
+/* FMBM_PP - BMI Port Parameters */
+#define FMBM_PP_MXT_MASK 0x3f000000 /* Max # tasks */
+#define FMBM_PP_MXT(x) (((x-1) << 24) & FMBM_PP_MXT_MASK)
+#define FMBM_PP_MXD_MASK 0x00000f00 /* Max DMA */
+#define FMBM_PP_MXD(x) (((x-1) << 8) & FMBM_PP_MXD_MASK)
+
+/* FMBM_PFS - BMI Port FIFO Size */
+#define FMBM_PFS_IFSZ_MASK 0x000003ff /* Internal Fifo Size */
+#define FMBM_PFS_IFSZ(x) (x & FMBM_PFS_IFSZ_MASK)
+
+/* FMQM_GC - global configuration */
+#define FMQM_GC_ENQ_EN 0x80000000 /* enqueue enable */
+#define FMQM_GC_DEQ_EN 0x40000000 /* dequeue enable */
+#define FMQM_GC_STEN 0x10000000 /* enable global stat counters */
+#define FMQM_GC_ENQ_THR_MASK 0x00003f00 /* max number of enqueue Tnum */
+#define FMQM_GC_ENQ(x)		((x << 8) & FMQM_GC_ENQ_THR_MASK)
+#define FMQM_GC_DEQ_THR_MASK 0x0000003f /* max number of dequeue Tnum */
+#define FMQM_GC_DEQ(x) (x & FMQM_GC_DEQ_THR_MASK)
+
+/* FMQM_EIE - error interrupt event register */
+#define FMQM_EIE_DEE 0x80000000 /* double-bit ECC error */
+#define FMQM_EIE_DFUPE 0x40000000 /* dequeue from unknown PortID */
+#define FMQM_EIE_CLEAR_ALL (FMQM_EIE_DEE | FMQM_EIE_DFUPE)
+
+/* FMQM_EIEN - error interrupt enable register */
+#define FMQM_EIEN_DEEN 0x80000000 /* double-bit ECC error */
+#define FMQM_EIEN_DFUPEN 0x40000000 /* dequeue from unknown PortID */
+#define FMQM_EIEN_DISABLE_ALL 0x00000000
+
+/* FMQM_IE - interrupt event register */
+#define FMQM_IE_SEE 0x80000000 /* single-bit ECC error detected */
+#define FMQM_IE_CLEAR_ALL FMQM_IE_SEE
+
+/* FMQM_IEN - interrupt enable register */
+#define FMQM_IEN_SEE 0x80000000 /* single-bit ECC err IRQ enable */
+#define FMQM_IEN_DISABLE_ALL 0x00000000
+
+/* NIA - next invoked action */
+#define NIA_ENG_RISC 0x00000000
+#define NIA_ENG_MASK 0x007c0000
+
+/* action code */
+#define NIA_RISC_AC_CC 0x00000006
+#define NIA_RISC_AC_IM_TX 0x00000008 /* independent mode Tx */
+#define NIA_RISC_AC_IM_RX 0x0000000a /* independent mode Rx */
+#define NIA_RISC_AC_HC 0x0000000c
+
+struct fm_parser {
+ u8 res[1024];
+};
+
+struct fm_policer {
+ u8 res[4*1024];
+};
+
+struct fm_keygen {
+ u8 res[4*1024];
+};
+
+struct fm_dma {
+ u32 fmdmsr; /* status register */
+ u32 fmdmmr; /* mode register */
+ u32 fmdmtr; /* bus threshold register */
+ u32 fmdmhy; /* bus hysteresis register */
+ u32 fmdmsetr; /* SOS emergency threshold register */
+ u32 fmdmtah; /* transfer bus address high register */
+ u32 fmdmtal; /* transfer bus address low register */
+ u32 fmdmtcid; /* transfer bus communication ID register */
+ u32 fmdmra; /* DMA bus internal ram address register */
+ u32 fmdmrd; /* DMA bus internal ram data register */
+ u32 res0[0xb];
+ u32 fmdmdcr; /* debug counter */
+	u32 fmdmemsr;	/* emergency smoother register */
+ u32 res1;
+ u32 fmdmplr[32]; /* FM DMA PID-LIODN # register */
+ u32 res[0x3c8];
+};
+
+/* FMDMSR - Fman DMA status register */
+#define FMDMSR_CMDQNE 0x10000000 /* command queue not empty */
+#define FMDMSR_BER 0x08000000 /* bus err event occurred on bus */
+#define FMDMSR_RDB_ECC 0x04000000 /* read buffer ECC error */
+#define FMDMSR_WRB_SECC 0x02000000 /* write buf ECC err sys side */
+#define FMDMSR_WRB_FECC 0x01000000 /* write buf ECC err Fman side */
+#define FMDMSR_DPEXT_SECC 0x00800000 /* DP external ECC err sys side */
+#define FMDMSR_DPEXT_FECC 0x00400000 /* DP external ECC err Fman side */
+#define FMDMSR_DPDAT_SECC 0x00200000 /* DP data ECC err on sys side */
+#define FMDMSR_DPDAT_FECC 0x00100000 /* DP data ECC err on Fman side */
+#define FMDMSR_SPDAT_FECC 0x00080000 /* SP data ECC error Fman side */
+
+#define FMDMSR_CLEAR_ALL (FMDMSR_BER | FMDMSR_RDB_ECC \
+ | FMDMSR_WRB_SECC | FMDMSR_WRB_FECC \
+ | FMDMSR_DPEXT_SECC | FMDMSR_DPEXT_FECC \
+ | FMDMSR_DPDAT_SECC | FMDMSR_DPDAT_FECC \
+ | FMDMSR_SPDAT_FECC)
+
+/* FMDMMR - FMan DMA mode register */
+#define FMDMMR_SBER 0x10000000 /* stop the DMA if a bus error */
+
+struct fm_fpm {
+ u32 fpmtnc; /* TNUM control */
+ u32 fpmprc; /* Port_ID control */
+ u32 res0;
+ u32 fpmflc; /* flush control */
+ u32 fpmdis1; /* dispatch thresholds1 */
+ u32 fpmdis2; /* dispatch thresholds2 */
+ u32 fmepi; /* error pending interrupts */
+ u32 fmrie; /* rams interrupt enable */
+ u32 fpmfcevent[0x4];/* FMan controller event 0-3 */
+ u32 res1[0x4];
+ u32 fpmfcmask[0x4]; /* FMan controller mask 0-3 */
+ u32 res2[0x4];
+ u32 fpmtsc1; /* timestamp control1 */
+ u32 fpmtsc2; /* timestamp control2 */
+ u32 fpmtsp; /* time stamp */
+ u32 fpmtsf; /* time stamp fraction */
+ u32 fpmrcr; /* rams control and event */
+ u32 res3[0x3];
+ u32 fpmdrd[0x4]; /* data_ram data 0-3 */
+ u32 res4[0xc];
+ u32 fpmdra; /* data ram access */
+ u32 fm_ip_rev_1; /* IP block revision 1 */
+ u32 fm_ip_rev_2; /* IP block revision 2 */
+ u32 fmrstc; /* reset command */
+ u32 fmcld; /* classifier debug control */
+ u32 fmnpi; /* normal pending interrupts */
+ u32 res5;
+ u32 fmfpee; /* event and enable */
+ u32 fpmcev[0x4]; /* CPU event 0-3 */
+ u32 res6[0x4];
+ u32 fmfp_ps[0x40]; /* port status */
+ u32 res7[0x260];
+ u32 fpmts[0x80]; /* task status */
+ u32 res8[0xa0];
+};
+
+/* FMFP_PRC - FPM Port_ID Control Register */
+#define FMFPPRC_PORTID_MASK 0x3f000000
+#define FMFPPRC_PORTID_SHIFT 24
+#define FMFPPRC_ORA_SHIFT 16
+#define FMFPPRC_RISC1 0x00000001
+#define FMFPPRC_RISC2 0x00000002
+#define FMFPPRC_RISC_ALL	(FMFPPRC_RISC1 | FMFPPRC_RISC2)
+
+/* FPM Flush Control Register */
+#define FMFP_FLC_DISP_LIM_NONE 0x00000000 /* no dispatch limitation */
+
+/* FMFP_EE - FPM event and enable register */
+#define FMFPEE_DECC 0x80000000 /* double ECC err on FPM ram */
+#define FMFPEE_STL 0x40000000 /* stall of task ... */
+#define FMFPEE_SECC 0x20000000 /* single ECC error */
+#define FMFPEE_RFM 0x00010000 /* release FMan */
+#define FMFPEE_DECC_EN 0x00008000 /* double ECC interrupt enable */
+#define FMFPEE_STL_EN 0x00004000 /* stall of task interrupt enable */
+#define FMFPEE_SECC_EN 0x00002000 /* single ECC err interrupt enable */
+#define FMFPEE_EHM 0x00000008 /* external halt enable */
+#define FMFPEE_UEC 0x00000004 /* FMan is not halted */
+#define FMFPEE_CER	0x00000002 /* only erroneous task stalled */
+#define FMFPEE_DER 0x00000001 /* DMA error is just reported */
+
+#define FMFPEE_CLEAR_EVENT (FMFPEE_DECC | FMFPEE_STL | FMFPEE_SECC | \
+ FMFPEE_EHM | FMFPEE_UEC | FMFPEE_CER | \
+ FMFPEE_DER | FMFPEE_RFM)
+
+/* FMFP_RCR - FMan Rams Control and Event */
+#define FMFP_RCR_MDEC 0x00008000 /* double ECC error in muram */
+#define FMFP_RCR_IDEC 0x00004000 /* double ECC error in iram */
+
+struct fm_imem {
+ u32 iadd; /* instruction address register */
+ u32 idata; /* instruction data register */
+ u32 itcfg; /* timing config register */
+ u32 iready; /* ready register */
+ u8 res[0xff0];
+};
+#define IRAM_IADD_AIE 0x80000000 /* address auto increase enable */
+#define IRAM_READY 0x80000000 /* ready to use */
+
+struct fm_soft_parser {
+ u8 res[4*1024];
+};
+
+struct fm_dtesc {
+ u8 res[4*1024];
+};
+
+struct fm_mdio {
+ u8 res0[0x120];
+ u32 miimcfg; /* MII management configuration reg */
+ u32 miimcom; /* MII management command reg */
+ u32 miimadd; /* MII management address reg */
+ u32 miimcon; /* MII management control reg */
+ u32 miimstat; /* MII management status reg */
+ u32 miimind; /* MII management indication reg */
+ u8 res1[0x1000 - 0x138];
+};
+
+struct fm_10gec {
+ u8 res[4*1024];
+};
+
+struct fm_10gec_mdio {
+ u8 res[4*1024];
+};
+
+struct fm_memac {
+ u8 res[4*1024];
+};
+
+struct fm_memac_mdio {
+ u8 res[4*1024];
+};
+
+struct fm_1588 {
+ u8 res[4*1024];
+};
+
+struct ccsr_fman {
+ u8 muram[0x80000];
+ struct fm_bmi_common fm_bmi_common;
+ struct fm_qmi_common fm_qmi_common;
+ u8 res0[2048];
+ struct {
+ struct fm_bmi fm_bmi;
+ struct fm_qmi fm_qmi;
+ struct fm_parser fm_parser;
+ u8 res[1024];
+ } port[63];
+ struct fm_policer fm_policer;
+ struct fm_keygen fm_keygen;
+ struct fm_dma fm_dma;
+ struct fm_fpm fm_fpm;
+ struct fm_imem fm_imem;
+};
+
+#endif /*__FSL_FMAN_H__*/
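
The FMan register blocks above are big-endian on LS1046A-class SoCs. A small sketch (illustrative only, assuming the in_be32()/out_be32() accessors already used elsewhere in this series) of how a driver could enable the Rx statistics counters of one BMI Rx port with the layout and bits defined above:

    #include <io.h>
    #include <linux/types.h>
    #include <soc/fsl/fsl_fman.h>

    /* Illustrative: turn on the statistics counters of one BMI Rx port */
    static void fman_rx_stats_enable(struct fm_bmi_rx_port __iomem *rx)
    {
            u32 rstc = in_be32(&rx->fmbm_rstc);

            out_be32(&rx->fmbm_rstc, rstc | FMBM_RSTC_EN);
    }
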
diff --git a/include/soc/fsl/fsl_immap.h b/include/soc/fsl/fsl_immap.h
new file mode 100644
index 0000000000..93dd6f67bd
--- /dev/null
+++ b/include/soc/fsl/fsl_immap.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Common internal memory map for some Freescale SoCs
+ *
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __FSL_IMMAP_H
+#define __FSL_IMMAP_H
+/*
+ * DDR memory controller registers
+ * This structure works for mpc83xx (DDR2 and DDR3), mpc85xx, mpc86xx.
+ */
+struct ccsr_ddr {
+ u32 cs0_bnds; /* Chip Select 0 Memory Bounds */
+ u8 res_04[4];
+ u32 cs1_bnds; /* Chip Select 1 Memory Bounds */
+ u8 res_0c[4];
+ u32 cs2_bnds; /* Chip Select 2 Memory Bounds */
+ u8 res_14[4];
+ u32 cs3_bnds; /* Chip Select 3 Memory Bounds */
+ u8 res_1c[100];
+ u32 cs0_config; /* Chip Select Configuration */
+ u32 cs1_config; /* Chip Select Configuration */
+ u32 cs2_config; /* Chip Select Configuration */
+ u32 cs3_config; /* Chip Select Configuration */
+ u8 res_90[48];
+ u32 cs0_config_2; /* Chip Select Configuration 2 */
+ u32 cs1_config_2; /* Chip Select Configuration 2 */
+ u32 cs2_config_2; /* Chip Select Configuration 2 */
+ u32 cs3_config_2; /* Chip Select Configuration 2 */
+ u8 res_d0[48];
+ u32 timing_cfg_3; /* SDRAM Timing Configuration 3 */
+ u32 timing_cfg_0; /* SDRAM Timing Configuration 0 */
+ u32 timing_cfg_1; /* SDRAM Timing Configuration 1 */
+ u32 timing_cfg_2; /* SDRAM Timing Configuration 2 */
+ u32 sdram_cfg; /* SDRAM Control Configuration */
+ u32 sdram_cfg_2; /* SDRAM Control Configuration 2 */
+ u32 sdram_mode; /* SDRAM Mode Configuration */
+ u32 sdram_mode_2; /* SDRAM Mode Configuration 2 */
+ u32 sdram_md_cntl; /* SDRAM Mode Control */
+ u32 sdram_interval; /* SDRAM Interval Configuration */
+ u32 sdram_data_init; /* SDRAM Data initialization */
+ u8 res_12c[4];
+ u32 sdram_clk_cntl; /* SDRAM Clock Control */
+ u8 res_134[20];
+ u32 init_addr; /* training init addr */
+ u32 init_ext_addr; /* training init extended addr */
+ u8 res_150[16];
+ u32 timing_cfg_4; /* SDRAM Timing Configuration 4 */
+ u32 timing_cfg_5; /* SDRAM Timing Configuration 5 */
+ u32 timing_cfg_6; /* SDRAM Timing Configuration 6 */
+ u32 timing_cfg_7; /* SDRAM Timing Configuration 7 */
+ u32 ddr_zq_cntl; /* ZQ calibration control*/
+ u32 ddr_wrlvl_cntl; /* write leveling control*/
+ u8 reg_178[4];
+ u32 ddr_sr_cntr; /* self refresh counter */
+ u32 ddr_sdram_rcw_1; /* Control Words 1 */
+ u32 ddr_sdram_rcw_2; /* Control Words 2 */
+ u8 reg_188[8];
+ u32 ddr_wrlvl_cntl_2; /* write leveling control 2 */
+ u32 ddr_wrlvl_cntl_3; /* write leveling control 3 */
+ u8 res_198[0x1a0-0x198];
+ u32 ddr_sdram_rcw_3;
+ u32 ddr_sdram_rcw_4;
+ u32 ddr_sdram_rcw_5;
+ u32 ddr_sdram_rcw_6;
+ u8 res_1b0[0x200-0x1b0];
+ u32 sdram_mode_3; /* SDRAM Mode Configuration 3 */
+ u32 sdram_mode_4; /* SDRAM Mode Configuration 4 */
+ u32 sdram_mode_5; /* SDRAM Mode Configuration 5 */
+ u32 sdram_mode_6; /* SDRAM Mode Configuration 6 */
+ u32 sdram_mode_7; /* SDRAM Mode Configuration 7 */
+ u32 sdram_mode_8; /* SDRAM Mode Configuration 8 */
+ u8 res_218[0x220-0x218];
+ u32 sdram_mode_9; /* SDRAM Mode Configuration 9 */
+ u32 sdram_mode_10; /* SDRAM Mode Configuration 10 */
+ u32 sdram_mode_11; /* SDRAM Mode Configuration 11 */
+ u32 sdram_mode_12; /* SDRAM Mode Configuration 12 */
+ u32 sdram_mode_13; /* SDRAM Mode Configuration 13 */
+ u32 sdram_mode_14; /* SDRAM Mode Configuration 14 */
+ u32 sdram_mode_15; /* SDRAM Mode Configuration 15 */
+ u32 sdram_mode_16; /* SDRAM Mode Configuration 16 */
+ u8 res_240[0x250-0x240];
+ u32 timing_cfg_8; /* SDRAM Timing Configuration 8 */
+ u32 timing_cfg_9; /* SDRAM Timing Configuration 9 */
+ u8 res_258[0x260-0x258];
+ u32 sdram_cfg_3;
+ u8 res_264[0x400-0x264];
+ u32 dq_map_0;
+ u32 dq_map_1;
+ u32 dq_map_2;
+ u32 dq_map_3;
+ u8 res_410[0xb20-0x410];
+ u32 ddr_dsr1; /* Debug Status 1 */
+ u32 ddr_dsr2; /* Debug Status 2 */
+ u32 ddr_cdr1; /* Control Driver 1 */
+ u32 ddr_cdr2; /* Control Driver 2 */
+ u8 res_b30[200];
+ u32 ip_rev1; /* IP Block Revision 1 */
+ u32 ip_rev2; /* IP Block Revision 2 */
+ u32 eor; /* Enhanced Optimization Register */
+ u8 res_c04[252];
+ u32 mtcr; /* Memory Test Control Register */
+ u8 res_d04[28];
+ u32 mtp1; /* Memory Test Pattern 1 */
+ u32 mtp2; /* Memory Test Pattern 2 */
+ u32 mtp3; /* Memory Test Pattern 3 */
+ u32 mtp4; /* Memory Test Pattern 4 */
+ u32 mtp5; /* Memory Test Pattern 5 */
+ u32 mtp6; /* Memory Test Pattern 6 */
+ u32 mtp7; /* Memory Test Pattern 7 */
+ u32 mtp8; /* Memory Test Pattern 8 */
+ u32 mtp9; /* Memory Test Pattern 9 */
+ u32 mtp10; /* Memory Test Pattern 10 */
+ u8 res_d48[184];
+ u32 data_err_inject_hi; /* Data Path Err Injection Mask High */
+ u32 data_err_inject_lo; /* Data Path Err Injection Mask Low */
+ u32 ecc_err_inject; /* Data Path Err Injection Mask ECC */
+ u8 res_e0c[20];
+ u32 capture_data_hi; /* Data Path Read Capture High */
+ u32 capture_data_lo; /* Data Path Read Capture Low */
+ u32 capture_ecc; /* Data Path Read Capture ECC */
+ u8 res_e2c[20];
+ u32 err_detect; /* Error Detect */
+ u32 err_disable; /* Error Disable */
+ u32 err_int_en;
+ u32 capture_attributes; /* Error Attrs Capture */
+ u32 capture_address; /* Error Addr Capture */
+ u32 capture_ext_address; /* Error Extended Addr Capture */
+ u32 err_sbe; /* Single-Bit ECC Error Management */
+ u8 res_e5c[164];
+ u32 debug[64]; /* debug_1 to debug_64 */
+};
+
+#define CCI400_CTRLORD_TERM_BARRIER 0x00000008
+#define CCI400_CTRLORD_EN_BARRIER 0
+#define CCI400_SHAORD_NON_SHAREABLE 0x00000002
+#define CCI400_DVM_MESSAGE_REQ_EN 0x00000002
+#define CCI400_SNOOP_REQ_EN 0x00000001
+
+/* CCI-400 registers */
+struct ccsr_cci400 {
+ u32 ctrl_ord; /* Control Override */
+ u32 spec_ctrl; /* Speculation Control */
+ u32 secure_access; /* Secure Access */
+ u32 status; /* Status */
+ u32 impr_err; /* Imprecise Error */
+ u8 res_14[0x100 - 0x14];
+ u32 pmcr; /* Performance Monitor Control */
+ u8 res_104[0xfd0 - 0x104];
+ u32 pid[8]; /* Peripheral ID */
+ u32 cid[4]; /* Component ID */
+ struct {
+ u32 snoop_ctrl; /* Snoop Control */
+ u32 sha_ord; /* Shareable Override */
+ u8 res_1008[0x1100 - 0x1008];
+ u32 rc_qos_ord; /* read channel QoS Value Override */
+		u32 wc_qos_ord;		/* write channel QoS Value Override */
+ u8 res_1108[0x110c - 0x1108];
+ u32 qos_ctrl; /* QoS Control */
+ u32 max_ot; /* Max OT */
+ u8 res_1114[0x1130 - 0x1114];
+ u32 target_lat; /* Target Latency */
+ u32 latency_regu; /* Latency Regulation */
+ u32 qos_range; /* QoS Range */
+ u8 res_113c[0x2000 - 0x113c];
+ } slave[5]; /* Slave Interface */
+ u8 res_6000[0x9004 - 0x6000];
+ u32 cycle_counter; /* Cycle counter */
+ u32 count_ctrl; /* Count Control */
+ u32 overflow_status; /* Overflow Flag Status */
+ u8 res_9010[0xa000 - 0x9010];
+ struct {
+ u32 event_select; /* Event Select */
+ u32 event_count; /* Event Count */
+ u32 counter_ctrl; /* Counter Control */
+ u32 overflow_status; /* Overflow Flag Status */
+ u8 res_a010[0xb000 - 0xa010];
+ } pcounter[4]; /* Performance Counter */
+ u8 res_e004[0x10000 - 0xe004];
+};
+
+#endif /* __FSL_IMMAP_H */
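
struct ccsr_cci400 mirrors the ARM CCI-400 programmer's model touched during Layerscape low-level init. As a hedged sketch, enabling snoop and DVM requests on one slave interface could look as follows; the helper name and the little-endian writel() access are assumptions, not taken from this series:

    #include <io.h>
    #include <soc/fsl/fsl_immap.h>

    /* Illustrative: allow snoop and DVM message requests on slave interface idx */
    static void cci400_enable_snoop(struct ccsr_cci400 __iomem *cci, int idx)
    {
            writel(CCI400_DVM_MESSAGE_REQ_EN | CCI400_SNOOP_REQ_EN,
                   &cci->slave[idx].snoop_ctrl);
    }
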
diff --git a/include/soc/fsl/fsl_memac.h b/include/soc/fsl/fsl_memac.h
new file mode 100644
index 0000000000..a0b8314f92
--- /dev/null
+++ b/include/soc/fsl/fsl_memac.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Roy Zang <tie-fei.zang@freescale.com>
+ */
+
+#ifndef __MEMAC_H__
+#define __MEMAC_H__
+
+struct memac {
+ /* memac general control and status registers */
+ u32 res_0[2];
+ u32 command_config; /* Control and configuration register */
+ u32 mac_addr_0; /* Lower 32 bits of 48-bit MAC address */
+ u32 mac_addr_1; /* Upper 16 bits of 48-bit MAC address */
+ u32 maxfrm; /* Maximum frame length register */
+ u32 res_18[5];
+ u32 hashtable_ctrl; /* Hash table control register */
+ u32 res_30[4];
+ u32 ievent; /* Interrupt event register */
+ u32 tx_ipg_length; /* Transmitter inter-packet-gap register */
+ u32 res_48;
+ u32 imask; /* interrupt mask register */
+ u32 res_50;
+ u32 cl_pause_quanta[4]; /* CL01-CL67 pause quanta register */
+ u32 cl_pause_thresh[4]; /* CL01-CL67 pause thresh register */
+ u32 rx_pause_status; /* Receive pause status register */
+ u32 res_78[2];
+ u32 mac_addr[14]; /* MAC address */
+ u32 lpwake_timer; /* EEE low power wakeup timer register */
+ u32 sleep_timer; /* Transmit EEE Low Power Timer register */
+ u32 res_c0[8];
+ u32 statn_config; /* Statistics configuration register */
+ u32 res_e4[7];
+
+ /* memac statistics counter registers */
+	u32 rx_eoct_l;		/* Rx ethernet octets lower */
+	u32 rx_eoct_u;		/* Rx ethernet octets upper */
+	u32 rx_oct_l;		/* Rx octets lower */
+	u32 rx_oct_u;		/* Rx octets upper */
+ u32 rx_align_err_l; /* Rx alignment error lower */
+ u32 rx_align_err_u; /* Rx alignment error upper */
+	u32 rx_pause_frame_l;	/* Rx valid pause frame lower */
+ u32 rx_pause_frame_u; /* Rx valid pause frame upper */
+ u32 rx_frame_l; /* Rx frame counter lower */
+ u32 rx_frame_u; /* Rx frame counter upper */
+ u32 rx_frame_crc_err_l; /* Rx frame check sequence error lower */
+ u32 rx_frame_crc_err_u; /* Rx frame check sequence error upper */
+ u32 rx_vlan_l; /* Rx VLAN frame lower */
+ u32 rx_vlan_u; /* Rx VLAN frame upper */
+ u32 rx_err_l; /* Rx frame error lower */
+ u32 rx_err_u; /* Rx frame error upper */
+ u32 rx_uni_l; /* Rx unicast frame lower */
+ u32 rx_uni_u; /* Rx unicast frame upper */
+ u32 rx_multi_l; /* Rx multicast frame lower */
+ u32 rx_multi_u; /* Rx multicast frame upper */
+ u32 rx_brd_l; /* Rx broadcast frame lower */
+ u32 rx_brd_u; /* Rx broadcast frame upper */
+ u32 rx_drop_l; /* Rx dropped packets lower */
+ u32 rx_drop_u; /* Rx dropped packets upper */
+ u32 rx_pkt_l; /* Rx packets lower */
+ u32 rx_pkt_u; /* Rx packets upper */
+ u32 rx_undsz_l; /* Rx undersized packet lower */
+ u32 rx_undsz_u; /* Rx undersized packet upper */
+ u32 rx_64_l; /* Rx 64 oct packet lower */
+ u32 rx_64_u; /* Rx 64 oct packet upper */
+ u32 rx_127_l; /* Rx 65 to 127 oct packet lower */
+ u32 rx_127_u; /* Rx 65 to 127 oct packet upper */
+ u32 rx_255_l; /* Rx 128 to 255 oct packet lower */
+ u32 rx_255_u; /* Rx 128 to 255 oct packet upper */
+ u32 rx_511_l; /* Rx 256 to 511 oct packet lower */
+ u32 rx_511_u; /* Rx 256 to 511 oct packet upper */
+ u32 rx_1023_l; /* Rx 512 to 1023 oct packet lower */
+ u32 rx_1023_u; /* Rx 512 to 1023 oct packet upper */
+ u32 rx_1518_l; /* Rx 1024 to 1518 oct packet lower */
+ u32 rx_1518_u; /* Rx 1024 to 1518 oct packet upper */
+ u32 rx_1519_l; /* Rx 1519 to max oct packet lower */
+ u32 rx_1519_u; /* Rx 1519 to max oct packet upper */
+ u32 rx_oversz_l; /* Rx oversized packet lower */
+ u32 rx_oversz_u; /* Rx oversized packet upper */
+ u32 rx_jabber_l; /* Rx Jabber packet lower */
+ u32 rx_jabber_u; /* Rx Jabber packet upper */
+ u32 rx_frag_l; /* Rx Fragment packet lower */
+ u32 rx_frag_u; /* Rx Fragment packet upper */
+ u32 rx_cnp_l; /* Rx control packet lower */
+ u32 rx_cnp_u; /* Rx control packet upper */
+	u32 rx_drntp_l;		/* Rx dropped not truncated packet lower */
+	u32 rx_drntp_u;		/* Rx dropped not truncated packet upper */
+ u32 res_1d0[0xc];
+
+	u32 tx_eoct_l;		/* Tx ethernet octets lower */
+	u32 tx_eoct_u;		/* Tx ethernet octets upper */
+	u32 tx_oct_l;		/* Tx octets lower */
+	u32 tx_oct_u;		/* Tx octets upper */
+ u32 res_210[0x2];
+ u32 tx_pause_frame_l; /* Tx valid pause frame lower */
+ u32 tx_pause_frame_u; /* Tx valid pause frame upper */
+ u32 tx_frame_l; /* Tx frame counter lower */
+ u32 tx_frame_u; /* Tx frame counter upper */
+ u32 tx_frame_crc_err_l; /* Tx frame check sequence error lower */
+ u32 tx_frame_crc_err_u; /* Tx frame check sequence error upper */
+ u32 tx_vlan_l; /* Tx VLAN frame lower */
+ u32 tx_vlan_u; /* Tx VLAN frame upper */
+ u32 tx_frame_err_l; /* Tx frame error lower */
+ u32 tx_frame_err_u; /* Tx frame error upper */
+ u32 tx_uni_l; /* Tx unicast frame lower */
+ u32 tx_uni_u; /* Tx unicast frame upper */
+ u32 tx_multi_l; /* Tx multicast frame lower */
+ u32 tx_multi_u; /* Tx multicast frame upper */
+ u32 tx_brd_l; /* Tx broadcast frame lower */
+ u32 tx_brd_u; /* Tx broadcast frame upper */
+ u32 res_258[0x2];
+ u32 tx_pkt_l; /* Tx packets lower */
+ u32 tx_pkt_u; /* Tx packets upper */
+ u32 tx_undsz_l; /* Tx undersized packet lower */
+ u32 tx_undsz_u; /* Tx undersized packet upper */
+ u32 tx_64_l; /* Tx 64 oct packet lower */
+ u32 tx_64_u; /* Tx 64 oct packet upper */
+ u32 tx_127_l; /* Tx 65 to 127 oct packet lower */
+ u32 tx_127_u; /* Tx 65 to 127 oct packet upper */
+ u32 tx_255_l; /* Tx 128 to 255 oct packet lower */
+ u32 tx_255_u; /* Tx 128 to 255 oct packet upper */
+ u32 tx_511_l; /* Tx 256 to 511 oct packet lower */
+ u32 tx_511_u; /* Tx 256 to 511 oct packet upper */
+ u32 tx_1023_l; /* Tx 512 to 1023 oct packet lower */
+ u32 tx_1023_u; /* Tx 512 to 1023 oct packet upper */
+ u32 tx_1518_l; /* Tx 1024 to 1518 oct packet lower */
+ u32 tx_1518_u; /* Tx 1024 to 1518 oct packet upper */
+ u32 tx_1519_l; /* Tx 1519 to max oct packet lower */
+ u32 tx_1519_u; /* Tx 1519 to max oct packet upper */
+ u32 res_2a8[0x6];
+ u32 tx_cnp_l; /* Tx control packet lower */
+ u32 tx_cnp_u; /* Tx control packet upper */
+ u32 res_2c8[0xe];
+
+ /* Line interface control register */
+ u32 if_mode; /* interface mode control */
+ u32 if_status; /* interface status */
+ u32 res_308[0xe];
+
+ /* HiGig/2 Register */
+ u32 hg_config; /* HiGig2 control and configuration */
+ u32 res_344[0x3];
+ u32 hg_pause_quanta; /* HiGig2 pause quanta */
+ u32 res_354[0x3];
+ u32 hg_pause_thresh; /* HiGig2 pause quanta threshold */
+ u32 res_364[0x3];
+ u32 hgrx_pause_status; /* HiGig2 rx pause quanta status */
+ u32 hg_fifos_status; /* HiGig2 fifos status */
+ u32 rhm; /* Rx HiGig2 message counter register */
+ u32 thm;/* Tx HiGig2 message counter register */
+ u32 res_380[0x320];
+};
+
+/* COMMAND_CONFIG - command and configuration register */
+#define MEMAC_CMD_CFG_RX_EN 0x00000002 /* MAC Rx path enable */
+#define MEMAC_CMD_CFG_TX_EN 0x00000001 /* MAC Tx path enable */
+#define MEMAC_CMD_CFG_RXTX_EN (MEMAC_CMD_CFG_RX_EN | MEMAC_CMD_CFG_TX_EN)
+#define MEMAC_CMD_CFG_NO_LEN_CHK 0x20000 /* Payload length check disable */
+
+/* HASHTABLE_CTRL - Hashtable control register */
+#define HASHTABLE_CTRL_MCAST_EN	0x00000200 /* enable multicast Rx hash */
+#define HASHTABLE_CTRL_ADDR_MASK 0x000001ff
+
+/* TX_IPG_LENGTH - Transmit inter-packet gap length register */
+#define TX_IPG_LENGTH_IPG_LEN_MASK 0x000003ff
+
+/* IMASK - interrupt mask register */
+#define IMASK_MDIO_SCAN_EVENT 0x00010000 /* MDIO scan event mask */
+#define IMASK_MDIO_CMD_CMPL 0x00008000 /* MDIO cmd completion mask */
+#define IMASK_REM_FAULT 0x00004000 /* remote fault mask */
+#define IMASK_LOC_FAULT 0x00002000 /* local fault mask */
+#define IMASK_TX_ECC_ER 0x00001000 /* Tx frame ECC error mask */
+#define IMASK_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow mask */
+#define IMASK_TX_ER 0x00000200 /* Tx frame error mask */
+#define IMASK_RX_FIFO_OVFL 0x00000100 /* Rx FIFO overflow mask */
+#define IMASK_RX_ECC_ER 0x00000080 /* Rx frame ECC error mask */
+#define IMASK_RX_JAB_FRM 0x00000040 /* Rx jabber frame mask */
+#define IMASK_RX_OVRSZ_FRM 0x00000020 /* Rx oversized frame mask */
+#define IMASK_RX_RUNT_FRM 0x00000010 /* Rx runt frame mask */
+#define IMASK_RX_FRAG_FRM 0x00000008 /* Rx fragment frame mask */
+#define IMASK_RX_LEN_ER 0x00000004 /* Rx payload length error mask */
+#define IMASK_RX_CRC_ER 0x00000002 /* Rx CRC error mask */
+#define IMASK_RX_ALIGN_ER 0x00000001 /* Rx alignment error mask */
+
+#define IMASK_MASK_ALL 0x00000000
+
+/* IEVENT - interrupt event register */
+#define IEVENT_MDIO_SCAN_EVENT 0x00010000 /* MDIO scan event */
+#define IEVENT_MDIO_CMD_CMPL 0x00008000 /* MDIO cmd completion */
+#define IEVENT_REM_FAULT 0x00004000 /* remote fault */
+#define IEVENT_LOC_FAULT 0x00002000 /* local fault */
+#define IEVENT_TX_ECC_ER 0x00001000 /* Tx frame ECC error */
+#define IEVENT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */
+#define IEVENT_TX_ER 0x00000200 /* Tx frame error */
+#define IEVENT_RX_FIFO_OVFL 0x00000100 /* Rx FIFO overflow */
+#define IEVENT_RX_ECC_ER 0x00000080 /* Rx frame ECC error */
+#define IEVENT_RX_JAB_FRM 0x00000040 /* Rx jabber frame */
+#define IEVENT_RX_OVRSZ_FRM 0x00000020 /* Rx oversized frame */
+#define IEVENT_RX_RUNT_FRM 0x00000010 /* Rx runt frame */
+#define IEVENT_RX_FRAG_FRM 0x00000008 /* Rx fragment frame */
+#define IEVENT_RX_LEN_ER 0x00000004 /* Rx payload length error */
+#define IEVENT_RX_CRC_ER 0x00000002 /* Rx CRC error */
+#define IEVENT_RX_ALIGN_ER 0x00000001 /* Rx alignment error */
+
+#define IEVENT_CLEAR_ALL 0xffffffff
+
+/* IF_MODE - Interface Mode Register */
+#define IF_MODE_EN_AUTO 0x00008000 /* 1 - Enable automatic speed selection */
+#define IF_MODE_SETSP_100M 0x00000000 /* 00 - 100Mbps RGMII */
+#define IF_MODE_SETSP_10M 0x00002000 /* 01 - 10Mbps RGMII */
+#define IF_MODE_SETSP_1000M 0x00004000 /* 10 - 1000Mbps RGMII */
+#define IF_MODE_SETSP_MASK 0x00006000 /* setsp mask bits */
+#define IF_MODE_XGMII 0x00000000 /* 00- XGMII(10) interface mode */
+#define IF_MODE_GMII 0x00000002 /* 10- GMII interface mode */
+#define IF_MODE_MASK 0x00000003 /* mask for mode interface mode */
+#define IF_MODE_RG 0x00000004 /* 1- RGMII */
+
+#define IF_DEFAULT	(IF_MODE_GMII)
+
+/* Internal PHY Registers - SGMII */
+#define PHY_SGMII_CR_PHY_RESET 0x8000
+#define PHY_SGMII_CR_RESET_AN 0x0200
+#define PHY_SGMII_CR_DEF_VAL 0x1140
+#define PHY_SGMII_IF_SPEED_GIGABIT 0x0008
+#define PHY_SGMII_DEV_ABILITY_SGMII 0x4001
+#define PHY_SGMII_IF_MODE_AN 0x0002
+#define PHY_SGMII_IF_MODE_SGMII 0x0001
+
+struct memac_mdio_controller {
+ u32 res0[0xc];
+ u32 mdio_stat; /* MDIO configuration and status */
+ u32 mdio_ctl; /* MDIO control */
+ u32 mdio_data; /* MDIO data */
+ u32 mdio_addr; /* MDIO address */
+};
+
+#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY (1 << 0)
+#define MDIO_STAT_RD_ER (1 << 1)
+#define MDIO_STAT_PRE (1 << 5)
+#define MDIO_STAT_ENC (1 << 6)
+#define MDIO_STAT_HOLD_15_CLK (7 << 2)
+#define MDIO_STAT_NEG (1 << 23)
+
+#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS (1 << 10)
+#define MDIO_CTL_SCAN_EN (1 << 11)
+#define MDIO_CTL_POST_INC (1 << 14)
+#define MDIO_CTL_READ (1 << 15)
+
+#define MDIO_DATA(x) (x & 0xffff)
+#define MDIO_DATA_BSY (1 << 31)
+
+#endif
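
The memac_mdio_controller block above is the MDIO interface the FMan/memac Ethernet driver programs. A compact clause-22 read sketch follows (illustrative only; a real driver would also check MDIO_STAT_RD_ER and add timeouts, and the helper name is an assumption):

    #include <io.h>
    #include <linux/types.h>
    #include <soc/fsl/fsl_memac.h>

    /* Illustrative clause-22 MDIO read: returns the 16-bit register value */
    static u16 memac_mdio_c22_read(struct memac_mdio_controller __iomem *regs,
                                   int phy_addr, int regnum)
    {
            u32 ctl = MDIO_CTL_PORT_ADDR(phy_addr) | MDIO_CTL_DEV_ADDR(regnum);

            /* select PHY address and register, then start the read cycle */
            out_be32(&regs->mdio_ctl, ctl);
            out_be32(&regs->mdio_ctl, ctl | MDIO_CTL_READ);

            /* wait until the data register signals completion */
            while (in_be32(&regs->mdio_data) & MDIO_DATA_BSY)
                    ;

            return MDIO_DATA(in_be32(&regs->mdio_data));
    }
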
diff --git a/include/soc/fsl/fsl_qbman.h b/include/soc/fsl/fsl_qbman.h
new file mode 100644
index 0000000000..4687eb9bb1
--- /dev/null
+++ b/include/soc/fsl/fsl_qbman.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2017 NXP
+ */
+
+#ifndef __FSL_QBMAN_H__
+#define __FSL_QBMAN_H__
+void fdt_fixup_qportals(void *blob);
+void fdt_fixup_bportals(void *blob);
+void inhibit_portals(void __iomem *addr, int max_portals,
+ int arch_max_portals, int portal_cinh_size);
+void setup_qbman_portals(void);
+
+struct ccsr_qman {
+#ifdef CONFIG_SYS_FSL_QMAN_V3
+ u8 res0[0x200];
+#else
+ struct {
+ u32 qcsp_lio_cfg; /* 0x0 - SW Portal n LIO cfg */
+ u32 qcsp_io_cfg; /* 0x4 - SW Portal n IO cfg */
+ u32 res;
+ u32 qcsp_dd_cfg; /* 0xc - SW Portal Dynamic Debug cfg */
+ } qcsp[32];
+#endif
+ /* Not actually reserved, but irrelevant to u-boot */
+ u8 res[0xbf8 - 0x200];
+ u32 ip_rev_1;
+ u32 ip_rev_2;
+ u32 fqd_bare; /* FQD Extended Base Addr Register */
+ u32 fqd_bar; /* FQD Base Addr Register */
+ u8 res1[0x8];
+ u32 fqd_ar; /* FQD Attributes Register */
+ u8 res2[0xc];
+ u32 pfdr_bare; /* PFDR Extended Base Addr Register */
+ u32 pfdr_bar; /* PFDR Base Addr Register */
+ u8 res3[0x8];
+ u32 pfdr_ar; /* PFDR Attributes Register */
+ u8 res4[0x4c];
+ u32 qcsp_bare; /* QCSP Extended Base Addr Register */
+ u32 qcsp_bar; /* QCSP Base Addr Register */
+ u8 res5[0x78];
+ u32 ci_sched_cfg; /* Initiator Scheduling Configuration */
+ u32 srcidr; /* Source ID Register */
+ u32 liodnr; /* LIODN Register */
+ u8 res6[4];
+ u32 ci_rlm_cfg; /* Initiator Read Latency Monitor Cfg */
+ u32 ci_rlm_avg; /* Initiator Read Latency Monitor Avg */
+ u8 res7[0x2e8];
+#ifdef CONFIG_SYS_FSL_QMAN_V3
+ struct {
+ u32 qcsp_lio_cfg; /* 0x0 - SW Portal n LIO cfg */
+ u32 qcsp_io_cfg; /* 0x4 - SW Portal n IO cfg */
+ u32 res;
+ u32 qcsp_dd_cfg; /* 0xc - SW Portal n Dynamic Debug cfg*/
+ } qcsp[50];
+#endif
+};
+
+struct ccsr_bman {
+ /* Not actually reserved, but irrelevant to u-boot */
+ u8 res[0xbf8];
+ u32 ip_rev_1;
+ u32 ip_rev_2;
+ u32 fbpr_bare; /* FBPR Extended Base Addr Register */
+ u32 fbpr_bar; /* FBPR Base Addr Register */
+ u8 res1[0x8];
+ u32 fbpr_ar; /* FBPR Attributes Register */
+ u8 res2[0xf0];
+ u32 srcidr; /* Source ID Register */
+ u32 liodnr; /* LIODN Register */
+ u8 res7[0x2f4];
+};
+
+#endif /* __FSL_QBMAN_H__ */
diff --git a/include/soc/fsl/immap_lsch2.h b/include/soc/fsl/immap_lsch2.h
new file mode 100644
index 0000000000..4eb6658788
--- /dev/null
+++ b/include/soc/fsl/immap_lsch2.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2013-2015 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __ARCH_FSL_LSCH2_IMMAP_H__
+#define __ARCH_FSL_LSCH2_IMMAP_H__
+
+#define gur_in32(a) in_be32(a)
+#define gur_out32(a, v) out_be32(a, v)
+
+#define LSCH2_IMMR 0x01000000
+
+#define LSCH2_DDR_ADDR (LSCH2_IMMR + 0x00080000)
+#define LSCH2_CCI400_ADDR (LSCH2_IMMR + 0x00180000)
+#define LSCH2_GIC400_ADDR (LSCH2_IMMR + 0x00400000)
+#define LSCH2_IFC_ADDR (LSCH2_IMMR + 0x00530000)
+#define LSCH2_QSPI0_BASE_ADDR (LSCH2_IMMR + 0x00550000)
+#define LSCH2_ESDHC_ADDR (LSCH2_IMMR + 0x00560000)
+#define LSCH2_CSU_ADDR (LSCH2_IMMR + 0x00510000)
+#define LSCH2_SCFG_ADDR (LSCH2_IMMR + 0x00570000)
+#define LSCH2_BMAN_ADDR (LSCH2_IMMR + 0x00890000)
+#define LSCH2_QMAN_ADDR (LSCH2_IMMR + 0x00880000)
+#define LSCH2_FM1_ADDR (LSCH2_IMMR + 0x00a00000)
+#define LSCH2_FM1_DTSEC1_ADDR (LSCH2_IMMR + 0x00ae0000)
+#define LSCH2_SEC_MON_ADDR (LSCH2_IMMR + 0x00e90000)
+#define LSCH2_SFP_ADDR (LSCH2_IMMR + 0x00e80200)
+#define LSCH2_GUTS_ADDR (LSCH2_IMMR + 0x00ee0000)
+#define LSCH2_RST_ADDR (LSCH2_IMMR + 0x00ee00b0)
+#define LSCH2_FMAN_ADDR (LSCH2_IMMR + 0x00a00000)
+#define LSCH2_SERDES_ADDR (LSCH2_IMMR + 0x00ea0000)
+#define LSCH2_DCFG_ADDR (LSCH2_IMMR + 0x00ee0000)
+#define LSCH2_CLK_ADDR (LSCH2_IMMR + 0x00ee1000)
+#define LSCH2_DSPI1_BASE_ADDR (LSCH2_IMMR + 0x01100000)
+#define LSCH2_I2C1_BASE_ADDR (LSCH2_IMMR + 0x01180000)
+#define LSCH2_I2C2_BASE_ADDR (LSCH2_IMMR + 0x01190000)
+#define LSCH2_I2C3_BASE_ADDR (LSCH2_IMMR + 0x011a0000)
+#define LSCH2_I2C4_BASE_ADDR (LSCH2_IMMR + 0x011b0000)
+#define LSCH2_NS16550_COM1 (LSCH2_IMMR + 0x011c0500)
+#define LSCH2_NS16550_COM2 (LSCH2_IMMR + 0x011c0600)
+#define LSCH2_NS16550_COM3 (LSCH2_IMMR + 0x011d0500)
+#define LSCH2_NS16550_COM4 (LSCH2_IMMR + 0x011d0600)
+#define LSCH2_GPIO1_BASE_ADDR (LSCH2_IMMR + 0x01300000)
+#define LSCH2_GPIO2_BASE_ADDR (LSCH2_IMMR + 0x01310000)
+#define LSCH2_GPIO3_BASE_ADDR (LSCH2_IMMR + 0x01320000)
+#define LSCH2_GPIO4_BASE_ADDR (LSCH2_IMMR + 0x01330000)
+#define LSCH2_QE_BASE_ADDR (LSCH2_IMMR + 0x01400000)
+#define LSCH2_LPUART_BASE (LSCH2_IMMR + 0x01950000)
+#define LSCH2_WDOG1_BASE_ADDR (LSCH2_IMMR + 0x01ad0000)
+#define LSCH2_SYS_COUNTER_ADDR (LSCH2_IMMR + 0x01b00000)
+#define LSCH2_EDMA_BASE_ADDR (LSCH2_IMMR + 0x01c00000)
+#define LSCH2_XHCI_USB1_ADDR (LSCH2_IMMR + 0x01f00000)
+#define LSCH2_XHCI_USB2_ADDR (LSCH2_IMMR + 0x02000000)
+#define LSCH2_XHCI_USB3_ADDR (LSCH2_IMMR + 0x02100000)
+#define LSCH2_HCI_BASE_ADDR (LSCH2_IMMR + 0x02200000)
+#define LSCH2_PCIE1_ADDR (LSCH2_IMMR + 0x02400000)
+#define LSCH2_PCIE2_ADDR (LSCH2_IMMR + 0x02500000)
+#define LSCH2_PCIE3_ADDR (LSCH2_IMMR + 0x02600000)
+#define LSCH2_QDMA_BASE_ADDR (LSCH2_IMMR + 0x07380000)
+#define LSCH2_EHCI_USB1_ADDR (LSCH2_IMMR + 0x07600000)
+
+struct ccsr_gur {
+ u32 porsr1; /* POR status 1 */
+#define FSL_CHASSIS2_CCSR_PORSR1_RCW_MASK 0xFF800000
+ u32 porsr2; /* POR status 2 */
+ u8 res_008[0x20-0x8];
+ u32 gpporcr1; /* General-purpose POR configuration */
+ u32 gpporcr2;
+#define FSL_CHASSIS2_DCFG_FUSESR_VID_SHIFT 25
+#define FSL_CHASSIS2_DCFG_FUSESR_VID_MASK 0x1F
+#define FSL_CHASSIS2_DCFG_FUSESR_ALTVID_SHIFT 20
+#define FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK 0x1F
+ u32 dcfg_fusesr; /* Fuse status register */
+ u8 res_02c[0x70-0x2c];
+ u32 devdisr; /* Device disable control */
+#define FSL_CHASSIS2_DEVDISR2_DTSEC1_1 0x80000000
+#define FSL_CHASSIS2_DEVDISR2_DTSEC1_2 0x40000000
+#define FSL_CHASSIS2_DEVDISR2_DTSEC1_3 0x20000000
+#define FSL_CHASSIS2_DEVDISR2_DTSEC1_4 0x10000000
+#define FSL_CHASSIS2_DEVDISR2_DTSEC1_5 0x08000000
+#define FSL_CHASSIS2_DEVDISR2_DTSEC1_6 0x04000000
+#define FSL_CHASSIS2_DEVDISR2_DTSEC1_9 0x00800000
+#define FSL_CHASSIS2_DEVDISR2_DTSEC1_10 0x00400000
+#define FSL_CHASSIS2_DEVDISR2_10GEC1_1 0x00800000
+#define FSL_CHASSIS2_DEVDISR2_10GEC1_2 0x00400000
+#define FSL_CHASSIS2_DEVDISR2_10GEC1_3 0x80000000
+#define FSL_CHASSIS2_DEVDISR2_10GEC1_4 0x40000000
+ u32 devdisr2; /* Device disable control 2 */
+ u32 devdisr3; /* Device disable control 3 */
+ u32 devdisr4; /* Device disable control 4 */
+ u32 devdisr5; /* Device disable control 5 */
+ u32 devdisr6; /* Device disable control 6 */
+ u32 devdisr7; /* Device disable control 7 */
+ u8 res_08c[0x94-0x8c];
+ u32 coredisru; /* upper portion for support of 64 cores */
+ u32 coredisrl; /* lower portion for support of 64 cores */
+ u8 res_09c[0xa0-0x9c];
+ u32 pvr; /* Processor version */
+ u32 svr; /* System version */
+ u32 mvr; /* Manufacturing version */
+ u8 res_0ac[0xb0-0xac];
+ u32 rstcr; /* Reset control */
+ u32 rstrqpblsr; /* Reset request preboot loader status */
+ u8 res_0b8[0xc0-0xb8];
+ u32 rstrqmr1; /* Reset request mask */
+ u8 res_0c4[0xc8-0xc4];
+ u32 rstrqsr1; /* Reset request status */
+ u8 res_0cc[0xd4-0xcc];
+ u32 rstrqwdtmrl; /* Reset request WDT mask */
+ u8 res_0d8[0xdc-0xd8];
+ u32 rstrqwdtsrl; /* Reset request WDT status */
+ u8 res_0e0[0xe4-0xe0];
+ u32 brrl; /* Boot release */
+ u8 res_0e8[0x100-0xe8];
+ u32 rcwsr[16]; /* Reset control word status */
+#define FSL_CHASSIS2_RCWSR0_SYS_PLL_RAT_SHIFT 25
+#define FSL_CHASSIS2_RCWSR0_SYS_PLL_RAT_MASK 0x1f
+#define FSL_CHASSIS2_RCWSR0_MEM_PLL_RAT_SHIFT 16
+#define FSL_CHASSIS2_RCWSR0_MEM_PLL_RAT_MASK 0x3f
+#define FSL_CHASSIS2_RCWSR4_SRDS1_PRTCL_MASK 0xffff0000
+#define FSL_CHASSIS2_RCWSR4_SRDS1_PRTCL_SHIFT 16
+#define FSL_CHASSIS2_RCWSR4_SRDS2_PRTCL_MASK 0x0000ffff
+#define FSL_CHASSIS2_RCWSR4_SRDS2_PRTCL_SHIFT 0
+#define RCW_SB_EN_REG_INDEX 7
+#define RCW_SB_EN_MASK 0x00200000
+
+ u8 res_140[0x200-0x140];
+ u32 scratchrw[4]; /* Scratch Read/Write */
+ u8 res_210[0x300-0x210];
+ u32 scratchw1r[4]; /* Scratch Read (Write once) */
+ u8 res_310[0x400-0x310];
+ u32 crstsr[12];
+ u8 res_430[0x500-0x430];
+
+ /* PCI Express n Logical I/O Device Number register */
+ u32 dcfg_ccsr_pex1liodnr;
+ u32 dcfg_ccsr_pex2liodnr;
+ u32 dcfg_ccsr_pex3liodnr;
+ u32 dcfg_ccsr_pex4liodnr;
+ /* RIO n Logical I/O Device Number register */
+ u32 dcfg_ccsr_rio1liodnr;
+ u32 dcfg_ccsr_rio2liodnr;
+ u32 dcfg_ccsr_rio3liodnr;
+ u32 dcfg_ccsr_rio4liodnr;
+ /* USB Logical I/O Device Number register */
+ u32 dcfg_ccsr_usb1liodnr;
+ u32 dcfg_ccsr_usb2liodnr;
+ u32 dcfg_ccsr_usb3liodnr;
+ u32 dcfg_ccsr_usb4liodnr;
+ /* SD/MMC Logical I/O Device Number register */
+ u32 dcfg_ccsr_sdmmc1liodnr;
+ u32 dcfg_ccsr_sdmmc2liodnr;
+ u32 dcfg_ccsr_sdmmc3liodnr;
+ u32 dcfg_ccsr_sdmmc4liodnr;
+ /* RIO Message Unit Logical I/O Device Number register */
+ u32 dcfg_ccsr_riomaintliodnr;
+
+ u8 res_544[0x550-0x544];
+ u32 sataliodnr[4];
+ u8 res_560[0x570-0x560];
+
+ u32 dcfg_ccsr_misc1liodnr;
+ u32 dcfg_ccsr_misc2liodnr;
+ u32 dcfg_ccsr_misc3liodnr;
+ u32 dcfg_ccsr_misc4liodnr;
+ u32 dcfg_ccsr_dma1liodnr;
+ u32 dcfg_ccsr_dma2liodnr;
+ u32 dcfg_ccsr_dma3liodnr;
+ u32 dcfg_ccsr_dma4liodnr;
+ u32 dcfg_ccsr_spare1liodnr;
+ u32 dcfg_ccsr_spare2liodnr;
+ u32 dcfg_ccsr_spare3liodnr;
+ u32 dcfg_ccsr_spare4liodnr;
+ u8 res_5a0[0x600-0x5a0];
+ u32 dcfg_ccsr_pblsr;
+
+ u32 pamubypenr;
+ u32 dmacr1;
+
+ u8 res_60c[0x610-0x60c];
+ u32 dcfg_ccsr_gensr1;
+ u32 dcfg_ccsr_gensr2;
+ u32 dcfg_ccsr_gensr3;
+ u32 dcfg_ccsr_gensr4;
+ u32 dcfg_ccsr_gencr1;
+ u32 dcfg_ccsr_gencr2;
+ u32 dcfg_ccsr_gencr3;
+ u32 dcfg_ccsr_gencr4;
+ u32 dcfg_ccsr_gencr5;
+ u32 dcfg_ccsr_gencr6;
+ u32 dcfg_ccsr_gencr7;
+ u8 res_63c[0x658-0x63c];
+ u32 dcfg_ccsr_cgensr1;
+ u32 dcfg_ccsr_cgensr0;
+ u8 res_660[0x678-0x660];
+ u32 dcfg_ccsr_cgencr1;
+
+ u32 dcfg_ccsr_cgencr0;
+ u8 res_680[0x700-0x680];
+ u32 dcfg_ccsr_sriopstecr;
+ u32 dcfg_ccsr_dcsrcr;
+
+ u8 res_708[0x740-0x708]; /* add more registers when needed */
+ u32 tp_ityp[64]; /* Topology Initiator Type Register */
+ struct {
+ u32 upper;
+ u32 lower;
+ } tp_cluster[16];
+ u8 res_8c0[0xa00-0x8c0]; /* add more registers when needed */
+ u32 dcfg_ccsr_qmbm_warmrst;
+ u8 res_a04[0xa20-0xa04]; /* add more registers when needed */
+ u32 dcfg_ccsr_reserved0;
+ u32 dcfg_ccsr_reserved1;
+};
+
+#define SCFG_QSPI_CLKSEL 0x40100000
+#define SCFG_USBDRVVBUS_SELCR_USB1 0x00000000
+#define SCFG_USBDRVVBUS_SELCR_USB2 0x00000001
+#define SCFG_USBDRVVBUS_SELCR_USB3 0x00000002
+#define SCFG_USBPWRFAULT_INACTIVE 0x00000000
+#define SCFG_USBPWRFAULT_SHARED 0x00000001
+#define SCFG_USBPWRFAULT_DEDICATED 0x00000002
+#define SCFG_USBPWRFAULT_USB3_SHIFT 4
+#define SCFG_USBPWRFAULT_USB2_SHIFT 2
+#define SCFG_USBPWRFAULT_USB1_SHIFT 0
+
+#define SCFG_USB3PRM1CR_USB1 0x070
+#define SCFG_USB3PRM2CR_USB1 0x074
+#define SCFG_USB3PRM1CR_USB2 0x07C
+#define SCFG_USB3PRM2CR_USB2 0x080
+#define SCFG_USB3PRM1CR_USB3 0x088
+#define SCFG_USB3PRM2CR_USB3 0x08c
+#define SCFG_USB_TXVREFTUNE 0x9
+#define SCFG_USB_SQRXTUNE_MASK 0x7
+#define SCFG_USB_PCSTXSWINGFULL 0x47
+#define SCFG_USB_PHY1 0x084F0000
+#define SCFG_USB_PHY2 0x08500000
+#define SCFG_USB_PHY3 0x08510000
+#define SCFG_USB_PHY_RX_OVRD_IN_HI 0x200c
+#define USB_PHY_RX_EQ_VAL_1 0x0000
+#define USB_PHY_RX_EQ_VAL_2 0x0080
+#define USB_PHY_RX_EQ_VAL_3 0x0380
+#define USB_PHY_RX_EQ_VAL_4 0x0b80
+
+#define SCFG_SNPCNFGCR_SECRDSNP 0x80000000
+#define SCFG_SNPCNFGCR_SECWRSNP 0x40000000
+#define SCFG_SNPCNFGCR_SATARDSNP 0x00800000
+#define SCFG_SNPCNFGCR_SATAWRSNP 0x00400000
+
+/* RGMIIPCR bit definitions*/
+#define SCFG_RGMIIPCR_EN_AUTO BIT(3)
+#define SCFG_RGMIIPCR_SETSP_1000M BIT(2)
+#define SCFG_RGMIIPCR_SETSP_100M 0
+#define SCFG_RGMIIPCR_SETSP_10M BIT(1)
+#define SCFG_RGMIIPCR_SETFD BIT(0)
+
+/* PFEASBCR bit definitions */
+#define SCFG_PFEASBCR_ARCACHE0 BIT(31)
+#define SCFG_PFEASBCR_AWCACHE0 BIT(30)
+#define SCFG_PFEASBCR_ARCACHE1 BIT(29)
+#define SCFG_PFEASBCR_AWCACHE1 BIT(28)
+#define SCFG_PFEASBCR_ARSNP BIT(27)
+#define SCFG_PFEASBCR_AWSNP BIT(26)
+
+/* WR_QoS1 PFE bit definitions */
+#define SCFG_WR_QOS1_PFE1_QOS GENMASK(27, 24)
+#define SCFG_WR_QOS1_PFE2_QOS GENMASK(23, 20)
+
+/* RD_QoS1 PFE bit definitions */
+#define SCFG_RD_QOS1_PFE1_QOS GENMASK(27, 24)
+#define SCFG_RD_QOS1_PFE2_QOS GENMASK(23, 20)
+
+/* Supplemental Configuration Unit */
+struct ccsr_scfg {
+ u8 res_000[0x100-0x000];
+ u32 usb2_icid;
+ u32 usb3_icid;
+ u8 res_108[0x114-0x108];
+ u32 dma_icid;
+ u32 sata_icid;
+ u32 usb1_icid;
+ u32 qe_icid;
+ u32 sdhc_icid;
+ u32 edma_icid;
+ u32 etr_icid;
+ u32 core_sft_rst[4];
+ u8 res_140[0x158-0x140];
+ u32 altcbar;
+ u32 qspi_cfg;
+ u8 res_160[0x164 - 0x160];
+ u32 wr_qos1;
+ u32 wr_qos2;
+ u32 rd_qos1;
+ u32 rd_qos2;
+ u8 res_174[0x180 - 0x174];
+ u32 dmamcr;
+ u8 res_184[0x188-0x184];
+ u32 gic_align;
+ u32 debug_icid;
+ u8 res_190[0x1a4-0x190];
+ u32 snpcnfgcr;
+ u8 res_1a8[0x1ac-0x1a8];
+ u32 intpcr;
+ u8 res_1b0[0x204-0x1b0];
+ u32 coresrencr;
+ u8 res_208[0x220-0x208];
+ u32 rvbar0_0;
+ u32 rvbar0_1;
+ u32 rvbar1_0;
+ u32 rvbar1_1;
+ u32 rvbar2_0;
+ u32 rvbar2_1;
+ u32 rvbar3_0;
+ u32 rvbar3_1;
+ u32 lpmcsr;
+ u8 res_244[0x400-0x244];
+ u32 qspidqscr;
+ u32 ecgtxcmcr;
+ u32 sdhciovselcr;
+ u32 rcwpmuxcr0;
+ u32 usbdrvvbus_selcr;
+ u32 usbpwrfault_selcr;
+ u32 usb_refclk_selcr1;
+ u32 usb_refclk_selcr2;
+ u32 usb_refclk_selcr3;
+ u8 res_424[0x434 - 0x424];
+ u32 rgmiipcr;
+ u32 res_438;
+ u32 rgmiipsr;
+ u32 pfepfcssr1;
+ u32 pfeintencr1;
+ u32 pfepfcssr2;
+ u32 pfeintencr2;
+ u32 pfeerrcr;
+ u32 pfeeerrintencr;
+ u32 pfeasbcr;
+ u32 pfebsbcr;
+ u8 res_460[0x484 - 0x460];
+ u32 mdioselcr;
+ u8 res_468[0x600 - 0x488];
+ u32 scratchrw[4];
+ u8 res_610[0x680-0x610];
+ u32 corebcr;
+ u8 res_684[0x1000-0x684];
+ u32 pex1msiir;
+ u32 pex1msir;
+ u8 res_1008[0x2000-0x1008];
+ u32 pex2;
+ u32 pex2msir;
+ u8 res_2008[0x3000-0x2008];
+ u32 pex3msiir;
+ u32 pex3msir;
+};
+
+#endif /* __ARCH_FSL_LSCH2_IMMAP_H__*/
diff --git a/include/soc/fsl/qe.h b/include/soc/fsl/qe.h
new file mode 100644
index 0000000000..26afd92471
--- /dev/null
+++ b/include/soc/fsl/qe.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2006-2009 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ */
+
+#ifndef __QE_H__
+#define __QE_H__
+
+#define QE_NUM_OF_BRGS 16
+#define UCC_MAX_NUM 8
+
+#define QE_DATAONLY_BASE 0
+#define QE_DATAONLY_SIZE (QE_MURAM_SIZE - QE_DATAONLY_BASE)
+
+/* QE threads SNUM
+*/
+typedef enum qe_snum_state {
+ QE_SNUM_STATE_USED, /* used */
+ QE_SNUM_STATE_FREE /* free */
+} qe_snum_state_e;
+
+typedef struct qe_snum {
+ u8 num; /* snum */
+ qe_snum_state_e state; /* state */
+} qe_snum_t;
+
+/* QE RISC allocation
+*/
+#define QE_RISC_ALLOCATION_RISC1 0x1 /* RISC 1 */
+#define QE_RISC_ALLOCATION_RISC2 0x2 /* RISC 2 */
+#define QE_RISC_ALLOCATION_RISC3 0x4 /* RISC 3 */
+#define QE_RISC_ALLOCATION_RISC4 0x8 /* RISC 4 */
+#define QE_RISC_ALLOCATION_RISC1_AND_RISC2 (QE_RISC_ALLOCATION_RISC1 | \
+ QE_RISC_ALLOCATION_RISC2)
+#define QE_RISC_ALLOCATION_FOUR_RISCS (QE_RISC_ALLOCATION_RISC1 | \
+ QE_RISC_ALLOCATION_RISC2 | \
+ QE_RISC_ALLOCATION_RISC3 | \
+ QE_RISC_ALLOCATION_RISC4)
+
+/* QE CECR commands for UCC fast.
+*/
+#define QE_CR_FLG 0x00010000
+#define QE_RESET 0x80000000
+#define QE_INIT_TX_RX 0x00000000
+#define QE_INIT_RX 0x00000001
+#define QE_INIT_TX 0x00000002
+#define QE_ENTER_HUNT_MODE 0x00000003
+#define QE_STOP_TX 0x00000004
+#define QE_GRACEFUL_STOP_TX 0x00000005
+#define QE_RESTART_TX 0x00000006
+#define QE_SWITCH_COMMAND 0x00000007
+#define QE_SET_GROUP_ADDRESS 0x00000008
+#define QE_INSERT_CELL 0x00000009
+#define QE_ATM_TRANSMIT 0x0000000a
+#define QE_CELL_POOL_GET 0x0000000b
+#define QE_CELL_POOL_PUT 0x0000000c
+#define QE_IMA_HOST_CMD 0x0000000d
+#define QE_ATM_MULTI_THREAD_INIT 0x00000011
+#define QE_ASSIGN_PAGE 0x00000012
+#define QE_START_FLOW_CONTROL 0x00000014
+#define QE_STOP_FLOW_CONTROL 0x00000015
+#define QE_ASSIGN_PAGE_TO_DEVICE 0x00000016
+#define QE_GRACEFUL_STOP_RX 0x0000001a
+#define QE_RESTART_RX 0x0000001b
+
+/* QE CECR Sub Block Code - sub block code of QE command.
+*/
+#define QE_CR_SUBBLOCK_INVALID 0x00000000
+#define QE_CR_SUBBLOCK_USB 0x03200000
+#define QE_CR_SUBBLOCK_UCCFAST1 0x02000000
+#define QE_CR_SUBBLOCK_UCCFAST2 0x02200000
+#define QE_CR_SUBBLOCK_UCCFAST3 0x02400000
+#define QE_CR_SUBBLOCK_UCCFAST4 0x02600000
+#define QE_CR_SUBBLOCK_UCCFAST5 0x02800000
+#define QE_CR_SUBBLOCK_UCCFAST6 0x02a00000
+#define QE_CR_SUBBLOCK_UCCFAST7 0x02c00000
+#define QE_CR_SUBBLOCK_UCCFAST8 0x02e00000
+#define QE_CR_SUBBLOCK_UCCSLOW1 0x00000000
+#define QE_CR_SUBBLOCK_UCCSLOW2 0x00200000
+#define QE_CR_SUBBLOCK_UCCSLOW3 0x00400000
+#define QE_CR_SUBBLOCK_UCCSLOW4 0x00600000
+#define QE_CR_SUBBLOCK_UCCSLOW5 0x00800000
+#define QE_CR_SUBBLOCK_UCCSLOW6 0x00a00000
+#define QE_CR_SUBBLOCK_UCCSLOW7 0x00c00000
+#define QE_CR_SUBBLOCK_UCCSLOW8 0x00e00000
+#define QE_CR_SUBBLOCK_MCC1 0x03800000
+#define QE_CR_SUBBLOCK_MCC2 0x03a00000
+#define QE_CR_SUBBLOCK_MCC3 0x03000000
+#define QE_CR_SUBBLOCK_IDMA1 0x02800000
+#define QE_CR_SUBBLOCK_IDMA2 0x02a00000
+#define QE_CR_SUBBLOCK_IDMA3 0x02c00000
+#define QE_CR_SUBBLOCK_IDMA4 0x02e00000
+#define QE_CR_SUBBLOCK_HPAC 0x01e00000
+#define QE_CR_SUBBLOCK_SPI1 0x01400000
+#define QE_CR_SUBBLOCK_SPI2 0x01600000
+#define QE_CR_SUBBLOCK_RAND 0x01c00000
+#define QE_CR_SUBBLOCK_TIMER 0x01e00000
+#define QE_CR_SUBBLOCK_GENERAL 0x03c00000
+
+/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command.
+*/
+#define QE_CR_PROTOCOL_UNSPECIFIED 0x00 /* For all other protocols */
+#define QE_CR_PROTOCOL_HDLC_TRANSPARENT 0x00
+#define QE_CR_PROTOCOL_ATM_POS 0x0A
+#define QE_CR_PROTOCOL_ETHERNET 0x0C
+#define QE_CR_PROTOCOL_L2_SWITCH 0x0D
+#define QE_CR_PROTOCOL_SHIFT 6
+
+/* QE ASSIGN PAGE command
+*/
+#define QE_CR_ASSIGN_PAGE_SNUM_SHIFT 17
+
+/* Communication Direction.
+*/
+typedef enum comm_dir {
+ COMM_DIR_NONE = 0,
+ COMM_DIR_RX = 1,
+ COMM_DIR_TX = 2,
+ COMM_DIR_RX_AND_TX = 3
+} comm_dir_e;
+
+/* Clocks and BRG's
+*/
+typedef enum qe_clock {
+ QE_CLK_NONE = 0,
+ QE_BRG1, /* Baud Rate Generator 1 */
+ QE_BRG2, /* Baud Rate Generator 2 */
+ QE_BRG3, /* Baud Rate Generator 3 */
+ QE_BRG4, /* Baud Rate Generator 4 */
+ QE_BRG5, /* Baud Rate Generator 5 */
+ QE_BRG6, /* Baud Rate Generator 6 */
+ QE_BRG7, /* Baud Rate Generator 7 */
+ QE_BRG8, /* Baud Rate Generator 8 */
+ QE_BRG9, /* Baud Rate Generator 9 */
+ QE_BRG10, /* Baud Rate Generator 10 */
+ QE_BRG11, /* Baud Rate Generator 11 */
+ QE_BRG12, /* Baud Rate Generator 12 */
+ QE_BRG13, /* Baud Rate Generator 13 */
+ QE_BRG14, /* Baud Rate Generator 14 */
+ QE_BRG15, /* Baud Rate Generator 15 */
+ QE_BRG16, /* Baud Rate Generator 16 */
+ QE_CLK1, /* Clock 1 */
+ QE_CLK2, /* Clock 2 */
+ QE_CLK3, /* Clock 3 */
+ QE_CLK4, /* Clock 4 */
+ QE_CLK5, /* Clock 5 */
+ QE_CLK6, /* Clock 6 */
+ QE_CLK7, /* Clock 7 */
+ QE_CLK8, /* Clock 8 */
+ QE_CLK9, /* Clock 9 */
+ QE_CLK10, /* Clock 10 */
+ QE_CLK11, /* Clock 11 */
+ QE_CLK12, /* Clock 12 */
+ QE_CLK13, /* Clock 13 */
+ QE_CLK14, /* Clock 14 */
+ QE_CLK15, /* Clock 15 */
+ QE_CLK16, /* Clock 16 */
+ QE_CLK17, /* Clock 17 */
+ QE_CLK18, /* Clock 18 */
+ QE_CLK19, /* Clock 19 */
+ QE_CLK20, /* Clock 20 */
+ QE_CLK21, /* Clock 21 */
+ QE_CLK22, /* Clock 22 */
+ QE_CLK23, /* Clock 23 */
+ QE_CLK24, /* Clock 24 */
+ QE_CLK_DUMMY
+} qe_clock_e;
+
+/* QE CMXGCR register
+*/
+#define QE_CMXGCR_MII_ENET_MNG_MASK 0x00007000
+#define QE_CMXGCR_MII_ENET_MNG_SHIFT 12
+
+/* QE CMXUCR registers
+ */
+#define QE_CMXUCR_TX_CLK_SRC_MASK 0x0000000F
+
+/* QE BRG configuration register
+*/
+#define QE_BRGC_ENABLE 0x00010000
+#define QE_BRGC_DIVISOR_SHIFT 1
+#define QE_BRGC_DIVISOR_MAX 0xFFF
+#define QE_BRGC_DIV16 1
+
+/* QE SDMA registers
+*/
+#define QE_SDSR_BER1 0x02000000
+#define QE_SDSR_BER2 0x01000000
+
+#define QE_SDMR_GLB_1_MSK 0x80000000
+#define QE_SDMR_ADR_SEL 0x20000000
+#define QE_SDMR_BER1_MSK 0x02000000
+#define QE_SDMR_BER2_MSK 0x01000000
+#define QE_SDMR_EB1_MSK 0x00800000
+#define QE_SDMR_ER1_MSK 0x00080000
+#define QE_SDMR_ER2_MSK 0x00040000
+#define QE_SDMR_CEN_MASK 0x0000E000
+#define QE_SDMR_SBER_1 0x00000200
+#define QE_SDMR_SBER_2 0x00000200
+#define QE_SDMR_EB1_PR_MASK 0x000000C0
+#define QE_SDMR_ER1_PR 0x00000008
+
+#define QE_SDMR_CEN_SHIFT 13
+#define QE_SDMR_EB1_PR_SHIFT 6
+
+#define QE_SDTM_MSNUM_SHIFT 24
+
+#define QE_SDEBCR_BA_MASK 0x01FFFFFF
+
+/* Communication Processor */
+#define QE_CP_CERCR_MEE 0x8000 /* Multi-user RAM ECC enable */
+#define QE_CP_CERCR_IEE 0x4000 /* Instruction RAM ECC enable */
+#define QE_CP_CERCR_CIR 0x0800 /* Common instruction RAM */
+
+/* I-RAM */
+#define QE_IRAM_IADD_AIE 0x80000000 /* Auto Increment Enable */
+#define QE_IRAM_IADD_BADDR 0x00080000 /* Base Address */
+#define QE_IRAM_READY 0x80000000
+
+/* Structure that defines QE firmware binary files.
+ *
+ * See doc/README.qe_firmware for a description of these fields.
+ */
+struct qe_firmware {
+ struct qe_header {
+ u32 length; /* Length of the entire structure, in bytes */
+ u8 magic[3]; /* Set to { 'Q', 'E', 'F' } */
+ u8 version; /* Version of this layout. First ver is '1' */
+ } header;
+ u8 id[62]; /* Null-terminated identifier string */
+ u8 split; /* 0 = shared I-RAM, 1 = split I-RAM */
+ u8 count; /* Number of microcode[] structures */
+ struct {
+ u16 model; /* The SOC model */
+ u8 major; /* The SOC revision major */
+ u8 minor; /* The SOC revision minor */
+ } __attribute__ ((packed)) soc;
+ u8 padding[4]; /* Reserved, for alignment */
+ u64 extended_modes; /* Extended modes */
+ u32 vtraps[8]; /* Virtual trap addresses */
+ u8 reserved[4]; /* Reserved, for future expansion */
+ struct qe_microcode {
+ u8 id[32]; /* Null-terminated identifier */
+ u32 traps[16]; /* Trap addresses, 0 == ignore */
+ u32 eccr; /* The value for the ECCR register */
+ u32 iram_offset;/* Offset into I-RAM for the code */
+ u32 count; /* Number of 32-bit words of the code */
+ u32 code_offset;/* Offset of the actual microcode */
+ u8 major; /* The microcode version major */
+ u8 minor; /* The microcode version minor */
+ u8 revision; /* The microcode version revision */
+ u8 padding; /* Reserved, for alignment */
+ u8 reserved[4]; /* Reserved, for future expansion */
+ } __attribute__ ((packed)) microcode[1];
+ /* All microcode binaries should be located here */
+ /* CRC32 should be located here, after the microcode binaries */
+} __attribute__ ((packed));
+
+int qe_validate_firmware(const struct qe_firmware *firmware, int size);
+
+#endif /* __QE_H__ */
diff --git a/lib/Kconfig b/lib/Kconfig
index b0839e6c6e..150d63b1ba 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -93,6 +93,10 @@ config IMAGE_SPARSE
config STMP_DEVICE
bool
+config FSL_QE_FIRMWARE
+ select CRC32
+ bool
+
config RATP
select CRC_ITU_T
bool "RATP protocol support"
diff --git a/lib/Makefile b/lib/Makefile
index 763516d41a..8dabf4ae77 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -68,6 +68,7 @@ obj-y += clz_ctz.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC8) += crc8.o
obj-$(CONFIG_NLS) += nls_base.o
+obj-$(CONFIG_FSL_QE_FIRMWARE) += fsl-qe-firmware.o
# GCC library routines
obj-$(CONFIG_GENERIC_LIB_ASHLDI3) += ashldi3.o
diff --git a/lib/fsl-qe-firmware.c b/lib/fsl-qe-firmware.c
new file mode 100644
index 0000000000..960703f561
--- /dev/null
+++ b/lib/fsl-qe-firmware.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <crc.h>
+#include <soc/fsl/qe.h>
+
+/*
+ * qe_validate_firmware - Validate a QE firmware
+ *
+ * This function checks a QE firmware image for validity. It checks the CRC
+ * and the consistency of various fields. Returns 0 for success or a negative
+ * error code otherwise.
+ */
+int qe_validate_firmware(const struct qe_firmware *firmware, int size)
+{
+ u32 crc;
+ size_t calc_size;
+ int i;
+ const struct qe_header *hdr = &firmware->header;
+
+ if (size < sizeof(*firmware)) {
+ pr_err("firmware is too small\n");
+ return -EINVAL;
+ }
+
+ if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
+ (hdr->magic[2] != 'F')) {
+ pr_err("Data at %p is not a QE firmware\n", firmware);
+ return -EPERM;
+ }
+
+ if (size != be32_to_cpu(hdr->length)) {
+ pr_err("Firmware size doesn't match size in header\n");
+ return -EINVAL;
+ }
+
+ if (hdr->version != 1) {
+ pr_err("Unsupported firmware version %u\n", hdr->version);
+ return -EPERM;
+ }
+
+ calc_size = sizeof(*firmware) +
+ (firmware->count - 1) * sizeof(struct qe_microcode);
+
+ if (size < calc_size)
+ return -EINVAL;
+
+ for (i = 0; i < firmware->count; i++)
+ calc_size += sizeof(u32) *
+ be32_to_cpu(firmware->microcode[i].count);
+
+ if (size != calc_size + sizeof(u32)) {
+ pr_err("Invalid length in firmware header\n");
+ return -EPERM;
+ }
+
+ crc = be32_to_cpu(*(u32 *)((void *)firmware + calc_size));
+ if (crc != (crc32_no_comp(0, (const void *)firmware, calc_size))) {
+ pr_err("Firmware CRC is invalid\n");
+ return -EIO;
+ }
+
+ return 0;
+}
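+
+/*
+ * Minimal usage sketch (illustrative only, not part of the code added by
+ * this patch; "blob" and "blob_size" are placeholder names for a firmware
+ * image already loaded into memory):
+ *
+ *	const struct qe_firmware *fw = (const struct qe_firmware *)blob;
+ *	int ret;
+ *
+ *	ret = qe_validate_firmware(fw, blob_size);
+ *	if (ret)
+ *		return ret;
+ *	// fw->count microcode[] entries can now be uploaded to the QE I-RAM
+ */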
diff --git a/scripts/Makefile b/scripts/Makefile
index ff0a3c2a07..712cf99217 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -20,6 +20,7 @@ hostprogs-$(CONFIG_ARCH_DAVINCI) += mkublheader
hostprogs-$(CONFIG_ARCH_ZYNQ) += zynq_mkimage
hostprogs-$(CONFIG_ARCH_SOCFPGA) += socfpga_mkimage
hostprogs-$(CONFIG_MXS_HOSTTOOLS)+= mxsimage mxsboot
+hostprogs-$(CONFIG_ARCH_LAYERSCAPE) += pblimage
HOSTCFLAGS += -I$(srctree)/scripts/include/
HOSTLDLIBS_mxsimage = `pkg-config --libs openssl`
HOSTCFLAGS_omap3-usb-loader.o = `pkg-config --cflags libusb-1.0`
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index def9248841..8c07a54d05 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -433,14 +433,24 @@ cmd_disasm = $(OBJDUMP) -d $< > $@
quiet_cmd_ln = LN $@
cmd_ln = ln -sf $< $@
+quiet_cmd_check_size = CHKSIZE $2
+ cmd_check_size = set -e; \
+ size=`printf "%d" $2`; \
+ max_size=`printf "%d" $3`; \
+ if [ $$size -gt $$max_size ] ; \
+ then \
+ echo "$@ size $$size > maximum size $$max_size" >&2; \
+ exit 1 ; \
+ fi;
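+# Typically invoked as $(call cmd,check_size,<size>,<maximum size>), mirroring
+# how cmd_check_file_size below is used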
+
# Check size of a file
-quiet_cmd_check_file_size = CHKSIZE $2
+quiet_cmd_check_file_size = CHKFILESIZE $2
cmd_check_file_size = set -e; \
size=`stat -c%s $2`; \
max_size=`printf "%d" $3`; \
if [ $$size -gt $$max_size ] ; \
then \
- echo "$@ size $$size > of the maximum size $$max_size" >&2; \
+ echo "$@ size $$size > maximum size $$max_size" >&2; \
exit 1 ; \
fi;
diff --git a/scripts/check_size b/scripts/check_size
new file mode 100755
index 0000000000..54f02a63c5
--- /dev/null
+++ b/scripts/check_size
@@ -0,0 +1,20 @@
+#!/bin/bash
+
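+# Usage: check_size <symbol> <file> <maximum offset>
+# Fails when the decimal offset of <symbol> in <file>, as reported by nm,
+# exceeds <maximum offset>. Hypothetical example:
+#   scripts/check_size __image_end build/pbl.elf 65536
+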
+symbol="$1"
+file="$2"
+max="$3"
+
+# extract symbol offset from file, remove leading zeros
+ofs=$(nm -t d $file | grep "$symbol" | cut -d ' ' -f1 | sed "s/^[0]*//")
+
+if [ -z "${ofs}" ]; then
+ echo "symbol $symbol not found in $file"
+ exit 1
+fi
+
+if [[ $ofs -gt $max ]]; then
+ echo "symbol $symbol (offset $ofs) in $file exceeds maximum offset $max"
+ exit 1
+fi
+
+exit 0
diff --git a/scripts/extract_symbol_offset b/scripts/extract_symbol_offset
new file mode 100755
index 0000000000..1a1260f526
--- /dev/null
+++ b/scripts/extract_symbol_offset
@@ -0,0 +1,14 @@
+#!/bin/bash
+
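+# Usage: extract_symbol_offset <symbol> <file>
+# Prints the decimal offset of <symbol> in <file>, as reported by nm, or
+# fails when the symbol is not found.
+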
+symbol="$1"
+file="$2"
+
+# extract symbol offset from file, remove leading zeros
+ofs=$(nm -t d $file | grep "$symbol" | cut -d ' ' -f1 | sed "s/^[0]*//")
+
+if [ -z "${ofs}" ]; then
+ echo "symbol $symbol not found in $file"
+ exit 1
+fi
+
+echo $ofs
diff --git a/scripts/pblimage.c b/scripts/pblimage.c
new file mode 100644
index 0000000000..6e83c523e5
--- /dev/null
+++ b/scripts/pblimage.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2012-2014 Freescale Semiconductor, Inc.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdint.h>
+#include <getopt.h>
+#include <endian.h>
+
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define PBL_ACS_CONT_CMD 0x81000000
+#define PBL_ADDR_24BIT_MASK 0x00ffffff
+
+#define RCW_BYTES 64
+#define RCW_PREAMBLE 0xaa55aa55
+#define RCW_HEADER 0x010e0100
+
+/*
+ * The maximum PBL size we support. The PBL portion in the input image must
+ * not grow beyond this.
+ */
+#define MAX_PBL_SIZE (64 * 1024)
+
+/*
+ * The offset of the 2nd stage image in the output file. This must match the
+ * offset at which barebox expects the 2nd stage image.
+ */
+#define BAREBOX_START (128 * 1024)
+
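+/*
+ * Resulting output file layout (see pbl_load_image() and main() below):
+ *
+ *   0x00000         RCW words, optional PBI commands, the PBL load commands
+ *                   with their 64-byte payload chunks, then the end and CRC
+ *                   commands
+ *   BAREBOX_START   a full copy of the input image (the 2nd stage)
+ */
+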
+/*
+ * Initialize to an invalid value.
+ */
+static uint32_t next_pbl_cmd = 0x82000000;
+/*
+ * We need to store all bytes in memory for calculating the CRC32, then write
+ * the bytes to the image file for PBL boot.
+ */
+static unsigned char mem_buf[1000000];
+static unsigned char *pmem_buf = mem_buf;
+static int pbl_size;
+static int pbl_end;
+static int image_size;
+static int out_fd;
+static int in_fd;
+
+static uint32_t pbl_cmd_initaddr;
+static uint32_t pbi_crc_cmd1;
+static uint32_t pbi_crc_cmd2;
+static uint32_t pbl_end_cmd[4];
+
+enum arch {
+ ARCH_ARM,
+ ARCH_POWERPC,
+};
+
+enum arch architecture;
+static char *rcwfile;
+static char *pbifile;
+static char *outfile;
+static unsigned long loadaddr = 0x10000000;
+static char *infile;
+
+static uint32_t crc_table[256];
+static int crc_table_valid;
+
+static void make_crc_table(void)
+{
+ uint32_t mask;
+ int i, j;
+ uint32_t poly; /* polynomial exclusive-or pattern */
+
+ if (crc_table_valid)
+ return;
+
+ /*
+ * the polynomial used by PBL is 1 + x^1 + x^2 + x^4 + x^5 + x^7 + x^8 +
+ * x^10 + x^11 + x^12 + x^16 + x^22 + x^23 + x^26 + x^32.
+ */
+ poly = 0x04c11db7;
+
+ for (i = 0; i < 256; i++) {
+ mask = i << 24;
+ for (j = 0; j < 8; j++) {
+ if (mask & 0x80000000)
+ mask = (mask << 1) ^ poly;
+ else
+ mask <<= 1;
+ }
+ crc_table[i] = mask;
+ }
+
+ crc_table_valid = 1;
+}
+
+uint32_t pbl_crc32(uint32_t in_crc, const char *buf, uint32_t len)
+{
+ uint32_t crc32_val;
+ int i;
+
+ make_crc_table();
+
+ crc32_val = ~in_crc;
+
+ for (i = 0; i < len; i++)
+ crc32_val = (crc32_val << 8) ^
+ crc_table[(crc32_val >> 24) ^ (*buf++ & 0xff)];
+
+ return crc32_val;
+}
+
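+/*
+ * Note: pbl_crc32() above keeps the 0xffffffff initial value and applies no
+ * final inversion (the CRC-32/MPEG-2 parameter set); add_end_cmd() below
+ * appends its result right after the PBI CRC command.
+ */
+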
+/*
+ * The PBL can load up to 64 bytes at a time, so we split the image into 64 byte
+ * chunks. PBL needs a command for each piece, of the form "81xxxxxx", where
+ * "xxxxxx" is the offset. Calculate the start offset by subtracting the size of
+ * the image from the top of the allowable 24-bit range.
+ */
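+/*
+ * Worked example for the ARM case (hypothetical numbers): with the default
+ * loadaddr of 0x10000000 and image_size = 0x10000, pblimage_check_params()
+ * yields
+ *   pbl_cmd_initaddr = ((0x10000000 & 0x00ffffff) | 0x81000000) + 0x10000
+ *                    = 0x81010000,
+ * so the commands generated here run from 0x81000000 up to 0x8100ffc0 in
+ * steps of 0x40, one per 64-byte chunk.
+ */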
+static void generate_pbl_cmd(void)
+{
+ uint32_t val = next_pbl_cmd;
+ next_pbl_cmd += 0x40;
+ int i;
+
+ for (i = 3; i >= 0; i--) {
+ *pmem_buf++ = (val >> (i * 8)) & 0xff;
+ pbl_size++;
+ }
+}
+
+static void pbl_fget(size_t size, int fd)
+{
+ int r;
+
+ r = read(fd, pmem_buf, size);
+ if (r < 0) {
+ perror("read");
+ exit(EXIT_FAILURE);
+ }
+
+ pmem_buf += r;
+
+ if (r < size) {
+ memset(pmem_buf, 0xff, size - r);
+ pmem_buf += size - r;
+ }
+
+ pbl_size += size;
+}
+
+static void check_get_hexval(const char *filename, int lineno, char *token)
+{
+ uint32_t hexval;
+ int i;
+
+ if (!sscanf(token, "%x", &hexval)) {
+ fprintf(stderr, "Error:%s[%d] - Invalid hex data(%s)\n",
+ filename, lineno, token);
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 3; i >= 0; i--) {
+ *pmem_buf++ = (hexval >> (i * 8)) & 0xff;
+ pbl_size++;
+ }
+}
+
+static void pbl_parser(char *name)
+{
+ FILE *f = NULL;
+ char *line = NULL;
+ char *token, *saveptr1, *saveptr2;
+ size_t len = 0;
+ int lineno = 0;
+
+ f = fopen(name, "r");
+ if (!f) {
+ fprintf(stderr, "Error: Cannot open %s: %s\n", name,
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ while ((getline(&line, &len, f)) > 0) {
+ lineno++;
+ token = strtok_r(line, "\r\n", &saveptr1);
+ /* drop all lines with zero tokens (= empty lines) */
+ if (!token)
+ continue;
+ for (line = token;; line = NULL) {
+ token = strtok_r(line, " \t", &saveptr2);
+ if (!token)
+ break;
+ /* Drop all text starting with '#' as comments */
+ if (token[0] == '#')
+ break;
+ check_get_hexval(name, lineno, token);
+ }
+ }
+ if (line)
+ free(line);
+ fclose(f);
+}
+
+/* Write the end command and the CRC command to memory. */
+static void add_end_cmd(void)
+{
+ uint32_t crc32_pbl;
+ int i;
+ unsigned char *p = (unsigned char *)&pbl_end_cmd;
+
+ for (i = 0; i < 4; i++)
+ pbl_end_cmd[i] = htobe32(pbl_end_cmd[i]);
+
+ for (i = 0; i < 16; i++) {
+ *pmem_buf++ = *p++;
+ pbl_size++;
+ }
+
+ /* Add PBI CRC command. */
+ *pmem_buf++ = 0x08;
+ *pmem_buf++ = pbi_crc_cmd1;
+ *pmem_buf++ = pbi_crc_cmd2;
+ *pmem_buf++ = 0x40;
+ pbl_size += 4;
+
+ /* Calculate the CRC32 and write it to memory. */
+ crc32_pbl = pbl_crc32(0, (const char *)mem_buf, pbl_size);
+ *pmem_buf++ = (crc32_pbl >> 24) & 0xff;
+ *pmem_buf++ = (crc32_pbl >> 16) & 0xff;
+ *pmem_buf++ = (crc32_pbl >> 8) & 0xff;
+ *pmem_buf++ = (crc32_pbl) & 0xff;
+ pbl_size += 4;
+}
+
+static void pbl_load_image(void)
+{
+ int size;
+
+ /* parse the rcw.cfg file. */
+ pbl_parser(rcwfile);
+
+ /* parse the pbi.cfg file. */
+ if (pbifile)
+ pbl_parser(pbifile);
+
+ next_pbl_cmd = pbl_cmd_initaddr - image_size;
+ while (next_pbl_cmd < pbl_cmd_initaddr) {
+ generate_pbl_cmd();
+ pbl_fget(64, in_fd);
+ }
+
+ add_end_cmd();
+
+ size = pbl_size;
+
+ if (write(out_fd, (const void *)&mem_buf, size) != size) {
+ fprintf(stderr, "Write error on %s: %s\n",
+ outfile, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+}
+
+static int pblimage_check_params(void)
+{
+ struct stat st;
+
+ in_fd = open(infile, O_RDONLY);
+ if (in_fd < 0) {
+ fprintf(stderr, "Error: Cannot open %s: %s\n", infile,
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if (fstat(in_fd, &st) == -1) {
+ fprintf(stderr, "Error: Could not determine u-boot image size. %s\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ /* The PBL portion is variable in size, pad it to a 64 byte boundary */
+ image_size = roundup(pbl_end, 64);
+
+ if (image_size > MAX_PBL_SIZE) {
+ fprintf(stderr, "Error: pbl size %d in %s exceeds maximum size %d\n",
+ pbl_end, infile, MAX_PBL_SIZE);
+ exit(EXIT_FAILURE);
+ }
+
+ if (architecture == ARCH_ARM) {
+ pbi_crc_cmd1 = 0x61;
+ pbi_crc_cmd2 = 0;
+ pbl_cmd_initaddr = loadaddr & PBL_ADDR_24BIT_MASK;
+ pbl_cmd_initaddr |= PBL_ACS_CONT_CMD;
+ pbl_cmd_initaddr += image_size;
+ pbl_end_cmd[0] = 0x09610000;
+ pbl_end_cmd[1] = 0x00000000;
+ pbl_end_cmd[2] = 0x096100c0;
+ pbl_end_cmd[3] = 0x00000000;
+ } else {
+ pbi_crc_cmd1 = 0x13;
+ pbi_crc_cmd2 = 0x80;
+ pbl_cmd_initaddr = 0x82000000;
+ pbl_end_cmd[0] = 0x091380c0;
+ pbl_end_cmd[1] = 0x00000000;
+ pbl_end_cmd[2] = 0x091380c0;
+ pbl_end_cmd[3] = 0x00000000;
+ }
+
+ next_pbl_cmd = pbl_cmd_initaddr;
+ return 0;
+}
+
+static int copy_fd(int in, int out)
+{
+ int bs = 4096;
+ void *buf = malloc(bs);
+
+ if (!buf)
+ exit(EXIT_FAILURE);
+
+ while (1) {
+ int now, wr;
+
+ now = read(in, buf, bs);
+ if (now < 0) {
+ fprintf(stderr, "read failed with %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if (!now)
+ break;
+
+ wr = write(out, buf, now);
+ if (wr < 0) {
+ fprintf(stderr, "write failed with %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if (wr != now) {
+ fprintf(stderr, "short write\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ free(buf);
+
+ return 0;
+}
+
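+/*
+ * Command line options (as parsed below; -m is presumably fed with the
+ * output of scripts/extract_symbol_offset):
+ *   -i <infile>   input image whose first <-m> bytes form the PBL portion
+ *   -r <rcwfile>  RCW configuration file, parsed by pbl_parser()
+ *   -p <pbifile>  optional PBI configuration file
+ *   -o <outfile>  output PBL image
+ *   -m <offset>   size of the PBL portion of the input image (pbl_end)
+ */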
+int main(int argc, char *argv[])
+{
+ int opt, ret;
+ off_t pos;
+
+ while ((opt = getopt(argc, argv, "i:r:p:o:m:")) != -1) {
+ switch (opt) {
+ case 'i':
+ infile = optarg;
+ break;
+ case 'r':
+ rcwfile = optarg;
+ break;
+ case 'p':
+ pbifile = optarg;
+ break;
+ case 'o':
+ outfile = optarg;
+ break;
+ case 'm':
+ pbl_end = atoi(optarg);
+ break;
+ default:
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (!infile) {
+ fprintf(stderr, "No infile given\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (!outfile) {
+ fprintf(stderr, "No outfile given\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (!rcwfile) {
+ fprintf(stderr, "No rcwfile given\n");
+ exit(EXIT_FAILURE);
+ }
+
+ pblimage_check_params();
+
+ out_fd = open(outfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (out_fd < 0) {
+ fprintf(stderr, "Cannot open %s for writing: %s\n",
+ outfile, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ pbl_load_image();
+
+ ret = ftruncate(out_fd, BAREBOX_START);
+ if (ret) {
+ fprintf(stderr, "Cannot truncate\n");
+ exit(EXIT_FAILURE);
+ }
+
+ pos = lseek(out_fd, BAREBOX_START, SEEK_SET);
+ if (pos == (off_t)-1) {
+ fprintf(stderr, "Cannot lseek 1\n");
+ exit(EXIT_FAILURE);
+ }
+
+ pos = lseek(in_fd, 0, SEEK_SET);
+ if (pos == (off_t)-1) {
+ fprintf(stderr, "Cannot lseek 2\n");
+ exit(EXIT_FAILURE);
+ }
+
+ copy_fd(in_fd, out_fd);
+
+ close(in_fd);
+ close(out_fd);
+
+ exit(EXIT_SUCCESS);
+}