From 70a86eb2fd8247904818e57776b4570b470eeabe Mon Sep 17 00:00:00 2001
From: Steffen Trumtrar
Date: Mon, 3 Apr 2017 12:55:17 +0200
Subject: ARM: socfpga: rename socfpga->cyclone5

Prepare the SoCFPGA code base for different system types
(Arria10, Stratix10,...).

Signed-off-by: Steffen Trumtrar
Signed-off-by: Sascha Hauer
---
 arch/arm/boards/altera-socdk/board.c | 2 +-
 .../boards/altera-socdk/iocsr_config_cyclone5.c | 2 +-
 arch/arm/boards/altera-socdk/lowlevel.c | 4 +-
 arch/arm/boards/ebv-socrates/board.c | 2 +-
 .../boards/ebv-socrates/iocsr_config_cyclone5.c | 2 +-
 arch/arm/boards/ebv-socrates/lowlevel.c | 4 +-
 arch/arm/boards/terasic-de0-nano-soc/board.c | 2 +-
 .../terasic-de0-nano-soc/iocsr_config_cyclone5.c | 2 +-
 arch/arm/boards/terasic-de0-nano-soc/lowlevel.c | 4 +-
 arch/arm/boards/terasic-sockit/board.c | 1 -
 .../boards/terasic-sockit/iocsr_config_cyclone5.c | 2 +-
 arch/arm/boards/terasic-sockit/lowlevel.c | 4 +-
 arch/arm/mach-socfpga/Makefile | 6 +-
 arch/arm/mach-socfpga/bootsource.c | 57 -
 arch/arm/mach-socfpga/clock-manager.c | 298 --
 arch/arm/mach-socfpga/cyclone5-bootsource.c | 57 +
 arch/arm/mach-socfpga/cyclone5-clock-manager.c | 298 ++
 arch/arm/mach-socfpga/cyclone5-freeze-controller.c | 218 +
 arch/arm/mach-socfpga/cyclone5-generic.c | 210 +
 arch/arm/mach-socfpga/cyclone5-init.c | 58 +
 arch/arm/mach-socfpga/cyclone5-reset-manager.c | 61 +
 arch/arm/mach-socfpga/cyclone5-scan-manager.c | 220 +
 arch/arm/mach-socfpga/cyclone5-system-manager.c | 33 +
 arch/arm/mach-socfpga/freeze-controller.c | 218 -
 arch/arm/mach-socfpga/include/mach/clock-manager.h | 200 -
 .../include/mach/cyclone5-clock-manager.h | 200 +
 .../include/mach/cyclone5-freeze-controller.h | 85 +
 arch/arm/mach-socfpga/include/mach/cyclone5-regs.h | 22 +
 .../include/mach/cyclone5-reset-manager.h | 93 +
 .../include/mach/cyclone5-scan-manager.h | 131 +
 .../include/mach/cyclone5-sdram-config.h | 161 +
 .../arm/mach-socfpga/include/mach/cyclone5-sdram.h | 399 ++
 .../mach-socfpga/include/mach/cyclone5-sequencer.c | 5241 +++++++++++++++++++
 .../mach-socfpga/include/mach/cyclone5-sequencer.h | 447 ++
 .../include/mach/cyclone5-system-manager.h | 68 +
 .../mach-socfpga/include/mach/freeze-controller.h | 85 -
 arch/arm/mach-socfpga/include/mach/pll_config.h | 2 +-
 arch/arm/mach-socfpga/include/mach/reset-manager.h | 93 -
 arch/arm/mach-socfpga/include/mach/scan-manager.h | 131 -
 arch/arm/mach-socfpga/include/mach/sdram.h | 399 --
 arch/arm/mach-socfpga/include/mach/sdram_config.h | 161 -
 arch/arm/mach-socfpga/include/mach/sdram_io.h | 2 +-
 arch/arm/mach-socfpga/include/mach/sequencer.c | 5243 --------------------
 arch/arm/mach-socfpga/include/mach/sequencer.h | 447 --
 .../mach-socfpga/include/mach/sequencer_defines.h | 6 -
 arch/arm/mach-socfpga/include/mach/socfpga-regs.h | 22 -
 .../arm/mach-socfpga/include/mach/system-manager.h | 68 -
 arch/arm/mach-socfpga/include/mach/system.h | 0
 arch/arm/mach-socfpga/include/mach/tclrpt.h | 2 +-
 arch/arm/mach-socfpga/init.c | 58 -
 arch/arm/mach-socfpga/nic301.c | 2 +-
 arch/arm/mach-socfpga/reset-manager.c | 61 -
 arch/arm/mach-socfpga/scan-manager.c | 220 -
 arch/arm/mach-socfpga/system-manager.c | 33 -
 arch/arm/mach-socfpga/xload.c | 4 +-
 drivers/firmware/socfpga.c | 8 +-
 scripts/socfpga_import_preloader | 2 +-
 57 files changed, 8031 insertions(+), 7830 deletions(-)
 delete mode 100644 arch/arm/mach-socfpga/bootsource.c
 delete mode 100644 arch/arm/mach-socfpga/clock-manager.c
 create mode 100644 arch/arm/mach-socfpga/cyclone5-bootsource.c
 create mode 100644 arch/arm/mach-socfpga/cyclone5-clock-manager.c
 create mode 100644 arch/arm/mach-socfpga/cyclone5-freeze-controller.c
 create mode 100644 arch/arm/mach-socfpga/cyclone5-generic.c
 create mode 100644 arch/arm/mach-socfpga/cyclone5-init.c
 create mode 100644 arch/arm/mach-socfpga/cyclone5-reset-manager.c
 create mode 100644 arch/arm/mach-socfpga/cyclone5-scan-manager.c
 create mode 100644 arch/arm/mach-socfpga/cyclone5-system-manager.c
 delete mode 100644 arch/arm/mach-socfpga/freeze-controller.c
 delete mode 100644 arch/arm/mach-socfpga/include/mach/clock-manager.h
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-clock-manager.h
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-freeze-controller.h
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-regs.h
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-reset-manager.h
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-scan-manager.h
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-sdram-config.h
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-sdram.h
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-sequencer.c
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-sequencer.h
 create mode 100644 arch/arm/mach-socfpga/include/mach/cyclone5-system-manager.h
 delete mode 100644 arch/arm/mach-socfpga/include/mach/freeze-controller.h
 delete mode 100644 arch/arm/mach-socfpga/include/mach/reset-manager.h
 delete mode 100644 arch/arm/mach-socfpga/include/mach/scan-manager.h
 delete mode 100644 arch/arm/mach-socfpga/include/mach/sdram.h
 delete mode 100644 arch/arm/mach-socfpga/include/mach/sdram_config.h
 mode change 100755 => 100644 arch/arm/mach-socfpga/include/mach/sdram_io.h
 delete mode 100644 arch/arm/mach-socfpga/include/mach/sequencer.c
 delete mode 100644 arch/arm/mach-socfpga/include/mach/sequencer.h
 delete mode 100644 arch/arm/mach-socfpga/include/mach/sequencer_defines.h
 delete mode 100644 arch/arm/mach-socfpga/include/mach/socfpga-regs.h
 delete mode 100644 arch/arm/mach-socfpga/include/mach/system-manager.h
 mode change 100755 => 100644 arch/arm/mach-socfpga/include/mach/system.h
 mode change 100755 => 100644 arch/arm/mach-socfpga/include/mach/tclrpt.h
 delete mode 100644 arch/arm/mach-socfpga/init.c
 delete mode 100644 arch/arm/mach-socfpga/reset-manager.c
 delete mode 100644 arch/arm/mach-socfpga/scan-manager.c
 delete mode 100644 arch/arm/mach-socfpga/system-manager.c

diff --git a/arch/arm/boards/altera-socdk/board.c b/arch/arm/boards/altera-socdk/board.c
index d7fb923a04..f4b1dcd324 100644
--- a/arch/arm/boards/altera-socdk/board.c
+++ b/arch/arm/boards/altera-socdk/board.c
@@ -8,7 +8,7 @@
 #include
 #include
 #include
-#include
+#include
 
 static int ksz9021rn_phy_fixup(struct phy_device *dev)
 {
diff --git a/arch/arm/boards/altera-socdk/iocsr_config_cyclone5.c b/arch/arm/boards/altera-socdk/iocsr_config_cyclone5.c
index 07a4485f1f..9777d15dfe 100644
--- a/arch/arm/boards/altera-socdk/iocsr_config_cyclone5.c
+++ b/arch/arm/boards/altera-socdk/iocsr_config_cyclone5.c
@@ -27,7 +27,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ -#include +#include static const unsigned long iocsr_scan_chain0_table[((CONFIG_HPS_IOCSR_SCANCHAIN0_LENGTH / 32) + 1)] = { diff --git a/arch/arm/boards/altera-socdk/lowlevel.c b/arch/arm/boards/altera-socdk/lowlevel.c index 02c995fe45..8cfe839159 100644 --- a/arch/arm/boards/altera-socdk/lowlevel.c +++ b/arch/arm/boards/altera-socdk/lowlevel.c @@ -7,13 +7,13 @@ #include #include #include "sdram_config.h" -#include +#include #include "pinmux_config.c" #include "pll_config.h" #include #include "sequencer_defines.h" #include "sequencer_auto.h" -#include +#include #include "sequencer_auto_inst_init.c" #include "sequencer_auto_ac_init.c" #include "iocsr_config_cyclone5.c" diff --git a/arch/arm/boards/ebv-socrates/board.c b/arch/arm/boards/ebv-socrates/board.c index f3207b88ef..965150f9a3 100644 --- a/arch/arm/boards/ebv-socrates/board.c +++ b/arch/arm/boards/ebv-socrates/board.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include static int phy_fixup(struct phy_device *dev) { diff --git a/arch/arm/boards/ebv-socrates/iocsr_config_cyclone5.c b/arch/arm/boards/ebv-socrates/iocsr_config_cyclone5.c index ab6733f92b..9a814cba79 100644 --- a/arch/arm/boards/ebv-socrates/iocsr_config_cyclone5.c +++ b/arch/arm/boards/ebv-socrates/iocsr_config_cyclone5.c @@ -27,7 +27,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include +#include static const unsigned long iocsr_scan_chain0_table[((CONFIG_HPS_IOCSR_SCANCHAIN0_LENGTH / 32) + 1)] = { 0x00000000, diff --git a/arch/arm/boards/ebv-socrates/lowlevel.c b/arch/arm/boards/ebv-socrates/lowlevel.c index ea4e1d746a..9643269f8e 100644 --- a/arch/arm/boards/ebv-socrates/lowlevel.c +++ b/arch/arm/boards/ebv-socrates/lowlevel.c @@ -7,13 +7,13 @@ #include #include #include "sdram_config.h" -#include +#include #include "pinmux_config.c" #include "pll_config.h" #include #include "sequencer_defines.h" #include "sequencer_auto.h" -#include +#include #include "sequencer_auto_inst_init.c" #include "sequencer_auto_ac_init.c" #include "iocsr_config_cyclone5.c" diff --git a/arch/arm/boards/terasic-de0-nano-soc/board.c b/arch/arm/boards/terasic-de0-nano-soc/board.c index 919bfc8c54..8e69319d17 100644 --- a/arch/arm/boards/terasic-de0-nano-soc/board.c +++ b/arch/arm/boards/terasic-de0-nano-soc/board.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include static int phy_fixup(struct phy_device *dev) { diff --git a/arch/arm/boards/terasic-de0-nano-soc/iocsr_config_cyclone5.c b/arch/arm/boards/terasic-de0-nano-soc/iocsr_config_cyclone5.c index 4e9ac7fb77..d5098055ff 100644 --- a/arch/arm/boards/terasic-de0-nano-soc/iocsr_config_cyclone5.c +++ b/arch/arm/boards/terasic-de0-nano-soc/iocsr_config_cyclone5.c @@ -27,7 +27,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#include +#include static const unsigned long iocsr_scan_chain0_table[((CONFIG_HPS_IOCSR_SCANCHAIN0_LENGTH / 32) + 1)] = { 0x00000000, diff --git a/arch/arm/boards/terasic-de0-nano-soc/lowlevel.c b/arch/arm/boards/terasic-de0-nano-soc/lowlevel.c index 6d937abda5..1d5ea6b12a 100644 --- a/arch/arm/boards/terasic-de0-nano-soc/lowlevel.c +++ b/arch/arm/boards/terasic-de0-nano-soc/lowlevel.c @@ -7,13 +7,13 @@ #include #include #include "sdram_config.h" -#include +#include #include "pinmux_config.c" #include "pll_config.h" #include #include "sequencer_defines.h" #include "sequencer_auto.h" -#include +#include #include "sequencer_auto_inst_init.c" #include "sequencer_auto_ac_init.c" #include "iocsr_config_cyclone5.c" diff --git a/arch/arm/boards/terasic-sockit/board.c b/arch/arm/boards/terasic-sockit/board.c index 53cd36834f..ec68315998 100644 --- a/arch/arm/boards/terasic-sockit/board.c +++ b/arch/arm/boards/terasic-sockit/board.c @@ -8,7 +8,6 @@ #include #include #include -#include static int phy_fixup(struct phy_device *dev) { diff --git a/arch/arm/boards/terasic-sockit/iocsr_config_cyclone5.c b/arch/arm/boards/terasic-sockit/iocsr_config_cyclone5.c index 117d7f4ebc..9367b0d110 100644 --- a/arch/arm/boards/terasic-sockit/iocsr_config_cyclone5.c +++ b/arch/arm/boards/terasic-sockit/iocsr_config_cyclone5.c @@ -27,7 +27,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include +#include static const unsigned long iocsr_scan_chain0_table[((CONFIG_HPS_IOCSR_SCANCHAIN0_LENGTH / 32) + 1)] = { 0x00000000, diff --git a/arch/arm/boards/terasic-sockit/lowlevel.c b/arch/arm/boards/terasic-sockit/lowlevel.c index 8012783df3..0a6eb21365 100644 --- a/arch/arm/boards/terasic-sockit/lowlevel.c +++ b/arch/arm/boards/terasic-sockit/lowlevel.c @@ -7,13 +7,13 @@ #include #include #include "sdram_config.h" -#include +#include #include "pinmux_config.c" #include "pll_config.h" #include #include "sequencer_defines.h" #include "sequencer_auto.h" -#include +#include #include "sequencer_auto_inst_init.c" #include "sequencer_auto_ac_init.c" #include "iocsr_config_cyclone5.c" diff --git a/arch/arm/mach-socfpga/Makefile b/arch/arm/mach-socfpga/Makefile index dea0e075d1..30b796dd3b 100644 --- a/arch/arm/mach-socfpga/Makefile +++ b/arch/arm/mach-socfpga/Makefile @@ -1,4 +1,4 @@ -obj-y += generic.o nic301.o bootsource.o reset-manager.o -pbl-y += init.o freeze-controller.o scan-manager.o system-manager.o -pbl-y += clock-manager.o +pbl-y += cyclone5-init.o cyclone5-freeze-controller.o cyclone5-scan-manager.o cyclone5-system-manager.o +pbl-y += cyclone5-clock-manager.o +obj-y += cyclone5-generic.o nic301.o cyclone5-bootsource.o cyclone5-reset-manager.o obj-$(CONFIG_ARCH_SOCFPGA_XLOAD) += xload.o diff --git a/arch/arm/mach-socfpga/bootsource.c b/arch/arm/mach-socfpga/bootsource.c deleted file mode 100644 index 739f0b5c0e..0000000000 --- a/arch/arm/mach-socfpga/bootsource.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include - -#define SYSMGR_BOOTINFO 0x14 - -static int socfpga_boot_save_loc(void) -{ - enum bootsource src = BOOTSOURCE_UNKNOWN; - uint32_t val; - - val = readl(CYCLONE5_SYSMGR_ADDRESS + SYSMGR_BOOTINFO); - - switch (val & 0x7) { - case 0: - /* reserved */ - break; - case 1: - /* FPGA, currently not decoded */ - break; - case 2: - case 3: - src = BOOTSOURCE_NAND; - break; - case 4: - case 5: - src = BOOTSOURCE_MMC; - break; - case 6: - case 7: - src = BOOTSOURCE_SPI; - break; - } - - bootsource_set(src); - bootsource_set_instance(0); - - return 0; -} -core_initcall(socfpga_boot_save_loc); diff --git a/arch/arm/mach-socfpga/clock-manager.c b/arch/arm/mach-socfpga/clock-manager.c deleted file mode 100644 index f17371365f..0000000000 --- a/arch/arm/mach-socfpga/clock-manager.c +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include -#include -#include - -static inline void cm_wait_for_lock(void __iomem *cm, uint32_t mask) -{ - while ((readl(cm + CLKMGR_INTER_ADDRESS) & mask) != mask); -} - -/* function to poll in the fsm busy bit */ -static inline void cm_wait4fsm(void __iomem *cm) -{ - while (readl(cm + CLKMGR_STAT_ADDRESS) & 1); -} - -/* - * function to write the bypass register which requires a poll of the - * busy bit - */ -static inline void cm_write_bypass(void __iomem *cm, uint32_t val) -{ - writel(val, cm + CLKMGR_BYPASS_ADDRESS); - cm_wait4fsm(cm); -} - -/* function to write the ctrl register which requires a poll of the busy bit */ -static inline void cm_write_ctrl(void __iomem *cm, uint32_t val) -{ - writel(val, cm + CLKMGR_CTRL_ADDRESS); - cm_wait4fsm(cm); -} - -/* function to write a clock register that has phase information */ -static inline void cm_write_with_phase(uint32_t value, - void __iomem *reg, uint32_t mask) -{ - /* poll until phase is zero */ - while (readl(reg) & mask); - - writel(value, reg); - - while (readl(reg) & mask); -} - -/* - * Setup clocks while making no assumptions of the - * previous state of the clocks. - * - * - Start by being paranoid and gate all sw managed clocks - * - Put all plls in bypass - * - Put all plls VCO registers back to reset value (bgpwr dwn). - * - Put peripheral and main pll src to reset value to avoid glitch. - * - Delay 5 us. - * - Deassert bg pwr dn and set numerator and denominator - * - Start 7 us timer. - * - set internal dividers - * - Wait for 7 us timer. - * - Enable plls - * - Set external dividers while plls are locking - * - Wait for pll lock - * - Assert/deassert outreset all. 
- * - Take all pll's out of bypass - * - Clear safe mode - * - set source main and peripheral clocks - * - Ungate clocks - */ -void socfpga_cm_basic_init(const struct socfpga_cm_config *cfg) -{ - uint32_t mainvco, periphvco, val; - void *cm = (void *)CYCLONE5_CLKMGR_ADDRESS; - - /* Start by being paranoid and gate all sw managed clocks */ - - /* - * We need to disable nandclk - * and then do another apb access before disabling - * gatting off the rest of the periperal clocks. - */ - val = readl(cm + CLKMGR_PERPLLGRP_EN_ADDRESS); - val &= ~CLKMGR_PERPLLGRP_EN_NANDCLK_MASK; - writel(val, cm + CLKMGR_PERPLLGRP_EN_ADDRESS); - - /* DO NOT GATE OFF DEBUG CLOCKS & BRIDGE CLOCKS */ - writel(CLKMGR_MAINPLLGRP_EN_DBGTIMERCLK_MASK | - CLKMGR_MAINPLLGRP_EN_DBGTRACECLK_MASK | - CLKMGR_MAINPLLGRP_EN_DBGCLK_MASK | - CLKMGR_MAINPLLGRP_EN_DBGATCLK_MASK | - CLKMGR_MAINPLLGRP_EN_S2FUSER0CLK_MASK | - CLKMGR_MAINPLLGRP_EN_L4MPCLK_MASK, - cm + CLKMGR_MAINPLLGRP_EN_ADDRESS); - - writel(0, cm + CLKMGR_SDRPLLGRP_EN_ADDRESS); - - /* now we can gate off the rest of the peripheral clocks */ - writel(0, cm + CLKMGR_PERPLLGRP_EN_ADDRESS); - - /* Put all plls in bypass */ - cm_write_bypass(cm, - CLKMGR_BYPASS_PERPLL_SET(1) | - CLKMGR_BYPASS_SDRPLL_SET(1) | - CLKMGR_BYPASS_MAINPLL_SET(1)); - - /* - * Put all plls VCO registers back to reset value. - * Some code might have messed with them. - */ - writel(CLKMGR_MAINPLLGRP_VCO_RESET_VALUE & - ~CLKMGR_MAINPLLGRP_VCO_REGEXTSEL_MASK, - cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); - writel(CLKMGR_PERPLLGRP_VCO_RESET_VALUE & - ~CLKMGR_PERPLLGRP_VCO_REGEXTSEL_MASK, - cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); - writel(CLKMGR_SDRPLLGRP_VCO_RESET_VALUE & - ~CLKMGR_SDRPLLGRP_VCO_REGEXTSEL_MASK, - cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); - - /* - * The clocks to the flash devices and the L4_MAIN clocks can - * glitch when coming out of safe mode if their source values - * are different from their reset value. So the trick it to - * put them back to their reset state, and change input - * after exiting safe mode but before ungating the clocks. - */ - writel(CLKMGR_PERPLLGRP_SRC_RESET_VALUE, - cm + CLKMGR_PERPLLGRP_SRC_ADDRESS); - writel(CLKMGR_MAINPLLGRP_L4SRC_RESET_VALUE, - cm + CLKMGR_MAINPLLGRP_L4SRC_ADDRESS); - - /* read back for the required 5 us delay. */ - readl(cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); - readl(cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); - readl(cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); - - /* - * We made sure bgpwr down was assert for 5 us. Now deassert BG PWR DN - * with numerator and denominator. 
- */ - writel(cfg->main_vco_base | CLEAR_BGP_EN_PWRDN, - cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); - writel(cfg->peri_vco_base | CLEAR_BGP_EN_PWRDN, - cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); - writel(cfg->sdram_vco_base | - CLKMGR_SDRPLLGRP_VCO_OUTRESET_SET(0) | - CLKMGR_SDRPLLGRP_VCO_OUTRESETALL_SET(0) | - CLEAR_BGP_EN_PWRDN, - cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); - - writel(cfg->mpuclk, cm + CLKMGR_MAINPLLGRP_MPUCLK_ADDRESS); - writel(cfg->mainclk, cm + CLKMGR_MAINPLLGRP_MAINCLK_ADDRESS); - writel(cfg->alteragrp_mpu, cm + CLKMGR_ALTERAGRP_MPUCLK); - writel(cfg->dbgatclk, cm + CLKMGR_MAINPLLGRP_DBGATCLK_ADDRESS); - writel(cfg->alteregrp_main, cm + CLKMGR_ALTERAGRP_MAINCLK); - writel(cfg->cfg2fuser0clk, cm + CLKMGR_MAINPLLGRP_CFGS2FUSER0CLK_ADDRESS); - writel(cfg->emac0clk, cm + CLKMGR_PERPLLGRP_EMAC0CLK_ADDRESS); - writel(cfg->emac1clk, cm + CLKMGR_PERPLLGRP_EMAC1CLK_ADDRESS); - writel(cfg->mainqspiclk, cm + CLKMGR_MAINPLLGRP_MAINQSPICLK_ADDRESS); - writel(cfg->perqspiclk, cm + CLKMGR_PERPLLGRP_PERQSPICLK_ADDRESS); - writel(cfg->mainnandsdmmcclk, cm + CLKMGR_MAINPLLGRP_MAINNANDSDMMCCLK_ADDRESS); - writel(cfg->pernandsdmmcclk, cm + CLKMGR_PERPLLGRP_PERNANDSDMMCCLK_ADDRESS); - writel(cfg->perbaseclk, cm + CLKMGR_PERPLLGRP_PERBASECLK_ADDRESS); - writel(cfg->s2fuser1clk, cm + CLKMGR_PERPLLGRP_S2FUSER1CLK_ADDRESS); - - /* 7 us must have elapsed before we can enable the VCO */ - __udelay(7); - - /* Enable vco */ - writel(cfg->main_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1), - cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); - writel(cfg->peri_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1), - cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); - writel(cfg->sdram_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1), - cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); - - /* setup dividers while plls are locking */ - - /* L3 MP and L3 SP */ - writel(cfg->maindiv, cm + CLKMGR_MAINPLLGRP_MAINDIV_ADDRESS); - writel(cfg->dbgdiv, cm + CLKMGR_MAINPLLGRP_DBGDIV_ADDRESS); - writel(cfg->tracediv, cm + CLKMGR_MAINPLLGRP_TRACEDIV_ADDRESS); - - /* L4 MP, L4 SP, can0, and can1 */ - writel(cfg->perdiv, cm + CLKMGR_PERPLLGRP_DIV_ADDRESS); - writel(cfg->gpiodiv, cm + CLKMGR_PERPLLGRP_GPIODIV_ADDRESS); - - cm_wait_for_lock(cm, CLKMGR_INTER_SDRPLLLOCKED_MASK | - CLKMGR_INTER_PERPLLLOCKED_MASK | - CLKMGR_INTER_MAINPLLLOCKED_MASK); - - /* write the sdram clock counters before toggling outreset all */ - writel(cfg->ddrdqsclk & CLKMGR_SDRPLLGRP_DDRDQSCLK_CNT_MASK, - cm + CLKMGR_SDRPLLGRP_DDRDQSCLK_ADDRESS); - - writel(cfg->ddr2xdqsclk & CLKMGR_SDRPLLGRP_DDR2XDQSCLK_CNT_MASK, - cm + CLKMGR_SDRPLLGRP_DDR2XDQSCLK_ADDRESS); - - writel(cfg->ddrdqclk & CLKMGR_SDRPLLGRP_DDRDQCLK_CNT_MASK, - cm + CLKMGR_SDRPLLGRP_DDRDQCLK_ADDRESS); - - writel(cfg->s2fuser2clk & CLKMGR_SDRPLLGRP_S2FUSER2CLK_CNT_MASK, - cm + CLKMGR_SDRPLLGRP_S2FUSER2CLK_ADDRESS); - - /* - * after locking, but before taking out of bypass - * assert/deassert outresetall - */ - mainvco = readl(cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); - - /* assert main outresetall */ - writel(mainvco | CLKMGR_MAINPLLGRP_VCO_OUTRESETALL_MASK, - cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); - - periphvco = readl(cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); - - /* assert pheriph outresetall */ - writel(periphvco | CLKMGR_PERPLLGRP_VCO_OUTRESETALL_MASK, - cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); - - /* assert sdram outresetall */ - writel(cfg->sdram_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1) | - CLKMGR_SDRPLLGRP_VCO_OUTRESETALL_SET(1), - cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); - - /* deassert main outresetall */ - writel(mainvco & ~CLKMGR_MAINPLLGRP_VCO_OUTRESETALL_MASK, - cm + 
CLKMGR_MAINPLLGRP_VCO_ADDRESS); - - /* deassert pheriph outresetall */ - writel(periphvco & ~CLKMGR_PERPLLGRP_VCO_OUTRESETALL_MASK, - cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); - - /* deassert sdram outresetall */ - writel(cfg->sdram_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1), - cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); - - /* - * now that we've toggled outreset all, all the clocks - * are aligned nicely; so we can change any phase. - */ - cm_write_with_phase(cfg->ddrdqsclk, - cm + CLKMGR_SDRPLLGRP_DDRDQSCLK_ADDRESS, - CLKMGR_SDRPLLGRP_DDRDQSCLK_PHASE_MASK); - - /* SDRAM DDR2XDQSCLK */ - cm_write_with_phase(cfg->ddr2xdqsclk, - cm + CLKMGR_SDRPLLGRP_DDR2XDQSCLK_ADDRESS, - CLKMGR_SDRPLLGRP_DDR2XDQSCLK_PHASE_MASK); - - cm_write_with_phase(cfg->ddrdqclk, - cm + CLKMGR_SDRPLLGRP_DDRDQCLK_ADDRESS, - CLKMGR_SDRPLLGRP_DDRDQCLK_PHASE_MASK); - - cm_write_with_phase(cfg->s2fuser2clk, - cm + CLKMGR_SDRPLLGRP_S2FUSER2CLK_ADDRESS, - CLKMGR_SDRPLLGRP_S2FUSER2CLK_PHASE_MASK); - - /* Take all three PLLs out of bypass when safe mode is cleared. */ - cm_write_bypass(cm, 0); - - /* clear safe mode */ - val = readl(cm + CLKMGR_CTRL_ADDRESS); - val |= CLKMGR_CTRL_SAFEMODE_SET(CLKMGR_CTRL_SAFEMODE_MASK); - cm_write_ctrl(cm, val); - - /* - * now that safe mode is clear with clocks gated - * it safe to change the source mux for the flashes the the L4_MAIN - */ - writel(cfg->persrc, cm + CLKMGR_PERPLLGRP_SRC_ADDRESS); - writel(cfg->l4src, cm + CLKMGR_MAINPLLGRP_L4SRC_ADDRESS); - - /* Now ungate non-hw-managed clocks */ - writel(~0, cm + CLKMGR_MAINPLLGRP_EN_ADDRESS); - writel(~0, cm + CLKMGR_PERPLLGRP_EN_ADDRESS); - writel(~0, cm + CLKMGR_SDRPLLGRP_EN_ADDRESS); - - val = readl(cm + CLKMGR_DBCTRL_ADDRESS); - val |= CLKMGR_DBCTRL_STAYOSC1_MASK; - writel(val, cm + CLKMGR_DBCTRL_ADDRESS); -} diff --git a/arch/arm/mach-socfpga/cyclone5-bootsource.c b/arch/arm/mach-socfpga/cyclone5-bootsource.c new file mode 100644 index 0000000000..da4102c4f5 --- /dev/null +++ b/arch/arm/mach-socfpga/cyclone5-bootsource.c @@ -0,0 +1,57 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define CYCLONE5_SYSMGR_BOOTINFO 0x14 + +static int cyclone5_boot_save_loc(void) +{ + enum bootsource src = BOOTSOURCE_UNKNOWN; + uint32_t val; + + val = readl(CYCLONE5_SYSMGR_ADDRESS + CYCLONE5_SYSMGR_BOOTINFO); + + switch (val & 0x7) { + case 0: + /* reserved */ + break; + case 1: + /* FPGA, currently not decoded */ + break; + case 2: + case 3: + src = BOOTSOURCE_NAND; + break; + case 4: + case 5: + src = BOOTSOURCE_MMC; + break; + case 6: + case 7: + src = BOOTSOURCE_SPI; + break; + } + + bootsource_set(src); + bootsource_set_instance(0); + + return 0; +} +core_initcall(cyclone5_boot_save_loc); diff --git a/arch/arm/mach-socfpga/cyclone5-clock-manager.c b/arch/arm/mach-socfpga/cyclone5-clock-manager.c new file mode 100644 index 0000000000..79c8b6bf28 --- /dev/null +++ b/arch/arm/mach-socfpga/cyclone5-clock-manager.c @@ -0,0 +1,298 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include + +static inline void cm_wait_for_lock(void __iomem *cm, uint32_t mask) +{ + while ((readl(cm + CLKMGR_INTER_ADDRESS) & mask) != mask); +} + +/* function to poll in the fsm busy bit */ +static inline void cm_wait4fsm(void __iomem *cm) +{ + while (readl(cm + CLKMGR_STAT_ADDRESS) & 1); +} + +/* + * function to write the bypass register which requires a poll of the + * busy bit + */ +static inline void cm_write_bypass(void __iomem *cm, uint32_t val) +{ + writel(val, cm + CLKMGR_BYPASS_ADDRESS); + cm_wait4fsm(cm); +} + +/* function to write the ctrl register which requires a poll of the busy bit */ +static inline void cm_write_ctrl(void __iomem *cm, uint32_t val) +{ + writel(val, cm + CLKMGR_CTRL_ADDRESS); + cm_wait4fsm(cm); +} + +/* function to write a clock register that has phase information */ +static inline void cm_write_with_phase(uint32_t value, + void __iomem *reg, uint32_t mask) +{ + /* poll until phase is zero */ + while (readl(reg) & mask); + + writel(value, reg); + + while (readl(reg) & mask); +} + +/* + * Setup clocks while making no assumptions of the + * previous state of the clocks. + * + * - Start by being paranoid and gate all sw managed clocks + * - Put all plls in bypass + * - Put all plls VCO registers back to reset value (bgpwr dwn). + * - Put peripheral and main pll src to reset value to avoid glitch. + * - Delay 5 us. + * - Deassert bg pwr dn and set numerator and denominator + * - Start 7 us timer. + * - set internal dividers + * - Wait for 7 us timer. + * - Enable plls + * - Set external dividers while plls are locking + * - Wait for pll lock + * - Assert/deassert outreset all. 
+ * - Take all pll's out of bypass + * - Clear safe mode + * - set source main and peripheral clocks + * - Ungate clocks + */ +void socfpga_cm_basic_init(const struct socfpga_cm_config *cfg) +{ + uint32_t mainvco, periphvco, val; + void *cm = (void *)CYCLONE5_CLKMGR_ADDRESS; + + /* Start by being paranoid and gate all sw managed clocks */ + + /* + * We need to disable nandclk + * and then do another apb access before disabling + * gatting off the rest of the periperal clocks. + */ + val = readl(cm + CLKMGR_PERPLLGRP_EN_ADDRESS); + val &= ~CLKMGR_PERPLLGRP_EN_NANDCLK_MASK; + writel(val, cm + CLKMGR_PERPLLGRP_EN_ADDRESS); + + /* DO NOT GATE OFF DEBUG CLOCKS & BRIDGE CLOCKS */ + writel(CLKMGR_MAINPLLGRP_EN_DBGTIMERCLK_MASK | + CLKMGR_MAINPLLGRP_EN_DBGTRACECLK_MASK | + CLKMGR_MAINPLLGRP_EN_DBGCLK_MASK | + CLKMGR_MAINPLLGRP_EN_DBGATCLK_MASK | + CLKMGR_MAINPLLGRP_EN_S2FUSER0CLK_MASK | + CLKMGR_MAINPLLGRP_EN_L4MPCLK_MASK, + cm + CLKMGR_MAINPLLGRP_EN_ADDRESS); + + writel(0, cm + CLKMGR_SDRPLLGRP_EN_ADDRESS); + + /* now we can gate off the rest of the peripheral clocks */ + writel(0, cm + CLKMGR_PERPLLGRP_EN_ADDRESS); + + /* Put all plls in bypass */ + cm_write_bypass(cm, + CLKMGR_BYPASS_PERPLL_SET(1) | + CLKMGR_BYPASS_SDRPLL_SET(1) | + CLKMGR_BYPASS_MAINPLL_SET(1)); + + /* + * Put all plls VCO registers back to reset value. + * Some code might have messed with them. + */ + writel(CLKMGR_MAINPLLGRP_VCO_RESET_VALUE & + ~CLKMGR_MAINPLLGRP_VCO_REGEXTSEL_MASK, + cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); + writel(CLKMGR_PERPLLGRP_VCO_RESET_VALUE & + ~CLKMGR_PERPLLGRP_VCO_REGEXTSEL_MASK, + cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); + writel(CLKMGR_SDRPLLGRP_VCO_RESET_VALUE & + ~CLKMGR_SDRPLLGRP_VCO_REGEXTSEL_MASK, + cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); + + /* + * The clocks to the flash devices and the L4_MAIN clocks can + * glitch when coming out of safe mode if their source values + * are different from their reset value. So the trick it to + * put them back to their reset state, and change input + * after exiting safe mode but before ungating the clocks. + */ + writel(CLKMGR_PERPLLGRP_SRC_RESET_VALUE, + cm + CLKMGR_PERPLLGRP_SRC_ADDRESS); + writel(CLKMGR_MAINPLLGRP_L4SRC_RESET_VALUE, + cm + CLKMGR_MAINPLLGRP_L4SRC_ADDRESS); + + /* read back for the required 5 us delay. */ + readl(cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); + readl(cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); + readl(cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); + + /* + * We made sure bgpwr down was assert for 5 us. Now deassert BG PWR DN + * with numerator and denominator. 
+ */ + writel(cfg->main_vco_base | CLEAR_BGP_EN_PWRDN, + cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); + writel(cfg->peri_vco_base | CLEAR_BGP_EN_PWRDN, + cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); + writel(cfg->sdram_vco_base | + CLKMGR_SDRPLLGRP_VCO_OUTRESET_SET(0) | + CLKMGR_SDRPLLGRP_VCO_OUTRESETALL_SET(0) | + CLEAR_BGP_EN_PWRDN, + cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); + + writel(cfg->mpuclk, cm + CLKMGR_MAINPLLGRP_MPUCLK_ADDRESS); + writel(cfg->mainclk, cm + CLKMGR_MAINPLLGRP_MAINCLK_ADDRESS); + writel(cfg->alteragrp_mpu, cm + CLKMGR_ALTERAGRP_MPUCLK); + writel(cfg->dbgatclk, cm + CLKMGR_MAINPLLGRP_DBGATCLK_ADDRESS); + writel(cfg->alteregrp_main, cm + CLKMGR_ALTERAGRP_MAINCLK); + writel(cfg->cfg2fuser0clk, cm + CLKMGR_MAINPLLGRP_CFGS2FUSER0CLK_ADDRESS); + writel(cfg->emac0clk, cm + CLKMGR_PERPLLGRP_EMAC0CLK_ADDRESS); + writel(cfg->emac1clk, cm + CLKMGR_PERPLLGRP_EMAC1CLK_ADDRESS); + writel(cfg->mainqspiclk, cm + CLKMGR_MAINPLLGRP_MAINQSPICLK_ADDRESS); + writel(cfg->perqspiclk, cm + CLKMGR_PERPLLGRP_PERQSPICLK_ADDRESS); + writel(cfg->mainnandsdmmcclk, cm + CLKMGR_MAINPLLGRP_MAINNANDSDMMCCLK_ADDRESS); + writel(cfg->pernandsdmmcclk, cm + CLKMGR_PERPLLGRP_PERNANDSDMMCCLK_ADDRESS); + writel(cfg->perbaseclk, cm + CLKMGR_PERPLLGRP_PERBASECLK_ADDRESS); + writel(cfg->s2fuser1clk, cm + CLKMGR_PERPLLGRP_S2FUSER1CLK_ADDRESS); + + /* 7 us must have elapsed before we can enable the VCO */ + __udelay(7); + + /* Enable vco */ + writel(cfg->main_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1), + cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); + writel(cfg->peri_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1), + cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); + writel(cfg->sdram_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1), + cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); + + /* setup dividers while plls are locking */ + + /* L3 MP and L3 SP */ + writel(cfg->maindiv, cm + CLKMGR_MAINPLLGRP_MAINDIV_ADDRESS); + writel(cfg->dbgdiv, cm + CLKMGR_MAINPLLGRP_DBGDIV_ADDRESS); + writel(cfg->tracediv, cm + CLKMGR_MAINPLLGRP_TRACEDIV_ADDRESS); + + /* L4 MP, L4 SP, can0, and can1 */ + writel(cfg->perdiv, cm + CLKMGR_PERPLLGRP_DIV_ADDRESS); + writel(cfg->gpiodiv, cm + CLKMGR_PERPLLGRP_GPIODIV_ADDRESS); + + cm_wait_for_lock(cm, CLKMGR_INTER_SDRPLLLOCKED_MASK | + CLKMGR_INTER_PERPLLLOCKED_MASK | + CLKMGR_INTER_MAINPLLLOCKED_MASK); + + /* write the sdram clock counters before toggling outreset all */ + writel(cfg->ddrdqsclk & CLKMGR_SDRPLLGRP_DDRDQSCLK_CNT_MASK, + cm + CLKMGR_SDRPLLGRP_DDRDQSCLK_ADDRESS); + + writel(cfg->ddr2xdqsclk & CLKMGR_SDRPLLGRP_DDR2XDQSCLK_CNT_MASK, + cm + CLKMGR_SDRPLLGRP_DDR2XDQSCLK_ADDRESS); + + writel(cfg->ddrdqclk & CLKMGR_SDRPLLGRP_DDRDQCLK_CNT_MASK, + cm + CLKMGR_SDRPLLGRP_DDRDQCLK_ADDRESS); + + writel(cfg->s2fuser2clk & CLKMGR_SDRPLLGRP_S2FUSER2CLK_CNT_MASK, + cm + CLKMGR_SDRPLLGRP_S2FUSER2CLK_ADDRESS); + + /* + * after locking, but before taking out of bypass + * assert/deassert outresetall + */ + mainvco = readl(cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); + + /* assert main outresetall */ + writel(mainvco | CLKMGR_MAINPLLGRP_VCO_OUTRESETALL_MASK, + cm + CLKMGR_MAINPLLGRP_VCO_ADDRESS); + + periphvco = readl(cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); + + /* assert pheriph outresetall */ + writel(periphvco | CLKMGR_PERPLLGRP_VCO_OUTRESETALL_MASK, + cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); + + /* assert sdram outresetall */ + writel(cfg->sdram_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1) | + CLKMGR_SDRPLLGRP_VCO_OUTRESETALL_SET(1), + cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); + + /* deassert main outresetall */ + writel(mainvco & ~CLKMGR_MAINPLLGRP_VCO_OUTRESETALL_MASK, + cm + 
CLKMGR_MAINPLLGRP_VCO_ADDRESS); + + /* deassert pheriph outresetall */ + writel(periphvco & ~CLKMGR_PERPLLGRP_VCO_OUTRESETALL_MASK, + cm + CLKMGR_PERPLLGRP_VCO_ADDRESS); + + /* deassert sdram outresetall */ + writel(cfg->sdram_vco_base | CLKMGR_MAINPLLGRP_VCO_EN_SET(1), + cm + CLKMGR_SDRPLLGRP_VCO_ADDRESS); + + /* + * now that we've toggled outreset all, all the clocks + * are aligned nicely; so we can change any phase. + */ + cm_write_with_phase(cfg->ddrdqsclk, + cm + CLKMGR_SDRPLLGRP_DDRDQSCLK_ADDRESS, + CLKMGR_SDRPLLGRP_DDRDQSCLK_PHASE_MASK); + + /* SDRAM DDR2XDQSCLK */ + cm_write_with_phase(cfg->ddr2xdqsclk, + cm + CLKMGR_SDRPLLGRP_DDR2XDQSCLK_ADDRESS, + CLKMGR_SDRPLLGRP_DDR2XDQSCLK_PHASE_MASK); + + cm_write_with_phase(cfg->ddrdqclk, + cm + CLKMGR_SDRPLLGRP_DDRDQCLK_ADDRESS, + CLKMGR_SDRPLLGRP_DDRDQCLK_PHASE_MASK); + + cm_write_with_phase(cfg->s2fuser2clk, + cm + CLKMGR_SDRPLLGRP_S2FUSER2CLK_ADDRESS, + CLKMGR_SDRPLLGRP_S2FUSER2CLK_PHASE_MASK); + + /* Take all three PLLs out of bypass when safe mode is cleared. */ + cm_write_bypass(cm, 0); + + /* clear safe mode */ + val = readl(cm + CLKMGR_CTRL_ADDRESS); + val |= CLKMGR_CTRL_SAFEMODE_SET(CLKMGR_CTRL_SAFEMODE_MASK); + cm_write_ctrl(cm, val); + + /* + * now that safe mode is clear with clocks gated + * it safe to change the source mux for the flashes the the L4_MAIN + */ + writel(cfg->persrc, cm + CLKMGR_PERPLLGRP_SRC_ADDRESS); + writel(cfg->l4src, cm + CLKMGR_MAINPLLGRP_L4SRC_ADDRESS); + + /* Now ungate non-hw-managed clocks */ + writel(~0, cm + CLKMGR_MAINPLLGRP_EN_ADDRESS); + writel(~0, cm + CLKMGR_PERPLLGRP_EN_ADDRESS); + writel(~0, cm + CLKMGR_SDRPLLGRP_EN_ADDRESS); + + val = readl(cm + CLKMGR_DBCTRL_ADDRESS); + val |= CLKMGR_DBCTRL_STAYOSC1_MASK; + writel(val, cm + CLKMGR_DBCTRL_ADDRESS); +} diff --git a/arch/arm/mach-socfpga/cyclone5-freeze-controller.c b/arch/arm/mach-socfpga/cyclone5-freeze-controller.c new file mode 100644 index 0000000000..87160161b0 --- /dev/null +++ b/arch/arm/mach-socfpga/cyclone5-freeze-controller.c @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include + +#define SYSMGR_FRZCTRL_LOOP_PARAM (1000) +#define SYSMGR_FRZCTRL_DELAY_LOOP_PARAM (10) + +/* + * sys_mgr_frzctrl_freeze_req + * Freeze HPS IOs + */ +int sys_mgr_frzctrl_freeze_req(enum frz_channel_id channel_id) +{ + uint32_t reg, val; + void *sm = (void *)CYCLONE5_SYSMGR_ADDRESS; + + /* select software FSM */ + writel(SYSMGR_FRZCTRL_SRC_VIO1_ENUM_SW, + (sm + SYSMGR_FRZCTRL_SRC_ADDRESS)); + + /* Freeze channel ID checking and base address */ + switch (channel_id) { + case FREEZE_CHANNEL_0: + case FREEZE_CHANNEL_1: + case FREEZE_CHANNEL_2: + reg = SYSMGR_FRZCTRL_VIOCTRL_ADDRESS + (channel_id << SYSMGR_FRZCTRL_VIOCTRL_SHIFT); + + /* + * Assert active low enrnsl, plniotri + * and niotri signals + */ + val = readl(sm + reg); + val &= ~(SYSMGR_FRZCTRL_VIOCTRL_SLEW_MASK + | SYSMGR_FRZCTRL_VIOCTRL_WKPULLUP_MASK + | SYSMGR_FRZCTRL_VIOCTRL_TRISTATE_MASK); + writel(val, sm + reg); + + /* + * Note: Delay for 20ns at min + * Assert active low bhniotri signal and de-assert + * active high csrdone + */ + val = readl(sm + reg); + val &= ~(SYSMGR_FRZCTRL_VIOCTRL_BUSHOLD_MASK | SYSMGR_FRZCTRL_VIOCTRL_CFG_MASK); + writel(val, sm + reg); + + break; + + case FREEZE_CHANNEL_3: + /* + * Assert active low enrnsl, plniotri and + * niotri signals + */ + val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val &= ~(SYSMGR_FRZCTRL_HIOCTRL_SLEW_MASK + | SYSMGR_FRZCTRL_HIOCTRL_WKPULLUP_MASK + | SYSMGR_FRZCTRL_HIOCTRL_TRISTATE_MASK); + writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + + /* + * Note: Delay for 40ns at min + * assert active low bhniotri & nfrzdrv signals, + * de-assert active high csrdone and assert + * active high frzreg and nfrzdrv signals + */ + val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val &= ~(SYSMGR_FRZCTRL_HIOCTRL_BUSHOLD_MASK + | SYSMGR_FRZCTRL_HIOCTRL_CFG_MASK); + val |= SYSMGR_FRZCTRL_HIOCTRL_REGRST_MASK + | SYSMGR_FRZCTRL_HIOCTRL_OCTRST_MASK; + writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + + /* + * Note: Delay for 40ns at min + * assert active high reinit signal and de-assert + * active high pllbiasen signals + */ + val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val &= ~(SYSMGR_FRZCTRL_HIOCTRL_OCT_CFGEN_CALSTART_MASK); + val |= SYSMGR_FRZCTRL_HIOCTRL_DLLRST_MASK; + writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * sys_mgr_frzctrl_thaw_req + * Unfreeze/Thaw HPS IOs + */ +int sys_mgr_frzctrl_thaw_req(enum frz_channel_id channel_id) +{ + uint32_t reg, val; + void *sm = (void *)CYCLONE5_SYSMGR_ADDRESS; + + /* select software FSM */ + writel(SYSMGR_FRZCTRL_SRC_VIO1_ENUM_SW, sm + SYSMGR_FRZCTRL_SRC_ADDRESS); + + /* Freeze channel ID checking and base address */ + switch (channel_id) { + case FREEZE_CHANNEL_0: + case FREEZE_CHANNEL_1: + case FREEZE_CHANNEL_2: + reg = SYSMGR_FRZCTRL_VIOCTRL_ADDRESS + + (channel_id << SYSMGR_FRZCTRL_VIOCTRL_SHIFT); + + /* + * Assert active low bhniotri signal and + * de-assert active high csrdone + */ + val = readl(sm + reg); + val |= SYSMGR_FRZCTRL_VIOCTRL_BUSHOLD_MASK | + SYSMGR_FRZCTRL_VIOCTRL_CFG_MASK; + writel(val, sm + reg); + + /* + * Note: Delay for 20ns at min + * de-assert active low plniotri and niotri signals + */ + val = readl(sm + reg); + val |= SYSMGR_FRZCTRL_VIOCTRL_WKPULLUP_MASK | + SYSMGR_FRZCTRL_VIOCTRL_TRISTATE_MASK; + writel(val, sm + reg); + + /* + * Note: Delay for 20ns at min + * de-assert active low enrnsl signal + */ + val = readl(sm + reg); + val |= SYSMGR_FRZCTRL_VIOCTRL_SLEW_MASK; + writel(val, sm + 
reg); + + break; + + case FREEZE_CHANNEL_3: + /* de-assert active high reinit signal */ + val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val &= ~SYSMGR_FRZCTRL_HIOCTRL_DLLRST_MASK; + writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + + /* + * Note: Delay for 40ns at min + * assert active high pllbiasen signals + */ + val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val |= SYSMGR_FRZCTRL_HIOCTRL_OCT_CFGEN_CALSTART_MASK; + writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + + /* + * Delay 1000 intosc. intosc is based on eosc1 + * At 25MHz this would be 40us. Play safe, we have time... + */ + __udelay(1000); + + /* + * de-assert active low bhniotri signals, + * assert active high csrdone and nfrzdrv signal + */ + val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val |= SYSMGR_FRZCTRL_HIOCTRL_BUSHOLD_MASK | + SYSMGR_FRZCTRL_HIOCTRL_CFG_MASK; + val &= ~SYSMGR_FRZCTRL_HIOCTRL_OCTRST_MASK; + writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + + /* Delay 33 intosc */ + __udelay(100); + + /* de-assert active low plniotri and niotri signals */ + val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val |= SYSMGR_FRZCTRL_HIOCTRL_WKPULLUP_MASK | + SYSMGR_FRZCTRL_HIOCTRL_TRISTATE_MASK; + writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + + /* + * Note: Delay for 40ns at min + * de-assert active high frzreg signal + */ + val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val &= ~SYSMGR_FRZCTRL_HIOCTRL_REGRST_MASK; + writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + + /* + * Note: Delay for 40ns at min + * de-assert active low enrnsl signal + */ + val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val |= SYSMGR_FRZCTRL_HIOCTRL_SLEW_MASK; + writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + + break; + default: + return -EINVAL; + } + + return 0; +} diff --git a/arch/arm/mach-socfpga/cyclone5-generic.c b/arch/arm/mach-socfpga/cyclone5-generic.c new file mode 100644 index 0000000000..3f49a9a542 --- /dev/null +++ b/arch/arm/mach-socfpga/cyclone5-generic.c @@ -0,0 +1,210 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SYSMGR_SDMMCGRP_CTRL_REG (CYCLONE5_SYSMGR_ADDRESS + 0x108) +#define SYSMGR_SDMMC_CTRL_SMPLSEL(smplsel) (((smplsel) & 0x7) << 3) +#define SYSMGR_SDMMC_CTRL_DRVSEL(drvsel) ((drvsel) & 0x7) + +enum socfpga_clks { + timer, mmc, qspi_clk, uart, clk_max +}; + +static struct clk *clks[clk_max]; + +#if defined(CONFIG_MCI_DW) +static struct dw_mmc_platform_data mmc_pdata = { + .bus_width_caps = MMC_CAP_4_BIT_DATA, + .ciu_div = 3, +}; + +void socfpga_cyclone5_mmc_init(void) +{ + clks[mmc] = clk_fixed("mmc", 400000000); + clkdev_add_physbase(clks[mmc], CYCLONE5_SDMMC_ADDRESS, NULL); + add_generic_device("dw_mmc", 0, NULL, CYCLONE5_SDMMC_ADDRESS, SZ_4K, + IORESOURCE_MEM, &mmc_pdata); +} +#else +void socfpga_cyclone5_mmc_init(void) +{ + pr_debug("%s: MMC support not compiled in!\n", __func__); + + return; +} +#endif + +#if defined(CONFIG_SPI_CADENCE_QUADSPI) +static struct cadence_qspi_platform_data qspi_pdata = { + .ext_decoder = 0, + .fifo_depth = 128, +}; + +static void add_cadence_qspi_device(int id, resource_size_t ctrl, + resource_size_t data, void *pdata) +{ + struct device_d *dev; + struct resource *res; + + res = xzalloc(sizeof(struct resource) * 2); + res[0].start = ctrl; + res[0].end = ctrl + 0x100 - 1; + res[0].flags = IORESOURCE_MEM; + res[1].start = data; + res[1].end = data + 0x100 - 1; + res[1].flags = 
IORESOURCE_MEM; + + dev = add_generic_device_res("cadence_qspi", id, res, 2, pdata); + + dev_dbg(dev, "added resource\n"); +} + +void socfpga_cyclone5_qspi_init(void) +{ + clks[qspi_clk] = clk_fixed("qspi_clk", 370000000); + clkdev_add_physbase(clks[qspi_clk], CYCLONE5_QSPI_CTRL_ADDRESS, NULL); + clkdev_add_physbase(clks[qspi_clk], CYCLONE5_QSPI_DATA_ADDRESS, NULL); + add_cadence_qspi_device(0, CYCLONE5_QSPI_CTRL_ADDRESS, + CYCLONE5_QSPI_DATA_ADDRESS, &qspi_pdata); +} +#else +void socfpga_cyclone5_qspi_init(void) +{ + pr_debug("%s: QSPI support not compiled in!\n", __func__); + + return; +} +#endif + +static struct NS16550_plat uart_pdata = { + .clock = 100000000, + .shift = 2, +}; + +void socfpga_cyclone5_uart_init(void) +{ + struct device_d *dev; + + clks[uart] = clk_fixed("uart", 100000000); + clkdev_add_physbase(clks[uart], CYCLONE5_UART0_ADDRESS, NULL); + clkdev_add_physbase(clks[uart], CYCLONE5_UART1_ADDRESS, NULL); + dev = add_ns16550_device(0, 0xffc02000, 1024, IORESOURCE_MEM | + IORESOURCE_MEM_8BIT, &uart_pdata); + + dev_dbg(dev, "initialized\n"); +} + +void socfpga_cyclone5_timer_init(void) +{ + struct device_d *dev; + + clks[timer] = clk_fixed("timer", 200000000); + clkdev_add_physbase(clks[timer], CYCLONE5_SMP_TWD_ADDRESS, NULL); + dev = add_generic_device("smp_twd", 0, NULL, CYCLONE5_SMP_TWD_ADDRESS, 0x100, + IORESOURCE_MEM, NULL); + + dev_dbg(dev, "added smp_twd\n"); +} + +static int socfpga_detect_sdram(void) +{ + void __iomem *base = (void *)CYCLONE5_SDR_ADDRESS; + uint32_t dramaddrw, ctrlwidth, memsize; + int colbits, rowbits, bankbits; + int width_bytes; + + dramaddrw = readl(base + 0x5000 + 0x2c); + + colbits = dramaddrw & 0x1f; + rowbits = (dramaddrw >> 5) & 0x1f; + bankbits = (dramaddrw >> 10) & 0x7; + + ctrlwidth = readl(base + 0x5000 + 0x60); + + switch (ctrlwidth & 0x3) { + default: + case 0: + width_bytes = 1; + break; + case 1: + width_bytes = 2; + break; + case 2: + width_bytes = 4; + break; + } + + memsize = (1 << colbits) * (1 << rowbits) * (1 << bankbits) * width_bytes; + + pr_debug("%s: colbits: %d rowbits: %d bankbits: %d width: %d => memsize: 0x%08x\n", + __func__, colbits, rowbits, bankbits, width_bytes, memsize); + + arm_add_mem_device("ram0", 0x0, memsize); + + return 0; +} + +/* Some initialization for the EMAC */ +static void socfpga_init_emac(void) +{ + uint32_t rst, val; + + /* No need for this without network support, e.g. xloader build */ + if (!IS_ENABLED(CONFIG_NET)) + return; + + /* According to Cyclone V datasheet, 17-60 "EMAC HPS Interface + * Initialization", changing PHYSEL should be done with EMAC in reset + * via permodrst. */ + + /* Everything, except L4WD0/1, is out of reset via socfpga_lowlevel_init() */ + rst = readl(CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); + rst |= RSTMGR_PERMODRST_EMAC0 | RSTMGR_PERMODRST_EMAC1; + writel(rst, CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); + + /* Set emac0/1 PHY interface select to RGMII. We could read phy-mode + * from the device tree, if it was desired to support interfaces other + * than RGMII. 
*/ + val = readl(CONFIG_SYSMGR_EMAC_CTRL); + val &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << SYSMGR_EMACGRP_CTRL_PHYSEL0_LSB); + val &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << SYSMGR_EMACGRP_CTRL_PHYSEL1_LSB); + val |= SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII << SYSMGR_EMACGRP_CTRL_PHYSEL0_LSB; + val |= SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII << SYSMGR_EMACGRP_CTRL_PHYSEL1_LSB; + writel(val, CONFIG_SYSMGR_EMAC_CTRL); + + /* Take emac0 and emac1 out of reset */ + rst &= ~(RSTMGR_PERMODRST_EMAC0 | RSTMGR_PERMODRST_EMAC1); + writel(rst, CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); +} + +static int socfpga_init(void) +{ + socfpga_init_emac(); + + writel(SYSMGR_SDMMC_CTRL_DRVSEL(3) | SYSMGR_SDMMC_CTRL_SMPLSEL(0), + SYSMGR_SDMMCGRP_CTRL_REG); + + nic301_slave_ns(); + + socfpga_detect_sdram(); + + return 0; +} +core_initcall(socfpga_init); diff --git a/arch/arm/mach-socfpga/cyclone5-init.c b/arch/arm/mach-socfpga/cyclone5-init.c new file mode 100644 index 0000000000..412808b841 --- /dev/null +++ b/arch/arm/mach-socfpga/cyclone5-init.c @@ -0,0 +1,58 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void socfpga_lowlevel_init(struct socfpga_cm_config *cm_config, + struct socfpga_io_config *io_config) +{ + uint32_t val; + + val = 0xffffffff; + val &= ~(1 << RSTMGR_PERMODRST_L4WD0_LSB); + val &= ~(1 << RSTMGR_PERMODRST_OSC1TIMER0_LSB); + writel(val, CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); + + /* freeze all IO banks */ + sys_mgr_frzctrl_freeze_req(FREEZE_CHANNEL_0); + sys_mgr_frzctrl_freeze_req(FREEZE_CHANNEL_1); + sys_mgr_frzctrl_freeze_req(FREEZE_CHANNEL_2); + sys_mgr_frzctrl_freeze_req(FREEZE_CHANNEL_3); + + writel(~0, CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_BRG_MOD_RESET_OFS); + + debug("Reconfigure Clock Manager\n"); + + /* reconfigure the PLLs */ + socfpga_cm_basic_init(cm_config); + + debug("Configure IOCSR\n"); + /* configure the IOCSR through scan chain */ + scan_mgr_io_scan_chain_prg(IO_SCAN_CHAIN_0, CONFIG_HPS_IOCSR_SCANCHAIN0_LENGTH, io_config->iocsr_emac_mixed2); + scan_mgr_io_scan_chain_prg(IO_SCAN_CHAIN_1, CONFIG_HPS_IOCSR_SCANCHAIN1_LENGTH, io_config->iocsr_mixed1_flash); + scan_mgr_io_scan_chain_prg(IO_SCAN_CHAIN_2, CONFIG_HPS_IOCSR_SCANCHAIN2_LENGTH, io_config->iocsr_general); + scan_mgr_io_scan_chain_prg(IO_SCAN_CHAIN_3, CONFIG_HPS_IOCSR_SCANCHAIN3_LENGTH, io_config->iocsr_ddr); + + /* configure the pin muxing through system manager */ + socfpga_sysmgr_pinmux_init(io_config->pinmux, io_config->num_pin); + + writel(RSTMGR_PERMODRST_L4WD0 | RSTMGR_PERMODRST_L4WD1, + CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); + + /* unfreeze / thaw all IO banks */ + sys_mgr_frzctrl_thaw_req(FREEZE_CHANNEL_0); + sys_mgr_frzctrl_thaw_req(FREEZE_CHANNEL_1); + sys_mgr_frzctrl_thaw_req(FREEZE_CHANNEL_2); + sys_mgr_frzctrl_thaw_req(FREEZE_CHANNEL_3); + + writel(0x18, CYCLONE5_L3REGS_ADDRESS); + writel(0x1, 0xfffefc00); + + INIT_LL(); +} diff --git a/arch/arm/mach-socfpga/cyclone5-reset-manager.c b/arch/arm/mach-socfpga/cyclone5-reset-manager.c new file mode 100644 index 0000000000..4bbe1a8101 --- /dev/null +++ b/arch/arm/mach-socfpga/cyclone5-reset-manager.c @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +/* Disable the watchdog (toggle reset to watchdog) */ +void watchdog_disable(void) +{ + void __iomem *rm = (void *)CYCLONE5_RSTMGR_ADDRESS; + uint32_t val; + + /* assert reset for watchdog */ + val = readl(rm + RESET_MGR_PER_MOD_RESET_OFS); + val |= 1 << RSTMGR_PERMODRST_L4WD0_LSB; + writel(val, rm + RESET_MGR_PER_MOD_RESET_OFS); + + /* deassert watchdog from reset (watchdog in not running state) */ + val = readl(rm + RESET_MGR_PER_MOD_RESET_OFS); + val &= ~(1 << RSTMGR_PERMODRST_L4WD0_LSB); + writel(val, rm + RESET_MGR_PER_MOD_RESET_OFS); +} + +/* Write the reset manager register to cause reset */ +static void __noreturn socfpga_restart_soc(struct restart_handler *rst) +{ + /* request a warm reset */ + writel((1 << RSTMGR_CTRL_SWWARMRSTREQ_LSB), + CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_CTRL_OFS); + /* + * infinite loop here as watchdog will trigger and reset + * the processor + */ + hang(); +} + +static int restart_register_feature(void) +{ + restart_handler_register_fn(socfpga_restart_soc); + + return 0; +} +coredevice_initcall(restart_register_feature); diff --git a/arch/arm/mach-socfpga/cyclone5-scan-manager.c b/arch/arm/mach-socfpga/cyclone5-scan-manager.c new file mode 100644 index 0000000000..cf076c3885 --- /dev/null +++ b/arch/arm/mach-socfpga/cyclone5-scan-manager.c @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +/* + * @fn scan_mgr_io_scan_chain_engine_is_idle + * + * @brief function to check IO scan chain engine status and wait if the + * engine is active. Poll the IO scan chain engine till maximum iteration + * reached. 
+ * + * @param max_iter uint32_t [in] - maximum polling loop to revent infinite loop + */ +static int scan_mgr_io_scan_chain_engine_is_idle(uint32_t max_iter) +{ + uint32_t scanmgr_status; + + scanmgr_status = readl(SCANMGR_STAT_ADDRESS + + CYCLONE5_SCANMGR_ADDRESS); + + /* Poll the engine until the scan engine is inactive */ + while (SCANMGR_STAT_ACTIVE_GET(scanmgr_status) + || (SCANMGR_STAT_WFIFOCNT_GET(scanmgr_status) > 0)) { + + max_iter--; + + if (max_iter > 0) { + scanmgr_status = readl( + CYCLONE5_SCANMGR_ADDRESS + + SCANMGR_STAT_ADDRESS); + } else { + return 0; + } + } + return 1; +} + +/* + * scan_mgr_io_scan_chain_prg + * Program HPS IO Scan Chain + */ +int scan_mgr_io_scan_chain_prg(enum io_scan_chain io_scan_chain_id, + uint32_t io_scan_chain_len_in_bits, + const unsigned long *iocsr_scan_chain) +{ + uint16_t tdi_tdo_header; + uint32_t io_program_iter; + uint32_t io_scan_chain_data_residual; + uint32_t residual; + uint32_t i; + uint32_t index = 0; + uint32_t val; + int ret; + void __iomem *sysmgr = (void *)CYCLONE5_SYSMGR_ADDRESS; + void __iomem *scanmgr = (void *)CYCLONE5_SCANMGR_ADDRESS; + + /* De-assert reinit if the IO scan chain is intended for HIO */ + if (io_scan_chain_id == IO_SCAN_CHAIN_3) { + val = readl(sysmgr + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + val &= ~SYSMGR_FRZCTRL_HIOCTRL_DLLRST_MASK; + writel(val, sysmgr + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); + } /* if (HIO) */ + + /* + * Check if the scan chain engine is inactive and the + * WFIFO is empty before enabling the IO scan chain + */ + if (!scan_mgr_io_scan_chain_engine_is_idle(MAX_WAITING_DELAY_IO_SCAN_ENGINE)) + return -EBUSY; + + /* + * Enable IO Scan chain based on scan chain id + * Note: only one chain can be enabled at a time + */ + val = readl(scanmgr + SCANMGR_EN_ADDRESS); + val |= 1 << io_scan_chain_id; + writel(val, scanmgr + SCANMGR_EN_ADDRESS); + + /* + * Calculate number of iteration needed for + * full 128-bit (4 x32-bits) bits shifting. 
+ * Each TDI_TDO packet can shift in maximum 128-bits + */ + io_program_iter = io_scan_chain_len_in_bits >> IO_SCAN_CHAIN_128BIT_SHIFT; + io_scan_chain_data_residual = io_scan_chain_len_in_bits & IO_SCAN_CHAIN_128BIT_MASK; + + /* + * Construct TDI_TDO packet for + * 128-bit IO scan chain (2 bytes) + */ + tdi_tdo_header = TDI_TDO_HEADER_FIRST_BYTE | + (TDI_TDO_MAX_PAYLOAD << TDI_TDO_HEADER_SECOND_BYTE_SHIFT); + + /* Program IO scan chain in 128-bit iteration */ + for (i = 0; i < io_program_iter; i++) { + + /* write TDI_TDO packet header to scan manager */ + writel(tdi_tdo_header, (scanmgr + SCANMGR_FIFODOUBLEBYTE_ADDRESS)); + + /* calculate array index */ + index = i * 4; + + /* + * write 4 successive 32-bit IO scan + * chain data into WFIFO + */ + writel(iocsr_scan_chain[index], (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); + writel(iocsr_scan_chain[index + 1], (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); + writel(iocsr_scan_chain[index + 2], (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); + writel(iocsr_scan_chain[index + 3], (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); + + /* + * Check if the scan chain engine has completed the + * IO scan chain data shifting + */ + if (!scan_mgr_io_scan_chain_engine_is_idle(MAX_WAITING_DELAY_IO_SCAN_ENGINE)) { + ret = -EBUSY; + goto out_disable; + } + } + + /* Calculate array index for final TDI_TDO packet */ + index = io_program_iter * 4; + + /* Final TDI_TDO packet if any */ + if (0 != io_scan_chain_data_residual) { + /* + * Calculate number of quad bytes FIFO write + * needed for the final TDI_TDO packet + */ + io_program_iter = io_scan_chain_data_residual >> IO_SCAN_CHAIN_32BIT_SHIFT; + + /* + * Construct TDI_TDO packet for remaining IO + * scan chain (2 bytes) + */ + tdi_tdo_header = TDI_TDO_HEADER_FIRST_BYTE | + ((io_scan_chain_data_residual - 1) << TDI_TDO_HEADER_SECOND_BYTE_SHIFT); + + /* + * Program the last part of IO scan chain + * write TDI_TDO packet header (2 bytes) to + * scan manager + */ + writel(tdi_tdo_header, (scanmgr + SCANMGR_FIFODOUBLEBYTE_ADDRESS)); + + for (i = 0; i < io_program_iter; i++) { + + /* + * write remaining scan chain data into scan + * manager WFIFO with 4 bytes write + */ + writel(iocsr_scan_chain[index + i], + (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); + } + + index += io_program_iter; + residual = io_scan_chain_data_residual & IO_SCAN_CHAIN_32BIT_MASK; + + if (IO_SCAN_CHAIN_PAYLOAD_24BIT < residual) { + /* + * write the last 4B scan chain data + * into scan manager WFIFO + */ + writel(iocsr_scan_chain[index], + (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); + } else { + /* + * write the remaining 1 - 3 bytes scan chain + * data into scan manager WFIFO byte by byte + * to prevent JTAG engine shifting unused data + * from the FIFO and mistaken the data as a + * valid command (even though unused bits are + * set to 0, but just to prevent hardware + * glitch) + */ + for (i = 0; i < residual; i += 8) { + writel(((iocsr_scan_chain[index] >> i) & IO_SCAN_CHAIN_BYTE_MASK), + (scanmgr + SCANMGR_FIFOSINGLEBYTE_ADDRESS)); + } + } + + /* + * Check if the scan chain engine has completed the + * IO scan chain data shifting + */ + if (!scan_mgr_io_scan_chain_engine_is_idle(MAX_WAITING_DELAY_IO_SCAN_ENGINE)) { + ret = -EBUSY; + goto out_disable; + } + } /* if (io_scan_chain_data_residual) */ + + ret = 0; + +out_disable: + /* Disable IO Scan chain when configuration done*/ + val = readl(scanmgr + SCANMGR_EN_ADDRESS); + val &= ~(1 << io_scan_chain_id); + writel(val, scanmgr + SCANMGR_EN_ADDRESS); + + return ret; +} diff --git 
a/arch/arm/mach-socfpga/cyclone5-system-manager.c b/arch/arm/mach-socfpga/cyclone5-system-manager.c new file mode 100644 index 0000000000..7e86692c39 --- /dev/null +++ b/arch/arm/mach-socfpga/cyclone5-system-manager.c @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include + +void socfpga_sysmgr_pinmux_init(unsigned long *sys_mgr_init_table, int num) +{ + unsigned long offset = CONFIG_SYSMGR_PINMUXGRP_OFFSET; + const unsigned long *pval = sys_mgr_init_table; + unsigned long i; + + for (i = 0; i < num; i++) { + writel(*pval++, CYCLONE5_SYSMGR_ADDRESS + offset); + offset += sizeof(uint32_t); + } +} diff --git a/arch/arm/mach-socfpga/freeze-controller.c b/arch/arm/mach-socfpga/freeze-controller.c deleted file mode 100644 index 570bdeb735..0000000000 --- a/arch/arm/mach-socfpga/freeze-controller.c +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include -#include - -#define SYSMGR_FRZCTRL_LOOP_PARAM (1000) -#define SYSMGR_FRZCTRL_DELAY_LOOP_PARAM (10) - -/* - * sys_mgr_frzctrl_freeze_req - * Freeze HPS IOs - */ -int sys_mgr_frzctrl_freeze_req(enum frz_channel_id channel_id) -{ - uint32_t reg, val; - void *sm = (void *)CYCLONE5_SYSMGR_ADDRESS; - - /* select software FSM */ - writel(SYSMGR_FRZCTRL_SRC_VIO1_ENUM_SW, - (sm + SYSMGR_FRZCTRL_SRC_ADDRESS)); - - /* Freeze channel ID checking and base address */ - switch (channel_id) { - case FREEZE_CHANNEL_0: - case FREEZE_CHANNEL_1: - case FREEZE_CHANNEL_2: - reg = SYSMGR_FRZCTRL_VIOCTRL_ADDRESS + (channel_id << SYSMGR_FRZCTRL_VIOCTRL_SHIFT); - - /* - * Assert active low enrnsl, plniotri - * and niotri signals - */ - val = readl(sm + reg); - val &= ~(SYSMGR_FRZCTRL_VIOCTRL_SLEW_MASK - | SYSMGR_FRZCTRL_VIOCTRL_WKPULLUP_MASK - | SYSMGR_FRZCTRL_VIOCTRL_TRISTATE_MASK); - writel(val, sm + reg); - - /* - * Note: Delay for 20ns at min - * Assert active low bhniotri signal and de-assert - * active high csrdone - */ - val = readl(sm + reg); - val &= ~(SYSMGR_FRZCTRL_VIOCTRL_BUSHOLD_MASK | SYSMGR_FRZCTRL_VIOCTRL_CFG_MASK); - writel(val, sm + reg); - - break; - - case FREEZE_CHANNEL_3: - /* - * Assert active low enrnsl, plniotri and - * niotri signals - */ - val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val &= ~(SYSMGR_FRZCTRL_HIOCTRL_SLEW_MASK - | SYSMGR_FRZCTRL_HIOCTRL_WKPULLUP_MASK - | SYSMGR_FRZCTRL_HIOCTRL_TRISTATE_MASK); - writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - - /* - * Note: Delay for 40ns at min - * assert active low bhniotri & nfrzdrv signals, - * de-assert active high csrdone and assert - * active high frzreg and nfrzdrv signals - */ - val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val &= ~(SYSMGR_FRZCTRL_HIOCTRL_BUSHOLD_MASK - | SYSMGR_FRZCTRL_HIOCTRL_CFG_MASK); - val |= SYSMGR_FRZCTRL_HIOCTRL_REGRST_MASK - | SYSMGR_FRZCTRL_HIOCTRL_OCTRST_MASK; - writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - - /* - * Note: Delay for 40ns at min - * assert active high reinit signal and de-assert - * active high pllbiasen signals - */ - val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val &= ~(SYSMGR_FRZCTRL_HIOCTRL_OCT_CFGEN_CALSTART_MASK); - val |= SYSMGR_FRZCTRL_HIOCTRL_DLLRST_MASK; - writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - - break; - default: - return -EINVAL; - } - - return 0; -} - -/* - * sys_mgr_frzctrl_thaw_req - * Unfreeze/Thaw HPS IOs - */ -int sys_mgr_frzctrl_thaw_req(enum frz_channel_id channel_id) -{ - uint32_t reg, val; - void *sm = (void *)CYCLONE5_SYSMGR_ADDRESS; - - /* select software FSM */ - writel(SYSMGR_FRZCTRL_SRC_VIO1_ENUM_SW, sm + SYSMGR_FRZCTRL_SRC_ADDRESS); - - /* Freeze channel ID checking and base address */ - switch (channel_id) { - case FREEZE_CHANNEL_0: - case FREEZE_CHANNEL_1: - case FREEZE_CHANNEL_2: - reg = SYSMGR_FRZCTRL_VIOCTRL_ADDRESS + - (channel_id << SYSMGR_FRZCTRL_VIOCTRL_SHIFT); - - /* - * Assert active low bhniotri signal and - * de-assert active high csrdone - */ - val = readl(sm + reg); - val |= SYSMGR_FRZCTRL_VIOCTRL_BUSHOLD_MASK | - SYSMGR_FRZCTRL_VIOCTRL_CFG_MASK; - writel(val, sm + reg); - - /* - * Note: Delay for 20ns at min - * de-assert active low plniotri and niotri signals - */ - val = readl(sm + reg); - val |= SYSMGR_FRZCTRL_VIOCTRL_WKPULLUP_MASK | - SYSMGR_FRZCTRL_VIOCTRL_TRISTATE_MASK; - writel(val, sm + reg); - - /* - * Note: Delay for 20ns at min - * de-assert active low enrnsl signal - */ - val = readl(sm + reg); - val |= SYSMGR_FRZCTRL_VIOCTRL_SLEW_MASK; - writel(val, sm + 
reg); - - break; - - case FREEZE_CHANNEL_3: - /* de-assert active high reinit signal */ - val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val &= ~SYSMGR_FRZCTRL_HIOCTRL_DLLRST_MASK; - writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - - /* - * Note: Delay for 40ns at min - * assert active high pllbiasen signals - */ - val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val |= SYSMGR_FRZCTRL_HIOCTRL_OCT_CFGEN_CALSTART_MASK; - writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - - /* - * Delay 1000 intosc. intosc is based on eosc1 - * At 25MHz this would be 40us. Play safe, we have time... - */ - __udelay(1000); - - /* - * de-assert active low bhniotri signals, - * assert active high csrdone and nfrzdrv signal - */ - val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val |= SYSMGR_FRZCTRL_HIOCTRL_BUSHOLD_MASK | - SYSMGR_FRZCTRL_HIOCTRL_CFG_MASK; - val &= ~SYSMGR_FRZCTRL_HIOCTRL_OCTRST_MASK; - writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - - /* Delay 33 intosc */ - __udelay(100); - - /* de-assert active low plniotri and niotri signals */ - val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val |= SYSMGR_FRZCTRL_HIOCTRL_WKPULLUP_MASK | - SYSMGR_FRZCTRL_HIOCTRL_TRISTATE_MASK; - writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - - /* - * Note: Delay for 40ns at min - * de-assert active high frzreg signal - */ - val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val &= ~SYSMGR_FRZCTRL_HIOCTRL_REGRST_MASK; - writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - - /* - * Note: Delay for 40ns at min - * de-assert active low enrnsl signal - */ - val = readl(sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val |= SYSMGR_FRZCTRL_HIOCTRL_SLEW_MASK; - writel(val, sm + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - - break; - default: - return -EINVAL; - } - - return 0; -} diff --git a/arch/arm/mach-socfpga/include/mach/clock-manager.h b/arch/arm/mach-socfpga/include/mach/clock-manager.h deleted file mode 100644 index 45800de79a..0000000000 --- a/arch/arm/mach-socfpga/include/mach/clock-manager.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef _CLOCK_MANAGER_H_ -#define _CLOCK_MANAGER_H_ - -struct socfpga_cm_config { - /* main group */ - uint32_t main_vco_base; - uint32_t mpuclk; - uint32_t mainclk; - uint32_t dbgatclk; - uint32_t mainqspiclk; - uint32_t mainnandsdmmcclk; - uint32_t cfg2fuser0clk; - uint32_t maindiv; - uint32_t dbgdiv; - uint32_t tracediv; - uint32_t l4src; - - /* peripheral group */ - uint32_t peri_vco_base; - uint32_t emac0clk; - uint32_t emac1clk; - uint32_t perqspiclk; - uint32_t pernandsdmmcclk; - uint32_t perbaseclk; - uint32_t s2fuser1clk; - uint32_t perdiv; - uint32_t gpiodiv; - uint32_t persrc; - - /* sdram pll group */ - uint32_t sdram_vco_base; - uint32_t ddrdqsclk; - uint32_t ddr2xdqsclk; - uint32_t ddrdqclk; - uint32_t s2fuser2clk; - - /* altera group */ - uint32_t alteragrp_mpu; - uint32_t alteregrp_main; -}; - -void socfpga_cm_basic_init(const struct socfpga_cm_config *cfg); - -#define CLKMGR_CTRL_ADDRESS 0x0 -#define CLKMGR_BYPASS_ADDRESS 0x4 -#define CLKMGR_INTER_ADDRESS 0x8 -#define CLKMGR_INTREN_ADDRESS 0xc -#define CLKMGR_DBCTRL_ADDRESS 0x10 -#define CLKMGR_STAT_ADDRESS 0x14 -#define CLKMGR_MAINPLLGRP_ADDRESS 0x40 -#define CLKMGR_MAINPLLGRP_VCO_ADDRESS 0x40 -#define CLKMGR_MAINPLLGRP_MISC_ADDRESS 0x44 -#define CLKMGR_MAINPLLGRP_MPUCLK_ADDRESS 0x48 -#define CLKMGR_MAINPLLGRP_MAINCLK_ADDRESS 0x4c -#define CLKMGR_MAINPLLGRP_DBGATCLK_ADDRESS 0x50 -#define CLKMGR_MAINPLLGRP_MAINQSPICLK_ADDRESS 0x54 -#define CLKMGR_MAINPLLGRP_MAINNANDSDMMCCLK_ADDRESS 0x58 -#define CLKMGR_MAINPLLGRP_CFGS2FUSER0CLK_ADDRESS 0x5c -#define CLKMGR_MAINPLLGRP_EN_ADDRESS 0x60 -#define CLKMGR_MAINPLLGRP_MAINDIV_ADDRESS 0x64 -#define CLKMGR_MAINPLLGRP_DBGDIV_ADDRESS 0x68 -#define CLKMGR_MAINPLLGRP_TRACEDIV_ADDRESS 0x6c -#define CLKMGR_MAINPLLGRP_L4SRC_ADDRESS 0x70 -#define CLKMGR_PERPLLGRP_ADDRESS 0x80 -#define CLKMGR_PERPLLGRP_VCO_ADDRESS 0x80 -#define CLKMGR_PERPLLGRP_MISC_ADDRESS 0x84 -#define CLKMGR_PERPLLGRP_EMAC0CLK_ADDRESS 0x88 -#define CLKMGR_PERPLLGRP_EMAC1CLK_ADDRESS 0x8c -#define CLKMGR_PERPLLGRP_PERQSPICLK_ADDRESS 0x90 -#define CLKMGR_PERPLLGRP_PERNANDSDMMCCLK_ADDRESS 0x94 -#define CLKMGR_PERPLLGRP_PERBASECLK_ADDRESS 0x98 -#define CLKMGR_PERPLLGRP_S2FUSER1CLK_ADDRESS 0x9c -#define CLKMGR_PERPLLGRP_EN_ADDRESS 0xa0 -#define CLKMGR_PERPLLGRP_DIV_ADDRESS 0xa4 -#define CLKMGR_PERPLLGRP_GPIODIV_ADDRESS 0xa8 -#define CLKMGR_PERPLLGRP_SRC_ADDRESS 0xac -#define CLKMGR_SDRPLLGRP_ADDRESS 0xc0 -#define CLKMGR_SDRPLLGRP_VCO_ADDRESS 0xc0 -#define CLKMGR_SDRPLLGRP_CTRL_ADDRESS 0xc4 -#define CLKMGR_SDRPLLGRP_DDRDQSCLK_ADDRESS 0xc8 -#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_ADDRESS 0xcc -#define CLKMGR_SDRPLLGRP_DDRDQCLK_ADDRESS 0xd0 -#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_ADDRESS 0xd4 -#define CLKMGR_SDRPLLGRP_EN_ADDRESS 0xd8 -#define CLKMGR_ALTERAGRP_MPUCLK 0xe0 -#define CLKMGR_ALTERAGRP_MAINCLK 0xe4 - -#define CLKMGR_DBCTRL_STAYOSC1_MASK 0x00000001 -#define CLKMGR_MAINPLLGRP_EN_S2FUSER0CLK_MASK 0x00000200 -#define CLKMGR_MAINPLLGRP_EN_DBGTIMERCLK_MASK 0x00000080 -#define CLKMGR_MAINPLLGRP_EN_DBGTRACECLK_MASK 0x00000040 -#define CLKMGR_MAINPLLGRP_EN_DBGCLK_MASK 0x00000020 -#define CLKMGR_MAINPLLGRP_EN_DBGATCLK_MASK 0x00000010 -#define CLKMGR_MAINPLLGRP_EN_L4MPCLK_MASK 0x00000004 -#define CLKMGR_MAINPLLGRP_VCO_RESET_VALUE 0x8001000d -#define CLKMGR_PERPLLGRP_VCO_RESET_VALUE 0x8001000d -#define CLKMGR_SDRPLLGRP_VCO_RESET_VALUE 0x8001000d -#define CLKMGR_MAINPLLGRP_MAINDIV_L4MPCLK_SET(x) (((x) << 4) & 0x00000070) -#define CLKMGR_MAINPLLGRP_MAINDIV_L4SPCLK_SET(x) (((x) << 7) & 0x00000380) -#define 
CLKMGR_MAINPLLGRP_L4SRC_L4MP_SET(x) (((x) << 0) & 0x00000001) -#define CLKMGR_MAINPLLGRP_L4SRC_L4SP_SET(x) (((x) << 1) & 0x00000002) -#define CLKMGR_PERPLLGRP_SRC_QSPI_SET(x) (((x) << 4) & 0x00000030) -#define CLKMGR_PERPLLGRP_SRC_NAND_SET(x) (((x) << 2) & 0x0000000c) -#define CLKMGR_PERPLLGRP_SRC_SDMMC_SET(x) (((x) << 0) & 0x00000003) -#define CLKMGR_MAINPLLGRP_VCO_DENOM_SET(x) (((x) << 16) & 0x003f0000) -#define CLKMGR_MAINPLLGRP_VCO_NUMER_SET(x) (((x) << 3) & 0x0000fff8) -#define CLKMGR_MAINPLLGRP_VCO_PWRDN_SET(x) (((x) << 2) & 0x00000004) -#define CLKMGR_MAINPLLGRP_VCO_EN_SET(x) (((x) << 1) & 0x00000002) -#define CLKMGR_MAINPLLGRP_VCO_BGPWRDN_SET(x) (((x) << 0) & 0x00000001) -#define CLKMGR_PERPLLGRP_VCO_PSRC_SET(x) (((x) << 22) & 0x00c00000) -#define CLKMGR_PERPLLGRP_VCO_DENOM_SET(x) (((x) << 16) & 0x003f0000) -#define CLKMGR_PERPLLGRP_VCO_NUMER_SET(x) (((x) << 3) & 0x0000fff8) -#define CLKMGR_SDRPLLGRP_VCO_OUTRESET_SET(x) (((x) << 25) & 0x7e000000) -#define CLKMGR_SDRPLLGRP_VCO_OUTRESETALL_SET(x) (((x) << 24) & 0x01000000) -#define CLKMGR_SDRPLLGRP_VCO_SSRC_SET(x) (((x) << 22) & 0x00c00000) -#define CLKMGR_SDRPLLGRP_VCO_DENOM_SET(x) (((x) << 16) & 0x003f0000) -#define CLKMGR_SDRPLLGRP_VCO_NUMER_SET(x) (((x) << 3) & 0x0000fff8) -#define CLKMGR_MAINPLLGRP_MPUCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_MAINPLLGRP_MAINCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_MAINPLLGRP_DBGATCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_MAINPLLGRP_CFGS2FUSER0CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_PERPLLGRP_EMAC0CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_PERPLLGRP_EMAC1CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_MAINPLLGRP_MAINQSPICLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_MAINPLLGRP_MAINNANDSDMMCCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_PERPLLGRP_PERBASECLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_PERPLLGRP_S2FUSER1CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_PERPLLGRP_PERNANDSDMMCCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_SDRPLLGRP_DDRDQSCLK_PHASE_SET(x) (((x) << 9) & 0x00000e00) -#define CLKMGR_SDRPLLGRP_DDRDQSCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_PHASE_SET(x) (((x) << 9) & 0x00000e00) -#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_SDRPLLGRP_DDRDQCLK_PHASE_SET(x) (((x) << 9) & 0x00000e00) -#define CLKMGR_SDRPLLGRP_DDRDQCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_PHASE_SET(x) (((x) << 9) & 0x00000e00) -#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_MAINPLLGRP_DBGDIV_DBGCLK_SET(x) (((x) << 2) & 0x0000000c) -#define CLKMGR_MAINPLLGRP_DBGDIV_DBGATCLK_SET(x) (((x) << 0) & 0x00000003) -#define CLKMGR_MAINPLLGRP_TRACEDIV_TRACECLK_SET(x) (((x) << 0) & 0x00000007) -#define CLKMGR_MAINPLLGRP_MAINDIV_L3MPCLK_SET(x) (((x) << 0) & 0x00000003) -#define CLKMGR_MAINPLLGRP_MAINDIV_L3SPCLK_SET(x) (((x) << 2) & 0x0000000c) -#define CLKMGR_BYPASS_PERPLL_SET(x) (((x) << 3) & 0x00000008) -#define CLKMGR_BYPASS_SDRPLL_SET(x) (((x) << 1) & 0x00000002) -#define CLKMGR_BYPASS_MAINPLL_SET(x) (((x) << 0) & 0x00000001) -#define CLKMGR_PERPLLGRP_DIV_USBCLK_SET(x) (((x) << 0) & 0x00000007) -#define CLKMGR_PERPLLGRP_DIV_SPIMCLK_SET(x) (((x) << 3) & 0x00000038) -#define CLKMGR_PERPLLGRP_DIV_CAN0CLK_SET(x) (((x) << 6) & 0x000001c0) -#define CLKMGR_PERPLLGRP_DIV_CAN1CLK_SET(x) (((x) << 9) & 0x00000e00) -#define 
CLKMGR_INTER_SDRPLLLOCKED_MASK 0x00000100 -#define CLKMGR_INTER_PERPLLLOCKED_MASK 0x00000080 -#define CLKMGR_INTER_MAINPLLLOCKED_MASK 0x00000040 -#define CLKMGR_CTRL_SAFEMODE_MASK 0x00000001 -#define CLKMGR_CTRL_SAFEMODE_SET(x) (((x) << 0) & 0x00000001) -#define CLKMGR_SDRPLLGRP_VCO_OUTRESET_MASK 0x7e000000 -#define CLKMGR_SDRPLLGRP_VCO_OUTRESETALL_SET(x) (((x) << 24) & 0x01000000) -#define CLKMGR_PERPLLGRP_PERQSPICLK_CNT_SET(x) (((x) << 0) & 0x000001ff) -#define CLKMGR_PERPLLGRP_DIV_SPIMCLK_SET(x) (((x) << 3) & 0x00000038) -#define CLKMGR_PERPLLGRP_GPIODIV_GPIODBCLK_SET(x) (((x) << 0) & 0x00ffffff) -#define CLKMGR_BYPASS_PERPLLSRC_SET(x) (((x) << 4) & 0x00000010) -#define CLKMGR_BYPASS_SDRPLLSRC_SET(x) (((x) << 2) & 0x00000004) -#define CLKMGR_PERPLLGRP_SRC_RESET_VALUE 0x00000015 -#define CLKMGR_MAINPLLGRP_L4SRC_RESET_VALUE 0x00000000 -#define CLKMGR_MAINPLLGRP_VCO_REGEXTSEL_MASK 0x80000000 -#define CLKMGR_PERPLLGRP_VCO_REGEXTSEL_MASK 0x80000000 -#define CLKMGR_SDRPLLGRP_VCO_REGEXTSEL_MASK 0x80000000 -#define CLKMGR_SDRPLLGRP_DDRDQSCLK_PHASE_MASK 0x001ffe00 -#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_PHASE_MASK 0x001ffe00 -#define CLKMGR_SDRPLLGRP_DDRDQCLK_PHASE_MASK 0x001ffe00 -#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_PHASE_MASK 0x001ffe00 -#define CLKMGR_MAINPLLGRP_VCO_OUTRESETALL_MASK 0x01000000 -#define CLKMGR_PERPLLGRP_VCO_OUTRESETALL_MASK 0x01000000 -#define CLKMGR_PERPLLGRP_EN_NANDCLK_MASK 0x00000400 -#define CLKMGR_SDRPLLGRP_DDRDQSCLK_CNT_MASK 0x000001ff -#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_CNT_MASK 0x000001ff -#define CLKMGR_SDRPLLGRP_DDRDQCLK_CNT_MASK 0x000001ff -#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_CNT_MASK 0x000001ff - -#define CLEAR_BGP_EN_PWRDN \ - (CLKMGR_MAINPLLGRP_VCO_PWRDN_SET(0)| \ - CLKMGR_MAINPLLGRP_VCO_EN_SET(0)| \ - CLKMGR_MAINPLLGRP_VCO_BGPWRDN_SET(0)) - -#endif /* _CLOCK_MANAGER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/cyclone5-clock-manager.h b/arch/arm/mach-socfpga/include/mach/cyclone5-clock-manager.h new file mode 100644 index 0000000000..797aa5d3cf --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-clock-manager.h @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _CLOCK_MANAGER_CYCLONE5_H_ +#define _CLOCK_MANAGER_CYCLONE5_H_ + +struct socfpga_cm_config { + /* main group */ + uint32_t main_vco_base; + uint32_t mpuclk; + uint32_t mainclk; + uint32_t dbgatclk; + uint32_t mainqspiclk; + uint32_t mainnandsdmmcclk; + uint32_t cfg2fuser0clk; + uint32_t maindiv; + uint32_t dbgdiv; + uint32_t tracediv; + uint32_t l4src; + + /* peripheral group */ + uint32_t peri_vco_base; + uint32_t emac0clk; + uint32_t emac1clk; + uint32_t perqspiclk; + uint32_t pernandsdmmcclk; + uint32_t perbaseclk; + uint32_t s2fuser1clk; + uint32_t perdiv; + uint32_t gpiodiv; + uint32_t persrc; + + /* sdram pll group */ + uint32_t sdram_vco_base; + uint32_t ddrdqsclk; + uint32_t ddr2xdqsclk; + uint32_t ddrdqclk; + uint32_t s2fuser2clk; + + /* altera group */ + uint32_t alteragrp_mpu; + uint32_t alteregrp_main; +}; + +void socfpga_cm_basic_init(const struct socfpga_cm_config *cfg); + +#define CLKMGR_CTRL_ADDRESS 0x0 +#define CLKMGR_BYPASS_ADDRESS 0x4 +#define CLKMGR_INTER_ADDRESS 0x8 +#define CLKMGR_INTREN_ADDRESS 0xc +#define CLKMGR_DBCTRL_ADDRESS 0x10 +#define CLKMGR_STAT_ADDRESS 0x14 +#define CLKMGR_MAINPLLGRP_ADDRESS 0x40 +#define CLKMGR_MAINPLLGRP_VCO_ADDRESS 0x40 +#define CLKMGR_MAINPLLGRP_MISC_ADDRESS 0x44 +#define CLKMGR_MAINPLLGRP_MPUCLK_ADDRESS 0x48 +#define CLKMGR_MAINPLLGRP_MAINCLK_ADDRESS 0x4c +#define CLKMGR_MAINPLLGRP_DBGATCLK_ADDRESS 0x50 +#define CLKMGR_MAINPLLGRP_MAINQSPICLK_ADDRESS 0x54 +#define CLKMGR_MAINPLLGRP_MAINNANDSDMMCCLK_ADDRESS 0x58 +#define CLKMGR_MAINPLLGRP_CFGS2FUSER0CLK_ADDRESS 0x5c +#define CLKMGR_MAINPLLGRP_EN_ADDRESS 0x60 +#define CLKMGR_MAINPLLGRP_MAINDIV_ADDRESS 0x64 +#define CLKMGR_MAINPLLGRP_DBGDIV_ADDRESS 0x68 +#define CLKMGR_MAINPLLGRP_TRACEDIV_ADDRESS 0x6c +#define CLKMGR_MAINPLLGRP_L4SRC_ADDRESS 0x70 +#define CLKMGR_PERPLLGRP_ADDRESS 0x80 +#define CLKMGR_PERPLLGRP_VCO_ADDRESS 0x80 +#define CLKMGR_PERPLLGRP_MISC_ADDRESS 0x84 +#define CLKMGR_PERPLLGRP_EMAC0CLK_ADDRESS 0x88 +#define CLKMGR_PERPLLGRP_EMAC1CLK_ADDRESS 0x8c +#define CLKMGR_PERPLLGRP_PERQSPICLK_ADDRESS 0x90 +#define CLKMGR_PERPLLGRP_PERNANDSDMMCCLK_ADDRESS 0x94 +#define CLKMGR_PERPLLGRP_PERBASECLK_ADDRESS 0x98 +#define CLKMGR_PERPLLGRP_S2FUSER1CLK_ADDRESS 0x9c +#define CLKMGR_PERPLLGRP_EN_ADDRESS 0xa0 +#define CLKMGR_PERPLLGRP_DIV_ADDRESS 0xa4 +#define CLKMGR_PERPLLGRP_GPIODIV_ADDRESS 0xa8 +#define CLKMGR_PERPLLGRP_SRC_ADDRESS 0xac +#define CLKMGR_SDRPLLGRP_ADDRESS 0xc0 +#define CLKMGR_SDRPLLGRP_VCO_ADDRESS 0xc0 +#define CLKMGR_SDRPLLGRP_CTRL_ADDRESS 0xc4 +#define CLKMGR_SDRPLLGRP_DDRDQSCLK_ADDRESS 0xc8 +#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_ADDRESS 0xcc +#define CLKMGR_SDRPLLGRP_DDRDQCLK_ADDRESS 0xd0 +#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_ADDRESS 0xd4 +#define CLKMGR_SDRPLLGRP_EN_ADDRESS 0xd8 +#define CLKMGR_ALTERAGRP_MPUCLK 0xe0 +#define CLKMGR_ALTERAGRP_MAINCLK 0xe4 + +#define CLKMGR_DBCTRL_STAYOSC1_MASK 0x00000001 +#define CLKMGR_MAINPLLGRP_EN_S2FUSER0CLK_MASK 0x00000200 +#define CLKMGR_MAINPLLGRP_EN_DBGTIMERCLK_MASK 0x00000080 +#define CLKMGR_MAINPLLGRP_EN_DBGTRACECLK_MASK 0x00000040 +#define CLKMGR_MAINPLLGRP_EN_DBGCLK_MASK 0x00000020 +#define CLKMGR_MAINPLLGRP_EN_DBGATCLK_MASK 0x00000010 +#define CLKMGR_MAINPLLGRP_EN_L4MPCLK_MASK 0x00000004 +#define CLKMGR_MAINPLLGRP_VCO_RESET_VALUE 0x8001000d +#define CLKMGR_PERPLLGRP_VCO_RESET_VALUE 0x8001000d +#define CLKMGR_SDRPLLGRP_VCO_RESET_VALUE 0x8001000d +#define CLKMGR_MAINPLLGRP_MAINDIV_L4MPCLK_SET(x) (((x) << 4) & 0x00000070) +#define CLKMGR_MAINPLLGRP_MAINDIV_L4SPCLK_SET(x) (((x) << 7) & 0x00000380) +#define 
CLKMGR_MAINPLLGRP_L4SRC_L4MP_SET(x) (((x) << 0) & 0x00000001) +#define CLKMGR_MAINPLLGRP_L4SRC_L4SP_SET(x) (((x) << 1) & 0x00000002) +#define CLKMGR_PERPLLGRP_SRC_QSPI_SET(x) (((x) << 4) & 0x00000030) +#define CLKMGR_PERPLLGRP_SRC_NAND_SET(x) (((x) << 2) & 0x0000000c) +#define CLKMGR_PERPLLGRP_SRC_SDMMC_SET(x) (((x) << 0) & 0x00000003) +#define CLKMGR_MAINPLLGRP_VCO_DENOM_SET(x) (((x) << 16) & 0x003f0000) +#define CLKMGR_MAINPLLGRP_VCO_NUMER_SET(x) (((x) << 3) & 0x0000fff8) +#define CLKMGR_MAINPLLGRP_VCO_PWRDN_SET(x) (((x) << 2) & 0x00000004) +#define CLKMGR_MAINPLLGRP_VCO_EN_SET(x) (((x) << 1) & 0x00000002) +#define CLKMGR_MAINPLLGRP_VCO_BGPWRDN_SET(x) (((x) << 0) & 0x00000001) +#define CLKMGR_PERPLLGRP_VCO_PSRC_SET(x) (((x) << 22) & 0x00c00000) +#define CLKMGR_PERPLLGRP_VCO_DENOM_SET(x) (((x) << 16) & 0x003f0000) +#define CLKMGR_PERPLLGRP_VCO_NUMER_SET(x) (((x) << 3) & 0x0000fff8) +#define CLKMGR_SDRPLLGRP_VCO_OUTRESET_SET(x) (((x) << 25) & 0x7e000000) +#define CLKMGR_SDRPLLGRP_VCO_OUTRESETALL_SET(x) (((x) << 24) & 0x01000000) +#define CLKMGR_SDRPLLGRP_VCO_SSRC_SET(x) (((x) << 22) & 0x00c00000) +#define CLKMGR_SDRPLLGRP_VCO_DENOM_SET(x) (((x) << 16) & 0x003f0000) +#define CLKMGR_SDRPLLGRP_VCO_NUMER_SET(x) (((x) << 3) & 0x0000fff8) +#define CLKMGR_MAINPLLGRP_MPUCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_MAINPLLGRP_MAINCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_MAINPLLGRP_DBGATCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_MAINPLLGRP_CFGS2FUSER0CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_PERPLLGRP_EMAC0CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_PERPLLGRP_EMAC1CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_MAINPLLGRP_MAINQSPICLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_MAINPLLGRP_MAINNANDSDMMCCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_PERPLLGRP_PERBASECLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_PERPLLGRP_S2FUSER1CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_PERPLLGRP_PERNANDSDMMCCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_SDRPLLGRP_DDRDQSCLK_PHASE_SET(x) (((x) << 9) & 0x00000e00) +#define CLKMGR_SDRPLLGRP_DDRDQSCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_PHASE_SET(x) (((x) << 9) & 0x00000e00) +#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_SDRPLLGRP_DDRDQCLK_PHASE_SET(x) (((x) << 9) & 0x00000e00) +#define CLKMGR_SDRPLLGRP_DDRDQCLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_PHASE_SET(x) (((x) << 9) & 0x00000e00) +#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_MAINPLLGRP_DBGDIV_DBGCLK_SET(x) (((x) << 2) & 0x0000000c) +#define CLKMGR_MAINPLLGRP_DBGDIV_DBGATCLK_SET(x) (((x) << 0) & 0x00000003) +#define CLKMGR_MAINPLLGRP_TRACEDIV_TRACECLK_SET(x) (((x) << 0) & 0x00000007) +#define CLKMGR_MAINPLLGRP_MAINDIV_L3MPCLK_SET(x) (((x) << 0) & 0x00000003) +#define CLKMGR_MAINPLLGRP_MAINDIV_L3SPCLK_SET(x) (((x) << 2) & 0x0000000c) +#define CLKMGR_BYPASS_PERPLL_SET(x) (((x) << 3) & 0x00000008) +#define CLKMGR_BYPASS_SDRPLL_SET(x) (((x) << 1) & 0x00000002) +#define CLKMGR_BYPASS_MAINPLL_SET(x) (((x) << 0) & 0x00000001) +#define CLKMGR_PERPLLGRP_DIV_USBCLK_SET(x) (((x) << 0) & 0x00000007) +#define CLKMGR_PERPLLGRP_DIV_SPIMCLK_SET(x) (((x) << 3) & 0x00000038) +#define CLKMGR_PERPLLGRP_DIV_CAN0CLK_SET(x) (((x) << 6) & 0x000001c0) +#define CLKMGR_PERPLLGRP_DIV_CAN1CLK_SET(x) (((x) << 9) & 0x00000e00) +#define 
CLKMGR_INTER_SDRPLLLOCKED_MASK 0x00000100 +#define CLKMGR_INTER_PERPLLLOCKED_MASK 0x00000080 +#define CLKMGR_INTER_MAINPLLLOCKED_MASK 0x00000040 +#define CLKMGR_CTRL_SAFEMODE_MASK 0x00000001 +#define CLKMGR_CTRL_SAFEMODE_SET(x) (((x) << 0) & 0x00000001) +#define CLKMGR_SDRPLLGRP_VCO_OUTRESET_MASK 0x7e000000 +#define CLKMGR_SDRPLLGRP_VCO_OUTRESETALL_SET(x) (((x) << 24) & 0x01000000) +#define CLKMGR_PERPLLGRP_PERQSPICLK_CNT_SET(x) (((x) << 0) & 0x000001ff) +#define CLKMGR_PERPLLGRP_DIV_SPIMCLK_SET(x) (((x) << 3) & 0x00000038) +#define CLKMGR_PERPLLGRP_GPIODIV_GPIODBCLK_SET(x) (((x) << 0) & 0x00ffffff) +#define CLKMGR_BYPASS_PERPLLSRC_SET(x) (((x) << 4) & 0x00000010) +#define CLKMGR_BYPASS_SDRPLLSRC_SET(x) (((x) << 2) & 0x00000004) +#define CLKMGR_PERPLLGRP_SRC_RESET_VALUE 0x00000015 +#define CLKMGR_MAINPLLGRP_L4SRC_RESET_VALUE 0x00000000 +#define CLKMGR_MAINPLLGRP_VCO_REGEXTSEL_MASK 0x80000000 +#define CLKMGR_PERPLLGRP_VCO_REGEXTSEL_MASK 0x80000000 +#define CLKMGR_SDRPLLGRP_VCO_REGEXTSEL_MASK 0x80000000 +#define CLKMGR_SDRPLLGRP_DDRDQSCLK_PHASE_MASK 0x001ffe00 +#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_PHASE_MASK 0x001ffe00 +#define CLKMGR_SDRPLLGRP_DDRDQCLK_PHASE_MASK 0x001ffe00 +#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_PHASE_MASK 0x001ffe00 +#define CLKMGR_MAINPLLGRP_VCO_OUTRESETALL_MASK 0x01000000 +#define CLKMGR_PERPLLGRP_VCO_OUTRESETALL_MASK 0x01000000 +#define CLKMGR_PERPLLGRP_EN_NANDCLK_MASK 0x00000400 +#define CLKMGR_SDRPLLGRP_DDRDQSCLK_CNT_MASK 0x000001ff +#define CLKMGR_SDRPLLGRP_DDR2XDQSCLK_CNT_MASK 0x000001ff +#define CLKMGR_SDRPLLGRP_DDRDQCLK_CNT_MASK 0x000001ff +#define CLKMGR_SDRPLLGRP_S2FUSER2CLK_CNT_MASK 0x000001ff + +#define CLEAR_BGP_EN_PWRDN \ + (CLKMGR_MAINPLLGRP_VCO_PWRDN_SET(0)| \ + CLKMGR_MAINPLLGRP_VCO_EN_SET(0)| \ + CLKMGR_MAINPLLGRP_VCO_BGPWRDN_SET(0)) + +#endif diff --git a/arch/arm/mach-socfpga/include/mach/cyclone5-freeze-controller.h b/arch/arm/mach-socfpga/include/mach/cyclone5-freeze-controller.h new file mode 100644 index 0000000000..93ce5152ed --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-freeze-controller.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _CYCLONE5_FREEZE_CONTROLLER_H_ +#define _CYCLONE5_FREEZE_CONTROLLER_H_ + +#include + +#define SYSMGR_FRZCTRL_ADDRESS 0x40 +#define SYSMGR_FRZCTRL_VIOCTRL_ADDRESS 0x40 +#define SYSMGR_FRZCTRL_HIOCTRL_ADDRESS 0x50 +#define SYSMGR_FRZCTRL_SRC_ADDRESS 0x54 +#define SYSMGR_FRZCTRL_HWCTRL_ADDRESS 0x58 + +#define SYSMGR_FRZCTRL_SRC_VIO1_ENUM_SW 0x0 +#define SYSMGR_FRZCTRL_SRC_VIO1_ENUM_HW 0x1 +#define SYSMGR_FRZCTRL_VIOCTRL_SLEW_MASK 0x00000010 +#define SYSMGR_FRZCTRL_VIOCTRL_WKPULLUP_MASK 0x00000008 +#define SYSMGR_FRZCTRL_VIOCTRL_TRISTATE_MASK 0x00000004 +#define SYSMGR_FRZCTRL_VIOCTRL_BUSHOLD_MASK 0x00000002 +#define SYSMGR_FRZCTRL_VIOCTRL_CFG_MASK 0x00000001 +#define SYSMGR_FRZCTRL_HIOCTRL_SLEW_MASK 0x00000010 +#define SYSMGR_FRZCTRL_HIOCTRL_WKPULLUP_MASK 0x00000008 +#define SYSMGR_FRZCTRL_HIOCTRL_TRISTATE_MASK 0x00000004 +#define SYSMGR_FRZCTRL_HIOCTRL_BUSHOLD_MASK 0x00000002 +#define SYSMGR_FRZCTRL_HIOCTRL_CFG_MASK 0x00000001 +#define SYSMGR_FRZCTRL_HIOCTRL_REGRST_MASK 0x00000080 +#define SYSMGR_FRZCTRL_HIOCTRL_OCTRST_MASK 0x00000040 +#define SYSMGR_FRZCTRL_HIOCTRL_OCT_CFGEN_CALSTART_MASK 0x00000100 +#define SYSMGR_FRZCTRL_HIOCTRL_DLLRST_MASK 0x00000020 +#define SYSMGR_FRZCTRL_HWCTRL_VIO1REQ_MASK 0x00000001 +#define SYSMGR_FRZCTRL_HWCTRL_VIO1STATE_ENUM_FROZEN 0x2 +#define SYSMGR_FRZCTRL_HWCTRL_VIO1STATE_ENUM_THAWED 0x1 + +#define SYSMGR_FRZCTRL_HWCTRL_VIO1STATE_GET(x) (((x) & 0x00000006) >> 1) + +/* + * FreezeChannelSelect + * Definition of enum for freeze channel + */ +enum frz_channel_id { + FREEZE_CHANNEL_0 = 0, /* EMAC_IO & MIXED2_IO */ + FREEZE_CHANNEL_1, /* MIXED1_IO and FLASH_IO */ + FREEZE_CHANNEL_2, /* General IO */ + FREEZE_CHANNEL_3, /* DDR IO */ +}; + +/* Shift count needed to calculte for FRZCTRL VIO control register offset */ +#define SYSMGR_FRZCTRL_VIOCTRL_SHIFT (2) + +/* + * Freeze HPS IOs + * + * FreezeChannelSelect [in] - Freeze channel ID + * FreezeControllerFSMSelect [in] - To use hardware or software state machine + * If FREEZE_CONTROLLER_FSM_HW is selected for FSM select then the + * the freeze channel id is input is ignored. It is default to channel 1 + */ +int sys_mgr_frzctrl_freeze_req(enum frz_channel_id channel_id); + +/* + * Unfreeze/Thaw HPS IOs + * + * FreezeChannelSelect [in] - Freeze channel ID + * FreezeControllerFSMSelect [in] - To use hardware or software state machine + * If FREEZE_CONTROLLER_FSM_HW is selected for FSM select then the + * the freeze channel id is input is ignored. 
It is default to channel 1 + */ +int sys_mgr_frzctrl_thaw_req(enum frz_channel_id channel_id); + +#endif /* _FREEZE_CONTROLLER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/cyclone5-regs.h b/arch/arm/mach-socfpga/include/mach/cyclone5-regs.h new file mode 100644 index 0000000000..e88daf7189 --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-regs.h @@ -0,0 +1,22 @@ +#ifndef __MACH_SOCFPGA_REGS_H +#define __MACH_SOCFPGA_REGS_H + +#define CYCLONE5_SDMMC_ADDRESS 0xff704000 +#define CYCLONE5_QSPI_CTRL_ADDRESS 0xff705000 +#define CYCLONE5_QSPI_DATA_ADDRESS 0xffa00000 +#define CYCLONE5_FPGAMGRREGS_ADDRESS 0xff706000 +#define CYCLONE5_GPIO0_BASE 0xff708000 +#define CYCLONE5_GPIO1_BASE 0xff709000 +#define CYCLONE5_GPIO2_BASE 0xff70A000 +#define CYCLONE5_L3REGS_ADDRESS 0xff800000 +#define CYCLONE5_FPGAMGRDATA_ADDRESS 0xffb90000 +#define CYCLONE5_UART0_ADDRESS 0xffc02000 +#define CYCLONE5_UART1_ADDRESS 0xffc03000 +#define CYCLONE5_SDR_ADDRESS 0xffc20000 +#define CYCLONE5_CLKMGR_ADDRESS 0xffd04000 +#define CYCLONE5_RSTMGR_ADDRESS 0xffd05000 +#define CYCLONE5_SYSMGR_ADDRESS 0xffd08000 +#define CYCLONE5_SCANMGR_ADDRESS 0xfff02000 +#define CYCLONE5_SMP_TWD_ADDRESS 0xfffec600 + +#endif /* __MACH_SOCFPGA_REGS_H */ diff --git a/arch/arm/mach-socfpga/include/mach/cyclone5-reset-manager.h b/arch/arm/mach-socfpga/include/mach/cyclone5-reset-manager.h new file mode 100644 index 0000000000..899401ce3c --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-reset-manager.h @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
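The freeze controller and the scan manager are meant to be used together: IOs are frozen while the IOCSR scan chains are reloaded, then thawed. A hedged sketch of one plausible sequence; the helper name, the choice to walk all four channels, and the call to the cyclone5_program_iocsr() sketch shown earlier are illustrative assumptions, not code from this patch:

static int cyclone5_reprogram_io(const struct socfpga_io_config *io_cfg)
{
	enum frz_channel_id ch;
	int ret;

	/* Freeze all HPS IO banks before touching the IOCSR scan chains */
	for (ch = FREEZE_CHANNEL_0; ch <= FREEZE_CHANNEL_3; ch++) {
		ret = sys_mgr_frzctrl_freeze_req(ch);
		if (ret)
			return ret;
	}

	/* Shift the new IO configuration in while the pins are frozen */
	ret = cyclone5_program_iocsr(io_cfg);
	if (ret)
		return ret;

	/* Thaw again so the pins follow the freshly loaded configuration */
	for (ch = FREEZE_CHANNEL_0; ch <= FREEZE_CHANNEL_3; ch++) {
		ret = sys_mgr_frzctrl_thaw_req(ch);
		if (ret)
			return ret;
	}

	return 0;
}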
+ */ + +#ifndef _RESET_MANAGER_H_ +#define _RESET_MANAGER_H_ + +#define RESET_MGR_STATUS_OFS 0x0 +#define RESET_MGR_CTRL_OFS 0x4 +#define RESET_MGR_COUNTS_OFS 0x8 +#define RESET_MGR_MPU_MOD_RESET_OFS 0x10 +#define RESET_MGR_PER_MOD_RESET_OFS 0x14 +#define RESET_MGR_PER2_MOD_RESET_OFS 0x18 +#define RESET_MGR_BRG_MOD_RESET_OFS 0x1c + +#define RSTMGR_CTRL_SWWARMRSTREQ_LSB 1 +#define RSTMGR_PERMODRST_OSC1TIMER0_LSB 8 + +#define RSTMGR_PERMODRST_EMAC0_LSB 0 +#define RSTMGR_PERMODRST_EMAC1_LSB 1 +#define RSTMGR_PERMODRST_L4WD0_LSB 6 +#define RSTMGR_PERMODRST_SDR_LSB 29 +#define RSTMGR_BRGMODRST_HPS2FPGA_MASK 0x00000001 +#define RSTMGR_BRGMODRST_LWHPS2FPGA_MASK 0x00000002 +#define RSTMGR_BRGMODRST_FPGA2HPS_MASK 0x00000004 + +/* Warm Reset mask */ +#define RSTMGR_STAT_L4WD1RST_MASK 0x00008000 +#define RSTMGR_STAT_L4WD0RST_MASK 0x00004000 +#define RSTMGR_STAT_MPUWD1RST_MASK 0x00002000 +#define RSTMGR_STAT_MPUWD0RST_MASK 0x00001000 +#define RSTMGR_STAT_SWWARMRST_MASK 0x00000400 +#define RSTMGR_STAT_FPGAWARMRST_MASK 0x00000200 +#define RSTMGR_STAT_NRSTPINRST_MASK 0x00000100 +#define RSTMGR_WARMRST_MASK 0x0000f700 + +#define RSTMGR_CTRL_SDRSELFREFEN_MASK 0x00000010 +#define RSTMGR_CTRL_FPGAHSEN_MASK 0x00010000 +#define RSTMGR_CTRL_ETRSTALLEN_MASK 0x00100000 + +#define RSTMGR_PERMODRST_EMAC0 (1 << 0) +#define RSTMGR_PERMODRST_EMAC1 (1 << 1) +#define RSTMGR_PERMODRST_USB0 (1 << 2) +#define RSTMGR_PERMODRST_USB1 (1 << 3) +#define RSTMGR_PERMODRST_NAND (1 << 4) +#define RSTMGR_PERMODRST_QSPI (1 << 5) +#define RSTMGR_PERMODRST_L4WD0 (1 << 6) +#define RSTMGR_PERMODRST_L4WD1 (1 << 7) +#define RSTMGR_PERMODRST_OSC1TIMER1 (1 << 9) +#define RSTMGR_PERMODRST_SPTIMER0 (1 << 10) +#define RSTMGR_PERMODRST_SPTIMER1 (1 << 11) +#define RSTMGR_PERMODRST_I2C0 (1 << 12) +#define RSTMGR_PERMODRST_I2C1 (1 << 13) +#define RSTMGR_PERMODRST_I2C2 (1 << 14) +#define RSTMGR_PERMODRST_I2C3 (1 << 15) +#define RSTMGR_PERMODRST_UART0 (1 << 16) +#define RSTMGR_PERMODRST_UART1 (1 << 17) +#define RSTMGR_PERMODRST_SPIM0 (1 << 18) +#define RSTMGR_PERMODRST_SPIM1 (1 << 19) +#define RSTMGR_PERMODRST_SPIS0 (1 << 20) +#define RSTMGR_PERMODRST_SPIS1 (1 << 21) +#define RSTMGR_PERMODRST_SDMMC (1 << 22) +#define RSTMGR_PERMODRST_CAN0 (1 << 23) +#define RSTMGR_PERMODRST_CAN1 (1 << 24) +#define RSTMGR_PERMODRST_GPIO0 (1 << 25) +#define RSTMGR_PERMODRST_GPIO1 (1 << 26) +#define RSTMGR_PERMODRST_GPIO2 (1 << 27) +#define RSTMGR_PERMODRST_DMA (1 << 28) +#define RSTMGR_PERMODRST_SDR (1 << 29) + +#define RSTMGR_PER2MODRST_DMAIF0 (1 << 0) +#define RSTMGR_PER2MODRST_DMAIF1 (1 << 1) +#define RSTMGR_PER2MODRST_DMAIF2 (1 << 2) +#define RSTMGR_PER2MODRST_DMAIF3 (1 << 3) +#define RSTMGR_PER2MODRST_DMAIF4 (1 << 4) +#define RSTMGR_PER2MODRST_DMAIF5 (1 << 5) +#define RSTMGR_PER2MODRST_DMAIF6 (1 << 6) +#define RSTMGR_PER2MODRST_DMAIF7 (1 << 7) + +#endif /* _RESET_MANAGER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/cyclone5-scan-manager.h b/arch/arm/mach-socfpga/include/mach/cyclone5-scan-manager.h new file mode 100644 index 0000000000..df720a7e08 --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-scan-manager.h @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
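The reset manager definitions above follow the usual one-bit-per-module layout: each RSTMGR_PERMODRST_* bit holds the corresponding peripheral in reset while set. As an illustration only (the function below is a sketch, not part of this patch), releasing a module is a read-modify-write of the permodrst register at RESET_MGR_PER_MOD_RESET_OFS:

static void socfpga_reset_deassert_emac0(void)
{
	void __iomem *rstmgr = (void __iomem *)CYCLONE5_RSTMGR_ADDRESS;
	uint32_t val;

	/* Clear the module bit in permodrst to take EMAC0 out of reset */
	val = readl(rstmgr + RESET_MGR_PER_MOD_RESET_OFS);
	val &= ~RSTMGR_PERMODRST_EMAC0;
	writel(val, rstmgr + RESET_MGR_PER_MOD_RESET_OFS);
}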
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _SCAN_MANAGER_H_ +#define _SCAN_MANAGER_H_ + +#include +#include + +/*********************************************************** + * * + * Cyclone5 specific stuff. Get rid of this. * + * * + ***********************************************************/ +#define CONFIG_HPS_IOCSR_SCANCHAIN0_LENGTH (764) +#define CONFIG_HPS_IOCSR_SCANCHAIN1_LENGTH (1719) +#define CONFIG_HPS_IOCSR_SCANCHAIN2_LENGTH (955) +#define CONFIG_HPS_IOCSR_SCANCHAIN3_LENGTH (16766) + +typedef unsigned long Scan_mgr_entry_t; + +#define NUM_OF_CHAINS (4) +#define SHIFT_COUNT_32BIT (5) +#define MASK_COUNT_32BIT (0x1F) + +#define SCANMGR_STAT_ADDRESS 0x0 +#define SCANMGR_EN_ADDRESS 0x4 +#define SCANMGR_FIFOSINGLEBYTE_ADDRESS 0x10 +#define SCANMGR_FIFODOUBLEBYTE_ADDRESS 0x14 +#define SCANMGR_FIFOQUADBYTE_ADDRESS 0x1c + +#define SCANMGR_STAT_ACTIVE_GET(x) (((x) & 0x80000000) >> 31) +#define SCANMGR_STAT_WFIFOCNT_GET(x) (((x) & 0x70000000) >> 28) + +enum io_scan_chain { + IO_SCAN_CHAIN_0 = 0, /* EMAC_IO and MIXED2_IO */ + IO_SCAN_CHAIN_1, /* MIXED1_IO and FLASH_IO */ + IO_SCAN_CHAIN_2, /* General IO */ + IO_SCAN_CHAIN_3, /* DDR IO */ + IO_SCAN_CHAIN_UNDEFINED +}; + +#define IO_SCAN_CHAIN_NUM NUM_OF_CHAINS +/* Maximum number of IO scan chains */ + +#define IO_SCAN_CHAIN_128BIT_SHIFT (7) +/* + * Shift count to get number of IO scan chain data in granularity + * of 128-bit ( N / 128 ) + */ + +#define IO_SCAN_CHAIN_128BIT_MASK (0x7F) +/* + * Mask to get residual IO scan chain data in + * granularity of 128-bit ( N mod 128 ) + */ + +#define IO_SCAN_CHAIN_32BIT_SHIFT SHIFT_COUNT_32BIT +/* + * Shift count to get number of IO scan chain + * data in granularity of 32-bit ( N / 32 ) + */ + +#define IO_SCAN_CHAIN_32BIT_MASK MASK_COUNT_32BIT +/* + * Mask to get residual IO scan chain data in + * granularity of 32-bit ( N mod 32 ) + */ + +#define IO_SCAN_CHAIN_BYTE_MASK (0xFF) +/* Byte mask */ + +#define IO_SCAN_CHAIN_PAYLOAD_24BIT (24) +/* 24-bits (3 bytes) IO scan chain payload definition */ + +#define TDI_TDO_MAX_PAYLOAD (127) +/* + * Maximum length of TDI_TDO packet payload is 128 bits, + * represented by (length - 1) in TDI_TDO header + */ + +#define TDI_TDO_HEADER_FIRST_BYTE (0x80) +/* TDI_TDO packet header for IO scan chain program */ + +#define TDI_TDO_HEADER_SECOND_BYTE_SHIFT (8) +/* Position of second command byte for TDI_TDO packet */ + +#define MAX_WAITING_DELAY_IO_SCAN_ENGINE (100) +/* + * Maximum polling loop to wait for IO scan chain engine + * becomes idle to prevent infinite loop + */ + +/* + * scan_mgr_io_scan_chain_prg + * + * Program HPS IO Scan Chain + * + * io_scan_chain_id @ref IOScanChainSelect [in] - IO scan chain ID with + * range of enumIOScanChainSelect * + * io_scan_chain_len_in_bits uint32_t [in] - IO scan chain length in bits + * *iocsr_scan_chain @ref Scan_mgr_entry_t [in] - IO scan chain table + */ +int scan_mgr_io_scan_chain_prg(enum io_scan_chain io_scan_chain_id, + uint32_t io_scan_chain_len_in_bits, + const unsigned long *iocsr_scan_chain); + +struct socfpga_io_config { + unsigned long *pinmux; + unsigned int num_pin; + const unsigned long *iocsr_emac_mixed2; + const unsigned long *iocsr_mixed1_flash; + const unsigned long 
*iocsr_general; + const unsigned long *iocsr_ddr; +}; + +#endif /* _SCAN_MANAGER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/cyclone5-sdram-config.h b/arch/arm/mach-socfpga/include/mach/cyclone5-sdram-config.h new file mode 100644 index 0000000000..a19a837994 --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-sdram-config.h @@ -0,0 +1,161 @@ +#ifndef __MACH_SDRAM_CONFIG_H +#define __MACH_SDRAM_CONFIG_H + +#include +#include +#include + +static inline void sdram_write(unsigned register_offset, unsigned val) +{ + debug("0x%08x Data 0x%08x\n", + (CYCLONE5_SDR_ADDRESS + register_offset), val); + /* Write to register */ + writel(val, (CYCLONE5_SDR_ADDRESS + register_offset)); +} + +static inline void socfpga_sdram_mmr_init(void) +{ + uint32_t val; + + val = CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMTYPE << SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB | + CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMBL << SDR_CTRLGRP_CTRLCFG_MEMBL_LSB | + CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER << SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB | + CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCEN << SDR_CTRLGRP_CTRLCFG_ECCEN_LSB | + CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCCORREN << SDR_CTRLGRP_CTRLCFG_ECCCORREN_LSB | + CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_REORDEREN << SDR_CTRLGRP_CTRLCFG_REORDEREN_LSB | + CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_STARVELIMIT << SDR_CTRLGRP_CTRLCFG_STARVELIMIT_LSB | + CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_DQSTRKEN << SDR_CTRLGRP_CTRLCFG_DQSTRKEN_LSB | + CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_NODMPINS << SDR_CTRLGRP_CTRLCFG_NODMPINS_LSB; + sdram_write(SDR_CTRLGRP_CTRLCFG_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCWL << SDR_CTRLGRP_DRAMTIMING1_TCWL_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_AL << SDR_CTRLGRP_DRAMTIMING1_TAL_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCL << SDR_CTRLGRP_DRAMTIMING1_TCL_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD << SDR_CTRLGRP_DRAMTIMING1_TRRD_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW << SDR_CTRLGRP_DRAMTIMING1_TFAW_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC << SDR_CTRLGRP_DRAMTIMING1_TRFC_LSB; + sdram_write(SDR_CTRLGRP_DRAMTIMING1_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI << SDR_CTRLGRP_DRAMTIMING2_TREFI_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD << SDR_CTRLGRP_DRAMTIMING2_TRCD_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP << SDR_CTRLGRP_DRAMTIMING2_TRP_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR << SDR_CTRLGRP_DRAMTIMING2_TWR_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR << SDR_CTRLGRP_DRAMTIMING2_TWTR_LSB; + sdram_write(SDR_CTRLGRP_DRAMTIMING2_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP << SDR_CTRLGRP_DRAMTIMING3_TRTP_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS << SDR_CTRLGRP_DRAMTIMING3_TRAS_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC << SDR_CTRLGRP_DRAMTIMING3_TRC_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TMRD << SDR_CTRLGRP_DRAMTIMING3_TMRD_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TCCD << SDR_CTRLGRP_DRAMTIMING3_TCCD_LSB; + sdram_write(SDR_CTRLGRP_DRAMTIMING3_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_SELFRFSHEXIT << SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_PWRDOWNEXIT << SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_LSB; + sdram_write(SDR_CTRLGRP_DRAMTIMING4_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_AUTOPDCYCLES << SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_LSB; + sdram_write(SDR_CTRLGRP_LOWPWRTIMING_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS << SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB | + 
CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS << SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS << SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS << SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB; + sdram_write(SDR_CTRLGRP_DRAMADDRW_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_DRAMIFWIDTH_IFWIDTH << SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_LSB; + sdram_write(SDR_CTRLGRP_DRAMIFWIDTH_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_DRAMDEVWIDTH_DEVWIDTH << SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_LSB; + sdram_write(SDR_CTRLGRP_DRAMDEVWIDTH_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_DRAMINTR_INTREN << SDR_CTRLGRP_DRAMINTR_INTREN_LSB; + sdram_write(SDR_CTRLGRP_DRAMINTR_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_STATICCFG_MEMBL << SDR_CTRLGRP_STATICCFG_MEMBL_LSB | + CONFIG_HPS_SDR_CTRLCFG_STATICCFG_USEECCASDATA << SDR_CTRLGRP_STATICCFG_USEECCASDATA_LSB; + sdram_write(SDR_CTRLGRP_STATICCFG_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_CTRLWIDTH_CTRLWIDTH << SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_LSB; + sdram_write(SDR_CTRLGRP_CTRLWIDTH_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_PORTCFG_AUTOPCHEN << SDR_CTRLGRP_PORTCFG_AUTOPCHEN_LSB; + sdram_write(SDR_CTRLGRP_PORTCFG_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_SYNCMODE << SDR_CTRLGRP_FIFOCFG_SYNCMODE_LSB | + CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_INCSYNC << SDR_CTRLGRP_FIFOCFG_INCSYNC_LSB; + sdram_write(SDR_CTRLGRP_FIFOCFG_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPPRIORITY_USERPRIORITY << SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_LSB; + sdram_write(SDR_CTRLGRP_MPPRIORITY_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_0_STATICWEIGHT_31_0 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_LSB; + sdram_write(SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_STATICWEIGHT_49_32 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_LSB | + CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_SUMOFWEIGHT_13_0 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_LSB; + sdram_write(SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_2_SUMOFWEIGHT_45_14 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_LSB; + sdram_write(SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_3_SUMOFWEIGHT_63_46 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_LSB; + sdram_write(SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPPACING_0_THRESHOLD1_31_0 << SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_LSB; + sdram_write(SDR_CTRLGRP_MPPACING_MPPACING_0_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD1_59_32 << SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_LSB | + CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0 << + SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_LSB; + sdram_write(SDR_CTRLGRP_MPPACING_MPPACING_1_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPPACING_2_THRESHOLD2_35_4 << SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_LSB; + sdram_write(SDR_CTRLGRP_MPPACING_MPPACING_2_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPPACING_3_THRESHOLD2_59_36 << SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_LSB; + sdram_write(SDR_CTRLGRP_MPPACING_MPPACING_3_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0 << + SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB; + sdram_write(SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_ADDRESS, val); + + val = 
CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32 << + SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB; + sdram_write(SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64 << + SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB; + sdram_write(SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_PHYCTRL_PHYCTRL_0; + sdram_write(SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_CPORTWIDTH_CPORTWIDTH << SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_LSB; + sdram_write(SDR_CTRLGRP_CPORTWIDTH_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_CPORTWMAP_CPORTWMAP << SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_LSB; + sdram_write(SDR_CTRLGRP_CPORTWMAP_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_CPORTRMAP_CPORTRMAP << SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_LSB; + sdram_write(SDR_CTRLGRP_CPORTRMAP_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_RFIFOCMAP_RFIFOCMAP << SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_LSB; + sdram_write(SDR_CTRLGRP_RFIFOCMAP_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_WFIFOCMAP_WFIFOCMAP << SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_LSB; + sdram_write(SDR_CTRLGRP_WFIFOCMAP_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_CPORTRDWR_CPORTRDWR << SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_LSB; + sdram_write(SDR_CTRLGRP_CPORTRDWR_ADDRESS, val); + + val = CONFIG_HPS_SDR_CTRLCFG_DRAMODT_READ << SDR_CTRLGRP_DRAMODT_READ_LSB | + CONFIG_HPS_SDR_CTRLCFG_DRAMODT_WRITE << SDR_CTRLGRP_DRAMODT_WRITE_LSB; + sdram_write(SDR_CTRLGRP_DRAMODT_ADDRESS, val); + + val = readl(CYCLONE5_SDR_ADDRESS + SDR_CTRLGRP_STATICCFG_ADDRESS); + val &= ~(SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK); + val |= 1 << SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB; + writel(val, (CYCLONE5_SDR_ADDRESS + SDR_CTRLGRP_STATICCFG_ADDRESS)); +} +#endif /* __MACH_SDRAM_CONFIG_H */ diff --git a/arch/arm/mach-socfpga/include/mach/cyclone5-sdram.h b/arch/arm/mach-socfpga/include/mach/cyclone5-sdram.h new file mode 100644 index 0000000000..ebd331e83e --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-sdram.h @@ -0,0 +1,399 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
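Every controller-group register below is described by an _LSB/_MASK pair; socfpga_sdram_mmr_init() above composes whole register values by shifting each CONFIG_HPS_* field to its _LSB, and the final applycfg step is a masked read-modify-write. A small sketch of that update pattern; the helper name sdr_update_field is illustrative and the direct readl/writel on CYCLONE5_SDR_ADDRESS mirrors the patch's own sdram_write() idiom:

static inline void sdr_update_field(unsigned reg, uint32_t mask,
				    unsigned lsb, uint32_t fieldval)
{
	uint32_t val;

	/* Read-modify-write a single bit field of an SDRAM controller register */
	val = readl(CYCLONE5_SDR_ADDRESS + reg);
	val &= ~mask;
	val |= (fieldval << lsb) & mask;
	writel(val, CYCLONE5_SDR_ADDRESS + reg);
}

/*
 * e.g. setting applycfg, equivalent to the tail of socfpga_sdram_mmr_init():
 * sdr_update_field(SDR_CTRLGRP_STATICCFG_ADDRESS,
 *		    SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK,
 *		    SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB, 1);
 */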
+ */ + +#ifndef _SDRAM_H_ +#define _SDRAM_H_ + +/* Group: sdr.phygrp.sccgrp */ +#define SDR_PHYGRP_SCCGRP_ADDRESS 0x0 +/* Group: sdr.phygrp.phymgrgrp */ +#define SDR_PHYGRP_PHYMGRGRP_ADDRESS 0x1000 +/* Group: sdr.phygrp.rwmgrgrp */ +#define SDR_PHYGRP_RWMGRGRP_ADDRESS 0x2000 +/* Group: sdr.phygrp.datamgrgrp */ +#define SDR_PHYGRP_DATAMGRGRP_ADDRESS 0x4000 +/* Group: sdr.phygrp.regfilegrp */ +#define SDR_PHYGRP_REGFILEGRP_ADDRESS 0x4800 +/* Group: sdr.ctrlgrp */ +#define SDR_CTRLGRP_ADDRESS 0x5000 +/* Register: sdr.ctrlgrp.ctrlcfg */ +#define SDR_CTRLGRP_CTRLCFG_ADDRESS 0x5000 +/* Register: sdr.ctrlgrp.dramtiming1 */ +#define SDR_CTRLGRP_DRAMTIMING1_ADDRESS 0x5004 +/* Register: sdr.ctrlgrp.dramtiming2 */ +#define SDR_CTRLGRP_DRAMTIMING2_ADDRESS 0x5008 +/* Register: sdr.ctrlgrp.dramtiming3 */ +#define SDR_CTRLGRP_DRAMTIMING3_ADDRESS 0x500c +/* Register: sdr.ctrlgrp.dramtiming4 */ +#define SDR_CTRLGRP_DRAMTIMING4_ADDRESS 0x5010 +/* Register: sdr.ctrlgrp.lowpwrtiming */ +#define SDR_CTRLGRP_LOWPWRTIMING_ADDRESS 0x5014 +/* Register: sdr.ctrlgrp.dramodt */ +#define SDR_CTRLGRP_DRAMODT_ADDRESS 0x5018 +/* Register: sdr.ctrlgrp.dramaddrw */ +#define SDR_CTRLGRP_DRAMADDRW_ADDRESS 0x502c +/* Register: sdr.ctrlgrp.dramifwidth */ +#define SDR_CTRLGRP_DRAMIFWIDTH_ADDRESS 0x5030 +/* Register: sdr.ctrlgrp.dramdevwidth */ +#define SDR_CTRLGRP_DRAMDEVWIDTH_ADDRESS 0x5034 +/* Register: sdr.ctrlgrp.dramsts */ +#define SDR_CTRLGRP_DRAMSTS_ADDRESS 0x5038 +/* Register: sdr.ctrlgrp.dramintr */ +#define SDR_CTRLGRP_DRAMINTR_ADDRESS 0x503c +/* Register: sdr.ctrlgrp.sbecount */ +#define SDR_CTRLGRP_SBECOUNT_ADDRESS 0x5040 +/* Register: sdr.ctrlgrp.dbecount */ +#define SDR_CTRLGRP_DBECOUNT_ADDRESS 0x5044 +/* Register: sdr.ctrlgrp.erraddr */ +#define SDR_CTRLGRP_ERRADDR_ADDRESS 0x5048 +/* Register: sdr.ctrlgrp.dropcount */ +#define SDR_CTRLGRP_DROPCOUNT_ADDRESS 0x504c +/* Register: sdr.ctrlgrp.dropaddr */ +#define SDR_CTRLGRP_DROPADDR_ADDRESS 0x5050 +/* Register: sdr.ctrlgrp.staticcfg */ +#define SDR_CTRLGRP_STATICCFG_ADDRESS 0x505c +/* Register: sdr.ctrlgrp.ctrlwidth */ +#define SDR_CTRLGRP_CTRLWIDTH_ADDRESS 0x5060 +/* Register: sdr.ctrlgrp.cportwidth */ +#define SDR_CTRLGRP_CPORTWIDTH_ADDRESS 0x5064 +/* Register: sdr.ctrlgrp.cportwmap */ +#define SDR_CTRLGRP_CPORTWMAP_ADDRESS 0x5068 +/* Register: sdr.ctrlgrp.cportrmap */ +#define SDR_CTRLGRP_CPORTRMAP_ADDRESS 0x506c +/* Register: sdr.ctrlgrp.rfifocmap */ +#define SDR_CTRLGRP_RFIFOCMAP_ADDRESS 0x5070 +/* Register: sdr.ctrlgrp.wfifocmap */ +#define SDR_CTRLGRP_WFIFOCMAP_ADDRESS 0x5074 +/* Register: sdr.ctrlgrp.cportrdwr */ +#define SDR_CTRLGRP_CPORTRDWR_ADDRESS 0x5078 +/* Register: sdr.ctrlgrp.portcfg */ +#define SDR_CTRLGRP_PORTCFG_ADDRESS 0x507c +/* Register: sdr.ctrlgrp.fpgaportrst */ +#define SDR_CTRLGRP_FPGAPORTRST_ADDRESS 0x5080 +/* Register: sdr.ctrlgrp.fifocfg */ +#define SDR_CTRLGRP_FIFOCFG_ADDRESS 0x5088 +/* Register: sdr.ctrlgrp.mppriority */ +#define SDR_CTRLGRP_MPPRIORITY_ADDRESS 0x50ac +/* Wide Register: sdr.ctrlgrp.mpweight */ +#define SDR_CTRLGRP_MPWEIGHT_ADDRESS 0x50b0 +/* Register: sdr.ctrlgrp.mpweight.mpweight_0 */ +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_ADDRESS 0x50b0 +/* Register: sdr.ctrlgrp.mpweight.mpweight_1 */ +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_ADDRESS 0x50b4 +/* Register: sdr.ctrlgrp.mpweight.mpweight_2 */ +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_ADDRESS 0x50b8 +/* Register: sdr.ctrlgrp.mpweight.mpweight_3 */ +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_ADDRESS 0x50bc +/* Register: sdr.ctrlgrp.mppacing.mppacing_0 */ +#define 
SDR_CTRLGRP_MPPACING_MPPACING_0_ADDRESS 0x50c0 +/* Register: sdr.ctrlgrp.mppacing.mppacing_1 */ +#define SDR_CTRLGRP_MPPACING_MPPACING_1_ADDRESS 0x50c4 +/* Register: sdr.ctrlgrp.mppacing.mppacing_2 */ +#define SDR_CTRLGRP_MPPACING_MPPACING_2_ADDRESS 0x50c8 +/* Register: sdr.ctrlgrp.mppacing.mppacing_3 */ +#define SDR_CTRLGRP_MPPACING_MPPACING_3_ADDRESS 0x50cc +/* Register: sdr.ctrlgrp.mpthresholdrst.mpthresholdrst_0 */ +#define SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_ADDRESS 0x50d0 +/* Register: sdr.ctrlgrp.mpthresholdrst.mpthresholdrst_1 */ +#define SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_ADDRESS 0x50d4 +/* Register: sdr.ctrlgrp.mpthresholdrst.mpthresholdrst_2 */ +#define SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_ADDRESS 0x50d8 +/* Wide Register: sdr.ctrlgrp.phyctrl */ +#define SDR_CTRLGRP_PHYCTRL_ADDRESS 0x5150 +/* Register: sdr.ctrlgrp.phyctrl.phyctrl_0 */ +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDRESS 0x5150 +/* Register: sdr.ctrlgrp.phyctrl.phyctrl_1 */ +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_ADDRESS 0x5154 +/* Register: sdr.ctrlgrp.phyctrl.phyctrl_2 */ +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_ADDRESS 0x5158 +/* Register instance: sdr::ctrlgrp::phyctrl.phyctrl_0 */ +/* Register template referenced: sdr::ctrlgrp::phyctrl::phyctrl_0 */ +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_OFFSET 0x150 +/* Register instance: sdr::ctrlgrp::phyctrl.phyctrl_1 */ +/* Register template referenced: sdr::ctrlgrp::phyctrl::phyctrl_1 */ +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_OFFSET 0x154 +/* Register instance: sdr::ctrlgrp::phyctrl.phyctrl_2 */ +/* Register template referenced: sdr::ctrlgrp::phyctrl::phyctrl_2 */ +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_OFFSET 0x158 + +/* Register template: sdr::ctrlgrp::ctrlcfg */ +#define SDR_CTRLGRP_CTRLCFG_OUTPUTREG_LSB 26 +#define SDR_CTRLGRP_CTRLCFG_OUTPUTREG_MASK 0x04000000 +#define SDR_CTRLGRP_CTRLCFG_BURSTTERMEN_LSB 25 +#define SDR_CTRLGRP_CTRLCFG_BURSTTERMEN_MASK 0x02000000 +#define SDR_CTRLGRP_CTRLCFG_BURSTINTREN_LSB 24 +#define SDR_CTRLGRP_CTRLCFG_BURSTINTREN_MASK 0x01000000 +#define SDR_CTRLGRP_CTRLCFG_NODMPINS_LSB 23 +#define SDR_CTRLGRP_CTRLCFG_NODMPINS_MASK 0x00800000 +#define SDR_CTRLGRP_CTRLCFG_DQSTRKEN_LSB 22 +#define SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK 0x00400000 +#define SDR_CTRLGRP_CTRLCFG_STARVELIMIT_LSB 16 +#define SDR_CTRLGRP_CTRLCFG_STARVELIMIT_MASK 0x003f0000 +#define SDR_CTRLGRP_CTRLCFG_REORDEREN_LSB 15 +#define SDR_CTRLGRP_CTRLCFG_REORDEREN_MASK 0x00008000 +#define SDR_CTRLGRP_CTRLCFG_GENDBE_LSB 14 +#define SDR_CTRLGRP_CTRLCFG_GENDBE_MASK 0x00004000 +#define SDR_CTRLGRP_CTRLCFG_GENSBE_LSB 13 +#define SDR_CTRLGRP_CTRLCFG_GENSBE_MASK 0x00002000 +#define SDR_CTRLGRP_CTRLCFG_CFG_ENABLE_ECC_CODE_OVERWRITES_LSB 12 +#define SDR_CTRLGRP_CTRLCFG_CFG_ENABLE_ECC_CODE_OVERWRITES_MASK 0x00001000 +#define SDR_CTRLGRP_CTRLCFG_ECCCORREN_LSB 11 +#define SDR_CTRLGRP_CTRLCFG_ECCCORREN_MASK 0x00000800 +#define SDR_CTRLGRP_CTRLCFG_ECCEN_LSB 10 +#define SDR_CTRLGRP_CTRLCFG_ECCEN_MASK 0x00000400 +#define SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB 8 +#define SDR_CTRLGRP_CTRLCFG_ADDRORDER_MASK 0x00000300 +#define SDR_CTRLGRP_CTRLCFG_MEMBL_LSB 3 +#define SDR_CTRLGRP_CTRLCFG_MEMBL_MASK 0x000000f8 +#define SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB 0 +#define SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK 0x00000007 +/* Register template: sdr::ctrlgrp::dramtiming1 */ +#define SDR_CTRLGRP_DRAMTIMING1_TRFC_LSB 24 +#define SDR_CTRLGRP_DRAMTIMING1_TRFC_MASK 0xff000000 +#define SDR_CTRLGRP_DRAMTIMING1_TFAW_LSB 18 +#define SDR_CTRLGRP_DRAMTIMING1_TFAW_MASK 0x00fc0000 +#define SDR_CTRLGRP_DRAMTIMING1_TRRD_LSB 14 
+#define SDR_CTRLGRP_DRAMTIMING1_TRRD_MASK 0x0003c000 +#define SDR_CTRLGRP_DRAMTIMING1_TCL_LSB 9 +#define SDR_CTRLGRP_DRAMTIMING1_TCL_MASK 0x00003e00 +#define SDR_CTRLGRP_DRAMTIMING1_TAL_LSB 4 +#define SDR_CTRLGRP_DRAMTIMING1_TAL_MASK 0x000001f0 +#define SDR_CTRLGRP_DRAMTIMING1_TCWL_LSB 0 +#define SDR_CTRLGRP_DRAMTIMING1_TCWL_MASK 0x0000000f +/* Register template: sdr::ctrlgrp::dramtiming2 */ +#define SDR_CTRLGRP_DRAMTIMING2_TWTR_LSB 25 +#define SDR_CTRLGRP_DRAMTIMING2_TWTR_MASK 0x1e000000 +#define SDR_CTRLGRP_DRAMTIMING2_TWR_LSB 21 +#define SDR_CTRLGRP_DRAMTIMING2_TWR_MASK 0x01e00000 +#define SDR_CTRLGRP_DRAMTIMING2_TRP_LSB 17 +#define SDR_CTRLGRP_DRAMTIMING2_TRP_MASK 0x001e0000 +#define SDR_CTRLGRP_DRAMTIMING2_TRCD_LSB 13 +#define SDR_CTRLGRP_DRAMTIMING2_TRCD_MASK 0x0001e000 +#define SDR_CTRLGRP_DRAMTIMING2_TREFI_LSB 0 +#define SDR_CTRLGRP_DRAMTIMING2_TREFI_MASK 0x00001fff +/* Register template: sdr::ctrlgrp::dramtiming3 */ +#define SDR_CTRLGRP_DRAMTIMING3_TCCD_LSB 19 +#define SDR_CTRLGRP_DRAMTIMING3_TCCD_MASK 0x00780000 +#define SDR_CTRLGRP_DRAMTIMING3_TMRD_LSB 15 +#define SDR_CTRLGRP_DRAMTIMING3_TMRD_MASK 0x00078000 +#define SDR_CTRLGRP_DRAMTIMING3_TRC_LSB 9 +#define SDR_CTRLGRP_DRAMTIMING3_TRC_MASK 0x00007e00 +#define SDR_CTRLGRP_DRAMTIMING3_TRAS_LSB 4 +#define SDR_CTRLGRP_DRAMTIMING3_TRAS_MASK 0x000001f0 +#define SDR_CTRLGRP_DRAMTIMING3_TRTP_LSB 0 +#define SDR_CTRLGRP_DRAMTIMING3_TRTP_MASK 0x0000000f +/* Register template: sdr::ctrlgrp::dramtiming4 */ +#define SDR_CTRLGRP_DRAMTIMING4_MINPWRSAVECYCLES_LSB 20 +#define SDR_CTRLGRP_DRAMTIMING4_MINPWRSAVECYCLES_MASK 0x00f00000 +#define SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_LSB 10 +#define SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_MASK 0x000ffc00 +#define SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_LSB 0 +#define SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_MASK 0x000003ff +/* Register template: sdr::ctrlgrp::lowpwrtiming */ +#define SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_LSB 16 +#define SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_MASK 0x000f0000 +#define SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_LSB 0 +#define SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_MASK 0x0000ffff +/* Register template: sdr::ctrlgrp::dramaddrw */ +#define SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB 13 +#define SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK 0x0000e000 +#define SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB 10 +#define SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK 0x00001c00 +#define SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB 5 +#define SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK 0x000003e0 +#define SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB 0 +#define SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK 0x0000001f +/* Register template: sdr::ctrlgrp::dramifwidth */ +#define SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_LSB 0 +#define SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_MASK 0x000000ff +/* Register template: sdr::ctrlgrp::dramdevwidth */ +#define SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_LSB 0 +#define SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_MASK 0x0000000f +/* Register template: sdr::ctrlgrp::dramintr */ +#define SDR_CTRLGRP_DRAMINTR_INTRCLR_LSB 4 +#define SDR_CTRLGRP_DRAMINTR_INTRCLR_MASK 0x00000010 +#define SDR_CTRLGRP_DRAMINTR_CORRDROPMASK_LSB 3 +#define SDR_CTRLGRP_DRAMINTR_CORRDROPMASK_MASK 0x00000008 +#define SDR_CTRLGRP_DRAMINTR_DBEMASK_LSB 2 +#define SDR_CTRLGRP_DRAMINTR_DBEMASK_MASK 0x00000004 +#define SDR_CTRLGRP_DRAMINTR_SBEMASK_LSB 1 +#define SDR_CTRLGRP_DRAMINTR_SBEMASK_MASK 0x00000002 +#define SDR_CTRLGRP_DRAMINTR_INTREN_LSB 0 +#define SDR_CTRLGRP_DRAMINTR_INTREN_MASK 0x00000001 +/* Register template: sdr::ctrlgrp::sbecount */ +#define SDR_CTRLGRP_SBECOUNT_COUNT_LSB 0 +#define 
SDR_CTRLGRP_SBECOUNT_COUNT_MASK 0x000000ff +/* Register template: sdr::ctrlgrp::dbecount */ +#define SDR_CTRLGRP_DBECOUNT_COUNT_LSB 0 +#define SDR_CTRLGRP_DBECOUNT_COUNT_MASK 0x000000ff +/* Register template: sdr::ctrlgrp::staticcfg */ +#define SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB 3 +#define SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK 0x00000008 +#define SDR_CTRLGRP_STATICCFG_USEECCASDATA_LSB 2 +#define SDR_CTRLGRP_STATICCFG_USEECCASDATA_MASK 0x00000004 +#define SDR_CTRLGRP_STATICCFG_MEMBL_LSB 0 +#define SDR_CTRLGRP_STATICCFG_MEMBL_MASK 0x00000003 +/* Register template: sdr::ctrlgrp::ctrlwidth */ +#define SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_LSB 0 +#define SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_MASK 0x00000003 +/* Register template: sdr::ctrlgrp::cportwidth */ +#define SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_LSB 0 +#define SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_MASK 0x000fffff +/* Register template: sdr::ctrlgrp::cportwmap */ +#define SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_LSB 0 +#define SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_MASK 0x3fffffff +/* Register template: sdr::ctrlgrp::cportrmap */ +#define SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_LSB 0 +#define SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_MASK 0x3fffffff +/* Register template: sdr::ctrlgrp::rfifocmap */ +#define SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_LSB 0 +#define SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_MASK 0x00ffffff +/* Register template: sdr::ctrlgrp::wfifocmap */ +#define SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_LSB 0 +#define SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_MASK 0x00ffffff +/* Register template: sdr::ctrlgrp::cportrdwr */ +#define SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_LSB 0 +#define SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_MASK 0x000fffff +/* Register template: sdr::ctrlgrp::portcfg */ +#define SDR_CTRLGRP_PORTCFG_AUTOPCHEN_LSB 10 +#define SDR_CTRLGRP_PORTCFG_AUTOPCHEN_MASK 0x000ffc00 +#define SDR_CTRLGRP_PORTCFG_PORTPROTOCOL_LSB 0 +#define SDR_CTRLGRP_PORTCFG_PORTPROTOCOL_MASK 0x000003ff +/* Register template: sdr::ctrlgrp::fifocfg */ +#define SDR_CTRLGRP_FIFOCFG_INCSYNC_LSB 10 +#define SDR_CTRLGRP_FIFOCFG_INCSYNC_MASK 0x00000400 +#define SDR_CTRLGRP_FIFOCFG_SYNCMODE_LSB 0 +#define SDR_CTRLGRP_FIFOCFG_SYNCMODE_MASK 0x000003ff +/* Register template: sdr::ctrlgrp::mppriority */ +#define SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_LSB 0 +#define SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_MASK 0x3fffffff +/* Wide Register template: sdr::ctrlgrp::mpweight */ +/* Register template: sdr::ctrlgrp::mpweight::mpweight_0 */ +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_LSB 0 +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_MASK 0xffffffff +/* Register template: sdr::ctrlgrp::mpweight::mpweight_1 */ +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_LSB 18 +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_MASK 0xfffc0000 +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_LSB 0 +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_MASK 0x0003ffff +/* Register template: sdr::ctrlgrp::mpweight::mpweight_2 */ +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_LSB 0 +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_MASK 0xffffffff +/* Register template: sdr::ctrlgrp::mpweight::mpweight_3 */ +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_LSB 0 +#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_MASK 0x0003ffff +/* Wide Register template: sdr::ctrlgrp::mppacing */ +/* Register template: sdr::ctrlgrp::mppacing::mppacing_0 */ +#define SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_LSB 0 +#define 
SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_MASK 0xffffffff +/* Register template: sdr::ctrlgrp::mppacing::mppacing_1 */ +#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_LSB 28 +#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_MASK 0xf0000000 +#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_LSB 0 +#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_MASK 0x0fffffff +/* Register template: sdr::ctrlgrp::mppacing::mppacing_2 */ +#define SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_LSB 0 +#define SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_MASK 0xffffffff +/* Register template: sdr::ctrlgrp::mppacing::mppacing_3 */ +#define SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_LSB 0 +#define SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_MASK 0x00ffffff +/* Wide Register template: sdr::ctrlgrp::mpthresholdrst */ +/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_0 */ +#define \ +SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB 0 +#define \ +SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_MASK \ +0xffffffff +/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_1 */ +#define \ +SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB 0 +#define \ +SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_MASK \ +0xffffffff +/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_2 */ +#define \ +SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB 0 +#define \ +SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_MASK \ +0x0000ffff +/* Register template: sdr::ctrlgrp::remappriority */ +#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_LSB 0 +#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_MASK 0x000000ff +/* Wide Register template: sdr::ctrlgrp::phyctrl */ +/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_0 */ +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_LSB 12 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH 20 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_MASK 0xfffff000 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(x) \ + (((x) << 12) & 0xfffff000) +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_LSB 10 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_MASK 0x00000c00 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(x) \ + (((x) << 10) & 0x00000c00) +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_LSB 9 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_MASK 0x00000200 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(x) \ + (((x) << 9) & 0x00000200) +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_LSB 8 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_MASK 0x00000100 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(x) \ + (((x) << 8) & 0x00000100) +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_LSB 6 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_MASK 0x000000c0 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(x) \ + (((x) << 6) & 0x000000c0) +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_LSB 4 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_MASK 0x00000030 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(x) \ + (((x) << 4) & 0x00000030) +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_LSB 2 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_MASK 0x0000000c +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(x) \ + (((x) << 2) & 0x0000000c) +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_LSB 0 +#define 
SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_MASK 0x00000003 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(x) \ + (((x) << 0) & 0x00000003) +/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_1 */ +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_LSB 12 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH 20 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_MASK 0xfffff000 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(x) \ + (((x) << 12) & 0xfffff000) +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_LSB 0 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_MASK 0x00000fff +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(x) \ + (((x) << 0) & 0x00000fff) +/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_2 */ +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_LSB 0 +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_MASK 0x00000fff +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(x) \ + (((x) << 0) & 0x00000fff) +/* Register template: sdr::ctrlgrp::dramodt */ +#define SDR_CTRLGRP_DRAMODT_READ_LSB 4 +#define SDR_CTRLGRP_DRAMODT_READ_MASK 0x000000f0 +#define SDR_CTRLGRP_DRAMODT_WRITE_LSB 0 +#define SDR_CTRLGRP_DRAMODT_WRITE_MASK 0x0000000f +/* Register template: sdr::ctrlgrp::fpgaportrst */ +#define SDR_CTRLGRP_FPGAPORTRST_READ_PORT_0_LSB 0 +#define SDR_CTRLGRP_FPGAPORTRST_WRITE_PORT_0_LSB 4 +#define SDR_CTRLGRP_FPGAPORTRST_COMMAND_PORT_0_LSB 8 +/* Field instance: sdr::ctrlgrp::dramsts */ +#define SDR_CTRLGRP_DRAMSTS_DBEERR_MASK 0x00000008 +#define SDR_CTRLGRP_DRAMSTS_SBEERR_MASK 0x00000004 + +#endif /* _SDRAM_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/cyclone5-sequencer.c b/arch/arm/mach-socfpga/include/mach/cyclone5-sequencer.c new file mode 100644 index 0000000000..e5ecb0f1b8 --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-sequencer.c @@ -0,0 +1,5241 @@ +/* +* Copyright Altera Corporation (C) 2012-2014. All rights reserved +* +* SPDX-License-Identifier: BSD-3-Clause +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the distribution. +* * Neither the name of Altera Corporation nor the +* names of its contributors may be used to endorse or promote products +* derived from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +* DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include "system.h" +#include "sdram_io.h" +#include "cyclone5-sequencer.h" +#include "tclrpt.h" + +/****************************************************************************** + ****************************************************************************** + ** NOTE: Special Rules for Globale Variables ** + ** ** + ** All global variables that are explicitly initialized (including ** + ** explicitly initialized to zero), are only initialized once, during ** + ** configuration time, and not again on reset. This means that they ** + ** preserve their current contents across resets, which is needed for some ** + ** special cases involving communication with external modules. In ** + ** addition, this avoids paying the price to have the memory initialized, ** + ** even for zeroed data, provided it is explicitly set to zero in the code, ** + ** and doesn't rely on implicit initialization. ** + ****************************************************************************** + ******************************************************************************/ + +#ifndef ARMCOMPILER + +// Temporary workaround to place the initial stack pointer at a safe offset from end +#define STRINGIFY(s) STRINGIFY_STR(s) +#define STRINGIFY_STR(s) #s +asm(".global __alt_stack_pointer"); +asm("__alt_stack_pointer = " STRINGIFY(STACK_POINTER)); +#endif + +#include + +#define NEWVERSION_RDDESKEW 1 +#define NEWVERSION_WRDESKEW 1 +#define NEWVERSION_GW 1 +#define NEWVERSION_WL 1 +#define NEWVERSION_DQSEN 1 + +// Just to make the debugging code more uniform + +#define HALF_RATE_MODE 0 + +#define QUARTER_RATE_MODE 0 +#define DELTA_D 1 + +// case:56390 +// VFIFO_CONTROL_WIDTH_PER_DQS is the number of VFIFOs actually instantiated per DQS. This is always one except: +// AV QDRII where it is 2 for x18 and x18w2, and 4 for x36 and x36w2 +// RLDRAMII x36 and x36w2 where it is 2. +// In 12.0sp1 we set this to 4 for all of the special cases above to keep it simple. +// In 12.0sp2 or 12.1 this should get moved to generation and unified with the same constant used in the phy mgr + +#define VFIFO_CONTROL_WIDTH_PER_DQS 1 + +// In order to reduce ROM size, most of the selectable calibration steps are +// decided at compile time based on the user's calibration mode selection, +// as captured by the STATIC_CALIB_STEPS selection below. +// +// However, to support simulation-time selection of fast simulation mode, where +// we skip everything except the bare minimum, we need a few of the steps to +// be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the +// check, which is based on the rtl-supplied value, or we dynamically compute the +// value to use based on the dynamically-chosen calibration mode + +#define BTFLD_FMT "%lx" + +// For HPS running on actual hardware + +#define DLEVEL 0 +#ifdef HPS_HW_SERIAL_SUPPORT +// space around comma is required for varargs macro to remove comma if args is empty +#define DPRINT(level, fmt, args...) if (DLEVEL >= (level)) printf("SEQ.C: " fmt "\n" , ## args) +#define IPRINT(fmt, args...) printf("SEQ.C: " fmt "\n" , ## args) +#else +#define DPRINT(level, fmt, args...) +#define IPRINT(fmt, args...) +#endif +#define BFM_GBL_SET(field,value) +#define BFM_GBL_GET(field) ((long unsigned int)0) +#define BFM_STAGE(stage) +#define BFM_INC_VFIFO +#define COV(label) + +#define TRACE_FUNC(fmt, args...) 
DPRINT(1, "%s[%d]: " fmt, __func__, __LINE__ , ## args) + +#define DYNAMIC_CALIB_STEPS (dyn_calib_steps) + +#define STATIC_IN_RTL_SIM 0 + +#define STATIC_SKIP_DELAY_LOOPS 0 + +#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | STATIC_SKIP_DELAY_LOOPS) + +// calibration steps requested by the rtl +static uint16_t dyn_calib_steps = 0; + +// To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option +// instead of static, we use boolean logic to select between +// non-skip and skip values +// +// The mask is set to include all bits when not-skipping, but is +// zero when skipping + +static uint16_t skip_delay_mask = 0; // mask off bits when skipping/not-skipping + +#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \ + ((non_skip_value) & skip_delay_mask) + +// TODO: The skip group strategy is completely missing + +static gbl_t *gbl = 0; +static param_t *param = 0; + +static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn, uint32_t write_group, + uint32_t use_dm, uint32_t all_correct, + t_btfld * bit_chk, uint32_t all_ranks); + +// This (TEST_SIZE) is used to test handling of large roms, to make +// sure we are sizing things correctly +// Note, the initialized data takes up twice the space in rom, since +// there needs to be a copy with the initial value and a copy that is +// written too, since on soft-reset, it needs to have the initial values +// without reloading the memory from external sources + +// #define TEST_SIZE (6*1024) + +#ifdef TEST_SIZE + +#define PRE_POST_TEST_SIZE 3 + +static unsigned int pre_test_size_mem[PRE_POST_TEST_SIZE] = { 1, 2, 3 }; + +static unsigned int test_size_mem[TEST_SIZE / sizeof(unsigned int)] = { 100, 200, 300 }; + +static unsigned int post_test_size_mem[PRE_POST_TEST_SIZE] = { 10, 20, 30 }; + +static void write_test_mem(void) +{ + int i; + + for (i = 0; i < PRE_POST_TEST_SIZE; i++) { + pre_test_size_mem[i] = (i + 1) * 10; + post_test_size_mem[i] = (i + 1); + } + + for (i = 0; i < sizeof(test_size_mem) / sizeof(unsigned int); i++) { + test_size_mem[i] = i; + } + +} + +static int check_test_mem(int start) +{ + int i; + + for (i = 0; i < PRE_POST_TEST_SIZE; i++) { + if (start) { + if (pre_test_size_mem[i] != (i + 1)) { + return 0; + } + if (post_test_size_mem[i] != (i + 1) * 10) { + return 0; + } + } else { + if (pre_test_size_mem[i] != (i + 1) * 10) { + return 0; + } + if (post_test_size_mem[i] != (i + 1)) { + return 0; + } + } + } + + for (i = 0; i < sizeof(test_size_mem) / sizeof(unsigned int); i++) { + if (start) { + if (i < 3) { + if (test_size_mem[i] != (i + 1) * 100) { + return 0; + } + } else { + if (test_size_mem[i] != 0) { + return 0; + } + } + } else { + if (test_size_mem[i] != i) { + return 0; + } + } + } + + return 1; +} + +#endif // TEST_SIZE + +static void set_failing_group_stage(uint32_t group, uint32_t stage, uint32_t substage) +{ + if (gbl->error_stage == CAL_STAGE_NIL) { + gbl->error_substage = substage; + gbl->error_stage = stage; + gbl->error_group = group; + + } + +} + +static inline void reg_file_set_group(uint32_t set_group) +{ + // Read the current group and stage + uint32_t cur_stage_group = IORD_32DIRECT(REG_FILE_CUR_STAGE, 0); + + // Clear the group + cur_stage_group &= 0x0000FFFF; + + // Set the group + cur_stage_group |= (set_group << 16); + + // Write the data back + IOWR_32DIRECT(REG_FILE_CUR_STAGE, 0, cur_stage_group); +} + +static inline void reg_file_set_stage(uint32_t set_stage) +{ + // Read the current group and stage + uint32_t cur_stage_group = IORD_32DIRECT(REG_FILE_CUR_STAGE, 0); + + 
// Clear the stage and substage + cur_stage_group &= 0xFFFF0000; + + // Set the stage + cur_stage_group |= (set_stage & 0x000000FF); + + // Write the data back + IOWR_32DIRECT(REG_FILE_CUR_STAGE, 0, cur_stage_group); +} + +static inline void reg_file_set_sub_stage(uint32_t set_sub_stage) +{ + // Read the current group and stage + uint32_t cur_stage_group = IORD_32DIRECT(REG_FILE_CUR_STAGE, 0); + + // Clear the substage + cur_stage_group &= 0xFFFF00FF; + + // Set the sub stage + cur_stage_group |= ((set_sub_stage << 8) & 0x0000FF00); + + // Write the data back + IOWR_32DIRECT(REG_FILE_CUR_STAGE, 0, cur_stage_group); +} + +static inline uint32_t is_write_group_enabled_for_dm(uint32_t write_group) +{ + return 1; +} + +static inline void select_curr_shadow_reg_using_rank(uint32_t rank) +{ +} + +static void initialize(void) +{ + IOWR_32DIRECT(PHY_MGR_MUX_SEL, 0, 0x3); + + //USER memory clock is not stable we begin initialization + + IOWR_32DIRECT(PHY_MGR_RESET_MEM_STBL, 0, 0); + + //USER calibration status all set to zero + + IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, 0); + IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, 0); + + if (((DYNAMIC_CALIB_STEPS) & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) { + param->read_correct_mask_vg = + ((t_btfld) 1 << + (RW_MGR_MEM_DQ_PER_READ_DQS / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1; + param->write_correct_mask_vg = + ((t_btfld) 1 << + (RW_MGR_MEM_DQ_PER_READ_DQS / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1; + param->read_correct_mask = ((t_btfld) 1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1; + param->write_correct_mask = ((t_btfld) 1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1; + param->dm_correct_mask = + ((t_btfld) 1 << (RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH)) - 1; + } +} + +static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode) +{ + uint32_t odt_mask_0 = 0; + uint32_t odt_mask_1 = 0; + uint32_t cs_and_odt_mask; + + if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) { + + if (LRDIMM) { + // USER LRDIMMs have two cases to consider: single-slot and dual-slot. + // USER In single-slot, assert ODT for write only. + // USER In dual-slot, assert ODT for both slots for write, + // USER and on the opposite slot only for reads. + // USER + // USER Further complicating this is that both DIMMs have either 1 or 2 ODT + // USER inputs, which do the same thing (only one is actually required). + if ((RW_MGR_MEM_CHIP_SELECT_WIDTH / RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM) == 1) { + // USER Single-slot case + if (RW_MGR_MEM_ODT_WIDTH == 1) { + // USER Read = 0, Write = 1 + odt_mask_0 = 0x0; + odt_mask_1 = 0x1; + } else if (RW_MGR_MEM_ODT_WIDTH == 2) { + // USER Read = 00, Write = 11 + odt_mask_0 = 0x0; + odt_mask_1 = 0x3; + } + } else if ((RW_MGR_MEM_CHIP_SELECT_WIDTH / RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM) + == 2) { + // USER Dual-slot case + if (RW_MGR_MEM_ODT_WIDTH == 2) { + // USER Read: asserted for opposite slot, Write: asserted for both + odt_mask_0 = (rank < 2) ? 0x2 : 0x1; + odt_mask_1 = 0x3; + } else if (RW_MGR_MEM_ODT_WIDTH == 4) { + // USER Read: asserted for opposite slot, Write: asserted for both + odt_mask_0 = (rank < 2) ? 
0xC : 0x3; + odt_mask_1 = 0xF; + } + } + } else if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) { + //USER 1 Rank + //USER Read: ODT = 0 + //USER Write: ODT = 1 + odt_mask_0 = 0x0; + odt_mask_1 = 0x1; + } else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) { + //USER 2 Ranks + if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1 || + (RDIMM && RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 2 + && RW_MGR_MEM_CHIP_SELECT_WIDTH == 4)) { + //USER - Dual-Slot , Single-Rank (1 chip-select per DIMM) + //USER OR + //USER - RDIMM, 4 total CS (2 CS per DIMM) means 2 DIMM + //USER Since MEM_NUMBER_OF_RANKS is 2 they are both single rank + //USER with 2 CS each (special for RDIMM) + //USER Read: Turn on ODT on the opposite rank + //USER Write: Turn on ODT on all ranks + odt_mask_0 = 0x3 & ~(1 << rank); + odt_mask_1 = 0x3; + } else { + //USER - Single-Slot , Dual-rank DIMMs (2 chip-selects per DIMM) + //USER Read: Turn on ODT off on all ranks + //USER Write: Turn on ODT on active rank + odt_mask_0 = 0x0; + odt_mask_1 = 0x3 & (1 << rank); + } + } else { + //USER 4 Ranks + //USER Read: + //USER ----------+-----------------------+ + //USER | | + //USER | ODT | + //USER Read From +-----------------------+ + //USER Rank | 3 | 2 | 1 | 0 | + //USER ----------+-----+-----+-----+-----+ + //USER 0 | 0 | 1 | 0 | 0 | + //USER 1 | 1 | 0 | 0 | 0 | + //USER 2 | 0 | 0 | 0 | 1 | + //USER 3 | 0 | 0 | 1 | 0 | + //USER ----------+-----+-----+-----+-----+ + //USER + //USER Write: + //USER ----------+-----------------------+ + //USER | | + //USER | ODT | + //USER Write To +-----------------------+ + //USER Rank | 3 | 2 | 1 | 0 | + //USER ----------+-----+-----+-----+-----+ + //USER 0 | 0 | 1 | 0 | 1 | + //USER 1 | 1 | 0 | 1 | 0 | + //USER 2 | 0 | 1 | 0 | 1 | + //USER 3 | 1 | 0 | 1 | 0 | + //USER ----------+-----+-----+-----+-----+ + switch (rank) { + case 0: + odt_mask_0 = 0x4; + odt_mask_1 = 0x5; + break; + case 1: + odt_mask_0 = 0x8; + odt_mask_1 = 0xA; + break; + case 2: + odt_mask_0 = 0x1; + odt_mask_1 = 0x5; + break; + case 3: + odt_mask_0 = 0x2; + odt_mask_1 = 0xA; + break; + } + } + } else { + odt_mask_0 = 0x0; + odt_mask_1 = 0x0; + } + + if (RDIMM && RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 2 + && RW_MGR_MEM_CHIP_SELECT_WIDTH == 4 && RW_MGR_MEM_NUMBER_OF_RANKS == 2) { + //USER See RDIMM special case above + cs_and_odt_mask = + (0xFF & ~(1 << (2 * rank))) | + ((0xFF & odt_mask_0) << 8) | ((0xFF & odt_mask_1) << 16); + } else if (LRDIMM) { + } else { + cs_and_odt_mask = + (0xFF & ~(1 << rank)) | + ((0xFF & odt_mask_0) << 8) | ((0xFF & odt_mask_1) << 16); + } + + IOWR_32DIRECT(RW_MGR_SET_CS_AND_ODT_MASK, 0, cs_and_odt_mask); +} + +//USER Given a rank, select the set of shadow registers that is responsible for the +//USER delays of such rank, so that subsequent SCC updates will go to those shadow +//USER registers. 
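+//USER Note: in this HPS build the function below is an empty stub, so the
+//USER shadow-register selection requested by its callers is effectively a
+//USER no-op here.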
+static void select_shadow_regs_for_update(uint32_t rank, uint32_t group, + uint32_t update_scan_chains) +{ +} + +static void scc_mgr_initialize(void) +{ + // Clear register file for HPS + // 16 (2^4) is the size of the full register file in the scc mgr: + // RFILE_DEPTH = log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS + MEM_IF_READ_DQS_WIDTH - 1) + 1; + uint32_t i; + for (i = 0; i < 16; i++) { + DPRINT(1, "Clearing SCC RFILE index %lu", i); + IOWR_32DIRECT(SCC_MGR_HHP_RFILE, i << 2, 0); + } +} + +static inline void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay) +{ + WRITE_SCC_DQS_IN_DELAY(read_group, delay); + +} + +static inline void scc_mgr_set_dqs_io_in_delay(uint32_t write_group, uint32_t delay) +{ + WRITE_SCC_DQS_IO_IN_DELAY(delay); + +} + +static inline void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase) +{ + WRITE_SCC_DQS_EN_PHASE(read_group, phase); + +} + +static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group, uint32_t phase) +{ + uint32_t r; + uint32_t update_scan_chains; + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + //USER although the h/w doesn't support different phases per shadow register, + //USER for simplicity our scc manager modeling keeps different phase settings per + //USER shadow reg, and it's important for us to keep them in sync to match h/w. + //USER for efficiency, the scan chain update should occur only once to sr0. + update_scan_chains = (r == 0) ? 1 : 0; + + select_shadow_regs_for_update(r, read_group, update_scan_chains); + scc_mgr_set_dqs_en_phase(read_group, phase); + + if (update_scan_chains) { + IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, read_group); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } + } +} + +static inline void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase) +{ + WRITE_SCC_DQDQS_OUT_PHASE(write_group, phase); + +} + +static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group, uint32_t phase) +{ + uint32_t r; + uint32_t update_scan_chains; + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + //USER although the h/w doesn't support different phases per shadow register, + //USER for simplicity our scc manager modeling keeps different phase settings per + //USER shadow reg, and it's important for us to keep them in sync to match h/w. + //USER for efficiency, the scan chain update should occur only once to sr0. + update_scan_chains = (r == 0) ? 1 : 0; + + select_shadow_regs_for_update(r, write_group, update_scan_chains); + scc_mgr_set_dqdqs_output_phase(write_group, phase); + + if (update_scan_chains) { + IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, write_group); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } + } +} + +static inline void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay) +{ + WRITE_SCC_DQS_EN_DELAY(read_group, delay); + +} + +static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group, uint32_t delay) +{ + uint32_t r; + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + + select_shadow_regs_for_update(r, read_group, 0); + + scc_mgr_set_dqs_en_delay(read_group, delay); + + IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, read_group); + + // In shadow register mode, the T11 settings are stored in registers + // in the core, which are updated by the DQS_ENA signals. Not issuing + // the SCC_MGR_UPD command allows us to save lots of rank switching + // overhead, by calling select_shadow_regs_for_update with update_scan_chains + // set to 0. 
+ IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } +} + +static void scc_mgr_set_oct_out1_delay(uint32_t write_group, uint32_t delay) +{ + uint32_t read_group; + + // Load the setting in the SCC manager + // Although OCT affects only write data, the OCT delay is controlled by the DQS logic block + // which is instantiated once per read group. For protocols where a write group consists + // of multiple read groups, the setting must be set multiple times. + for (read_group = + write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; + read_group < + (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; + ++read_group) { + + WRITE_SCC_OCT_OUT1_DELAY(read_group, delay); + } + +} + +static void scc_mgr_set_oct_out2_delay(uint32_t write_group, uint32_t delay) +{ + uint32_t read_group; + + // Load the setting in the SCC manager + // Although OCT affects only write data, the OCT delay is controlled by the DQS logic block + // which is instantiated once per read group. For protocols where a write group consists + // of multiple read groups, the setting must be set multiple times. + for (read_group = + write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; + read_group < + (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; + ++read_group) { + + WRITE_SCC_OCT_OUT2_DELAY(read_group, delay); + } + +} + +static inline void scc_mgr_set_dqs_bypass(uint32_t write_group, uint32_t bypass) +{ + // Load the setting in the SCC manager + WRITE_SCC_DQS_BYPASS(write_group, bypass); +} + +static inline void scc_mgr_set_dq_out1_delay(uint32_t write_group, uint32_t dq_in_group, + uint32_t delay) +{ + + // Load the setting in the SCC manager + WRITE_SCC_DQ_OUT1_DELAY(dq_in_group, delay); + +} + +static inline void scc_mgr_set_dq_out2_delay(uint32_t write_group, uint32_t dq_in_group, + uint32_t delay) +{ + + // Load the setting in the SCC manager + WRITE_SCC_DQ_OUT2_DELAY(dq_in_group, delay); + +} + +static inline void scc_mgr_set_dq_in_delay(uint32_t write_group, uint32_t dq_in_group, + uint32_t delay) +{ + + // Load the setting in the SCC manager + WRITE_SCC_DQ_IN_DELAY(dq_in_group, delay); + +} + +static inline void scc_mgr_set_dq_bypass(uint32_t write_group, uint32_t dq_in_group, + uint32_t bypass) +{ + // Load the setting in the SCC manager + WRITE_SCC_DQ_BYPASS(dq_in_group, bypass); +} + +static inline void scc_mgr_set_rfifo_mode(uint32_t write_group, uint32_t dq_in_group, uint32_t mode) +{ + // Load the setting in the SCC manager + WRITE_SCC_RFIFO_MODE(dq_in_group, mode); +} + +static inline void scc_mgr_set_hhp_extras(void) +{ + // Load the fixed setting in the SCC manager + // bits: 0:0 = 1'b1 - dqs bypass + // bits: 1:1 = 1'b1 - dq bypass + // bits: 4:2 = 3'b001 - rfifo_mode + // bits: 6:5 = 2'b01 - rfifo clock_select + // bits: 7:7 = 1'b0 - separate gating from ungating setting + // bits: 8:8 = 1'b0 - separate OE from Output delay setting + uint32_t value = (0 << 8) | (0 << 7) | (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0); + WRITE_SCC_HHP_EXTRAS(value); +} + +static inline void scc_mgr_set_hhp_dqse_map(void) +{ + // Load the fixed setting in the SCC manager + WRITE_SCC_HHP_DQSE_MAP(0); +} + +static inline void scc_mgr_set_dqs_out1_delay(uint32_t write_group, uint32_t delay) +{ + WRITE_SCC_DQS_IO_OUT1_DELAY(delay); + +} + +static inline void scc_mgr_set_dqs_out2_delay(uint32_t write_group, uint32_t delay) +{ + WRITE_SCC_DQS_IO_OUT2_DELAY(delay); + +} + +static inline void scc_mgr_set_dm_out1_delay(uint32_t write_group, 
uint32_t dm, uint32_t delay) +{ + WRITE_SCC_DM_IO_OUT1_DELAY(dm, delay); +} + +static inline void scc_mgr_set_dm_out2_delay(uint32_t write_group, uint32_t dm, uint32_t delay) +{ + WRITE_SCC_DM_IO_OUT2_DELAY(dm, delay); +} + +static inline void scc_mgr_set_dm_in_delay(uint32_t write_group, uint32_t dm, uint32_t delay) +{ + WRITE_SCC_DM_IO_IN_DELAY(dm, delay); +} + +static inline void scc_mgr_set_dm_bypass(uint32_t write_group, uint32_t dm, uint32_t bypass) +{ + // Load the setting in the SCC manager + WRITE_SCC_DM_BYPASS(dm, bypass); +} + +//USER Zero all DQS config +// TODO: maybe rename to scc_mgr_zero_dqs_config (or something) +static void scc_mgr_zero_all(void) +{ + uint32_t i, r; + + //USER Zero all DQS config settings, across all groups and all shadow registers + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + + // Strictly speaking this should be called once per group to make + // sure each group's delay chain is refreshed from the SCC register file, + // but since we're resetting all delay chains anyway, we can save some + // runtime by calling select_shadow_regs_for_update just once to switch + // rank. + select_shadow_regs_for_update(r, 0, 1); + + for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { + // The phases actually don't exist on a per-rank basis, but there's + // no harm updating them several times, so let's keep the code simple. + scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE); + scc_mgr_set_dqs_en_phase(i, 0); + scc_mgr_set_dqs_en_delay(i, 0); + } + + for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { + scc_mgr_set_dqdqs_output_phase(i, 0); + // av/cv don't have out2 + scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE); + } + + //USER multicast to all DQS group enables + IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, 0xff); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } +} + +static void scc_set_bypass_mode(uint32_t write_group, uint32_t mode) +{ + // mode = 0 : Do NOT bypass - Half Rate Mode + // mode = 1 : Bypass - Full Rate Mode + + // only need to set once for all groups, pins, dq, dqs, dm + if (write_group == 0) { + DPRINT(1, "Setting HHP Extras"); + scc_mgr_set_hhp_extras(); + DPRINT(1, "Done Setting HHP Extras"); + } + + //USER multicast to all DQ enables + IOWR_32DIRECT(SCC_MGR_DQ_ENA, 0, 0xff); + + IOWR_32DIRECT(SCC_MGR_DM_ENA, 0, 0xff); + + //USER update current DQS IO enable + IOWR_32DIRECT(SCC_MGR_DQS_IO_ENA, 0, 0); + + //USER update the DQS logic + IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, write_group); + + //USER hit update + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); +} + +// Moving up to avoid warnings +static void scc_mgr_load_dqs_for_write_group(uint32_t write_group) +{ + uint32_t read_group; + + // Although OCT affects only write data, the OCT delay is controlled by the DQS logic block + // which is instantiated once per read group. For protocols where a write group consists + // of multiple read groups, the setting must be scanned multiple times. 
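+	// Worked example with hypothetical widths: if RW_MGR_MEM_IF_READ_DQS_WIDTH
+	// were 8 and RW_MGR_MEM_IF_WRITE_DQS_WIDTH were 4, write_group 1 would
+	// scan read groups 2 and 3.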
+ for (read_group = + write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; + read_group < + (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; + ++read_group) { + + IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, read_group); + } +} + +static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin, int32_t out_only) +{ + uint32_t i, r; + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + + select_shadow_regs_for_update(r, write_group, 1); + + //USER Zero all DQ config settings + for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + scc_mgr_set_dq_out1_delay(write_group, i, 0); + scc_mgr_set_dq_out2_delay(write_group, i, IO_DQ_OUT_RESERVE); + if (!out_only) { + scc_mgr_set_dq_in_delay(write_group, i, 0); + } + } + + //USER multicast to all DQ enables + IOWR_32DIRECT(SCC_MGR_DQ_ENA, 0, 0xff); + + //USER Zero all DM config settings + for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { + if (!out_only) { + // Do we really need this? + scc_mgr_set_dm_in_delay(write_group, i, 0); + } + scc_mgr_set_dm_out1_delay(write_group, i, 0); + scc_mgr_set_dm_out2_delay(write_group, i, IO_DM_OUT_RESERVE); + } + + //USER multicast to all DM enables + IOWR_32DIRECT(SCC_MGR_DM_ENA, 0, 0xff); + + //USER zero all DQS io settings + if (!out_only) { + scc_mgr_set_dqs_io_in_delay(write_group, 0); + } + // av/cv don't have out2 + scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE); + scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE); + scc_mgr_load_dqs_for_write_group(write_group); + + //USER multicast to all DQS IO enables (only 1) + IOWR_32DIRECT(SCC_MGR_DQS_IO_ENA, 0, 0); + + //USER hit update to zero everything + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } +} + +//USER load up dqs config settings + +static void scc_mgr_load_dqs(uint32_t dqs) +{ + IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, dqs); +} + +//USER load up dqs io config settings + +static void scc_mgr_load_dqs_io(void) +{ + IOWR_32DIRECT(SCC_MGR_DQS_IO_ENA, 0, 0); +} + +//USER load up dq config settings + +static void scc_mgr_load_dq(uint32_t dq_in_group) +{ + IOWR_32DIRECT(SCC_MGR_DQ_ENA, 0, dq_in_group); +} + +//USER load up dm config settings + +static void scc_mgr_load_dm(uint32_t dm) +{ + IOWR_32DIRECT(SCC_MGR_DM_ENA, 0, dm); +} + +//USER apply and load a particular input delay for the DQ pins in a group +//USER group_bgn is the index of the first dq pin (in the write group) + +static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group, uint32_t group_bgn, + uint32_t delay) +{ + uint32_t i, p; + + for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { + scc_mgr_set_dq_in_delay(write_group, p, delay); + scc_mgr_load_dq(p); + } +} + +//USER apply and load a particular output delay for the DQ pins in a group + +static void scc_mgr_apply_group_dq_out1_delay(uint32_t write_group, uint32_t group_bgn, + uint32_t delay1) +{ + uint32_t i, p; + + for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { + scc_mgr_set_dq_out1_delay(write_group, i, delay1); + scc_mgr_load_dq(i); + } +} + +//USER apply and load a particular output delay for the DM pins in a group + +static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group, uint32_t delay1) +{ + uint32_t i; + + for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { + scc_mgr_set_dm_out1_delay(write_group, i, delay1); + scc_mgr_load_dm(i); + } +} + +//USER apply and load delay on both DQS and OCT out1 +static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group, uint32_t 
delay) +{ + scc_mgr_set_dqs_out1_delay(write_group, delay); + scc_mgr_load_dqs_io(); + + scc_mgr_set_oct_out1_delay(write_group, delay); + scc_mgr_load_dqs_for_write_group(write_group); +} + +//USER set delay on both DQS and OCT out1 by incrementally changing +//USER the settings one dtap at a time towards the target value, to avoid +//USER breaking the lock of the DLL/PLL on the memory device. +static void scc_mgr_set_group_dqs_io_and_oct_out1_gradual(uint32_t write_group, uint32_t delay) +{ + uint32_t d = READ_SCC_DQS_IO_OUT1_DELAY(); + + while (d > delay) { + --d; + scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + if (QDRII) { + rw_mgr_mem_dll_lock_wait(); + } + } + while (d < delay) { + ++d; + scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + if (QDRII) { + rw_mgr_mem_dll_lock_wait(); + } + } +} + +//USER apply a delay to the entire output side: DQ, DM, DQS, OCT + +static void scc_mgr_apply_group_all_out_delay(uint32_t write_group, uint32_t group_bgn, + uint32_t delay) +{ + //USER dq shift + + scc_mgr_apply_group_dq_out1_delay(write_group, group_bgn, delay); + + //USER dm shift + + scc_mgr_apply_group_dm_out1_delay(write_group, delay); + + //USER dqs and oct shift + + scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, delay); +} + +//USER apply a delay to the entire output side (DQ, DM, DQS, OCT) and to all ranks +static void scc_mgr_apply_group_all_out_delay_all_ranks(uint32_t write_group, uint32_t group_bgn, + uint32_t delay) +{ + uint32_t r; + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + + select_shadow_regs_for_update(r, write_group, 1); + + scc_mgr_apply_group_all_out_delay(write_group, group_bgn, delay); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } +} + +//USER apply a delay to the entire output side: DQ, DM, DQS, OCT + +static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group, uint32_t group_bgn, + uint32_t delay) +{ + uint32_t i, p, new_delay; + + //USER dq shift + + for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { + + new_delay = READ_SCC_DQ_OUT2_DELAY(i); + new_delay += delay; + + if (new_delay > IO_IO_OUT2_DELAY_MAX) { + DPRINT(1, "%s(%lu, %lu, %lu) DQ[%lu,%lu]: %lu > %lu => %lu", + __func__, write_group, group_bgn, delay, i, p, + new_delay, (long unsigned int)IO_IO_OUT2_DELAY_MAX, + (long unsigned int)IO_IO_OUT2_DELAY_MAX); + new_delay = IO_IO_OUT2_DELAY_MAX; + } + + scc_mgr_set_dq_out2_delay(write_group, i, new_delay); + scc_mgr_load_dq(i); + } + + //USER dm shift + + for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { + new_delay = READ_SCC_DM_IO_OUT2_DELAY(i); + new_delay += delay; + + if (new_delay > IO_IO_OUT2_DELAY_MAX) { + DPRINT(1, "%s(%lu, %lu, %lu) DM[%lu]: %lu > %lu => %lu", + __func__, write_group, group_bgn, delay, i, + new_delay, (long unsigned int)IO_IO_OUT2_DELAY_MAX, + (long unsigned int)IO_IO_OUT2_DELAY_MAX); + new_delay = IO_IO_OUT2_DELAY_MAX; + } + + scc_mgr_set_dm_out2_delay(write_group, i, new_delay); + scc_mgr_load_dm(i); + } + + //USER dqs shift + + new_delay = READ_SCC_DQS_IO_OUT2_DELAY(); + new_delay += delay; + + if (new_delay > IO_IO_OUT2_DELAY_MAX) { + DPRINT(1, "%s(%lu, %lu, %lu) DQS: %lu > %d => %d; adding %lu to OUT1", + __func__, write_group, group_bgn, delay, + new_delay, IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX, + new_delay - IO_IO_OUT2_DELAY_MAX); + scc_mgr_set_dqs_out1_delay(write_group, new_delay - IO_IO_OUT2_DELAY_MAX); + new_delay = IO_IO_OUT2_DELAY_MAX; + } + + 
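+	// At this point new_delay has been clamped to IO_IO_OUT2_DELAY_MAX and
+	// any excess has already been folded into the DQS OUT1 delay above.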
scc_mgr_set_dqs_out2_delay(write_group, new_delay); + scc_mgr_load_dqs_io(); + + //USER oct shift + + new_delay = READ_SCC_OCT_OUT2_DELAY(write_group); + new_delay += delay; + + if (new_delay > IO_IO_OUT2_DELAY_MAX) { + DPRINT(1, "%s(%lu, %lu, %lu) DQS: %lu > %d => %d; adding %lu to OUT1", + __func__, write_group, group_bgn, delay, + new_delay, IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX, + new_delay - IO_IO_OUT2_DELAY_MAX); + scc_mgr_set_oct_out1_delay(write_group, new_delay - IO_IO_OUT2_DELAY_MAX); + new_delay = IO_IO_OUT2_DELAY_MAX; + } + + scc_mgr_set_oct_out2_delay(write_group, new_delay); + scc_mgr_load_dqs_for_write_group(write_group); +} + +//USER apply a delay to the entire output side (DQ, DM, DQS, OCT) and to all ranks +static void scc_mgr_apply_group_all_out_delay_add_all_ranks(uint32_t write_group, + uint32_t group_bgn, uint32_t delay) +{ + uint32_t r; + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + + select_shadow_regs_for_update(r, write_group, 1); + + scc_mgr_apply_group_all_out_delay_add(write_group, group_bgn, delay); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } +} + +static inline void scc_mgr_spread_out2_delay_all_ranks(uint32_t write_group, uint32_t test_bgn) +{ +} + +// optimization used to recover some slots in ddr3 inst_rom +// could be applied to other protocols if we wanted to +static void set_jump_as_return(void) +{ + // to save space, we replace return with jump to special shared RETURN instruction + // so we set the counter to large value so that we always jump + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0xFF); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_RETURN); + +} + +// should always use constants as argument to ensure all computations are performed at compile time +static inline void delay_for_n_mem_clocks(const uint32_t clocks) +{ + uint32_t afi_clocks; + uint8_t inner; + uint8_t outer; + uint16_t c_loop; + + afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO; /* scale (rounding up) to get afi clocks */ + + // Note, we don't bother accounting for being off a little bit because of a few extra instructions in outer loops + // Note, the loops have a test at the end, and do the test before the decrement, and so always perform the loop + // 1 time more than the counter value + if (afi_clocks == 0) { + inner = outer = c_loop = 0; + } else if (afi_clocks <= 0x100) { + inner = afi_clocks - 1; + outer = 0; + c_loop = 0; + } else if (afi_clocks <= 0x10000) { + inner = 0xff; + outer = (afi_clocks - 1) >> 8; + c_loop = 0; + } else { + inner = 0xff; + outer = 0xff; + c_loop = (afi_clocks - 1) >> 16; + } + + // rom instructions are structured as follows: + // + // IDLE_LOOP2: jnz cntr0, TARGET_A + // IDLE_LOOP1: jnz cntr1, TARGET_B + // return + // + // so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and TARGET_B is + // set to IDLE_LOOP2 as well + // + // if we have no outer loop, though, then we can use IDLE_LOOP1 only, and set + // TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely + // + // a little confusing, but it helps save precious space in the inst_rom and sequencer rom + // and keeps the delays more accurate and reduces overhead + if (afi_clocks <= 0x100) { + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner)); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_IDLE_LOOP1); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_IDLE_LOOP1); + + } else { + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner)); + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 
SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer)); + + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_IDLE_LOOP2); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_IDLE_LOOP2); + + // hack to get around compiler not being smart enough + if (afi_clocks <= 0x10000) { + // only need to run once + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_IDLE_LOOP2); + } else { + do { + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_IDLE_LOOP2); + } while (c_loop-- != 0); + } + } +} + +// should always use constants as argument to ensure all computations are performed at compile time +static inline void delay_for_n_ns(const uint32_t nanoseconds) +{ + delay_for_n_mem_clocks((1000 * nanoseconds) / (1000000 / AFI_CLK_FREQ) * AFI_RATE_RATIO); +} + +// Special routine to recover memory device from illegal state after +// ck/dqs relationship is violated. +static inline void recover_mem_device_after_ck_dqs_violation(void) +{ + // Current protocol doesn't require any special recovery +} + +static void rw_mgr_rdimm_initialize(void) +{ +} + +static void rw_mgr_mem_initialize(void) +{ + uint32_t r; + + //USER The reset / cke part of initialization is broadcasted to all ranks + IOWR_32DIRECT(RW_MGR_SET_CS_AND_ODT_MASK, 0, RW_MGR_RANK_ALL); + + // Here's how you load register for a loop + //USER Counters are located @ 0x800 + //USER Jump address are located @ 0xC00 + //USER For both, registers 0 to 3 are selected using bits 3 and 2, like in + //USER 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C + // I know this ain't pretty, but Avalon bus throws away the 2 least significant bits + + //USER start with memory RESET activated + + //USER tINIT is typically 200us (but can be adjusted in the GUI) + //USER The total number of cycles required for this nested counter structure to + //USER complete is defined by: + //USER num_cycles = (CTR2 + 1) * [(CTR1 + 1) * (2 * (CTR0 + 1) + 1) + 1] + 1 + + //USER Load counters + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL)); + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL)); + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL)); + + //USER Load jump address + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_INIT_RESET_0_CKE_0); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_INIT_RESET_0_CKE_0); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_INIT_RESET_0_CKE_0); + + //USER Execute count instruction + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_INIT_RESET_0_CKE_0); + + //USER indicate that memory is stable + IOWR_32DIRECT(PHY_MGR_RESET_MEM_STBL, 0, 1); + + //USER transition the RESET to high + //USER Wait for 500us + //USER num_cycles = (CTR2 + 1) * [(CTR1 + 1) * (2 * (CTR0 + 1) + 1) + 1] + 1 + //USER Load counters + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL)); + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL)); + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL)); + + //USER Load jump address + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_INIT_RESET_1_CKE_0); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_INIT_RESET_1_CKE_0); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_INIT_RESET_1_CKE_0); + + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_INIT_RESET_1_CKE_0); + + //USER bring up clock enable + + //USER tXRP < 250 ck cycles + delay_for_n_mem_clocks(250); + + // USER initialize RDIMM buffer so 
MRS and RZQ Calibrate commands will be + // USER propagated to discrete memory devices + rw_mgr_rdimm_initialize(); + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { + if (param->skip_ranks[r]) { + //USER request to skip the rank + + continue; + } + + //USER set rank + set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); + + //USER Use Mirror-ed commands for odd ranks if address mirrorring is on + if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) { + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS2_MIRR); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS3_MIRR); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS1_MIRR); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS0_DLL_RESET_MIRR); + } else { + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS2); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS3); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS1); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS0_DLL_RESET); + } + + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_ZQCL); + + //USER tZQinit = tDLLK = 512 ck cycles + delay_for_n_mem_clocks(512); + } +} + +static void rw_mgr_mem_dll_lock_wait(void) +{ +} + +//USER At the end of calibration we have to program the user settings in, and +//USER hand off the memory to the user. + +static void rw_mgr_mem_handoff(void) +{ + uint32_t r; + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { + if (param->skip_ranks[r]) { + //USER request to skip the rank + + continue; + } + //USER set rank + set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); + + //USER precharge all banks ... + + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_PRECHARGE_ALL); + + //USER load up MR settings specified by user + + //USER Use Mirror-ed commands for odd ranks if address mirrorring is on + if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) { + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS2_MIRR); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS3_MIRR); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS1_MIRR); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS0_USER_MIRR); + } else { + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS2); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS3); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS1); + delay_for_n_mem_clocks(4); + set_jump_as_return(); + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS0_USER); + } + //USER need to wait tMOD (12CK or 15ns) time before issuing other commands, + //USER but we will have plenty of NIOS cycles before actual handoff so its okay. 
+ } + +} + +//USER performs a guaranteed read on the patterns we are going to use during a read test to ensure memory works +static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn, uint32_t group, + uint32_t num_tries, t_btfld * bit_chk, + uint32_t all_ranks) +{ + uint32_t r, vg; + t_btfld correct_mask_vg; + t_btfld tmp_bit_chk; + uint32_t rank_end = + all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); + + *bit_chk = param->read_correct_mask; + correct_mask_vg = param->read_correct_mask_vg; + + for (r = rank_bgn; r < rank_end; r++) { + if (param->skip_ranks[r]) { + //USER request to skip the rank + + continue; + } + //USER set rank + set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); + + //USER Load up a constant bursts of read commands + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x20); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_GUARANTEED_READ); + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x20); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_GUARANTEED_READ_CONT); + + tmp_bit_chk = 0; + for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;; vg--) { + //USER reset the fifos to get pointers to known state + + IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); + IOWR_32DIRECT(RW_MGR_RESET_READ_DATAPATH, 0, 0); + + tmp_bit_chk = + tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS / + RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS); + + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, + ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS + vg) << 2), + __RW_MGR_GUARANTEED_READ); + tmp_bit_chk = + tmp_bit_chk | (correct_mask_vg & ~(IORD_32DIRECT(BASE_RW_MGR, 0))); + + if (vg == 0) { + break; + } + } + *bit_chk &= tmp_bit_chk; + } + + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, (group << 2), __RW_MGR_CLEAR_DQS_ENABLE); + + set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); + DPRINT(2, "test_load_patterns(%lu,ALL) => (%lu == %lu) => %lu", group, *bit_chk, + param->read_correct_mask, (long unsigned int)(*bit_chk == param->read_correct_mask)); + return (*bit_chk == param->read_correct_mask); +} + +static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks(uint32_t group, + uint32_t num_tries, + t_btfld * bit_chk) +{ + if (rw_mgr_mem_calibrate_read_test_patterns(0, group, num_tries, bit_chk, 1)) { + return 1; + } else { + // case:139851 - if guaranteed read fails, we can retry using different dqs enable phases. + // It is possible that with the initial phase, dqs enable is asserted/deasserted too close + // to an dqs edge, truncating the read burst. + uint32_t p; + for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++) { + scc_mgr_set_dqs_en_phase_all_ranks(group, p); + if (rw_mgr_mem_calibrate_read_test_patterns + (0, group, num_tries, bit_chk, 1)) { + return 1; + } + } + return 0; + } +} + +//USER load up the patterns we are going to use during a read test +static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn, uint32_t all_ranks) +{ + uint32_t r; + uint32_t rank_end = + all_ranks ? 
RW_MGR_MEM_NUMBER_OF_RANKS : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); + + for (r = rank_bgn; r < rank_end; r++) { + if (param->skip_ranks[r]) { + //USER request to skip the rank + + continue; + } + //USER set rank + set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); + + //USER Load up a constant bursts + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x20); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_GUARANTEED_WRITE_WAIT0); + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x20); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_GUARANTEED_WRITE_WAIT1); + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0x04); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_GUARANTEED_WRITE_WAIT2); + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_3, 0, 0x04); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_GUARANTEED_WRITE_WAIT3); + + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_GUARANTEED_WRITE); + } + + set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); +} + +static inline void rw_mgr_mem_calibrate_read_load_patterns_all_ranks(void) +{ + rw_mgr_mem_calibrate_read_load_patterns(0, 1); +} + +// pe checkout pattern for harden managers +//void pe_checkout_pattern (void) +//{ +// // test RW manager +// +// // do some reads to check load buffer +// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_1, 0, 0x0); +// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_READ_B2B_WAIT1); +// +// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_2, 0, 0x0); +// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_READ_B2B_WAIT2); +// +// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_0, 0, 0x0); +// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_READ_B2B); +// +// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_3, 0, 0x0); +// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_READ_B2B); +// +// // clear error word +// IOWR_32DIRECT (RW_MGR_RESET_READ_DATAPATH, 0, 0); +// +// IOWR_32DIRECT (RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_READ_B2B); +// +// uint32_t readdata; +// +// // read error word +// readdata = IORD_32DIRECT(BASE_RW_MGR, 0); +// +// // read DI buffer +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 0*4, 0); +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 1*4, 0); +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 2*4, 0); +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 3*4, 0); +// +// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_1, 0, 0x0); +// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_READ_B2B_WAIT1); +// +// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_2, 0, 0x0); +// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_READ_B2B_WAIT2); +// +// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_0, 0, 0x0); +// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_READ_B2B); +// +// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_3, 0, 0x0); +// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_READ_B2B); +// +// // clear error word +// IOWR_32DIRECT (RW_MGR_RESET_READ_DATAPATH, 0, 0); +// +// // do read +// IOWR_32DIRECT (RW_MGR_LOOPBACK_MODE, 0, __RW_MGR_READ_B2B); +// +// // read error word +// readdata = IORD_32DIRECT(BASE_RW_MGR, 0); +// +// // error word should be 0x00 +// +// // read DI buffer +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 0*4, 0); +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 1*4, 0); +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 2*4, 0); +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 3*4, 0); +// +// // clear error word +// IOWR_32DIRECT (RW_MGR_RESET_READ_DATAPATH, 0, 0); +// +// // do dm read +// IOWR_32DIRECT (RW_MGR_LOOPBACK_MODE, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1); +// +// // read error word +// readdata = IORD_32DIRECT(BASE_RW_MGR, 0); +// +// // error word should be ff +// +// // read DI buffer +// readdata = 
IORD_32DIRECT(RW_MGR_DI_BASE + 0*4, 0); +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 1*4, 0); +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 2*4, 0); +// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 3*4, 0); +// +// // exit loopback mode +// IOWR_32DIRECT (BASE_RW_MGR, 0, __RW_MGR_IDLE_LOOP2); +// +// // start of phy manager access +// +// readdata = IORD_32DIRECT (PHY_MGR_MAX_RLAT_WIDTH, 0); +// readdata = IORD_32DIRECT (PHY_MGR_MAX_AFI_WLAT_WIDTH, 0); +// readdata = IORD_32DIRECT (PHY_MGR_MAX_AFI_RLAT_WIDTH, 0); +// readdata = IORD_32DIRECT (PHY_MGR_CALIB_SKIP_STEPS, 0); +// readdata = IORD_32DIRECT (PHY_MGR_CALIB_VFIFO_OFFSET, 0); +// readdata = IORD_32DIRECT (PHY_MGR_CALIB_LFIFO_OFFSET, 0); +// +// // start of data manager test +// +// readdata = IORD_32DIRECT (DATA_MGR_DRAM_CFG , 0); +// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_WL , 0); +// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_ADD , 0); +// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_RL , 0); +// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_RFC , 0); +// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_REFI , 0); +// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_WR , 0); +// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_MRD , 0); +// readdata = IORD_32DIRECT (DATA_MGR_COL_WIDTH , 0); +// readdata = IORD_32DIRECT (DATA_MGR_ROW_WIDTH , 0); +// readdata = IORD_32DIRECT (DATA_MGR_BANK_WIDTH , 0); +// readdata = IORD_32DIRECT (DATA_MGR_CS_WIDTH , 0); +// readdata = IORD_32DIRECT (DATA_MGR_ITF_WIDTH , 0); +// readdata = IORD_32DIRECT (DATA_MGR_DVC_WIDTH , 0); +// +//} + +//USER try a read and see if it returns correct data back. has dummy reads inserted into the mix +//USER used to align dqs enable. has more thorough checks than the regular read test. + +static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group, + uint32_t num_tries, uint32_t all_correct, + t_btfld * bit_chk, uint32_t all_groups, + uint32_t all_ranks) +{ + uint32_t r, vg; + t_btfld correct_mask_vg; + t_btfld tmp_bit_chk; + uint32_t rank_end = + all_ranks ? 
RW_MGR_MEM_NUMBER_OF_RANKS : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); + uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_DELAY_SWEEPS) + && ENABLE_SUPER_QUICK_CALIBRATION) || BFM_MODE; + + *bit_chk = param->read_correct_mask; + correct_mask_vg = param->read_correct_mask_vg; + + for (r = rank_bgn; r < rank_end; r++) { + if (param->skip_ranks[r]) { + //USER request to skip the rank + + continue; + } + //USER set rank + set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x10); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_READ_B2B_WAIT1); + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0x10); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_READ_B2B_WAIT2); + + if (quick_read_mode) { + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x1); /* need at least two (1+1) reads to capture failures */ + } else if (all_groups) { + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x06); + } else { + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x32); + } + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_READ_B2B); + if (all_groups) { + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_3, 0, + RW_MGR_MEM_IF_READ_DQS_WIDTH * + RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1); + } else { + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_3, 0, 0x0); + } + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_READ_B2B); + + tmp_bit_chk = 0; + for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;; vg--) { + //USER reset the fifos to get pointers to known state + + IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); + IOWR_32DIRECT(RW_MGR_RESET_READ_DATAPATH, 0, 0); + + tmp_bit_chk = + tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS / + RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS); + + IOWR_32DIRECT(all_groups ? RW_MGR_RUN_ALL_GROUPS : RW_MGR_RUN_SINGLE_GROUP, + ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS + vg) << 2), + __RW_MGR_READ_B2B); + tmp_bit_chk = + tmp_bit_chk | (correct_mask_vg & ~(IORD_32DIRECT(BASE_RW_MGR, 0))); + + if (vg == 0) { + break; + } + } + *bit_chk &= tmp_bit_chk; + } + + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, (group << 2), __RW_MGR_CLEAR_DQS_ENABLE); + + if (all_correct) { + set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); + DPRINT(2, "read_test(%lu,ALL,%lu) => (%lu == %lu) => %lu", group, all_groups, + *bit_chk, param->read_correct_mask, + (long unsigned int)(*bit_chk == param->read_correct_mask)); + return (*bit_chk == param->read_correct_mask); + } else { + set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); + DPRINT(2, "read_test(%lu,ONE,%lu) => (%lu != %lu) => %lu", group, all_groups, + *bit_chk, (long unsigned int)0, (long unsigned int)(*bit_chk != 0x00)); + return (*bit_chk != 0x00); + } +} + +static inline uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group, uint32_t num_tries, + uint32_t all_correct, + t_btfld * bit_chk, + uint32_t all_groups) +{ + return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct, bit_chk, all_groups, + 1); +} + +static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t * v) +{ + //USER fiddle with FIFO + if (HARD_PHY) { + IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_HARD_PHY, 0, grp); + } else if (QUARTER_RATE_MODE && !HARD_VFIFO) { + if ((*v & 3) == 3) { + IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_QR, 0, grp); + } else if ((*v & 2) == 2) { + IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR_HR, 0, grp); + } else if ((*v & 1) == 1) { + IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_HR, 0, grp); + } else { + IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR, 0, grp); + } + } else if (HARD_VFIFO) { + // Arria V & Cyclone V have a hard full-rate VFIFO that only has a single incr signal + 
IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR, 0, grp); + } else { + if (!HALF_RATE_MODE || (*v & 1) == 1) { + IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_HR, 0, grp); + } else { + IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR, 0, grp); + } + } + + (*v)++; + BFM_INC_VFIFO; +} + +//Used in quick cal to properly loop through the duplicated VFIFOs in AV QDRII/RLDRAM +static inline void rw_mgr_incr_vfifo_all(uint32_t grp, uint32_t * v) +{ +#if VFIFO_CONTROL_WIDTH_PER_DQS == 1 + rw_mgr_incr_vfifo(grp, v); +#else + uint32_t i; + for (i = 0; i < VFIFO_CONTROL_WIDTH_PER_DQS; i++) { + rw_mgr_incr_vfifo(grp * VFIFO_CONTROL_WIDTH_PER_DQS + i, v); + if (i != 0) { + (*v)--; + } + } +#endif +} + +static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t * v) +{ + + uint32_t i; + + for (i = 0; i < VFIFO_SIZE - 1; i++) { + rw_mgr_incr_vfifo(grp, v); + } +} + +//USER find a good dqs enable to use + +#if NEWVERSION_DQSEN + +// Navid's version + +static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp) +{ + uint32_t i, d, v, p; + uint32_t max_working_cnt; + uint32_t fail_cnt; + t_btfld bit_chk; + uint32_t dtaps_per_ptap; + uint32_t found_begin, found_end; + uint32_t work_bgn, work_mid, work_end, tmp_delay; + uint32_t test_status; + uint32_t found_passing_read, found_failing_read, initial_failing_dtap; + + reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); + + scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); + scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); + + fail_cnt = 0; + + //USER ************************************************************** + //USER * Step 0 : Determine number of delay taps for each phase tap * + + dtaps_per_ptap = 0; + tmp_delay = 0; + while (tmp_delay < IO_DELAY_PER_OPA_TAP) { + dtaps_per_ptap++; + tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP; + } + dtaps_per_ptap--; + tmp_delay = 0; + + // VFIFO sweep + + //USER ********************************************************* + //USER * Step 1 : First push vfifo until we get a failing read * + for (v = 0; v < VFIFO_SIZE;) { + DPRINT(2, "find_dqs_en_phase: vfifo %lu", BFM_GBL_GET(vfifo_idx)); + test_status = + rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0); + if (!test_status) { + fail_cnt++; + + if (fail_cnt == 2) { + break; + } + } + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + } + + if (v >= VFIFO_SIZE) { + //USER no failing read found!! 
Something must have gone wrong + DPRINT(2, "find_dqs_en_phase: vfifo failed"); + return 0; + } + + max_working_cnt = 0; + + //USER ******************************************************** + //USER * step 2: find first working phase, increment in ptaps * + found_begin = 0; + work_bgn = 0; + for (d = 0; d <= dtaps_per_ptap; d++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { + work_bgn = tmp_delay; + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + for (i = 0; i < VFIFO_SIZE; i++) { + for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++, work_bgn += IO_DELAY_PER_OPA_TAP) { + DPRINT(2, "find_dqs_en_phase: begin: vfifo=%lu ptap=%lu dtap=%lu", + BFM_GBL_GET(vfifo_idx), p, d); + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + + test_status = + rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, + &bit_chk, 0); + + if (test_status) { + max_working_cnt = 1; + found_begin = 1; + break; + } + } + + if (found_begin) { + break; + } + + if (p > IO_DQS_EN_PHASE_MAX) { + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + } + } + + if (found_begin) { + break; + } + } + + if (i >= VFIFO_SIZE) { + //USER cannot find working solution + DPRINT(2, "find_dqs_en_phase: no vfifo/ptap/dtap"); + return 0; + } + + work_end = work_bgn; + + //USER If d is 0 then the working window covers a phase tap and we can follow the old procedure + //USER otherwise, we've found the beginning, and we need to increment the dtaps until we find the end + if (d == 0) { + //USER ******************************************************************** + //USER * step 3a: if we have room, back off by one and increment in dtaps * + COV(EN_PHASE_PTAP_OVERLAP); + + //USER Special case code for backing up a phase + if (p == 0) { + p = IO_DQS_EN_PHASE_MAX; + rw_mgr_decr_vfifo(grp, &v); + } else { + p = p - 1; + } + tmp_delay = work_bgn - IO_DELAY_PER_OPA_TAP; + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + + found_begin = 0; + for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < work_bgn; + d++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { + + DPRINT(2, "find_dqs_en_phase: begin-2: vfifo=%lu ptap=%lu dtap=%lu", + BFM_GBL_GET(vfifo_idx), p, d); + + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + if (rw_mgr_mem_calibrate_read_test_all_ranks + (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + found_begin = 1; + work_bgn = tmp_delay; + break; + } + } + + //USER We have found a working dtap before the ptap found above + if (found_begin == 1) { + max_working_cnt++; + } + //USER Restore VFIFO to old state before we decremented it (if needed) + p = p + 1; + if (p > IO_DQS_EN_PHASE_MAX) { + p = 0; + rw_mgr_incr_vfifo(grp, &v); + } + + scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); + + //USER *********************************************************************************** + //USER * step 4a: go forward from working phase to non working phase, increment in ptaps * + p = p + 1; + work_end += IO_DELAY_PER_OPA_TAP; + if (p > IO_DQS_EN_PHASE_MAX) { + //USER fiddle with FIFO + p = 0; + rw_mgr_incr_vfifo(grp, &v); + } + + found_end = 0; + for (; i < VFIFO_SIZE + 1; i++) { + for (; p <= IO_DQS_EN_PHASE_MAX; p++, work_end += IO_DELAY_PER_OPA_TAP) { + DPRINT(2, "find_dqs_en_phase: end: vfifo=%lu ptap=%lu dtap=%lu", + BFM_GBL_GET(vfifo_idx), p, (long unsigned int)0); + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + + if (!rw_mgr_mem_calibrate_read_test_all_ranks + (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + found_end = 1; + break; + } else { + max_working_cnt++; + } + } + + if (found_end) { + break; + } + + if (p > IO_DQS_EN_PHASE_MAX) { + //USER fiddle with FIFO + 
rw_mgr_incr_vfifo(grp, &v); + p = 0; + } + } + + if (i >= VFIFO_SIZE + 1) { + //USER cannot see edge of failing read + DPRINT(2, "find_dqs_en_phase: end: failed"); + return 0; + } + //USER ********************************************************* + //USER * step 5a: back off one from last, increment in dtaps * + + //USER Special case code for backing up a phase + if (p == 0) { + p = IO_DQS_EN_PHASE_MAX; + rw_mgr_decr_vfifo(grp, &v); + } else { + p = p - 1; + } + + work_end -= IO_DELAY_PER_OPA_TAP; + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + + //USER * The actual increment of dtaps is done outside of the if/else loop to share code + d = 0; + + DPRINT(2, "find_dqs_en_phase: found end v/p: vfifo=%lu ptap=%lu", + BFM_GBL_GET(vfifo_idx), p); + } else { + + //USER ******************************************************************** + //USER * step 3-5b: Find the right edge of the window using delay taps * + COV(EN_PHASE_PTAP_NO_OVERLAP); + + DPRINT(2, "find_dqs_en_phase: begin found: vfifo=%lu ptap=%lu dtap=%lu begin=%lu", + BFM_GBL_GET(vfifo_idx), p, d, work_bgn); + BFM_GBL_SET(dqs_enable_left_edge[grp].v, BFM_GBL_GET(vfifo_idx)); + BFM_GBL_SET(dqs_enable_left_edge[grp].p, p); + BFM_GBL_SET(dqs_enable_left_edge[grp].d, d); + BFM_GBL_SET(dqs_enable_left_edge[grp].ps, work_bgn); + + work_end = work_bgn; + + //USER * The actual increment of dtaps is done outside of the if/else loop to share code + + //USER Only here to counterbalance a subtract later on which is not needed if this branch + //USER of the algorithm is taken + max_working_cnt++; + } + + //USER The dtap increment to find the failing edge is done here + for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { + + DPRINT(2, "find_dqs_en_phase: end-2: dtap=%lu", d); + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + break; + } + } + + //USER Go back to working dtap + if (d != 0) { + work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP; + } + + DPRINT(2, "find_dqs_en_phase: found end v/p/d: vfifo=%lu ptap=%lu dtap=%lu end=%lu", + BFM_GBL_GET(vfifo_idx), p, d - 1, work_end); + BFM_GBL_SET(dqs_enable_right_edge[grp].v, BFM_GBL_GET(vfifo_idx)); + BFM_GBL_SET(dqs_enable_right_edge[grp].p, p); + BFM_GBL_SET(dqs_enable_right_edge[grp].d, d - 1); + BFM_GBL_SET(dqs_enable_right_edge[grp].ps, work_end); + + if (work_end >= work_bgn) { + //USER we have a working range + } else { + //USER nil range + DPRINT(2, "find_dqs_en_phase: end-2: failed"); + return 0; + } + + DPRINT(2, "find_dqs_en_phase: found range [%lu,%lu]", work_bgn, work_end); + + // *************************************************************** + //USER * We need to calculate the number of dtaps that equal a ptap + //USER * To do that we'll back up a ptap and re-find the edge of the + //USER * window using dtaps + + DPRINT(2, "find_dqs_en_phase: calculate dtaps_per_ptap for tracking"); + + //USER Special case code for backing up a phase + if (p == 0) { + p = IO_DQS_EN_PHASE_MAX; + rw_mgr_decr_vfifo(grp, &v); + DPRINT(2, "find_dqs_en_phase: backed up cycle/phase: v=%lu p=%lu", + BFM_GBL_GET(vfifo_idx), p); + } else { + p = p - 1; + DPRINT(2, "find_dqs_en_phase: backed up phase only: v=%lu p=%lu", + BFM_GBL_GET(vfifo_idx), p); + } + + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + + //USER Increase dtap until we first see a passing read (in case the window is smaller than a ptap), + //USER and then a failing read to mark the edge of the window again + + //USER Find a passing read + 
DPRINT(2, "find_dqs_en_phase: find passing read"); + found_passing_read = 0; + found_failing_read = 0; + initial_failing_dtap = d; + for (; d <= IO_DQS_EN_DELAY_MAX; d++) { + DPRINT(2, "find_dqs_en_phase: testing read d=%lu", d); + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + found_passing_read = 1; + break; + } + } + + if (found_passing_read) { + //USER Find a failing read + DPRINT(2, "find_dqs_en_phase: find failing read"); + for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) { + DPRINT(2, "find_dqs_en_phase: testing read d=%lu", d); + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + if (!rw_mgr_mem_calibrate_read_test_all_ranks + (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + found_failing_read = 1; + break; + } + } + } else { + DPRINT(1, + "find_dqs_en_phase: failed to calculate dtaps per ptap. Fall back on static value"); + } + + //USER The dynamically calculated dtaps_per_ptap is only valid if we found a passing/failing read + //USER If we didn't, it means d hit the max (IO_DQS_EN_DELAY_MAX). + //USER Otherwise, dtaps_per_ptap retains its statically calculated value. + if (found_passing_read && found_failing_read) { + dtaps_per_ptap = d - initial_failing_dtap; + } + + IOWR_32DIRECT(REG_FILE_DTAPS_PER_PTAP, 0, dtaps_per_ptap); + + DPRINT(2, "find_dqs_en_phase: dtaps_per_ptap=%lu - %lu = %lu", d, initial_failing_dtap, + dtaps_per_ptap); + + //USER ******************************************** + //USER * step 6: Find the centre of the window * + + work_mid = (work_bgn + work_end) / 2; + tmp_delay = 0; + + DPRINT(2, "work_bgn=%ld work_end=%ld work_mid=%ld", work_bgn, work_end, work_mid); + //USER Get the middle delay to be less than a VFIFO delay + for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++, tmp_delay += IO_DELAY_PER_OPA_TAP) ; + DPRINT(2, "vfifo ptap delay %ld", tmp_delay); + while (work_mid > tmp_delay) + work_mid -= tmp_delay; + DPRINT(2, "new work_mid %ld", work_mid); + tmp_delay = 0; + for (p = 0; p <= IO_DQS_EN_PHASE_MAX && tmp_delay < work_mid; + p++, tmp_delay += IO_DELAY_PER_OPA_TAP) ; + tmp_delay -= IO_DELAY_PER_OPA_TAP; + DPRINT(2, "new p %ld, tmp_delay=%ld", p - 1, tmp_delay); + for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < work_mid; + d++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) ; + DPRINT(2, "new d %ld, tmp_delay=%ld", d, tmp_delay); + + scc_mgr_set_dqs_en_phase_all_ranks(grp, p - 1); + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + //USER push vfifo until we can successfully calibrate. 
We can do this because + //USER the largest possible margin in 1 VFIFO cycle + + for (i = 0; i < VFIFO_SIZE; i++) { + DPRINT(2, "find_dqs_en_phase: center: vfifo=%lu", BFM_GBL_GET(vfifo_idx)); + if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + break; + } + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + } + + if (i >= VFIFO_SIZE) { + DPRINT(2, "find_dqs_en_phase: center: failed"); + return 0; + } + DPRINT(2, "find_dqs_en_phase: center found: vfifo=%li ptap=%lu dtap=%lu", + BFM_GBL_GET(vfifo_idx), p - 1, d); + BFM_GBL_SET(dqs_enable_mid[grp].v, BFM_GBL_GET(vfifo_idx)); + BFM_GBL_SET(dqs_enable_mid[grp].p, p - 1); + BFM_GBL_SET(dqs_enable_mid[grp].d, d); + BFM_GBL_SET(dqs_enable_mid[grp].ps, work_mid); + return 1; +} + +#if 0 +// Ryan's algorithm + +static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp) +{ + uint32_t i, d, v, p; + uint32_t min_working_p, max_working_p, min_working_d, max_working_d, max_working_cnt; + uint32_t fail_cnt; + t_btfld bit_chk; + uint32_t dtaps_per_ptap; + uint32_t found_begin, found_end; + uint32_t tmp_delay; + + TRACE_FUNC("%lu", grp); + + reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); + + scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); + scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); + + fail_cnt = 0; + + //USER ************************************************************** + //USER * Step 0 : Determine number of delay taps for each phase tap * + + dtaps_per_ptap = 0; + tmp_delay = 0; + while (tmp_delay < IO_DELAY_PER_OPA_TAP) { + dtaps_per_ptap++; + tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP; + } + dtaps_per_ptap--; + + //USER ********************************************************* + //USER * Step 1 : First push vfifo until we get a failing read * + for (v = 0; v < VFIFO_SIZE;) { + if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + fail_cnt++; + + if (fail_cnt == 2) { + break; + } + } + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + } + + if (i >= VFIFO_SIZE) { + //USER no failing read found!! 
Something must have gone wrong + return 0; + } + + max_working_cnt = 0; + min_working_p = 0; + + //USER ******************************************************** + //USER * step 2: find first working phase, increment in ptaps * + found_begin = 0; + for (d = 0; d <= dtaps_per_ptap; d++) { + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + for (i = 0; i < VFIFO_SIZE; i++) { + for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++) { + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + + if (rw_mgr_mem_calibrate_read_test_all_ranks + (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + max_working_cnt = 1; + found_begin = 1; + break; + } + } + + if (found_begin) { + break; + } + + if (p > IO_DQS_EN_PHASE_MAX) { + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + } + } + + if (found_begin) { + break; + } + } + + if (i >= VFIFO_SIZE) { + //USER cannot find working solution + return 0; + } + + min_working_p = p; + + //USER If d is 0 then the working window covers a phase tap and we can follow the old procedure + //USER otherwise, we've found the beginning, and we need to increment the dtaps until we find the end + if (d == 0) { + //USER ******************************************************************** + //USER * step 3a: if we have room, back off by one and increment in dtaps * + min_working_d = 0; + + //USER Special case code for backing up a phase + if (p == 0) { + p = IO_DQS_EN_PHASE_MAX; + rw_mgr_decr_vfifo(grp, &v); + } else { + p = p - 1; + } + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + + found_begin = 0; + for (d = 0; d <= dtaps_per_ptap; d++) { + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + if (rw_mgr_mem_calibrate_read_test_all_ranks + (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + found_begin = 1; + min_working_d = d; + break; + } + } + + //USER We have found a working dtap before the ptap found above + if (found_begin == 1) { + min_working_p = p; + max_working_cnt++; + } + //USER Restore VFIFO to old state before we decremented it + p = p + 1; + if (p > IO_DQS_EN_PHASE_MAX) { + p = 0; + rw_mgr_incr_vfifo(grp, &v); + } + + scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); + + //USER *********************************************************************************** + //USER * step 4a: go forward from working phase to non working phase, increment in ptaps * + p = p + 1; + if (p > IO_DQS_EN_PHASE_MAX) { + //USER fiddle with FIFO + p = 0; + rw_mgr_incr_vfifo(grp, &v); + } + + found_end = 0; + for (; i < VFIFO_SIZE + 1; i++) { + for (; p <= IO_DQS_EN_PHASE_MAX; p++) { + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + + if (!rw_mgr_mem_calibrate_read_test_all_ranks + (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + found_end = 1; + break; + } else { + max_working_cnt++; + } + } + + if (found_end) { + break; + } + + if (p > IO_DQS_EN_PHASE_MAX) { + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + p = 0; + } + } + + if (i >= VFIFO_SIZE + 1) { + //USER cannot see edge of failing read + return 0; + } + //USER ********************************************************* + //USER * step 5a: back off one from last, increment in dtaps * + max_working_d = 0; + + //USER Special case code for backing up a phase + if (p == 0) { + p = IO_DQS_EN_PHASE_MAX; + rw_mgr_decr_vfifo(grp, &v); + } else { + p = p - 1; + } + + max_working_p = p; + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + + for (d = 0; d <= IO_DQS_EN_DELAY_MAX; d++) { + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + if (!rw_mgr_mem_calibrate_read_test_all_ranks + (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + break; + } + } + + //USER Go back to working dtap + if (d != 0) { + 
max_working_d = d - 1; + } + + } else { + + //USER ******************************************************************** + //USER * step 3-5b: Find the right edge of the window using delay taps * + + max_working_p = min_working_p; + min_working_d = d; + + for (; d <= IO_DQS_EN_DELAY_MAX; d++) { + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + if (!rw_mgr_mem_calibrate_read_test_all_ranks + (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + break; + } + } + + //USER Go back to working dtap + if (d != 0) { + max_working_d = d - 1; + } + //USER Only here to counterbalance a subtract later on which is not needed if this branch + //USER of the algorithm is taken + max_working_cnt++; + } + + //USER ******************************************** + //USER * step 6: Find the centre of the window * + + //USER If the number of working phases is even we will step back a phase and find the + //USER edge with a larger delay chain tap + if ((max_working_cnt & 1) == 0) { + p = min_working_p + (max_working_cnt - 1) / 2; + + //USER Special case code for backing up a phase + if (max_working_p == 0) { + max_working_p = IO_DQS_EN_PHASE_MAX; + rw_mgr_decr_vfifo(grp, &v); + } else { + max_working_p = max_working_p - 1; + } + + scc_mgr_set_dqs_en_phase_all_ranks(grp, max_working_p); + + //USER Code to determine at which dtap we should start searching again for a failure + //USER If we've moved back such that the max and min p are the same, we should start searching + //USER from where the window actually exists + if (max_working_p == min_working_p) { + d = min_working_d; + } else { + d = max_working_d; + } + + for (; d <= IO_DQS_EN_DELAY_MAX; d++) { + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + if (!rw_mgr_mem_calibrate_read_test_all_ranks + (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + break; + } + } + + //USER Go back to working dtap + if (d != 0) { + max_working_d = d - 1; + } + } else { + p = min_working_p + (max_working_cnt) / 2; + } + + while (p > IO_DQS_EN_PHASE_MAX) { + p -= (IO_DQS_EN_PHASE_MAX + 1); + } + + d = (min_working_d + max_working_d) / 2; + + scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + + //USER push vfifo until we can successfully calibrate + + for (i = 0; i < VFIFO_SIZE; i++) { + if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + break; + } + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + } + + if (i >= VFIFO_SIZE) { + return 0; + } + + return 1; +} + +#endif + +#else +// Val's original version + +static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp) +{ + uint32_t i, j, v, d; + uint32_t min_working_d, max_working_cnt; + uint32_t fail_cnt; + t_btfld bit_chk; + uint32_t delay_per_ptap_mid; + + reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); + + scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); + scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); + + fail_cnt = 0; + + //USER first push vfifo until we get a failing read + v = 0; + for (i = 0; i < VFIFO_SIZE; i++) { + if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { + fail_cnt++; + + if (fail_cnt == 2) { + break; + } + } + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + } + + if (v >= VFIFO_SIZE) { + //USER no failing read found!! 
Something must have gone wrong + + return 0; + } + + max_working_cnt = 0; + min_working_d = 0; + + for (i = 0; i < VFIFO_SIZE + 1; i++) { + for (d = 0; d <= IO_DQS_EN_PHASE_MAX; d++) { + scc_mgr_set_dqs_en_phase_all_ranks(grp, d); + + rw_mgr_mem_calibrate_read_test_all_ranks(grp, NUM_READ_PB_TESTS, + PASS_ONE_BIT, &bit_chk, 0); + if (bit_chk) { + //USER passing read + + if (max_working_cnt == 0) { + min_working_d = d; + } + + max_working_cnt++; + } else { + if (max_working_cnt > 0) { + //USER already have one working value + break; + } + } + } + + if (d > IO_DQS_EN_PHASE_MAX) { + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + } else { + //USER found working solution! + + d = min_working_d + (max_working_cnt - 1) / 2; + + while (d > IO_DQS_EN_PHASE_MAX) { + d -= (IO_DQS_EN_PHASE_MAX + 1); + } + + break; + } + } + + if (i >= VFIFO_SIZE + 1) { + //USER cannot find working solution or cannot see edge of failing read + + return 0; + } + //USER in the case the number of working steps is even, use 50ps taps to further center the window + + if ((max_working_cnt & 1) == 0) { + delay_per_ptap_mid = IO_DELAY_PER_OPA_TAP / 2; + + //USER increment in 50ps taps until we reach the required amount + + for (i = 0, j = 0; i <= IO_DQS_EN_DELAY_MAX && j < delay_per_ptap_mid; + i++, j += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) ; + + scc_mgr_set_dqs_en_delay_all_ranks(grp, i - 1); + } + + scc_mgr_set_dqs_en_phase_all_ranks(grp, d); + + //USER push vfifo until we can successfully calibrate + + for (i = 0; i < VFIFO_SIZE; i++) { + if (rw_mgr_mem_calibrate_read_test_all_ranks + (grp, NUM_READ_PB_TESTS, PASS_ONE_BIT, &bit_chk, 0)) { + break; + } + //USER fiddle with FIFO + rw_mgr_incr_vfifo(grp, &v); + } + + if (i >= VFIFO_SIZE) { + return 0; + } + + return 1; +} + +#endif + +// Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different dq_in_delay values +static inline uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay(uint32_t + write_group, + uint32_t + read_group, + uint32_t + test_bgn) +{ + uint32_t found; + uint32_t i; + uint32_t p; + uint32_t d; + uint32_t r; + + const uint32_t delay_step = IO_IO_IN_DELAY_MAX / (RW_MGR_MEM_DQ_PER_READ_DQS - 1); + + // try different dq_in_delays since the dq path is shorter than dqs + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + select_shadow_regs_for_update(r, write_group, 1); + for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; + i++, p++, d += delay_step) { + DPRINT(1, + "rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%lu/%lu r=%lu, i=%lu p=%lu d=%lu", + write_group, read_group, r, i, p, d); + scc_mgr_set_dq_in_delay(write_group, p, d); + scc_mgr_load_dq(p); + } + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } + + found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group); + + DPRINT(1, + "rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%lu/%lu found=%lu; Reseting delay chain to zero", + write_group, read_group, found); + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + select_shadow_regs_for_update(r, write_group, 1); + for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { + scc_mgr_set_dq_in_delay(write_group, p, 0); + scc_mgr_load_dq(p); + } + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } + + return found; +} + +//USER per-bit deskew DQ and center + +#if NEWVERSION_RDDESKEW + +static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn, uint32_t write_group, + uint32_t read_group, uint32_t test_bgn, + uint32_t 
use_read_test, uint32_t update_fom) +{ + uint32_t i, p, d, min_index; + //USER Store these as signed since there are comparisons with signed numbers + t_btfld bit_chk; + t_btfld sticky_bit_chk; + int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS]; + int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS]; + int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS]; + int32_t mid; + int32_t orig_mid_min, mid_min; + int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs, final_dqs_en; + int32_t dq_margin, dqs_margin; + uint32_t stop; + + start_dqs = READ_SCC_DQS_IN_DELAY(read_group); + if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { + start_dqs_en = READ_SCC_DQS_EN_DELAY(read_group); + } + + select_curr_shadow_reg_using_rank(rank_bgn); + + //USER per-bit deskew + + //USER set the left and right edge of each bit to an illegal value + //USER use (IO_IO_IN_DELAY_MAX + 1) as an illegal value + sticky_bit_chk = 0; + for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { + left_edge[i] = IO_IO_IN_DELAY_MAX + 1; + right_edge[i] = IO_IO_IN_DELAY_MAX + 1; + } + + //USER Search for the left edge of the window for each bit + for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) { + scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + //USER Stop searching when the read test doesn't pass AND when we've seen a passing read on every bit + if (use_read_test) { + stop = + !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group, NUM_READ_PB_TESTS, + PASS_ONE_BIT, &bit_chk, 0, 0); + } else { + rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, PASS_ONE_BIT, + &bit_chk, 0); + bit_chk = + bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS * + (read_group - + (write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / + RW_MGR_MEM_IF_WRITE_DQS_WIDTH))); + stop = (bit_chk == 0); + } + sticky_bit_chk = sticky_bit_chk | bit_chk; + stop = stop && (sticky_bit_chk == param->read_correct_mask); + DPRINT(2, "vfifo_center(left): dtap=%lu => " BTFLD_FMT " == " BTFLD_FMT " && %lu", + d, sticky_bit_chk, param->read_correct_mask, stop); + + if (stop == 1) { + break; + } else { + for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { + if (bit_chk & 1) { + //USER Remember a passing test as the left_edge + left_edge[i] = d; + } else { + //USER If a left edge has not been seen yet, then a future passing test will mark this edge as the right edge + if (left_edge[i] == IO_IO_IN_DELAY_MAX + 1) { + right_edge[i] = -(d + 1); + } + } + DPRINT(2, + "vfifo_center[l,d=%lu]: bit_chk_test=%d left_edge[%lu]: %ld right_edge[%lu]: %ld", + d, (int)(bit_chk & 1), i, left_edge[i], i, right_edge[i]); + bit_chk = bit_chk >> 1; + } + } + } + + //USER Reset DQ delay chains to 0 + scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0); + sticky_bit_chk = 0; + for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) { + + DPRINT(2, "vfifo_center: left_edge[%lu]: %ld right_edge[%lu]: %ld", i, left_edge[i], + i, right_edge[i]); + + //USER Check for cases where we haven't found the left edge, which makes our assignment of the the + //USER right edge invalid. Reset it to the illegal value. 
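+		//USER (A brief note on why: the left-edge sweep above stores right_edge[i] = -(d + 1) for any
+		//USER bit that failed before its first passing read; if no passing read ever followed, that
+		//USER placeholder is meaningless, so it is cleared here and left to the right-edge sweep below.)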
+ if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) + && (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { + right_edge[i] = IO_IO_IN_DELAY_MAX + 1; + DPRINT(2, "vfifo_center: reset right_edge[%lu]: %ld", i, right_edge[i]); + } + //USER Reset sticky bit (except for bits where we have seen both the left and right edge) + sticky_bit_chk = sticky_bit_chk << 1; + if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) + && (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { + sticky_bit_chk = sticky_bit_chk | 1; + } + + if (i == 0) { + break; + } + } + + //USER Search for the right edge of the window for each bit + for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) { + scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs); + if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { + uint32_t delay = d + start_dqs_en; + if (delay > IO_DQS_EN_DELAY_MAX) { + delay = IO_DQS_EN_DELAY_MAX; + } + scc_mgr_set_dqs_en_delay(read_group, delay); + } + scc_mgr_load_dqs(read_group); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + //USER Stop searching when the read test doesn't pass AND when we've seen a passing read on every bit + if (use_read_test) { + stop = + !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group, NUM_READ_PB_TESTS, + PASS_ONE_BIT, &bit_chk, 0, 0); + } else { + rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, PASS_ONE_BIT, + &bit_chk, 0); + bit_chk = + bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS * + (read_group - + (write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / + RW_MGR_MEM_IF_WRITE_DQS_WIDTH))); + stop = (bit_chk == 0); + } + sticky_bit_chk = sticky_bit_chk | bit_chk; + stop = stop && (sticky_bit_chk == param->read_correct_mask); + + DPRINT(2, "vfifo_center(right): dtap=%lu => " BTFLD_FMT " == " BTFLD_FMT " && %lu", + d, sticky_bit_chk, param->read_correct_mask, stop); + + if (stop == 1) { + break; + } else { + for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { + if (bit_chk & 1) { + //USER Remember a passing test as the right_edge + right_edge[i] = d; + } else { + if (d != 0) { + //USER If a right edge has not been seen yet, then a future passing test will mark this edge as the left edge + if (right_edge[i] == IO_IO_IN_DELAY_MAX + 1) { + left_edge[i] = -(d + 1); + } + } else { + //USER d = 0 failed, but it passed when testing the left edge, so it must be marginal, set it to -1 + if (right_edge[i] == IO_IO_IN_DELAY_MAX + 1 + && left_edge[i] != IO_IO_IN_DELAY_MAX + 1) { + right_edge[i] = -1; + } + //USER If a right edge has not been seen yet, then a future passing test will mark this edge as the left edge + else if (right_edge[i] == IO_IO_IN_DELAY_MAX + 1) { + left_edge[i] = -(d + 1); + } + + } + } + + DPRINT(2, + "vfifo_center[r,d=%lu]: bit_chk_test=%d left_edge[%lu]: %ld right_edge[%lu]: %ld", + d, (int)(bit_chk & 1), i, left_edge[i], i, right_edge[i]); + bit_chk = bit_chk >> 1; + } + } + } + + // Store all observed margins + + //USER Check that all bits have a window + for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { + DPRINT(2, "vfifo_center: left_edge[%lu]: %ld right_edge[%lu]: %ld", i, left_edge[i], + i, right_edge[i]); + BFM_GBL_SET(dq_read_left_edge[read_group][i], left_edge[i]); + BFM_GBL_SET(dq_read_right_edge[read_group][i], right_edge[i]); + if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) + || (right_edge[i] == IO_IO_IN_DELAY_MAX + 1)) { + + //USER Restore delay chain settings before letting the loop in + //USER rw_mgr_mem_calibrate_vfifo to retry different dqs/ck relationships + scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs); + if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { + scc_mgr_set_dqs_en_delay(read_group, 
start_dqs_en); + } + scc_mgr_load_dqs(read_group); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + DPRINT(1, "vfifo_center: failed to find edge [%lu]: %ld %ld", i, + left_edge[i], right_edge[i]); + if (use_read_test) { + set_failing_group_stage(read_group * RW_MGR_MEM_DQ_PER_READ_DQS + i, + CAL_STAGE_VFIFO, CAL_SUBSTAGE_VFIFO_CENTER); + } else { + set_failing_group_stage(read_group * RW_MGR_MEM_DQ_PER_READ_DQS + i, + CAL_STAGE_VFIFO_AFTER_WRITES, + CAL_SUBSTAGE_VFIFO_CENTER); + } + return 0; + } + } + + //USER Find middle of window for each DQ bit + mid_min = left_edge[0] - right_edge[0]; + min_index = 0; + for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { + mid = left_edge[i] - right_edge[i]; + if (mid < mid_min) { + mid_min = mid; + min_index = i; + } + } + + //USER -mid_min/2 represents the amount that we need to move DQS. If mid_min is odd and positive we'll need to add one to + //USER make sure the rounding in further calculations is correct (always bias to the right), so just add 1 for all positive values + if (mid_min > 0) { + mid_min++; + } + mid_min = mid_min / 2; + + DPRINT(1, "vfifo_center: mid_min=%ld (index=%lu)", mid_min, min_index); + + //USER Determine the amount we can change DQS (which is -mid_min) + orig_mid_min = mid_min; + new_dqs = start_dqs - mid_min; + if (new_dqs > IO_DQS_IN_DELAY_MAX) { + new_dqs = IO_DQS_IN_DELAY_MAX; + } else if (new_dqs < 0) { + new_dqs = 0; + } + mid_min = start_dqs - new_dqs; + DPRINT(1, "vfifo_center: new mid_min=%ld new_dqs=%ld", mid_min, new_dqs); + + if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { + if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX) { + mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX; + } else if (start_dqs_en - mid_min < 0) { + mid_min += start_dqs_en - mid_min; + } + } + new_dqs = start_dqs - mid_min; + + DPRINT(1, "vfifo_center: start_dqs=%ld start_dqs_en=%ld new_dqs=%ld mid_min=%ld", + start_dqs, IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? 
start_dqs_en : -1, new_dqs, mid_min); + + //USER Initialize data for export structures + dqs_margin = IO_IO_IN_DELAY_MAX + 1; + dq_margin = IO_IO_IN_DELAY_MAX + 1; + + //USER add delay to bring centre of all DQ windows to the same "level" + for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { + //USER Use values before divide by 2 to reduce round off error + shift_dq = + (left_edge[i] - right_edge[i] - + (left_edge[min_index] - right_edge[min_index])) / 2 + (orig_mid_min - mid_min); + + DPRINT(2, "vfifo_center: before: shift_dq[%lu]=%ld", i, shift_dq); + + if (shift_dq + (int32_t) READ_SCC_DQ_IN_DELAY(p) > (int32_t) IO_IO_IN_DELAY_MAX) { + shift_dq = (int32_t) IO_IO_IN_DELAY_MAX - READ_SCC_DQ_IN_DELAY(i); + } else if (shift_dq + (int32_t) READ_SCC_DQ_IN_DELAY(p) < 0) { + shift_dq = -(int32_t) READ_SCC_DQ_IN_DELAY(p); + } + DPRINT(2, "vfifo_center: after: shift_dq[%lu]=%ld", i, shift_dq); + final_dq[i] = READ_SCC_DQ_IN_DELAY(p) + shift_dq; + scc_mgr_set_dq_in_delay(write_group, p, final_dq[i]); + scc_mgr_load_dq(p); + + DPRINT(2, "vfifo_center: margin[%lu]=[%ld,%ld]", i, + left_edge[i] - shift_dq + (-mid_min), right_edge[i] + shift_dq - (-mid_min)); + //USER To determine values for export structures + if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) { + dq_margin = left_edge[i] - shift_dq + (-mid_min); + } + if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) { + dqs_margin = right_edge[i] + shift_dq - (-mid_min); + } + } + + final_dqs = new_dqs; + if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { + final_dqs_en = start_dqs_en - mid_min; + } + //USER Move DQS-en + if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { + scc_mgr_set_dqs_en_delay(read_group, final_dqs_en); + scc_mgr_load_dqs(read_group); + } + //USER Move DQS + scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs); + scc_mgr_load_dqs(read_group); + + if (update_fom) { + //USER Export values + gbl->fom_in += + (dq_margin + + dqs_margin) / (RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH); + } + + DPRINT(2, "vfifo_center: dq_margin=%ld dqs_margin=%ld", dq_margin, dqs_margin); + + //USER Do not remove this line as it makes sure all of our decisions have been applied + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + return (dq_margin >= 0) && (dqs_margin >= 0); +} + +#else + +static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn, uint32_t grp, + uint32_t test_bgn, uint32_t use_read_test) +{ + uint32_t i, p, d; + uint32_t mid; + t_btfld bit_chk; + uint32_t max_working_dq[RW_MGR_MEM_DQ_PER_READ_DQS]; + uint32_t dq_margin, dqs_margin; + uint32_t start_dqs; + + //USER per-bit deskew. + //USER start of the per-bit sweep with the minimum working delay setting for + //USER all bits. 
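+	//USER Rough worked example of the intent (tap values made up for illustration): if the last passing
+	//USER input delays for three DQ bits are 7, 5 and 9, dq_margin below becomes 5 and the bits get
+	//USER 2, 0 and 4 extra taps respectively, lining their windows up before the DQS sweep.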
+ + for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { + max_working_dq[i] = 0; + } + + for (d = 1; d <= IO_IO_IN_DELAY_MAX; d++) { + scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + if (!rw_mgr_mem_calibrate_read_test + (rank_bgn, grp, NUM_READ_PB_TESTS, PASS_ONE_BIT, &bit_chk, 0, 0)) { + break; + } else { + for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { + if (bit_chk & 1) { + max_working_dq[i] = d; + } + bit_chk = bit_chk >> 1; + } + } + } + + //USER determine minimum working value for DQ + + dq_margin = IO_IO_IN_DELAY_MAX; + + for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { + if (max_working_dq[i] < dq_margin) { + dq_margin = max_working_dq[i]; + } + } + + //USER add delay to bring all DQ windows to the same "level" + + for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { + if (max_working_dq[i] > dq_margin) { + scc_mgr_set_dq_in_delay(write_group, i, max_working_dq[i] - dq_margin); + } else { + scc_mgr_set_dq_in_delay(write_group, i, 0); + } + + scc_mgr_load_dq(p, p); + } + + //USER sweep DQS window, may potentially have more window due to per-bit-deskew that was done + //USER in the previous step. + + start_dqs = READ_SCC_DQS_IN_DELAY(grp); + + for (d = start_dqs + 1; d <= IO_DQS_IN_DELAY_MAX; d++) { + scc_mgr_set_dqs_bus_in_delay(grp, d); + scc_mgr_load_dqs(grp); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + if (!rw_mgr_mem_calibrate_read_test + (rank_bgn, grp, NUM_READ_TESTS, PASS_ALL_BITS, &bit_chk, 0, 0)) { + break; + } + } + + scc_mgr_set_dqs_bus_in_delay(grp, start_dqs); + + //USER margin on the DQS pin + + dqs_margin = d - start_dqs - 1; + + //USER find mid point, +1 so that we don't go crazy pushing DQ + + mid = (dq_margin + dqs_margin + 1) / 2; + + gbl->fom_in += dq_margin + dqs_margin; +// TCLRPT_SET(debug_summary_report->fom_in, debug_summary_report->fom_in + (dq_margin + dqs_margin)); +// TCLRPT_SET(debug_cal_report->cal_status_per_group[grp].fom_in, (dq_margin + dqs_margin)); + + //USER center DQS ... if the headroom is setup properly we shouldn't need to + + if (dqs_margin > mid) { + scc_mgr_set_dqs_bus_in_delay(grp, READ_SCC_DQS_IN_DELAY(grp) + dqs_margin - mid); + + if (DDRX) { + uint32_t delay = READ_SCC_DQS_EN_DELAY(grp) + dqs_margin - mid; + + if (delay > IO_DQS_EN_DELAY_MAX) { + delay = IO_DQS_EN_DELAY_MAX; + } + + scc_mgr_set_dqs_en_delay(grp, delay); + } + } + + scc_mgr_load_dqs(grp); + + //USER center DQ + + if (dq_margin > mid) { + for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { + scc_mgr_set_dq_in_delay(write_group, i, + READ_SCC_DQ_IN_DELAY(i) + dq_margin - mid); + scc_mgr_load_dq(p, p); + } + + dqs_margin += dq_margin - mid; + dq_margin -= dq_margin - mid; + } + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + return (dq_margin + dqs_margin) > 0; +} + +#endif + +//USER calibrate the read valid prediction FIFO. +//USER +//USER - read valid prediction will consist of finding a good DQS enable phase, DQS enable delay, DQS input phase, and DQS input delay. +//USER - we also do a per-bit deskew on the DQ lines. 
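+//USER Rough shape of the full-calibration flow below (NEWVERSION_GW variant): for every candidate
+//USER extra output delay d and DQDQS output phase p, load the guaranteed write/read patterns, run the
+//USER guaranteed read test, then sweep DQS input delay while searching for a working DQS enable
+//USER phase/delay; once one is found, per-bit read deskew and centering runs per shadow register set
+//USER via rw_mgr_mem_calibrate_vfifo_center().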
+ +#if NEWVERSION_GW + +//USER VFIFO Calibration -- Full Calibration +static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group, uint32_t test_bgn) +{ + uint32_t p, d, rank_bgn, sr; + uint32_t dtaps_per_ptap; + uint32_t tmp_delay; + t_btfld bit_chk; + uint32_t grp_calibrated; + uint32_t write_group, write_test_bgn; + uint32_t failed_substage; + uint32_t dqs_in_dtaps, orig_start_dqs; + + //USER update info for sims + + reg_file_set_stage(CAL_STAGE_VFIFO); + + if (DDRX) { + write_group = read_group; + write_test_bgn = test_bgn; + } else { + write_group = + read_group / (RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH); + write_test_bgn = read_group * RW_MGR_MEM_DQ_PER_READ_DQS; + } + + // USER Determine number of delay taps for each phase tap + dtaps_per_ptap = 0; + tmp_delay = 0; + if (!QDRII) { + while (tmp_delay < IO_DELAY_PER_OPA_TAP) { + dtaps_per_ptap++; + tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP; + } + dtaps_per_ptap--; + tmp_delay = 0; + } + //USER update info for sims + + reg_file_set_group(read_group); + + grp_calibrated = 0; + + reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ); + failed_substage = CAL_SUBSTAGE_GUARANTEED_READ; + + for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) { + + if (DDRX || RLDRAMX) { + // In RLDRAMX we may be messing the delay of pins in the same write group but outside of + // the current read group, but that's ok because we haven't calibrated the output side yet. + if (d > 0) { + scc_mgr_apply_group_all_out_delay_add_all_ranks(write_group, + write_test_bgn, d); + } + } + + for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0; p++) { + //USER set a particular dqdqs phase + if (DDRX) { + scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p); + } + //USER Previous iteration may have failed as a result of ck/dqs or ck/dk violation, + //USER in which case the device may require special recovery. 
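+			//USER (On the very first p/d combination there is no previous iteration to recover
+			//USER from, hence the d != 0 || p != 0 guard below.)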
+ if (DDRX || RLDRAMX) { + if (d != 0 || p != 0) { + recover_mem_device_after_ck_dqs_violation(); + } + } + + DPRINT(1, "calibrate_vfifo: g=%lu p=%lu d=%lu", read_group, p, d); + BFM_GBL_SET(gwrite_pos[read_group].p, p); + BFM_GBL_SET(gwrite_pos[read_group].d, d); + + //USER Load up the patterns used by read calibration using current DQDQS phase + + rw_mgr_mem_calibrate_read_load_patterns_all_ranks(); + + if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)) { + if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks + (read_group, 1, &bit_chk)) { + DPRINT(1, "Guaranteed read test failed: g=%lu p=%lu d=%lu", + read_group, p, d); + break; + } + } + // Loop over different DQS in delay chains for the purpose of DQS Enable calibration finding one bit working + orig_start_dqs = READ_SCC_DQS_IN_DELAY(read_group); + for (dqs_in_dtaps = orig_start_dqs; + dqs_in_dtaps <= IO_DQS_IN_DELAY_MAX && grp_calibrated == 0; + dqs_in_dtaps++) { + + for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; + rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { + + if (!param->skip_shadow_regs[sr]) { + + //USER Select shadow register set + select_shadow_regs_for_update(rank_bgn, read_group, + 1); + + WRITE_SCC_DQS_IN_DELAY(read_group, dqs_in_dtaps); + scc_mgr_load_dqs(read_group); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } + } + +// case:56390 + grp_calibrated = 1; + if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay + (write_group, read_group, test_bgn)) { + // USER Read per-bit deskew can be done on a per shadow register basis + for (rank_bgn = 0, sr = 0; + rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; + rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { + //USER Determine if this set of ranks should be skipped entirely + if (!param->skip_shadow_regs[sr]) { + + //USER Select shadow register set + select_shadow_regs_for_update(rank_bgn, + read_group, + 1); + + // Before doing read deskew, set DQS in back to the reserve value + WRITE_SCC_DQS_IN_DELAY(read_group, + orig_start_dqs); + scc_mgr_load_dqs(read_group); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + // If doing read after write calibration, do not update FOM now - do it then + if (!rw_mgr_mem_calibrate_vfifo_center + (rank_bgn, write_group, read_group, + test_bgn, 1, 0)) { + grp_calibrated = 0; + failed_substage = + CAL_SUBSTAGE_VFIFO_CENTER; + } + } + } + } else { + grp_calibrated = 0; + failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; + } + } + + } + } + + if (grp_calibrated == 0) { + set_failing_group_stage(write_group, CAL_STAGE_VFIFO, failed_substage); + + return 0; + } + //USER Reset the delay chains back to zero if they have moved > 1 (check for > 1 because loop will increase d even when pass in first case) + if (DDRX || RLDRAMII) { + if (d > 2) { + scc_mgr_zero_group(write_group, write_test_bgn, 1); + } + } + + return 1; +} + +#else + +//USER VFIFO Calibration -- Full Calibration +static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t g, uint32_t test_bgn) +{ + uint32_t p, rank_bgn, sr; + uint32_t grp_calibrated; + uint32_t failed_substage; + + //USER update info for sims + + reg_file_set_stage(CAL_STAGE_VFIFO); + + reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ); + + failed_substage = CAL_SUBSTAGE_GUARANTEED_READ; + + //USER update info for sims + + reg_file_set_group(g); + + grp_calibrated = 0; + + for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0; p++) { + //USER set a particular dqdqs phase + if (DDRX) { + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); + } + //USER Load up the patterns used by read calibration using 
current DQDQS phase + + rw_mgr_mem_calibrate_read_load_patterns_all_ranks(); + if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)) { + if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks + (read_group, 1, &bit_chk)) { + break; + } + } + + grp_calibrated = 1; + if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay(g, g, test_bgn)) { + // USER Read per-bit deskew can be done on a per shadow register basis + for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; + rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { + + //USER Determine if this set of ranks should be skipped entirely + if (!param->skip_shadow_regs[sr]) { + + //USER Select shadow register set + select_shadow_regs_for_update(rank_bgn, read_group, 1); + + if (!rw_mgr_mem_calibrate_vfifo_center + (rank_bgn, g, test_bgn, 1)) { + grp_calibrated = 0; + failed_substage = CAL_SUBSTAGE_VFIFO_CENTER; + } + } + } + } else { + grp_calibrated = 0; + failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; + } + } + + if (grp_calibrated == 0) { + set_failing_group_stage(g, CAL_STAGE_VFIFO, failed_substage); + return 0; + } + + return 1; +} + +#endif + +//USER VFIFO Calibration -- Read Deskew Calibration after write deskew +static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group, uint32_t test_bgn) +{ + uint32_t rank_bgn, sr; + uint32_t grp_calibrated; + uint32_t write_group; + + //USER update info for sims + + reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES); + reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); + + if (DDRX) { + write_group = read_group; + } else { + write_group = + read_group / (RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH); + } + + //USER update info for sims + reg_file_set_group(read_group); + + grp_calibrated = 1; + // USER Read per-bit deskew can be done on a per shadow register basis + for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; + rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { + + //USER Determine if this set of ranks should be skipped entirely + if (!param->skip_shadow_regs[sr]) { + + //USER Select shadow register set + select_shadow_regs_for_update(rank_bgn, read_group, 1); + + // This is the last calibration round, update FOM here + if (!rw_mgr_mem_calibrate_vfifo_center + (rank_bgn, write_group, read_group, test_bgn, 0, 1)) { + grp_calibrated = 0; + } + } + } + + if (grp_calibrated == 0) { + set_failing_group_stage(write_group, CAL_STAGE_VFIFO_AFTER_WRITES, + CAL_SUBSTAGE_VFIFO_CENTER); + return 0; + } + + return 1; +} + +//USER Calibrate LFIFO to find smallest read latency + +static uint32_t rw_mgr_mem_calibrate_lfifo(void) +{ + uint32_t found_one; + t_btfld bit_chk; + + //USER update info for sims + + reg_file_set_stage(CAL_STAGE_LFIFO); + reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY); + + //USER Load up the patterns used by read calibration for all ranks + + rw_mgr_mem_calibrate_read_load_patterns_all_ranks(); + + found_one = 0; + + do { + IOWR_32DIRECT(PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat); + DPRINT(2, "lfifo: read_lat=%lu", gbl->curr_read_lat); + + if (!rw_mgr_mem_calibrate_read_test_all_ranks + (0, NUM_READ_TESTS, PASS_ALL_BITS, &bit_chk, 1)) { + break; + } + + found_one = 1; + + //USER reduce read latency and see if things are working + //USER correctly + + gbl->curr_read_lat--; + } while (gbl->curr_read_lat > 0); + + //USER reset the fifos to get pointers to known state + + IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); + + if (found_one) { + //USER add a fudge factor to the read latency that was determined + gbl->curr_read_lat 
+= 2; + IOWR_32DIRECT(PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat); + + DPRINT(2, "lfifo: success: using read_lat=%lu", gbl->curr_read_lat); + + return 1; + } else { + set_failing_group_stage(0xff, CAL_STAGE_LFIFO, CAL_SUBSTAGE_READ_LATENCY); + + DPRINT(2, "lfifo: failed at initial read_lat=%lu", gbl->curr_read_lat); + + return 0; + } +} + +//USER issue write test command. +//USER two variants are provided. one that just tests a write pattern and another that +//USER tests datamask functionality. + +static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group, uint32_t test_dm) +{ + uint32_t mcc_instruction; + uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) + && ENABLE_SUPER_QUICK_CALIBRATION) || BFM_MODE; + uint32_t rw_wl_nop_cycles; + + //USER Set counter and jump addresses for the right + //USER number of NOP cycles. + //USER The number of supported NOP cycles can range from -1 to infinity + //USER Three different cases are handled: + //USER + //USER 1. For a number of NOP cycles greater than 0, the RW Mgr looping + //USER mechanism will be used to insert the right number of NOPs + //USER + //USER 2. For a number of NOP cycles equals to 0, the micro-instruction + //USER issuing the write command will jump straight to the micro-instruction + //USER that turns on DQS (for DDRx), or outputs write data (for RLD), skipping + //USER the NOP micro-instruction all together + //USER + //USER 3. A number of NOP cycles equal to -1 indicates that DQS must be turned + //USER on in the same micro-instruction that issues the write command. Then we need + //USER to directly jump to the micro-instruction that sends out the data + //USER + //USER NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters (2 and 3). One + //USER jump-counter (0) is used to perform multiple write-read operations. + //USER one counter left to issue this command in "multiple-group" mode. + + rw_wl_nop_cycles = gbl->rw_wl_nop_cycles; + + if (rw_wl_nop_cycles == -1) { + //USER CNTR 2 - We want to execute the special write operation that + //USER turns on DQS right away and then skip directly to the instruction that + //USER sends out the data. We set the counter to a large number so that the + //USER jump is always taken + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0xFF); + + //USER CNTR 3 - Not used + if (test_dm) { + mcc_instruction = __RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1; + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, + __RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP); + } else { + mcc_instruction = __RW_MGR_LFSR_WR_RD_BANK_0_WL_1; + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_LFSR_WR_RD_BANK_0_DATA); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_LFSR_WR_RD_BANK_0_NOP); + } + + } else if (rw_wl_nop_cycles == 0) { + //USER CNTR 2 - We want to skip the NOP operation and go straight to + //USER the DQS enable instruction. We set the counter to a large number so that the + //USER jump is always taken + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0xFF); + + //USER CNTR 3 - Not used + if (test_dm) { + mcc_instruction = __RW_MGR_LFSR_WR_RD_DM_BANK_0; + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS); + } else { + mcc_instruction = __RW_MGR_LFSR_WR_RD_BANK_0; + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_LFSR_WR_RD_BANK_0_DQS); + } + + } else { + //USER CNTR 2 - In this case we want to execute the next instruction and NOT + //USER take the jump. So we set the counter to 0. 
The jump address doesn't count + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0x0); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, 0x0); + + //USER CNTR 3 - Set the nop counter to the number of cycles we need to loop for, minus 1 + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_3, 0, rw_wl_nop_cycles - 1); + if (test_dm) { + mcc_instruction = __RW_MGR_LFSR_WR_RD_DM_BANK_0; + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP); + } else { + mcc_instruction = __RW_MGR_LFSR_WR_RD_BANK_0; + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_LFSR_WR_RD_BANK_0_NOP); + } + } + + IOWR_32DIRECT(RW_MGR_RESET_READ_DATAPATH, 0, 0); + + if (quick_write_mode) { + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x08); + } else { + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x40); + } + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, mcc_instruction); + + //USER CNTR 1 - This is used to ensure enough time elapses for read data to come back. + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x30); + + if (test_dm) { + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT); + } else { + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_LFSR_WR_RD_BANK_0_WAIT); + } + + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, (group << 2), mcc_instruction); + +} + +//USER Test writes, can check for a single bit pass or multiple bit pass + +static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn, uint32_t write_group, + uint32_t use_dm, uint32_t all_correct, + t_btfld * bit_chk, uint32_t all_ranks) +{ + uint32_t r; + t_btfld correct_mask_vg; + t_btfld tmp_bit_chk; + uint32_t vg; + uint32_t rank_end = + all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); + + *bit_chk = param->write_correct_mask; + correct_mask_vg = param->write_correct_mask_vg; + + for (r = rank_bgn; r < rank_end; r++) { + if (param->skip_ranks[r]) { + //USER request to skip the rank + + continue; + } + //USER set rank + set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); + + tmp_bit_chk = 0; + for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1;; vg--) { + + //USER reset the fifos to get pointers to known state + IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); + + tmp_bit_chk = + tmp_bit_chk << (RW_MGR_MEM_DQ_PER_WRITE_DQS / + RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); + rw_mgr_mem_calibrate_write_test_issue(write_group * + RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS + + vg, use_dm); + + tmp_bit_chk = + tmp_bit_chk | (correct_mask_vg & ~(IORD_32DIRECT(BASE_RW_MGR, 0))); + DPRINT(2, + "write_test(%lu,%lu,%lu) :[%lu,%lu] " BTFLD_FMT " & ~%x => " + BTFLD_FMT " => " BTFLD_FMT, write_group, use_dm, all_correct, r, vg, + correct_mask_vg, IORD_32DIRECT(BASE_RW_MGR, 0), + correct_mask_vg & ~IORD_32DIRECT(BASE_RW_MGR, 0), tmp_bit_chk); + + if (vg == 0) { + break; + } + } + *bit_chk &= tmp_bit_chk; + } + + if (all_correct) { + set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); + DPRINT(2, "write_test(%lu,%lu,ALL) : " BTFLD_FMT " == " BTFLD_FMT " => %lu", + write_group, use_dm, *bit_chk, param->write_correct_mask, + (long unsigned int)(*bit_chk == param->write_correct_mask)); + return (*bit_chk == param->write_correct_mask); + } else { + set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); + DPRINT(2, "write_test(%lu,%lu,ONE) : " BTFLD_FMT " != " BTFLD_FMT " => %lu", + write_group, use_dm, *bit_chk, (long unsigned int)0, + (long unsigned int)(*bit_chk != 0)); + return (*bit_chk != 0x00); + } +} + +static inline uint32_t rw_mgr_mem_calibrate_write_test_all_ranks(uint32_t write_group, + uint32_t use_dm, + uint32_t all_correct, + t_btfld * bit_chk) +{ + return 
rw_mgr_mem_calibrate_write_test(0, write_group, use_dm, all_correct, bit_chk, 1); +} + +//USER level the write operations + +#if NEWVERSION_WL + +//USER Write Levelling -- Full Calibration +static uint32_t rw_mgr_mem_calibrate_wlevel(uint32_t g, uint32_t test_bgn) +{ + uint32_t p, d; + + uint32_t num_additional_fr_cycles = 0; + + t_btfld bit_chk; + uint32_t work_bgn, work_end, work_mid; + uint32_t tmp_delay; + uint32_t found_begin; + uint32_t dtaps_per_ptap; + + //USER update info for sims + + reg_file_set_stage(CAL_STAGE_WLEVEL); + reg_file_set_sub_stage(CAL_SUBSTAGE_WORKING_DELAY); + + //USER maximum phases for the sweep + + dtaps_per_ptap = IORD_32DIRECT(REG_FILE_DTAPS_PER_PTAP, 0); + + //USER starting phases + + //USER update info for sims + + reg_file_set_group(g); + + //USER starting and end range where writes work + + scc_mgr_spread_out2_delay_all_ranks(g, test_bgn); + + work_bgn = 0; + work_end = 0; + + //USER step 1: find first working phase, increment in ptaps, and then in dtaps if ptaps doesn't find a working phase + found_begin = 0; + tmp_delay = 0; + for (d = 0; d <= dtaps_per_ptap; d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) { + scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); + + work_bgn = tmp_delay; + + for (p = 0; + p <= IO_DQDQS_OUT_PHASE_MAX + num_additional_fr_cycles * IO_DLL_CHAIN_LENGTH; + p++, work_bgn += IO_DELAY_PER_OPA_TAP) { + DPRINT(2, "wlevel: begin-1: p=%lu d=%lu", p, d); + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); + + if (rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { + found_begin = 1; + break; + } + } + + if (found_begin) { + break; + } + } + + if (p > IO_DQDQS_OUT_PHASE_MAX + num_additional_fr_cycles * IO_DLL_CHAIN_LENGTH) { + //USER fail, cannot find first working phase + + set_failing_group_stage(g, CAL_STAGE_WLEVEL, CAL_SUBSTAGE_WORKING_DELAY); + + return 0; + } + + DPRINT(2, "wlevel: first valid p=%lu d=%lu", p, d); + + reg_file_set_sub_stage(CAL_SUBSTAGE_LAST_WORKING_DELAY); + + //USER If d is 0 then the working window covers a phase tap and we can follow the old procedure + //USER otherwise, we've found the beginning, and we need to increment the dtaps until we find the end + if (d == 0) { + COV(WLEVEL_PHASE_PTAP_OVERLAP); + work_end = work_bgn + IO_DELAY_PER_OPA_TAP; + + //USER step 2: if we have room, back off by one and increment in dtaps + + if (p > 0) { + int found = 0; + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p - 1); + + tmp_delay = work_bgn - IO_DELAY_PER_OPA_TAP; + + for (d = 0; d <= IO_IO_OUT1_DELAY_MAX && tmp_delay < work_bgn; + d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) { + DPRINT(2, "wlevel: begin-2: p=%lu d=%lu", (p - 1), d); + scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); + + if (rw_mgr_mem_calibrate_write_test_all_ranks + (g, 0, PASS_ONE_BIT, &bit_chk)) { + found = 1; + work_bgn = tmp_delay; + break; + } + } + + { + uint32_t d2; + uint32_t p2; + if (found) { + d2 = d; + p2 = p - 1; + } else { + d2 = 0; + p2 = p; + } + + DPRINT(2, "wlevel: found begin-A: p=%lu d=%lu ps=%lu", p2, d2, + work_bgn); + + BFM_GBL_SET(dqs_wlevel_left_edge[g].p, p2); + BFM_GBL_SET(dqs_wlevel_left_edge[g].d, d2); + BFM_GBL_SET(dqs_wlevel_left_edge[g].ps, work_bgn); + } + + scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, 0); + } else { + DPRINT(2, "wlevel: found begin-B: p=%lu d=%lu ps=%lu", p, d, work_bgn); + + BFM_GBL_SET(dqs_wlevel_left_edge[g].p, p); + BFM_GBL_SET(dqs_wlevel_left_edge[g].d, d); + BFM_GBL_SET(dqs_wlevel_left_edge[g].ps, work_bgn); + } + + //USER step 3: go forward from 
working phase to non working phase, increment in ptaps + + for (p = p + 1; + p <= IO_DQDQS_OUT_PHASE_MAX + num_additional_fr_cycles * IO_DLL_CHAIN_LENGTH; + p++, work_end += IO_DELAY_PER_OPA_TAP) { + DPRINT(2, "wlevel: end-0: p=%lu d=%lu", p, (long unsigned int)0); + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); + + if (!rw_mgr_mem_calibrate_write_test_all_ranks + (g, 0, PASS_ONE_BIT, &bit_chk)) { + break; + } + } + + //USER step 4: back off one from last, increment in dtaps + //USER The actual increment is done outside the if/else statement since it is shared with other code + + p = p - 1; + + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); + + work_end -= IO_DELAY_PER_OPA_TAP; + d = 0; + + } else { + //USER step 5: Window doesn't cover phase tap, just increment dtaps until failure + //USER The actual increment is done outside the if/else statement since it is shared with other code + COV(WLEVEL_PHASE_PTAP_NO_OVERLAP); + work_end = work_bgn; + DPRINT(2, "wlevel: found begin-C: p=%lu d=%lu ps=%lu", p, d, work_bgn); + BFM_GBL_SET(dqs_wlevel_left_edge[g].p, p); + BFM_GBL_SET(dqs_wlevel_left_edge[g].d, d); + BFM_GBL_SET(dqs_wlevel_left_edge[g].ps, work_bgn); + + } + + //USER The actual increment until failure + for (; d <= IO_IO_OUT1_DELAY_MAX; d++, work_end += IO_DELAY_PER_DCHAIN_TAP) { + DPRINT(2, "wlevel: end: p=%lu d=%lu", p, d); + scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); + + if (!rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { + break; + } + } + scc_mgr_zero_group(g, test_bgn, 1); + + work_end -= IO_DELAY_PER_DCHAIN_TAP; + + if (work_end >= work_bgn) { + //USER we have a working range + } else { + //USER nil range + + set_failing_group_stage(g, CAL_STAGE_WLEVEL, CAL_SUBSTAGE_LAST_WORKING_DELAY); + + return 0; + } + + DPRINT(2, "wlevel: found end: p=%lu d=%lu; range: [%lu,%lu]", p, d - 1, work_bgn, work_end); + BFM_GBL_SET(dqs_wlevel_right_edge[g].p, p); + BFM_GBL_SET(dqs_wlevel_right_edge[g].d, d - 1); + BFM_GBL_SET(dqs_wlevel_right_edge[g].ps, work_end); + + //USER center + + work_mid = (work_bgn + work_end) / 2; + + DPRINT(2, "wlevel: work_mid=%ld", work_mid); + + tmp_delay = 0; + + for (p = 0; + p <= IO_DQDQS_OUT_PHASE_MAX + num_additional_fr_cycles * IO_DLL_CHAIN_LENGTH + && tmp_delay < work_mid; p++, tmp_delay += IO_DELAY_PER_OPA_TAP) ; + + if (tmp_delay > work_mid) { + tmp_delay -= IO_DELAY_PER_OPA_TAP; + p--; + } + + while (p > IO_DQDQS_OUT_PHASE_MAX) { + tmp_delay -= IO_DELAY_PER_OPA_TAP; + p--; + } + + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); + + DPRINT(2, "wlevel: p=%lu tmp_delay=%lu left=%lu", p, tmp_delay, work_mid - tmp_delay); + + for (d = 0; d <= IO_IO_OUT1_DELAY_MAX && tmp_delay < work_mid; + d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) ; + + if (tmp_delay > work_mid) { + tmp_delay -= IO_DELAY_PER_DCHAIN_TAP; + d--; + } + + DPRINT(2, "wlevel: p=%lu d=%lu tmp_delay=%lu left=%lu", p, d, tmp_delay, + work_mid - tmp_delay); + + scc_mgr_apply_group_all_out_delay_add_all_ranks(g, test_bgn, d); + + DPRINT(2, "wlevel: found middle: p=%lu d=%lu", p, d); + BFM_GBL_SET(dqs_wlevel_mid[g].p, p); + BFM_GBL_SET(dqs_wlevel_mid[g].d, d); + BFM_GBL_SET(dqs_wlevel_mid[g].ps, work_mid); + + return 1; +} + +#else + +//USER Write Levelling -- Full Calibration +static uint32_t rw_mgr_mem_calibrate_wlevel(uint32_t g, uint32_t test_bgn) +{ + uint32_t p, d; + t_btfld bit_chk; + uint32_t work_bgn, work_end, work_mid; + uint32_t tmp_delay; + + //USER update info for sims + + reg_file_set_stage(CAL_STAGE_WLEVEL); + 
reg_file_set_sub_stage(CAL_SUBSTAGE_WORKING_DELAY); + + //USER maximum phases for the sweep + + //USER starting phases + + //USER update info for sims + + reg_file_set_group(g); + + //USER starting and end range where writes work + + work_bgn = 0; + work_end = 0; + + //USER step 1: find first working phase, increment in ptaps + + for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++, work_bgn += IO_DELAY_PER_OPA_TAP) { + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); + + if (rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { + break; + } + } + + if (p > IO_DQDQS_OUT_PHASE_MAX) { + //USER fail, cannot find first working phase + + set_failing_group_stage(g, CAL_STAGE_WLEVEL, CAL_SUBSTAGE_WORKING_DELAY); + + return 0; + } + + work_end = work_bgn + IO_DELAY_PER_OPA_TAP; + + reg_file_set_sub_stage(CAL_SUBSTAGE_LAST_WORKING_DELAY); + + //USER step 2: if we have room, back off by one and increment in dtaps + + if (p > 0) { + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p - 1); + + tmp_delay = work_bgn - IO_DELAY_PER_OPA_TAP; + + for (d = 0; d <= IO_IO_OUT1_DELAY_MAX && tmp_delay < work_bgn; + d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) { + scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); + + if (rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { + work_bgn = tmp_delay; + break; + } + } + + scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, 0); + } + //USER step 3: go forward from working phase to non working phase, increment in ptaps + + for (p = p + 1; p <= IO_DQDQS_OUT_PHASE_MAX; p++, work_end += IO_DELAY_PER_OPA_TAP) { + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); + + if (!rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { + break; + } + } + + //USER step 4: back off one from last, increment in dtaps + + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p - 1); + + work_end -= IO_DELAY_PER_OPA_TAP; + + for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++, work_end += IO_DELAY_PER_DCHAIN_TAP) { + scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); + + if (!rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { + break; + } + } + + scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, 0); + + if (work_end > work_bgn) { + //USER we have a working range + } else { + //USER nil range + + set_failing_group_stage(g, CAL_STAGE_WLEVEL, CAL_SUBSTAGE_LAST_WORKING_DELAY); + + return 0; + } + + //USER center + + work_mid = (work_bgn + work_end) / 2; + + tmp_delay = 0; + + for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && tmp_delay < work_mid; + p++, tmp_delay += IO_DELAY_PER_OPA_TAP) ; + + tmp_delay -= IO_DELAY_PER_OPA_TAP; + + scc_mgr_set_dqdqs_output_phase_all_ranks(g, p - 1); + + for (d = 0; d <= IO_IO_OUT1_DELAY_MAX && tmp_delay < work_mid; + d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) ; + + scc_mgr_apply_group_all_out_delay_add_all_ranks(g, test_bgn, d - 1); + + return 1; +} + +#endif + +//USER center all windows. 
do per-bit-deskew to possibly increase size of certain windows + +#if NEWVERSION_WRDESKEW + +static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn, uint32_t write_group, + uint32_t test_bgn) +{ + uint32_t i, p, min_index; + int32_t d; + //USER Store these as signed since there are comparisons with signed numbers + t_btfld bit_chk; + t_btfld sticky_bit_chk; + int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; + int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; + int32_t mid; + int32_t mid_min, orig_mid_min; + int32_t new_dqs, start_dqs, shift_dq; + int32_t dq_margin, dqs_margin, dm_margin; + uint32_t stop; + int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; + int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1; + int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1; + int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1; + int32_t win_best = 0; + + dm_margin = 0; + + start_dqs = READ_SCC_DQS_IO_OUT1_DELAY(); + + select_curr_shadow_reg_using_rank(rank_bgn); + + //USER per-bit deskew + + //USER set the left and right edge of each bit to an illegal value + //USER use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value + sticky_bit_chk = 0; + for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; + right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; + } + + //USER Search for the left edge of the window for each bit + for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) { + scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + //USER Stop searching when the read test doesn't pass AND when we've seen a passing read on every bit + stop = + !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, PASS_ONE_BIT, + &bit_chk, 0); + sticky_bit_chk = sticky_bit_chk | bit_chk; + stop = stop && (sticky_bit_chk == param->write_correct_mask); + DPRINT(2, + "write_center(left): dtap=%lu => " BTFLD_FMT " == " BTFLD_FMT + " && %lu [bit_chk=" BTFLD_FMT "]", d, sticky_bit_chk, + param->write_correct_mask, stop, bit_chk); + + if (stop == 1) { + break; + } else { + for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + if (bit_chk & 1) { + //USER Remember a passing test as the left_edge + left_edge[i] = d; + } else { + //USER If a left edge has not been seen yet, then a future passing test will mark this edge as the right edge + if (left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) { + right_edge[i] = -(d + 1); + } + } + DPRINT(2, + "write_center[l,d=%lu): bit_chk_test=%d left_edge[%lu]: %ld right_edge[%lu]: %ld", + d, (int)(bit_chk & 1), i, left_edge[i], i, right_edge[i]); + bit_chk = bit_chk >> 1; + } + } + } + + //USER Reset DQ delay chains to 0 + scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0); + sticky_bit_chk = 0; + for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) { + + DPRINT(2, "write_center: left_edge[%lu]: %ld right_edge[%lu]: %ld", i, left_edge[i], + i, right_edge[i]); + + //USER Check for cases where we haven't found the left edge, which makes our assignment of the the + //USER right edge invalid. Reset it to the illegal value. 
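+ //USER (editor note, illustrative): the edge arrays use three encodings:
+ //USER a value of IO_IO_OUT1_DELAY_MAX + 1 means "edge not found yet",
+ //USER a value in 0..IO_IO_OUT1_DELAY_MAX is the delay-chain tap of a found edge,
+ //USER and -(d + 1) records a failure seen at tap d before the opposite edge,
+ //USER e.g. a failure at tap d == 2 with no left edge yet is stored as -(2 + 1) == -3.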
+ if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) + && (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) { + right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; + DPRINT(2, "write_center: reset right_edge[%lu]: %ld", i, right_edge[i]); + } + //USER Reset sticky bit (except for bits where we have seen the left edge) + sticky_bit_chk = sticky_bit_chk << 1; + if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) { + sticky_bit_chk = sticky_bit_chk | 1; + } + + if (i == 0) { + break; + } + } + + //USER Search for the right edge of the window for each bit + for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) { + scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d + start_dqs); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + if (QDRII) { + rw_mgr_mem_dll_lock_wait(); + } + //USER Stop searching when the read test doesn't pass AND when we've seen a passing read on every bit + stop = + !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, PASS_ONE_BIT, + &bit_chk, 0); + if (stop) { + recover_mem_device_after_ck_dqs_violation(); + } + sticky_bit_chk = sticky_bit_chk | bit_chk; + stop = stop && (sticky_bit_chk == param->write_correct_mask); + + DPRINT(2, "write_center (right): dtap=%lu => " BTFLD_FMT " == " BTFLD_FMT " && %lu", + d, sticky_bit_chk, param->write_correct_mask, stop); + + if (stop == 1) { + if (d == 0) { + for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + //USER d = 0 failed, but it passed when testing the left edge, so it must be marginal, set it to -1 + if (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1 + && left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1) { + right_edge[i] = -1; + } + } + } + break; + } else { + for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + if (bit_chk & 1) { + //USER Remember a passing test as the right_edge + right_edge[i] = d; + } else { + if (d != 0) { + //USER If a right edge has not been seen yet, then a future passing test will mark this edge as the left edge + if (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) { + left_edge[i] = -(d + 1); + } + } else { + //USER d = 0 failed, but it passed when testing the left edge, so it must be marginal, set it to -1 + if (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1 + && left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1) { + right_edge[i] = -1; + } + //USER If a right edge has not been seen yet, then a future passing test will mark this edge as the left edge + else if (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) { + left_edge[i] = -(d + 1); + } + } + } + DPRINT(2, + "write_center[r,d=%lu): bit_chk_test=%d left_edge[%lu]: %ld right_edge[%lu]: %ld", + d, (int)(bit_chk & 1), i, left_edge[i], i, right_edge[i]); + bit_chk = bit_chk >> 1; + } + } + } + + //USER Check that all bits have a window + for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + DPRINT(2, "write_center: left_edge[%lu]: %ld right_edge[%lu]: %ld", i, left_edge[i], + i, right_edge[i]); + BFM_GBL_SET(dq_write_left_edge[write_group][i], left_edge[i]); + BFM_GBL_SET(dq_write_right_edge[write_group][i], right_edge[i]); + if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) + || (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) { + set_failing_group_stage(test_bgn + i, CAL_STAGE_WRITES, + CAL_SUBSTAGE_WRITES_CENTER); + return 0; + } + } + + //USER Find middle of window for each DQ bit + mid_min = left_edge[0] - right_edge[0]; + min_index = 0; + for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + mid = left_edge[i] - right_edge[i]; + if (mid < mid_min) { + mid_min = mid; + min_index = i; + } + } + + //USER -mid_min/2 represents the amount that we need to move DQS. 
If mid_min is odd and positive we'll need to add one to + //USER make sure the rounding in further calculations is correct (always bias to the right), so just add 1 for all positive values + if (mid_min > 0) { + mid_min++; + } + mid_min = mid_min / 2; + + DPRINT(1, "write_center: mid_min=%ld", mid_min); + + //USER Determine the amount we can change DQS (which is -mid_min) + orig_mid_min = mid_min; + new_dqs = start_dqs; + mid_min = 0; + + DPRINT(1, "write_center: start_dqs=%ld new_dqs=%ld mid_min=%ld", start_dqs, new_dqs, + mid_min); + + //USER Initialize data for export structures + dqs_margin = IO_IO_OUT1_DELAY_MAX + 1; + dq_margin = IO_IO_OUT1_DELAY_MAX + 1; + + //USER add delay to bring centre of all DQ windows to the same "level" + for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { + //USER Use values before divide by 2 to reduce round off error + shift_dq = + (left_edge[i] - right_edge[i] - + (left_edge[min_index] - right_edge[min_index])) / 2 + (orig_mid_min - mid_min); + + DPRINT(2, "write_center: before: shift_dq[%lu]=%ld", i, shift_dq); + + if (shift_dq + (int32_t) READ_SCC_DQ_OUT1_DELAY(i) > (int32_t) IO_IO_OUT1_DELAY_MAX) { + shift_dq = (int32_t) IO_IO_OUT1_DELAY_MAX - READ_SCC_DQ_OUT1_DELAY(i); + } else if (shift_dq + (int32_t) READ_SCC_DQ_OUT1_DELAY(i) < 0) { + shift_dq = -(int32_t) READ_SCC_DQ_OUT1_DELAY(i); + } + DPRINT(2, "write_center: after: shift_dq[%lu]=%ld", i, shift_dq); + scc_mgr_set_dq_out1_delay(write_group, i, READ_SCC_DQ_OUT1_DELAY(i) + shift_dq); + scc_mgr_load_dq(i); + + DPRINT(2, "write_center: margin[%lu]=[%ld,%ld]", i, + left_edge[i] - shift_dq + (-mid_min), right_edge[i] + shift_dq - (-mid_min)); + //USER To determine values for export structures + if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) { + dq_margin = left_edge[i] - shift_dq + (-mid_min); + } + if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) { + dqs_margin = right_edge[i] + shift_dq - (-mid_min); + } + } + + //USER Move DQS + if (QDRII) { + scc_mgr_set_group_dqs_io_and_oct_out1_gradual(write_group, new_dqs); + } else { + scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } + + DPRINT(2, "write_center: DM"); + + //USER set the left and right edge of each bit to an illegal value + //USER use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value + left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; + right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; + + //USER Search for the/part of the window with DM shift + for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) { + scc_mgr_apply_group_dm_out1_delay(write_group, d); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + if (rw_mgr_mem_calibrate_write_test + (rank_bgn, write_group, 1, PASS_ALL_BITS, &bit_chk, 0)) { + + //USE Set current end of the window + end_curr = -d; + //USER If a starting edge of our window has not been seen this is our current start of the DM window + if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1) { + bgn_curr = -d; + } + //USER If current window is bigger than best seen. Set best seen to be current window + if ((end_curr - bgn_curr + 1) > win_best) { + win_best = end_curr - bgn_curr + 1; + bgn_best = bgn_curr; + end_best = end_curr; + } + } else { + //USER We just saw a failing test. Reset temp edge + bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; + end_curr = IO_IO_OUT1_DELAY_MAX + 1; + } + + } + + //USER Reset DM delay chains to 0 + scc_mgr_apply_group_dm_out1_delay(write_group, 0); + + //USER Check to see if the current window nudges up aganist 0 delay. 
If so we need to continue the search by shifting DQS otherwise DQS search begins as a new search + if (end_curr != 0) { + bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; + end_curr = IO_IO_OUT1_DELAY_MAX + 1; + } + //USER Search for the/part of the window with DQS shifts + for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) { + // Note: This only shifts DQS, so are we limiting ourselve to + // width of DQ unnecessarily + scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d + new_dqs); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + if (rw_mgr_mem_calibrate_write_test + (rank_bgn, write_group, 1, PASS_ALL_BITS, &bit_chk, 0)) { + + //USE Set current end of the window + end_curr = d; + //USER If a beginning edge of our window has not been seen this is our current begin of the DM window + if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1) { + bgn_curr = d; + } + //USER If current window is bigger than best seen. Set best seen to be current window + if ((end_curr - bgn_curr + 1) > win_best) { + win_best = end_curr - bgn_curr + 1; + bgn_best = bgn_curr; + end_best = end_curr; + } + } else { + //USER We just saw a failing test. Reset temp edge + recover_mem_device_after_ck_dqs_violation(); + bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; + end_curr = IO_IO_OUT1_DELAY_MAX + 1; + + //USER Early exit optimization: if ther remaining delay chain space is less than already seen largest window we can exit + if ((win_best - 1) > (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) { + break; + } + + } + } + + //USER assign left and right edge for cal and reporting; + left_edge[0] = -1 * bgn_best; + right_edge[0] = end_best; + + DPRINT(2, "dm_calib: left=%ld right=%ld", left_edge[0], right_edge[0]); + BFM_GBL_SET(dm_left_edge[write_group][0], left_edge[0]); + BFM_GBL_SET(dm_right_edge[write_group][0], right_edge[0]); + + //USER Move DQS (back to orig) + scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); + + //USER Move DM + + //USER Find middle of window for the DM bit + mid = (left_edge[0] - right_edge[0]) / 2; + + //USER only move right, since we are not moving DQS/DQ + if (mid < 0) { + mid = 0; + } + //dm_marign should fail if we never find a window + if (win_best == 0) { + dm_margin = -1; + } else { + dm_margin = left_edge[0] - mid; + } + + scc_mgr_apply_group_dm_out1_delay(write_group, mid); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + DPRINT(2, "dm_calib: left=%ld right=%ld mid=%ld dm_margin=%ld", + left_edge[0], right_edge[0], mid, dm_margin); + + //USER Export values + gbl->fom_out += dq_margin + dqs_margin; + + DPRINT(2, "write_center: dq_margin=%ld dqs_margin=%ld dm_margin=%ld", dq_margin, dqs_margin, + dm_margin); + + //USER Do not remove this line as it makes sure all of our decisions have been applied + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0); +} + +#else // !NEWVERSION_WRDESKEW + +static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn, uint32_t write_group, + uint32_t test_bgn) +{ + uint32_t i, p, d; + uint32_t mid; + t_btfld bit_chk, sticky_bit_chk; + uint32_t max_working_dq[RW_MGR_MEM_DQ_PER_WRITE_DQS]; + uint32_t max_working_dm[RW_MGR_MEM_DATA_MASK_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH]; + uint32_t dq_margin, dqs_margin, dm_margin; + uint32_t start_dqs; + uint32_t stop; + + //USER per-bit deskew + + for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + max_working_dq[i] = 0; + } + + for (d = 1; d <= IO_IO_OUT1_DELAY_MAX; d++) { + scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + if 
(!rw_mgr_mem_calibrate_write_test + (rank_bgn, write_group, 0, PASS_ONE_BIT, &bit_chk, 0)) { + break; + } else { + for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + if (bit_chk & 1) { + max_working_dq[i] = d; + } + bit_chk = bit_chk >> 1; + } + } + } + + scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0); + + //USER determine minimum of maximums + + dq_margin = IO_IO_OUT1_DELAY_MAX; + + for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { + if (max_working_dq[i] < dq_margin) { + dq_margin = max_working_dq[i]; + } + } + + //USER add delay to center DQ windows + + for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { + if (max_working_dq[i] > dq_margin) { + scc_mgr_set_dq_out1_delay(write_group, i, max_working_dq[i] - dq_margin); + } else { + scc_mgr_set_dq_out1_delay(write_group, i, 0); + } + + scc_mgr_load_dq(p, i); + } + + //USER sweep DQS window, may potentially have more window due to per-bit-deskew + + start_dqs = READ_SCC_DQS_IO_OUT1_DELAY(); + + for (d = start_dqs + 1; d <= IO_IO_OUT1_DELAY_MAX; d++) { + scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d); + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + if (QDRII) { + rw_mgr_mem_dll_lock_wait(); + } + + if (!rw_mgr_mem_calibrate_write_test + (rank_bgn, write_group, 0, PASS_ALL_BITS, &bit_chk, 0)) { + break; + } + } + + scc_mgr_set_dqs_out1_delay(write_group, start_dqs); + scc_mgr_set_oct_out1_delay(write_group, start_dqs); + + dqs_margin = d - start_dqs - 1; + + //USER time to center, +1 so that we don't go crazy centering DQ + + mid = (dq_margin + dqs_margin + 1) / 2; + + gbl->fom_out += dq_margin + dqs_margin; + + scc_mgr_load_dqs_io(); + scc_mgr_load_dqs_for_write_group(write_group); + + //USER center dq + + if (dq_margin > mid) { + for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { + scc_mgr_set_dq_out1_delay(write_group, i, + READ_SCC_DQ_OUT1_DELAY(i) + dq_margin - mid); + scc_mgr_load_dq(p, i); + } + dqs_margin += dq_margin - mid; + dq_margin -= dq_margin - mid; + } + //USER do dm centering + + if (!RLDRAMX) { + dm_margin = IO_IO_OUT1_DELAY_MAX; + + if (QDRII) { + sticky_bit_chk = 0; + for (i = 0; i < RW_MGR_MEM_DATA_MASK_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; + i++) { + max_working_dm[i] = 0; + } + } + + for (d = 1; d <= IO_IO_OUT1_DELAY_MAX; d++) { + scc_mgr_apply_group_dm_out1_delay(write_group, d); + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + if (DDRX) { + if (rw_mgr_mem_calibrate_write_test + (rank_bgn, write_group, 1, PASS_ALL_BITS, &bit_chk, 0)) { + max_working_dm[0] = d; + } else { + break; + } + } else { + stop = + !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1, + PASS_ALL_BITS, &bit_chk, 0); + sticky_bit_chk = sticky_bit_chk | bit_chk; + stop = stop && (sticky_bit_chk == param->read_correct_mask); + + if (stop == 1) { + break; + } else { + for (i = 0; + i < + RW_MGR_MEM_DATA_MASK_WIDTH / + RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { + if ((bit_chk & param->dm_correct_mask) == + param->dm_correct_mask) { + max_working_dm[i] = d; + } + bit_chk = + bit_chk >> (RW_MGR_MEM_DATA_WIDTH / + RW_MGR_MEM_DATA_MASK_WIDTH); + } + } + } + } + + i = 0; + for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { + if (max_working_dm[i] > mid) { + scc_mgr_set_dm_out1_delay(write_group, i, max_working_dm[i] - mid); + } else { + scc_mgr_set_dm_out1_delay(write_group, i, 0); + } + + scc_mgr_load_dm(i); + + if (max_working_dm[i] < dm_margin) { + dm_margin = max_working_dm[i]; + } + } + } else { + dm_margin = 0; + } + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + return (dq_margin + dqs_margin) > 0; +} 
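+
+//USER (editor note, illustrative trace with assumed margins): in the legacy centering
+//USER above, dq_margin == 6 and dqs_margin == 3 give mid == (6 + 3 + 1) / 2 == 5;
+//USER since dq_margin > mid, every DQ gains 6 - 5 == 1 extra out1 delay tap and the
+//USER margins end up as dq_margin == 5, dqs_margin == 4.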
+ +#endif + +//USER calibrate the write operations + +static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g, uint32_t test_bgn) +{ + + reg_file_set_stage(CAL_STAGE_WRITES); + reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER); + + //USER starting phases + + //USER update info for sims + + reg_file_set_group(g); + + if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) { + set_failing_group_stage(g, CAL_STAGE_WRITES, CAL_SUBSTAGE_WRITES_CENTER); + return 0; + } + + return 1; +} + +//USER precharge all banks and activate row 0 in bank "000..." and bank "111..." +static void mem_precharge_and_activate(void) +{ + uint32_t r; + + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { + if (param->skip_ranks[r]) { + //USER request to skip the rank + + continue; + } + //USER set rank + set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); + + //USER precharge all banks ... + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_PRECHARGE_ALL); + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x0F); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_ACTIVATE_0_AND_1_WAIT1); + + IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x0F); + IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_ACTIVATE_0_AND_1_WAIT2); + + //USER activate rows + IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_ACTIVATE_0_AND_1); + } +} + +//USER perform all refreshes necessary over all ranks + +//USER Configure various memory related parameters. + +static void mem_config(void) +{ + uint32_t rlat, wlat; + uint32_t rw_wl_nop_cycles; + uint32_t max_latency; + + //USER read in write and read latency + + wlat = IORD_32DIRECT(MEM_T_WL_ADD, 0); + wlat += IORD_32DIRECT(DATA_MGR_MEM_T_ADD, 0); /* WL for hard phy does not include additive latency */ + + // YYONG: add addtional write latency to offset the address/command extra clock cycle + // YYONG: We change the AC mux setting causing AC to be delayed by one mem clock cycle + // YYONG: only do this for DDR3 + wlat = wlat + 1; + + rlat = IORD_32DIRECT(MEM_T_RL_ADD, 0); + + if (QUARTER_RATE_MODE) { + //USER In Quarter-Rate the WL-to-nop-cycles works like this + //USER 0,1 -> 0 + //USER 2,3,4,5 -> 1 + //USER 6,7,8,9 -> 2 + //USER etc... + rw_wl_nop_cycles = (wlat + 6) / 4 - 1; + } else if (HALF_RATE_MODE) { + //USER In Half-Rate the WL-to-nop-cycles works like this + //USER 0,1 -> -1 + //USER 2,3 -> 0 + //USER 4,5 -> 1 + //USER etc... 
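+ //USER (editor note, worked example with assumed write latencies):
+ //USER Half-Rate: wlat == 4 -> (4 / 2) - 1 == 1, wlat == 5 -> ((5 - 1) / 2) - 1 == 1,
+ //USER matching the 4,5 -> 1 row above; Quarter-Rate: wlat == 6 -> (6 + 6) / 4 - 1 == 2,
+ //USER matching 6,7,8,9 -> 2.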
+ if (wlat % 2) { + rw_wl_nop_cycles = ((wlat - 1) / 2) - 1; + } else { + rw_wl_nop_cycles = (wlat / 2) - 1; + } + } else { + rw_wl_nop_cycles = wlat - 2; + } + gbl->rw_wl_nop_cycles = rw_wl_nop_cycles; + + //USER For AV/CV, lfifo is hardened and always runs at full rate + //USER so max latency in AFI clocks, used here, is correspondingly smaller + if (QUARTER_RATE_MODE) { + max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) / 4 - 1; + } else if (HALF_RATE_MODE) { + max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) / 2 - 1; + } else { + max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) / 1 - 1; + } + //USER configure for a burst length of 8 + + if (QUARTER_RATE_MODE) { + //USER write latency + wlat = (wlat + 5) / 4 + 1; + + //USER set a pretty high read latency initially + gbl->curr_read_lat = (rlat + 1) / 4 + 8; + } else if (HALF_RATE_MODE) { + //USER write latency + wlat = (wlat - 1) / 2 + 1; + + //USER set a pretty high read latency initially + gbl->curr_read_lat = (rlat + 1) / 2 + 8; + } else { + //USER write latency + // Adjust Write Latency for Hard PHY + wlat = wlat + 1; + + //USER set a pretty high read latency initially + gbl->curr_read_lat = rlat + 16; + } + + if (gbl->curr_read_lat > max_latency) { + gbl->curr_read_lat = max_latency; + } + IOWR_32DIRECT(PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat); + + //USER advertise write latency + gbl->curr_write_lat = wlat; + IOWR_32DIRECT(PHY_MGR_AFI_WLAT, 0, wlat - 2); + + //USER initialize bit slips + + mem_precharge_and_activate(); +} + +//USER Set VFIFO and LFIFO to instant-on settings in skip calibration mode + +static void mem_skip_calibrate(void) +{ + uint32_t vfifo_offset; + uint32_t i, j, r; + + // Need to update every shadow register set used by the interface + for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { + + // Strictly speaking this should be called once per group to make + // sure each group's delay chains are refreshed from the SCC register file, + // but since we're resetting all delay chains anyway, we can save some + // runtime by calling select_shadow_regs_for_update just once to switch rank. + select_shadow_regs_for_update(r, 0, 1); + + //USER Set output phase alignment settings appropriate for skip calibration + for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { + + scc_mgr_set_dqs_en_phase(i, 0); + // Case:33398 + // + // Write data arrives to the I/O two cycles before write latency is reached (720 deg). + // -> due to bit-slip in a/c bus + // -> to allow board skew where dqs is longer than ck + // -> how often can this happen!? + // -> can claim back some ptaps for high freq support if we can relax this, but i digress... 
+ // + // The write_clk leads mem_ck by 90 deg + // The minimum ptap of the OPA is 180 deg + // Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay + // The write_clk is always delayed by 2 ptaps + // + // Hence, to make DQS aligned to CK, we need to delay DQS by: + // (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH)) + // + // Dividing the above by (360 / IO_DLL_CHAIN_LENGTH) gives us the number of ptaps, which simplies to: + // + // (1.25 * IO_DLL_CHAIN_LENGTH - 2) + scc_mgr_set_dqdqs_output_phase(i, (1.25 * IO_DLL_CHAIN_LENGTH - 2)); + } + + IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, 0xff); + IOWR_32DIRECT(SCC_MGR_DQS_IO_ENA, 0, 0xff); + + for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { + IOWR_32DIRECT(SCC_MGR_GROUP_COUNTER, 0, i); + IOWR_32DIRECT(SCC_MGR_DQ_ENA, 0, 0xff); + IOWR_32DIRECT(SCC_MGR_DM_ENA, 0, 0xff); + } + + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + } + + // Compensate for simulation model behaviour + for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { + scc_mgr_set_dqs_bus_in_delay(i, 10); + scc_mgr_load_dqs(i); + } + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + + //ArriaV has hard FIFOs that can only be initialized by incrementing in sequencer + vfifo_offset = CALIB_VFIFO_OFFSET; + for (j = 0; j < vfifo_offset; j++) { + if (HARD_PHY) { + IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_HARD_PHY, 0, 0xff); + } else { + IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR, 0, 0xff); + } + } + + IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); + + // For ACV with hard lfifo, we get the skip-cal setting from generation-time constant + gbl->curr_read_lat = CALIB_LFIFO_OFFSET; + IOWR_32DIRECT(PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat); +} + +//USER Memory calibration entry point + +static uint32_t mem_calibrate(void) +{ + uint32_t i; + uint32_t rank_bgn, sr; + uint32_t write_group, write_test_bgn; + uint32_t read_group, read_test_bgn; + uint32_t run_groups, current_run; + uint32_t failing_groups = 0; + uint32_t group_failed = 0; + uint32_t sr_failed = 0; + + // Initialize the data settings + DPRINT(1, "Preparing to init data"); + DPRINT(1, "Init complete"); + + gbl->error_substage = CAL_SUBSTAGE_NIL; + gbl->error_stage = CAL_STAGE_NIL; + gbl->error_group = 0xff; + gbl->fom_in = 0; + gbl->fom_out = 0; + + mem_config(); + + if (ARRIAV || CYCLONEV) { + uint32_t bypass_mode = (HARD_PHY) ? 
0x1 : 0x0; + for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { + IOWR_32DIRECT(SCC_MGR_GROUP_COUNTER, 0, i); + scc_set_bypass_mode(i, bypass_mode); + } + } + + if (((DYNAMIC_CALIB_STEPS) & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { + //USER Set VFIFO and LFIFO to instant-on settings in skip calibration mode + + mem_skip_calibrate(); + } else { + for (i = 0; i < NUM_CALIB_REPEAT; i++) { + + //USER Zero all delay chain/phase settings for all groups and all shadow register sets + scc_mgr_zero_all(); + + run_groups = ~param->skip_groups; + + for (write_group = 0, write_test_bgn = 0; + write_group < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; + write_group++, write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) { + // Initialized the group failure + group_failed = 0; + + // Mark the group as being attempted for calibration + + BFM_GBL_SET(vfifo_idx, 0); + current_run = + run_groups & ((1 << RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1); + run_groups = run_groups >> RW_MGR_NUM_DQS_PER_WRITE_GROUP; + + if (current_run == 0) { + continue; + } + + IOWR_32DIRECT(SCC_MGR_GROUP_COUNTER, 0, write_group); + scc_mgr_zero_group(write_group, write_test_bgn, 0); + + for (read_group = + write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / + RW_MGR_MEM_IF_WRITE_DQS_WIDTH, read_test_bgn = 0; + read_group < + (write_group + + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / + RW_MGR_MEM_IF_WRITE_DQS_WIDTH && group_failed == 0; + read_group++, read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) { + + //USER Calibrate the VFIFO + if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_VFIFO)) { + if (!rw_mgr_mem_calibrate_vfifo + (read_group, read_test_bgn)) { + group_failed = 1; + + if (! + (gbl-> + phy_debug_mode_flags & + PHY_DEBUG_SWEEP_ALL_GROUPS)) { + return 0; + } + } + } + } + + //USER level writes (or align DK with CK for RLDRAMX) + if (group_failed == 0) { + if ((DDRX || RLDRAMII) && !(ARRIAV || CYCLONEV)) { + if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_WLEVEL)) { + if (!rw_mgr_mem_calibrate_wlevel + (write_group, write_test_bgn)) { + group_failed = 1; + + if (! + (gbl-> + phy_debug_mode_flags & + PHY_DEBUG_SWEEP_ALL_GROUPS)) { + return 0; + } + } + } + } + } + //USER Calibrate the output side + if (group_failed == 0) { + for (rank_bgn = 0, sr = 0; + rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; + rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { + sr_failed = 0; + if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES)) { + if ((STATIC_CALIB_STEPS) & + CALIB_SKIP_DELAY_SWEEPS) { + //USER not needed in quick mode! + } else { + //USER Determine if this set of ranks should be skipped entirely + if (!param->skip_shadow_regs[sr]) { + + //USER Select shadow register set + select_shadow_regs_for_update + (rank_bgn, write_group, + 1); + + if (!rw_mgr_mem_calibrate_writes(rank_bgn, write_group, write_test_bgn)) { + sr_failed = 1; + if (! + (gbl-> + phy_debug_mode_flags + & + PHY_DEBUG_SWEEP_ALL_GROUPS)) + { + return 0; + } + } + } + } + } + if (sr_failed == 0) { + } else { + group_failed = 1; + } + } + } + + if (group_failed == 0) { + for (read_group = + write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / + RW_MGR_MEM_IF_WRITE_DQS_WIDTH, read_test_bgn = 0; + read_group < + (write_group + + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / + RW_MGR_MEM_IF_WRITE_DQS_WIDTH && group_failed == 0; + read_group++, read_test_bgn += + RW_MGR_MEM_DQ_PER_READ_DQS) { + + if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES)) { + if (!rw_mgr_mem_calibrate_vfifo_end + (read_group, read_test_bgn)) { + group_failed = 1; + + if (! 
+ (gbl-> + phy_debug_mode_flags & + PHY_DEBUG_SWEEP_ALL_GROUPS)) { + return 0; + } + } + } + } + } + + if (group_failed == 0) { + +#if STATIC_IN_RTL_SIM +#else +#endif + } + + if (group_failed != 0) { + failing_groups++; + } + + } + + // USER If there are any failing groups then report the failure + if (failing_groups != 0) { + return 0; + } + //USER Calibrate the LFIFO + if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) { + //USER If we're skipping groups as part of debug, don't calibrate LFIFO + if (param->skip_groups == 0) { + if (!rw_mgr_mem_calibrate_lfifo()) { + return 0; + } + } + } + } + } + + //USER Do not remove this line as it makes sure all of our decisions have been applied + IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); + return 1; +} + +static uint32_t run_mem_calibrate(void) +{ + + uint32_t pass; + uint32_t debug_info; + uint32_t ctrlcfg = IORD_32DIRECT(CTRL_CONFIG_REG, 0); + + // Initialize the debug status to show that calibration has started. + // This should occur before anything else + // Reset pass/fail status shown on afi_cal_success/fail + IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_RESET); + //stop tracking manger + + IOWR_32DIRECT(CTRL_CONFIG_REG, 0, ctrlcfg & 0xFFBFFFFF); + + initialize(); + + rw_mgr_mem_initialize(); + + pass = mem_calibrate(); + + mem_precharge_and_activate(); + + //pe_checkout_pattern(); + + IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); + + if (pass) { +#ifdef TEST_SIZE + if (!check_test_mem(0)) { + gbl->error_stage = 0x92; + gbl->error_group = 0x92; + } +#endif + } + + //USER Handoff + + //USER Don't return control of the PHY back to AFI when in debug mode + if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) { + rw_mgr_mem_handoff(); + + // In Hard PHY this is a 2-bit control: + // 0: AFI Mux Select + // 1: DDIO Mux Select + IOWR_32DIRECT(PHY_MGR_MUX_SEL, 0, 0x2); + } + IOWR_32DIRECT(CTRL_CONFIG_REG, 0, ctrlcfg); + + if (pass) { + IPRINT("CALIBRATION PASSED"); + + gbl->fom_in /= 2; + gbl->fom_out /= 2; + + if (gbl->fom_in > 0xff) { + gbl->fom_in = 0xff; + } + + if (gbl->fom_out > 0xff) { + gbl->fom_out = 0xff; + } + + // Update the FOM in the register file + debug_info = gbl->fom_in; + debug_info |= gbl->fom_out << 8; + IOWR_32DIRECT(REG_FILE_FOM, 0, debug_info); + + IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, debug_info); + IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_SUCCESS); + + } else { + + IPRINT("CALIBRATION FAILED"); + + debug_info = gbl->error_stage; + debug_info |= gbl->error_substage << 8; + debug_info |= gbl->error_group << 16; + + IOWR_32DIRECT(REG_FILE_FAILING_STAGE, 0, debug_info); + IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, debug_info); + IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_FAIL); + + // Update the failing group/stage in the register file + debug_info = gbl->error_stage; + debug_info |= gbl->error_substage << 8; + debug_info |= gbl->error_group << 16; + IOWR_32DIRECT(REG_FILE_FAILING_STAGE, 0, debug_info); + + } + + // Set the debug status to show that calibration has ended. 
+ // This should occur after everything else + return pass; + +} + +static void hc_initialize_rom_data(void) +{ + uint32_t i; + + for (i = 0; i < inst_rom_init_size; i++) { + uint32_t data = inst_rom_init[i]; + IOWR_32DIRECT(RW_MGR_INST_ROM_WRITE, (i << 2), data); + } + + for (i = 0; i < ac_rom_init_size; i++) { + uint32_t data = ac_rom_init[i]; + IOWR_32DIRECT(RW_MGR_AC_ROM_WRITE, (i << 2), data); + } +} + +static void initialize_reg_file(void) +{ + // Initialize the register file with the correct data + IOWR_32DIRECT(REG_FILE_SIGNATURE, 0, REG_FILE_INIT_SEQ_SIGNATURE); + IOWR_32DIRECT(REG_FILE_DEBUG_DATA_ADDR, 0, 0); + IOWR_32DIRECT(REG_FILE_CUR_STAGE, 0, 0); + IOWR_32DIRECT(REG_FILE_FOM, 0, 0); + IOWR_32DIRECT(REG_FILE_FAILING_STAGE, 0, 0); + IOWR_32DIRECT(REG_FILE_DEBUG1, 0, 0); + IOWR_32DIRECT(REG_FILE_DEBUG2, 0, 0); +} + +static void initialize_hps_phy(void) +{ + // These may need to be included also: + // wrap_back_en (false) + // atpg_en (false) + // pipelineglobalenable (true) + + uint32_t reg; + // Tracking also gets configured here because it's in the same register + uint32_t trk_sample_count = 7500; + uint32_t trk_long_idle_sample_count = (10 << 16) | 100; // Format is number of outer loops in the 16 MSB, sample count in 16 LSB. + + reg = 0; + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2); + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1); + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1); + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1); + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0); + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1); + // Fix for long latency VFIFO + // This field selects the intrinsic latency to RDATA_EN/FULL path. 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles. 
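+ // (editor note): ADDLATSEL_SET(0) below therefore selects the 00/bypass encoding,
+ // i.e. no extra intrinsic latency is added on the RDATA_EN/FULL path.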
+ reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0); + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(trk_sample_count); + IOWR_32DIRECT(BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_OFFSET, reg); + + reg = 0; + reg |= + SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(trk_sample_count >> + SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH); + reg |= + SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(trk_long_idle_sample_count); + IOWR_32DIRECT(BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_OFFSET, reg); + + reg = 0; + reg |= + SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(trk_long_idle_sample_count + >> + SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH); + IOWR_32DIRECT(BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_OFFSET, reg); +} + +static void initialize_tracking(void) +{ + uint32_t concatenated_longidle = 0x0; + uint32_t concatenated_delays = 0x0; + uint32_t concatenated_rw_addr = 0x0; + uint32_t concatenated_refresh = 0x0; + uint32_t dtaps_per_ptap; + uint32_t tmp_delay; + + // compute usable version of value in case we skip full computation later + dtaps_per_ptap = 0; + tmp_delay = 0; + while (tmp_delay < IO_DELAY_PER_OPA_TAP) { + dtaps_per_ptap++; + tmp_delay += IO_DELAY_PER_DCHAIN_TAP; + } + dtaps_per_ptap--; + + concatenated_longidle = concatenated_longidle ^ 10; //longidle outer loop + concatenated_longidle = concatenated_longidle << 16; + concatenated_longidle = concatenated_longidle ^ 100; //longidle sample count + + concatenated_delays = concatenated_delays ^ 243; // trfc, worst case of 933Mhz 4Gb + concatenated_delays = concatenated_delays << 8; + concatenated_delays = concatenated_delays ^ 14; // trcd, worst case + concatenated_delays = concatenated_delays << 8; + concatenated_delays = concatenated_delays ^ 10; // vfifo wait + concatenated_delays = concatenated_delays << 8; + concatenated_delays = concatenated_delays ^ 4; // mux delay + + concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_IDLE; + concatenated_rw_addr = concatenated_rw_addr << 8; + concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_ACTIVATE_1; + concatenated_rw_addr = concatenated_rw_addr << 8; + concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_SGLE_READ; + concatenated_rw_addr = concatenated_rw_addr << 8; + concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_PRECHARGE_ALL; + + concatenated_refresh = concatenated_refresh ^ __RW_MGR_REFRESH_ALL; + concatenated_refresh = concatenated_refresh << 24; + concatenated_refresh = concatenated_refresh ^ 1000; // trefi + + // Initialize the register file with the correct data + IOWR_32DIRECT(REG_FILE_DTAPS_PER_PTAP, 0, dtaps_per_ptap); + IOWR_32DIRECT(REG_FILE_TRK_SAMPLE_COUNT, 0, 7500); + IOWR_32DIRECT(REG_FILE_TRK_LONGIDLE, 0, concatenated_longidle); + IOWR_32DIRECT(REG_FILE_DELAYS, 0, concatenated_delays); + IOWR_32DIRECT(REG_FILE_TRK_RW_MGR_ADDR, 0, concatenated_rw_addr); + IOWR_32DIRECT(REG_FILE_TRK_READ_DQS_WIDTH, 0, RW_MGR_MEM_IF_READ_DQS_WIDTH); + IOWR_32DIRECT(REG_FILE_TRK_RFSH, 0, concatenated_refresh); +} + +static int socfpga_mem_calibration(void) +{ + param_t my_param; + gbl_t my_gbl; + uint32_t pass; + uint32_t i; + + param = &my_param; + gbl = &my_gbl; + + // Initialize the debug mode flags + gbl->phy_debug_mode_flags = 0; + // Set the calibration enabled by default + gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; + // Only enable margining by default if requested + // Only sweep all groups (regardless of fail state) by default if requested + //Set enabled read test by default + + // 
Initialize the register file + initialize_reg_file(); + + // Initialize any PHY CSR + initialize_hps_phy(); + + scc_mgr_initialize(); + + initialize_tracking(); + + // Initialize the TCL report. This must occur before any printf + // but after the debug mode flags and register file + + // USER Enable all ranks, groups + for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++) { + param->skip_ranks[i] = 0; + } + for (i = 0; i < NUM_SHADOW_REGS; ++i) { + param->skip_shadow_regs[i] = 0; + } + param->skip_groups = 0; + + IPRINT("Preparing to start memory calibration"); + + DPRINT(1, + "%s%s %s ranks=%lu cs/dimm=%lu dq/dqs=%lu,%lu vg/dqs=%lu,%lu dqs=%lu,%lu dq=%lu dm=%lu " + "ptap_delay=%lu dtap_delay=%lu dtap_dqsen_delay=%lu, dll=%lu", + RDIMM ? "r" : (LRDIMM ? "l" : ""), + DDR2 ? "DDR2" : (DDR3 ? "DDR3" + : (QDRII ? "QDRII" + : (RLDRAMII ? "RLDRAMII" + : (RLDRAM3 ? "RLDRAM3" : "??PROTO??")))), + FULL_RATE ? "FR" : (HALF_RATE ? "HR" : (QUARTER_RATE ? "QR" : "??RATE??")), + (long unsigned int)RW_MGR_MEM_NUMBER_OF_RANKS, + (long unsigned int)RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM, + (long unsigned int)RW_MGR_MEM_DQ_PER_READ_DQS, + (long unsigned int)RW_MGR_MEM_DQ_PER_WRITE_DQS, + (long unsigned int)RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS, + (long unsigned int)RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS, + (long unsigned int)RW_MGR_MEM_IF_READ_DQS_WIDTH, + (long unsigned int)RW_MGR_MEM_IF_WRITE_DQS_WIDTH, + (long unsigned int)RW_MGR_MEM_DATA_WIDTH, + (long unsigned int)RW_MGR_MEM_DATA_MASK_WIDTH, + (long unsigned int)IO_DELAY_PER_OPA_TAP, (long unsigned int)IO_DELAY_PER_DCHAIN_TAP, + (long unsigned int)IO_DELAY_PER_DQS_EN_DCHAIN_TAP, + (long unsigned int)IO_DLL_CHAIN_LENGTH); + DPRINT(1, + "max values: en_p=%lu dqdqs_p=%lu en_d=%lu dqs_in_d=%lu io_in_d=%lu io_out1_d=%lu io_out2_d=%lu" + "dqs_in_reserve=%lu dqs_out_reserve=%lu", (long unsigned int)IO_DQS_EN_PHASE_MAX, + (long unsigned int)IO_DQDQS_OUT_PHASE_MAX, (long unsigned int)IO_DQS_EN_DELAY_MAX, + (long unsigned int)IO_DQS_IN_DELAY_MAX, (long unsigned int)IO_IO_IN_DELAY_MAX, + (long unsigned int)IO_IO_OUT1_DELAY_MAX, (long unsigned int)IO_IO_OUT2_DELAY_MAX, + (long unsigned int)IO_DQS_IN_RESERVE, (long unsigned int)IO_DQS_OUT_RESERVE); + + hc_initialize_rom_data(); + + //USER update info for sims + reg_file_set_stage(CAL_STAGE_NIL); + reg_file_set_group(0); + + // Load global needed for those actions that require + // some dynamic calibration support + dyn_calib_steps = STATIC_CALIB_STEPS; + + // Load global to allow dynamic selection of delay loop settings + // based on calibration mode + if (!((DYNAMIC_CALIB_STEPS) & CALIB_SKIP_DELAY_LOOPS)) { + skip_delay_mask = 0xff; + } else { + skip_delay_mask = 0x0; + } + +#ifdef TEST_SIZE + if (!check_test_mem(1)) { + IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, 0x9090); + IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_FAIL); + } + write_test_mem(); + if (!check_test_mem(0)) { + IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, 0x9191); + IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_FAIL); + } +#endif + + pass = run_mem_calibrate(); + + // EMPTY + + return pass; +} diff --git a/arch/arm/mach-socfpga/include/mach/cyclone5-sequencer.h b/arch/arm/mach-socfpga/include/mach/cyclone5-sequencer.h new file mode 100644 index 0000000000..dd0378af34 --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-sequencer.h @@ -0,0 +1,447 @@ +#ifndef _SEQUENCER_H_ +#define _SEQUENCER_H_ + +/* +* Copyright Altera Corporation (C) 2012-2014. 
All rights reserved +* +* SPDX-License-Identifier: BSD-3-Clause +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the distribution. +* * Neither the name of Altera Corporation nor the +* names of its contributors may be used to endorse or promote products +* derived from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +* DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#define ALTERA_ASSERT(condition) +#define ALTERA_INFO_ASSERT(condition,text) + +#define RW_MGR_NUM_DM_PER_WRITE_GROUP (RW_MGR_MEM_DATA_MASK_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH) +#define RW_MGR_NUM_TRUE_DM_PER_WRITE_GROUP (RW_MGR_TRUE_MEM_DATA_MASK_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH) + +#define RW_MGR_NUM_DQS_PER_WRITE_GROUP (RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH) +#define NUM_RANKS_PER_SHADOW_REG (RW_MGR_MEM_NUMBER_OF_RANKS / NUM_SHADOW_REGS) + +#define RW_MGR_RUN_SINGLE_GROUP BASE_RW_MGR +#define RW_MGR_RUN_ALL_GROUPS BASE_RW_MGR + 0x0400 + +#define RW_MGR_DI_BASE (BASE_RW_MGR + 0x0020) + +#define DDR3_MR1_ODT_MASK 0xFFFFFD99 +#define DDR3_MR2_ODT_MASK 0xFFFFF9FF +#define DDR3_AC_MIRR_MASK 0x020A8 + +#define RW_MGR_LOAD_CNTR_0 BASE_RW_MGR + 0x0800 +#define RW_MGR_LOAD_CNTR_1 BASE_RW_MGR + 0x0804 +#define RW_MGR_LOAD_CNTR_2 BASE_RW_MGR + 0x0808 +#define RW_MGR_LOAD_CNTR_3 BASE_RW_MGR + 0x080C + +#define RW_MGR_LOAD_JUMP_ADD_0 BASE_RW_MGR + 0x0C00 +#define RW_MGR_LOAD_JUMP_ADD_1 BASE_RW_MGR + 0x0C04 +#define RW_MGR_LOAD_JUMP_ADD_2 BASE_RW_MGR + 0x0C08 +#define RW_MGR_LOAD_JUMP_ADD_3 BASE_RW_MGR + 0x0C0C + +#define RW_MGR_RESET_READ_DATAPATH BASE_RW_MGR + 0x1000 +#define RW_MGR_SOFT_RESET BASE_RW_MGR + 0x2000 + +#define RW_MGR_SET_CS_AND_ODT_MASK BASE_RW_MGR + 0x1400 +#define RW_MGR_SET_ACTIVE_RANK BASE_RW_MGR + 0x2400 + +#define RW_MGR_LOOPBACK_MODE BASE_RW_MGR + 0x0200 + +#define RW_MGR_ENABLE_REFRESH BASE_RW_MGR + 0x3000 + +#define RW_MGR_RANK_NONE 0xFF +#define RW_MGR_RANK_ALL 0x00 + +#define RW_MGR_ODT_MODE_OFF 0 +#define RW_MGR_ODT_MODE_READ_WRITE 1 + +#define NUM_CALIB_REPEAT 1 + +#define NUM_READ_TESTS 7 +#define NUM_READ_PB_TESTS 7 +#define NUM_WRITE_TESTS 15 +#define NUM_WRITE_PB_TESTS 31 + +#define PASS_ALL_BITS 1 +#define PASS_ONE_BIT 0 + +/* calibration stages */ + +#define CAL_STAGE_NIL 0 +#define CAL_STAGE_VFIFO 1 +#define CAL_STAGE_WLEVEL 2 +#define CAL_STAGE_LFIFO 3 +#define CAL_STAGE_WRITES 4 +#define CAL_STAGE_FULLTEST 5 +#define 
CAL_STAGE_REFRESH 6 +#define CAL_STAGE_CAL_SKIPPED 7 +#define CAL_STAGE_CAL_ABORTED 8 +#define CAL_STAGE_VFIFO_AFTER_WRITES 9 + +/* calibration substages */ + +#define CAL_SUBSTAGE_NIL 0 +#define CAL_SUBSTAGE_GUARANTEED_READ 1 +#define CAL_SUBSTAGE_DQS_EN_PHASE 2 +#define CAL_SUBSTAGE_VFIFO_CENTER 3 +#define CAL_SUBSTAGE_WORKING_DELAY 1 +#define CAL_SUBSTAGE_LAST_WORKING_DELAY 2 +#define CAL_SUBSTAGE_WLEVEL_COPY 3 +#define CAL_SUBSTAGE_WRITES_CENTER 1 +#define CAL_SUBSTAGE_READ_LATENCY 1 +#define CAL_SUBSTAGE_REFRESH 1 + +#define MAX_RANKS (RW_MGR_MEM_NUMBER_OF_RANKS) +#define MAX_DQS (RW_MGR_MEM_IF_WRITE_DQS_WIDTH > RW_MGR_MEM_IF_READ_DQS_WIDTH ? RW_MGR_MEM_IF_WRITE_DQS_WIDTH : RW_MGR_MEM_IF_READ_DQS_WIDTH) +#define MAX_DQ (RW_MGR_MEM_DATA_WIDTH) +#define MAX_DM (RW_MGR_MEM_DATA_MASK_WIDTH) + +/* length of VFIFO, from SW_MACROS */ +#define VFIFO_SIZE (READ_VALID_FIFO_SIZE) + +/* Memory for data transfer between TCL scripts and NIOS. + * + * - First word is a command request. + * - The remaining words are part of the transfer. + */ + +/* Define the base address of each manager. */ + +/* MarkW: how should these base addresses be done for A-V? */ +#define BASE_PTR_MGR SEQUENCER_PTR_MGR_INST_BASE +#define BASE_PHY_MGR (0x00088000) +#define BASE_RW_MGR (0x00090000) +#define BASE_DATA_MGR (0x00098000) +#define BASE_SCC_MGR SEQUENCER_SCC_MGR_INST_BASE +#define BASE_REG_FILE SEQUENCER_REG_FILE_INST_BASE +#define BASE_TIMER SEQUENCER_TIMER_INST_BASE +#define BASE_MMR (0x000C0000) +#define BASE_TRK_MGR (0x000D0000) + +/* Register file addresses. */ +#define REG_FILE_SIGNATURE (BASE_REG_FILE + 0x0000) +#define REG_FILE_DEBUG_DATA_ADDR (BASE_REG_FILE + 0x0004) +#define REG_FILE_CUR_STAGE (BASE_REG_FILE + 0x0008) +#define REG_FILE_FOM (BASE_REG_FILE + 0x000C) +#define REG_FILE_FAILING_STAGE (BASE_REG_FILE + 0x0010) +#define REG_FILE_DEBUG1 (BASE_REG_FILE + 0x0014) +#define REG_FILE_DEBUG2 (BASE_REG_FILE + 0x0018) + +#define REG_FILE_DTAPS_PER_PTAP (BASE_REG_FILE + 0x001C) +#define REG_FILE_TRK_SAMPLE_COUNT (BASE_REG_FILE + 0x0020) +#define REG_FILE_TRK_LONGIDLE (BASE_REG_FILE + 0x0024) +#define REG_FILE_DELAYS (BASE_REG_FILE + 0x0028) +#define REG_FILE_TRK_RW_MGR_ADDR (BASE_REG_FILE + 0x002C) +#define REG_FILE_TRK_READ_DQS_WIDTH (BASE_REG_FILE + 0x0030) +#define REG_FILE_TRK_RFSH (BASE_REG_FILE + 0x0034) +#define CTRL_CONFIG_REG (BASE_MMR + 0x0000) + +/* PHY manager configuration registers. */ + +#define PHY_MGR_PHY_RLAT (BASE_PHY_MGR + 0x4000) +#define PHY_MGR_RESET_MEM_STBL (BASE_PHY_MGR + 0x4004) +#define PHY_MGR_MUX_SEL (BASE_PHY_MGR + 0x4008) +#define PHY_MGR_CAL_STATUS (BASE_PHY_MGR + 0x400c) +#define PHY_MGR_CAL_DEBUG_INFO (BASE_PHY_MGR + 0x4010) +#define PHY_MGR_VFIFO_RD_EN_OVRD (BASE_PHY_MGR + 0x4014) +#define PHY_MGR_AFI_WLAT (BASE_PHY_MGR + 0x4018) +#define PHY_MGR_AFI_RLAT (BASE_PHY_MGR + 0x401c) + +#define PHY_MGR_CAL_RESET (0) +#define PHY_MGR_CAL_SUCCESS (1) +#define PHY_MGR_CAL_FAIL (2) + +/* PHY manager command addresses. */ + +#define PHY_MGR_CMD_INC_VFIFO_FR (BASE_PHY_MGR + 0x0000) +#define PHY_MGR_CMD_INC_VFIFO_HR (BASE_PHY_MGR + 0x0004) +#define PHY_MGR_CMD_INC_VFIFO_HARD_PHY (BASE_PHY_MGR + 0x0004) +#define PHY_MGR_CMD_FIFO_RESET (BASE_PHY_MGR + 0x0008) +#define PHY_MGR_CMD_INC_VFIFO_FR_HR (BASE_PHY_MGR + 0x000C) +#define PHY_MGR_CMD_INC_VFIFO_QR (BASE_PHY_MGR + 0x0010) + +/* PHY manager parameters. 
*/ + +#define PHY_MGR_MAX_RLAT_WIDTH (BASE_PHY_MGR + 0x0000) +#define PHY_MGR_MAX_AFI_WLAT_WIDTH (BASE_PHY_MGR + 0x0004) +#define PHY_MGR_MAX_AFI_RLAT_WIDTH (BASE_PHY_MGR + 0x0008) +#define PHY_MGR_CALIB_SKIP_STEPS (BASE_PHY_MGR + 0x000c) +#define PHY_MGR_CALIB_VFIFO_OFFSET (BASE_PHY_MGR + 0x0010) +#define PHY_MGR_CALIB_LFIFO_OFFSET (BASE_PHY_MGR + 0x0014) +#define PHY_MGR_RDIMM (BASE_PHY_MGR + 0x0018) +#define PHY_MGR_MEM_T_WL (BASE_PHY_MGR + 0x001c) +#define PHY_MGR_MEM_T_RL (BASE_PHY_MGR + 0x0020) + +/* Data Manager */ +#define DATA_MGR_DRAM_CFG (BASE_DATA_MGR + 0x0000) +#define DATA_MGR_MEM_T_WL (BASE_DATA_MGR + 0x0004) +#define DATA_MGR_MEM_T_ADD (BASE_DATA_MGR + 0x0008) +#define DATA_MGR_MEM_T_RL (BASE_DATA_MGR + 0x000C) +#define DATA_MGR_MEM_T_RFC (BASE_DATA_MGR + 0x0010) +#define DATA_MGR_MEM_T_REFI (BASE_DATA_MGR + 0x0014) +#define DATA_MGR_MEM_T_WR (BASE_DATA_MGR + 0x0018) +#define DATA_MGR_MEM_T_MRD (BASE_DATA_MGR + 0x001C) +#define DATA_MGR_COL_WIDTH (BASE_DATA_MGR + 0x0020) +#define DATA_MGR_ROW_WIDTH (BASE_DATA_MGR + 0x0024) +#define DATA_MGR_BANK_WIDTH (BASE_DATA_MGR + 0x0028) +#define DATA_MGR_CS_WIDTH (BASE_DATA_MGR + 0x002C) +#define DATA_MGR_ITF_WIDTH (BASE_DATA_MGR + 0x0030) +#define DATA_MGR_DVC_WIDTH (BASE_DATA_MGR + 0x0034) + +#define MEM_T_WL_ADD DATA_MGR_MEM_T_WL +#define MEM_T_RL_ADD DATA_MGR_MEM_T_RL + +#define CALIB_SKIP_DELAY_LOOPS (1 << 0) +#define CALIB_SKIP_ALL_BITS_CHK (1 << 1) +#define CALIB_SKIP_DELAY_SWEEPS (1 << 2) +#define CALIB_SKIP_VFIFO (1 << 3) +#define CALIB_SKIP_LFIFO (1 << 4) +#define CALIB_SKIP_WLEVEL (1 << 5) +#define CALIB_SKIP_WRITES (1 << 6) +#define CALIB_SKIP_FULL_TEST (1 << 7) +#define CALIB_SKIP_ALL (CALIB_SKIP_VFIFO | CALIB_SKIP_LFIFO | CALIB_SKIP_WLEVEL | CALIB_SKIP_WRITES | CALIB_SKIP_FULL_TEST) +#define CALIB_IN_RTL_SIM (1 << 8) + +/* Scan chain manager command addresses */ + +#define WRITE_SCC_DQS_IN_DELAY(group, delay) IOWR_32DIRECT(SCC_MGR_DQS_IN_DELAY, (group) << 2, delay) +#define WRITE_SCC_DQS_EN_DELAY(group, delay) IOWR_32DIRECT(SCC_MGR_DQS_EN_DELAY, (group) << 2, (delay) + IO_DQS_EN_DELAY_OFFSET) +#define WRITE_SCC_DQS_EN_PHASE(group, phase) IOWR_32DIRECT(SCC_MGR_DQS_EN_PHASE, (group) << 2, phase) +#define WRITE_SCC_DQDQS_OUT_PHASE(group, phase) IOWR_32DIRECT(SCC_MGR_DQDQS_OUT_PHASE, (group) << 2, phase) +#define WRITE_SCC_OCT_OUT1_DELAY(group, delay) IOWR_32DIRECT(SCC_MGR_OCT_OUT1_DELAY, (group) << 2, delay) +#define WRITE_SCC_OCT_OUT2_DELAY(group, delay) +#define WRITE_SCC_DQS_BYPASS(group, bypass) + +#define WRITE_SCC_DQ_OUT1_DELAY(pin, delay) IOWR_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (pin) << 2, delay) + +#define WRITE_SCC_DQ_OUT2_DELAY(pin, delay) + +#define WRITE_SCC_DQ_IN_DELAY(pin, delay) IOWR_32DIRECT(SCC_MGR_IO_IN_DELAY, (pin) << 2, delay) + +#define WRITE_SCC_DQ_BYPASS(pin, bypass) + +#define WRITE_SCC_RFIFO_MODE(pin, mode) + +#define WRITE_SCC_HHP_EXTRAS(value) IOWR_32DIRECT(SCC_MGR_HHP_GLOBALS, SCC_MGR_HHP_EXTRAS_OFFSET, value) +#define WRITE_SCC_HHP_DQSE_MAP(value) IOWR_32DIRECT(SCC_MGR_HHP_GLOBALS, SCC_MGR_HHP_DQSE_MAP_OFFSET, value) + +#define WRITE_SCC_DQS_IO_OUT1_DELAY(delay) IOWR_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS) << 2, delay) + +#define WRITE_SCC_DQS_IO_OUT2_DELAY(delay) + +#define WRITE_SCC_DQS_IO_IN_DELAY(delay) IOWR_32DIRECT(SCC_MGR_IO_IN_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS) << 2, delay) + +#define WRITE_SCC_DM_IO_OUT1_DELAY(pin, delay) IOWR_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + pin) << 2, delay) + +#define WRITE_SCC_DM_IO_OUT2_DELAY(pin, delay) + 
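+
+/* (editor note, layout implied by the WRITE_SCC / READ_SCC macros around this
+ * point): in the SCC_MGR_IO_OUT1_DELAY and SCC_MGR_IO_IN_DELAY maps, indices
+ * 0 .. RW_MGR_MEM_DQ_PER_WRITE_DQS - 1 address the DQ pins, index
+ * RW_MGR_MEM_DQ_PER_WRITE_DQS addresses the DQS I/O, and the DM pins follow
+ * at RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + pin. */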
+#define WRITE_SCC_DM_IO_IN_DELAY(pin, delay) IOWR_32DIRECT(SCC_MGR_IO_IN_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + pin) << 2, delay) + +#define WRITE_SCC_DM_BYPASS(pin, bypass) + +#define READ_SCC_DQS_IN_DELAY(group) IORD_32DIRECT(SCC_MGR_DQS_IN_DELAY, (group) << 2) +#define READ_SCC_DQS_EN_DELAY(group) (IORD_32DIRECT(SCC_MGR_DQS_EN_DELAY, (group) << 2) - IO_DQS_EN_DELAY_OFFSET) +#define READ_SCC_DQS_EN_PHASE(group) IORD_32DIRECT(SCC_MGR_DQS_EN_PHASE, (group) << 2) +#define READ_SCC_DQDQS_OUT_PHASE(group) IORD_32DIRECT(SCC_MGR_DQDQS_OUT_PHASE, (group) << 2) +#define READ_SCC_OCT_OUT1_DELAY(group) IORD_32DIRECT(SCC_MGR_OCT_OUT1_DELAY, (group * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH) << 2) +#define READ_SCC_OCT_OUT2_DELAY(group) 0 +#define READ_SCC_DQS_BYPASS(group) 0 +#define READ_SCC_DQS_BYPASS(group) 0 + +#define READ_SCC_DQ_OUT1_DELAY(pin) IORD_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (pin) << 2) +#define READ_SCC_DQ_OUT2_DELAY(pin) 0 +#define READ_SCC_DQ_IN_DELAY(pin) IORD_32DIRECT(SCC_MGR_IO_IN_DELAY, (pin) << 2) +#define READ_SCC_DQ_BYPASS(pin) 0 +#define READ_SCC_RFIFO_MODE(pin) 0 + +#define READ_SCC_DQS_IO_OUT1_DELAY() IORD_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS) << 2) +#define READ_SCC_DQS_IO_OUT2_DELAY() 0 +#define READ_SCC_DQS_IO_IN_DELAY() IORD_32DIRECT(SCC_MGR_IO_IN_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS) << 2) + +#define READ_SCC_DM_IO_OUT1_DELAY(pin) IORD_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + pin) << 2) +#define READ_SCC_DM_IO_OUT2_DELAY(pin) 0 +#define READ_SCC_DM_IO_IN_DELAY(pin) IORD_32DIRECT(SCC_MGR_IO_IN_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + pin) << 2) +#define READ_SCC_DM_BYPASS(pin) 0 + +#define SCC_MGR_GROUP_COUNTER (BASE_SCC_MGR + 0x0000) +#define SCC_MGR_DQS_IN_DELAY (BASE_SCC_MGR + 0x0100) +#define SCC_MGR_DQS_EN_PHASE (BASE_SCC_MGR + 0x0200) +#define SCC_MGR_DQS_EN_DELAY (BASE_SCC_MGR + 0x0300) +#define SCC_MGR_DQDQS_OUT_PHASE (BASE_SCC_MGR + 0x0400) +#define SCC_MGR_OCT_OUT1_DELAY (BASE_SCC_MGR + 0x0500) +#define SCC_MGR_IO_OUT1_DELAY (BASE_SCC_MGR + 0x0700) +#define SCC_MGR_IO_IN_DELAY (BASE_SCC_MGR + 0x0900) + +/* HHP-HPS-specific versions of some commands */ +#define SCC_MGR_DQS_EN_DELAY_GATE (BASE_SCC_MGR + 0x0600) +#define SCC_MGR_IO_OE_DELAY (BASE_SCC_MGR + 0x0800) +#define SCC_MGR_HHP_GLOBALS (BASE_SCC_MGR + 0x0A00) +#define SCC_MGR_HHP_RFILE (BASE_SCC_MGR + 0x0B00) + +/* HHP-HPS-specific values */ +#define SCC_MGR_HHP_EXTRAS_OFFSET 0 +#define SCC_MGR_HHP_DQSE_MAP_OFFSET 1 + +#define SCC_MGR_DQS_ENA (BASE_SCC_MGR + 0x0E00) +#define SCC_MGR_DQS_IO_ENA (BASE_SCC_MGR + 0x0E04) +#define SCC_MGR_DQ_ENA (BASE_SCC_MGR + 0x0E08) +#define SCC_MGR_DM_ENA (BASE_SCC_MGR + 0x0E0C) +#define SCC_MGR_UPD (BASE_SCC_MGR + 0x0E20) +#define SCC_MGR_ACTIVE_RANK (BASE_SCC_MGR + 0x0E40) +#define SCC_MGR_AFI_CAL_INIT (BASE_SCC_MGR + 0x0D00) + +// PHY Debug mode flag constants +#define PHY_DEBUG_IN_DEBUG_MODE 0x00000001 +#define PHY_DEBUG_ENABLE_CAL_RPT 0x00000002 +#define PHY_DEBUG_ENABLE_MARGIN_RPT 0x00000004 +#define PHY_DEBUG_SWEEP_ALL_GROUPS 0x00000008 +#define PHY_DEBUG_DISABLE_GUARANTEED_READ 0x00000010 +#define PHY_DEBUG_ENABLE_NON_DESTRUCTIVE_CALIBRATION 0x00000020 + +// Init and Reset delay constants - Only use if defined by sequencer_defines.h, +// otherwise, revert to defaults +// Default for Tinit = (0+1) * ((202+1) * (2 * 131 + 1) + 1) = 53532 = 200.75us @ 266MHz +#ifdef TINIT_CNTR0_VAL +#define SEQ_TINIT_CNTR0_VAL TINIT_CNTR0_VAL +#else +#define SEQ_TINIT_CNTR0_VAL 0 +#endif + +#ifdef TINIT_CNTR1_VAL 
+#define SEQ_TINIT_CNTR1_VAL TINIT_CNTR1_VAL +#else +#define SEQ_TINIT_CNTR1_VAL 202 +#endif + +#ifdef TINIT_CNTR2_VAL +#define SEQ_TINIT_CNTR2_VAL TINIT_CNTR2_VAL +#else +#define SEQ_TINIT_CNTR2_VAL 131 +#endif + +// Default for Treset = (2+1) * ((252+1) * (2 * 131 + 1) + 1) = 133563 = 500.86us @ 266MHz +#ifdef TRESET_CNTR0_VAL +#define SEQ_TRESET_CNTR0_VAL TRESET_CNTR0_VAL +#else +#define SEQ_TRESET_CNTR0_VAL 2 +#endif + +#ifdef TRESET_CNTR1_VAL +#define SEQ_TRESET_CNTR1_VAL TRESET_CNTR1_VAL +#else +#define SEQ_TRESET_CNTR1_VAL 252 +#endif + +#ifdef TRESET_CNTR2_VAL +#define SEQ_TRESET_CNTR2_VAL TRESET_CNTR2_VAL +#else +#define SEQ_TRESET_CNTR2_VAL 131 +#endif + +/* Bitfield type changes depending on protocol */ +typedef uint32_t t_btfld; + +#define RW_MGR_INST_ROM_WRITE BASE_RW_MGR + 0x1800 +#define RW_MGR_AC_ROM_WRITE BASE_RW_MGR + 0x1C00 + +static const uint32_t inst_rom_init_size; +static const uint32_t inst_rom_init[]; +static const uint32_t ac_rom_init_size; +static const uint32_t ac_rom_init[]; + +/* parameter variable holder */ + +typedef struct param_type { + t_btfld dm_correct_mask; + t_btfld read_correct_mask; + t_btfld read_correct_mask_vg; + t_btfld write_correct_mask; + t_btfld write_correct_mask_vg; + + /* set a particular entry to 1 if we need to skip a particular rank */ + + uint32_t skip_ranks[MAX_RANKS]; + + /* set a particular entry to 1 if we need to skip a particular group */ + + uint32_t skip_groups; + + /* set a particular entry to 1 if the shadow register (which represents a set of ranks) needs to be skipped */ + + uint32_t skip_shadow_regs[NUM_SHADOW_REGS]; + +} param_t; + +/* global variable holder */ + +typedef struct gbl_type { + + uint32_t phy_debug_mode_flags; + + /* current read latency */ + + uint32_t curr_read_lat; + + /* current write latency */ + + uint32_t curr_write_lat; + + /* error code */ + + uint32_t error_substage; + uint32_t error_stage; + uint32_t error_group; + + /* figure-of-merit in, figure-of-merit out */ + + uint32_t fom_in; + uint32_t fom_out; + + //USER Number of RW Mgr NOP cycles between write command and write data + uint32_t rw_wl_nop_cycles; +} gbl_t; + +// External global variables +static gbl_t *gbl; +static param_t *param; + +// External functions +static uint32_t run_mem_calibrate(void); +static void rw_mgr_mem_initialize(void); +static void rw_mgr_mem_dll_lock_wait(void); +static inline void scc_mgr_set_dq_in_delay(uint32_t write_group, uint32_t dq_in_group, + uint32_t delay); +static inline void scc_mgr_set_dq_out1_delay(uint32_t write_group, uint32_t dq_in_group, + uint32_t delay); +static inline void scc_mgr_set_dq_out2_delay(uint32_t write_group, uint32_t dq_in_group, + uint32_t delay); +static inline void scc_mgr_load_dq(uint32_t dq_in_group); +static inline void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay); +static inline void scc_mgr_load_dqs(uint32_t dqs); +static void scc_mgr_set_group_dqs_io_and_oct_out1_gradual(uint32_t write_group, uint32_t delay); +static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group, uint32_t delay); +static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group, uint32_t phase); +static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group, uint32_t phase); +static inline void scc_mgr_set_dm_out1_delay(uint32_t write_group, uint32_t dm, uint32_t delay); +static inline void scc_mgr_set_dm_out2_delay(uint32_t write_group, uint32_t dm, uint32_t delay); +static inline void scc_mgr_load_dm(uint32_t dm); +int sdram_calibration(void); +#endif diff 
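(Illustration only, not part of the moved header: a rough sketch of how the CALIB_SKIP_* bits and the PHY manager status words defined above are consumed. The helper names and the plain uint32_t parameter are invented for this sketch; IORD_32DIRECT is the accessor used throughout this file, provided by sdram_io.h, and the CALIB_SKIP_*/PHY_MGR_CAL_* symbols are the ones defined above.)

	/* illustration only -- hypothetical helpers, not in the patch */
	static int calib_step_vfifo_enabled(uint32_t calib_steps)
	{
		/* a set CALIB_SKIP_VFIFO bit means the VFIFO stage is skipped */
		return !(calib_steps & CALIB_SKIP_VFIFO);
	}

	static int calib_reported_success(void)
	{
		/* PHY_MGR_CAL_STATUS holds PHY_MGR_CAL_RESET, _SUCCESS or _FAIL */
		return IORD_32DIRECT(PHY_MGR_CAL_STATUS, 0) == PHY_MGR_CAL_SUCCESS;
	}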
--git a/arch/arm/mach-socfpga/include/mach/cyclone5-system-manager.h b/arch/arm/mach-socfpga/include/mach/cyclone5-system-manager.h new file mode 100644 index 0000000000..9efc37a4dc --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/cyclone5-system-manager.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2012 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _SYSTEM_MANAGER_H_ +#define _SYSTEM_MANAGER_H_ + +void socfpga_sysmgr_pinmux_init(unsigned long *sys_mgr_init_table, int num); + +/* address */ +#define CONFIG_SYSMGR_ROMCODEGRP_CTRL (CYCLONE5_SYSMGR_ADDRESS + 0xc0) + +/* FPGA interface group */ +#define SYSMGR_FPGAINTF_MODULE (CYCLONE5_SYSMGR_ADDRESS + 0x28) +/* EMAC interface selection */ +#define CONFIG_SYSMGR_EMAC_CTRL (CYCLONE5_SYSMGR_ADDRESS + 0x60) + +#define ISWGRP_HANDOFF_AXIBRIDGE SYSMGR_ISWGRP_HANDOFF0 +#define ISWGRP_HANDOFF_L3REMAP SYSMGR_ISWGRP_HANDOFF1 +#define ISWGRP_HANDOFF_FPGAINTF SYSMGR_ISWGRP_HANDOFF2 +#define ISWGRP_HANDOFF_FPGA2SDR SYSMGR_ISWGRP_HANDOFF3 + +/* pin mux */ +#define SYSMGR_PINMUXGRP (CYCLONE5_SYSMGR_ADDRESS + 0x400) +#define SYSMGR_PINMUXGRP_NANDUSEFPGA (SYSMGR_PINMUXGRP + 0x2F0) +#define SYSMGR_PINMUXGRP_EMAC1USEFPGA (SYSMGR_PINMUXGRP + 0x2F8) +#define SYSMGR_PINMUXGRP_SDMMCUSEFPGA (SYSMGR_PINMUXGRP + 0x308) +#define SYSMGR_PINMUXGRP_EMAC0USEFPGA (SYSMGR_PINMUXGRP + 0x314) +#define SYSMGR_PINMUXGRP_SPIM1USEFPGA (SYSMGR_PINMUXGRP + 0x330) +#define SYSMGR_PINMUXGRP_SPIM0USEFPGA (SYSMGR_PINMUXGRP + 0x338) + +/* bit fields */ +#define CONFIG_SYSMGR_PINMUXGRP_OFFSET (0x400) +#define SYSMGR_ROMCODEGRP_CTRL_WARMRSTCFGPINMUX (1<<0) +#define SYSMGR_ROMCODEGRP_CTRL_WARMRSTCFGIO (1<<1) +#define SYSMGR_ECC_OCRAM_EN (1<<0) +#define SYSMGR_ECC_OCRAM_SERR (1<<3) +#define SYSMGR_ECC_OCRAM_DERR (1<<4) +#define SYSMGR_FPGAINTF_USEFPGA 0x1 +#define SYSMGR_FPGAINTF_SPIM0 (1<<0) +#define SYSMGR_FPGAINTF_SPIM1 (1<<1) +#define SYSMGR_FPGAINTF_EMAC0 (1<<2) +#define SYSMGR_FPGAINTF_EMAC1 (1<<3) +#define SYSMGR_FPGAINTF_NAND (1<<4) +#define SYSMGR_FPGAINTF_SDMMC (1<<5) + +/* Enumeration: sysmgr::emacgrp::ctrl::physel::enum */ +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 +#define SYSMGR_EMACGRP_CTRL_PHYSEL0_LSB 0 +#define SYSMGR_EMACGRP_CTRL_PHYSEL1_LSB 2 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 + +#endif /* _SYSTEM_MANAGER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/freeze-controller.h b/arch/arm/mach-socfpga/include/mach/freeze-controller.h deleted file mode 100644 index 4253f5b38f..0000000000 --- a/arch/arm/mach-socfpga/include/mach/freeze-controller.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or 
- * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef _FREEZE_CONTROLLER_H_ -#define _FREEZE_CONTROLLER_H_ - -#include - -#define SYSMGR_FRZCTRL_ADDRESS 0x40 -#define SYSMGR_FRZCTRL_VIOCTRL_ADDRESS 0x40 -#define SYSMGR_FRZCTRL_HIOCTRL_ADDRESS 0x50 -#define SYSMGR_FRZCTRL_SRC_ADDRESS 0x54 -#define SYSMGR_FRZCTRL_HWCTRL_ADDRESS 0x58 - -#define SYSMGR_FRZCTRL_SRC_VIO1_ENUM_SW 0x0 -#define SYSMGR_FRZCTRL_SRC_VIO1_ENUM_HW 0x1 -#define SYSMGR_FRZCTRL_VIOCTRL_SLEW_MASK 0x00000010 -#define SYSMGR_FRZCTRL_VIOCTRL_WKPULLUP_MASK 0x00000008 -#define SYSMGR_FRZCTRL_VIOCTRL_TRISTATE_MASK 0x00000004 -#define SYSMGR_FRZCTRL_VIOCTRL_BUSHOLD_MASK 0x00000002 -#define SYSMGR_FRZCTRL_VIOCTRL_CFG_MASK 0x00000001 -#define SYSMGR_FRZCTRL_HIOCTRL_SLEW_MASK 0x00000010 -#define SYSMGR_FRZCTRL_HIOCTRL_WKPULLUP_MASK 0x00000008 -#define SYSMGR_FRZCTRL_HIOCTRL_TRISTATE_MASK 0x00000004 -#define SYSMGR_FRZCTRL_HIOCTRL_BUSHOLD_MASK 0x00000002 -#define SYSMGR_FRZCTRL_HIOCTRL_CFG_MASK 0x00000001 -#define SYSMGR_FRZCTRL_HIOCTRL_REGRST_MASK 0x00000080 -#define SYSMGR_FRZCTRL_HIOCTRL_OCTRST_MASK 0x00000040 -#define SYSMGR_FRZCTRL_HIOCTRL_OCT_CFGEN_CALSTART_MASK 0x00000100 -#define SYSMGR_FRZCTRL_HIOCTRL_DLLRST_MASK 0x00000020 -#define SYSMGR_FRZCTRL_HWCTRL_VIO1REQ_MASK 0x00000001 -#define SYSMGR_FRZCTRL_HWCTRL_VIO1STATE_ENUM_FROZEN 0x2 -#define SYSMGR_FRZCTRL_HWCTRL_VIO1STATE_ENUM_THAWED 0x1 - -#define SYSMGR_FRZCTRL_HWCTRL_VIO1STATE_GET(x) (((x) & 0x00000006) >> 1) - -/* - * FreezeChannelSelect - * Definition of enum for freeze channel - */ -enum frz_channel_id { - FREEZE_CHANNEL_0 = 0, /* EMAC_IO & MIXED2_IO */ - FREEZE_CHANNEL_1, /* MIXED1_IO and FLASH_IO */ - FREEZE_CHANNEL_2, /* General IO */ - FREEZE_CHANNEL_3, /* DDR IO */ -}; - -/* Shift count needed to calculte for FRZCTRL VIO control register offset */ -#define SYSMGR_FRZCTRL_VIOCTRL_SHIFT (2) - -/* - * Freeze HPS IOs - * - * FreezeChannelSelect [in] - Freeze channel ID - * FreezeControllerFSMSelect [in] - To use hardware or software state machine - * If FREEZE_CONTROLLER_FSM_HW is selected for FSM select then the - * the freeze channel id is input is ignored. It is default to channel 1 - */ -int sys_mgr_frzctrl_freeze_req(enum frz_channel_id channel_id); - -/* - * Unfreeze/Thaw HPS IOs - * - * FreezeChannelSelect [in] - Freeze channel ID - * FreezeControllerFSMSelect [in] - To use hardware or software state machine - * If FREEZE_CONTROLLER_FSM_HW is selected for FSM select then the - * the freeze channel id is input is ignored. 
It is default to channel 1 - */ -int sys_mgr_frzctrl_thaw_req(enum frz_channel_id channel_id); - -#endif /* _FREEZE_CONTROLLER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/pll_config.h b/arch/arm/mach-socfpga/include/mach/pll_config.h index bb491d82f1..1a7e851eda 100644 --- a/arch/arm/mach-socfpga/include/mach/pll_config.h +++ b/arch/arm/mach-socfpga/include/mach/pll_config.h @@ -1,5 +1,5 @@ -#include +#include static struct socfpga_cm_config cm_default_cfg = { /* main group */ diff --git a/arch/arm/mach-socfpga/include/mach/reset-manager.h b/arch/arm/mach-socfpga/include/mach/reset-manager.h deleted file mode 100644 index 899401ce3c..0000000000 --- a/arch/arm/mach-socfpga/include/mach/reset-manager.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef _RESET_MANAGER_H_ -#define _RESET_MANAGER_H_ - -#define RESET_MGR_STATUS_OFS 0x0 -#define RESET_MGR_CTRL_OFS 0x4 -#define RESET_MGR_COUNTS_OFS 0x8 -#define RESET_MGR_MPU_MOD_RESET_OFS 0x10 -#define RESET_MGR_PER_MOD_RESET_OFS 0x14 -#define RESET_MGR_PER2_MOD_RESET_OFS 0x18 -#define RESET_MGR_BRG_MOD_RESET_OFS 0x1c - -#define RSTMGR_CTRL_SWWARMRSTREQ_LSB 1 -#define RSTMGR_PERMODRST_OSC1TIMER0_LSB 8 - -#define RSTMGR_PERMODRST_EMAC0_LSB 0 -#define RSTMGR_PERMODRST_EMAC1_LSB 1 -#define RSTMGR_PERMODRST_L4WD0_LSB 6 -#define RSTMGR_PERMODRST_SDR_LSB 29 -#define RSTMGR_BRGMODRST_HPS2FPGA_MASK 0x00000001 -#define RSTMGR_BRGMODRST_LWHPS2FPGA_MASK 0x00000002 -#define RSTMGR_BRGMODRST_FPGA2HPS_MASK 0x00000004 - -/* Warm Reset mask */ -#define RSTMGR_STAT_L4WD1RST_MASK 0x00008000 -#define RSTMGR_STAT_L4WD0RST_MASK 0x00004000 -#define RSTMGR_STAT_MPUWD1RST_MASK 0x00002000 -#define RSTMGR_STAT_MPUWD0RST_MASK 0x00001000 -#define RSTMGR_STAT_SWWARMRST_MASK 0x00000400 -#define RSTMGR_STAT_FPGAWARMRST_MASK 0x00000200 -#define RSTMGR_STAT_NRSTPINRST_MASK 0x00000100 -#define RSTMGR_WARMRST_MASK 0x0000f700 - -#define RSTMGR_CTRL_SDRSELFREFEN_MASK 0x00000010 -#define RSTMGR_CTRL_FPGAHSEN_MASK 0x00010000 -#define RSTMGR_CTRL_ETRSTALLEN_MASK 0x00100000 - -#define RSTMGR_PERMODRST_EMAC0 (1 << 0) -#define RSTMGR_PERMODRST_EMAC1 (1 << 1) -#define RSTMGR_PERMODRST_USB0 (1 << 2) -#define RSTMGR_PERMODRST_USB1 (1 << 3) -#define RSTMGR_PERMODRST_NAND (1 << 4) -#define RSTMGR_PERMODRST_QSPI (1 << 5) -#define RSTMGR_PERMODRST_L4WD0 (1 << 6) -#define RSTMGR_PERMODRST_L4WD1 (1 << 7) -#define RSTMGR_PERMODRST_OSC1TIMER1 (1 << 9) -#define RSTMGR_PERMODRST_SPTIMER0 (1 << 10) -#define RSTMGR_PERMODRST_SPTIMER1 (1 << 11) -#define RSTMGR_PERMODRST_I2C0 (1 << 12) -#define RSTMGR_PERMODRST_I2C1 (1 << 13) -#define RSTMGR_PERMODRST_I2C2 (1 << 14) -#define RSTMGR_PERMODRST_I2C3 (1 << 15) -#define RSTMGR_PERMODRST_UART0 (1 << 16) -#define RSTMGR_PERMODRST_UART1 (1 << 17) -#define RSTMGR_PERMODRST_SPIM0 (1 << 18) -#define RSTMGR_PERMODRST_SPIM1 (1 << 19) -#define RSTMGR_PERMODRST_SPIS0 (1 << 20) -#define 
RSTMGR_PERMODRST_SPIS1 (1 << 21) -#define RSTMGR_PERMODRST_SDMMC (1 << 22) -#define RSTMGR_PERMODRST_CAN0 (1 << 23) -#define RSTMGR_PERMODRST_CAN1 (1 << 24) -#define RSTMGR_PERMODRST_GPIO0 (1 << 25) -#define RSTMGR_PERMODRST_GPIO1 (1 << 26) -#define RSTMGR_PERMODRST_GPIO2 (1 << 27) -#define RSTMGR_PERMODRST_DMA (1 << 28) -#define RSTMGR_PERMODRST_SDR (1 << 29) - -#define RSTMGR_PER2MODRST_DMAIF0 (1 << 0) -#define RSTMGR_PER2MODRST_DMAIF1 (1 << 1) -#define RSTMGR_PER2MODRST_DMAIF2 (1 << 2) -#define RSTMGR_PER2MODRST_DMAIF3 (1 << 3) -#define RSTMGR_PER2MODRST_DMAIF4 (1 << 4) -#define RSTMGR_PER2MODRST_DMAIF5 (1 << 5) -#define RSTMGR_PER2MODRST_DMAIF6 (1 << 6) -#define RSTMGR_PER2MODRST_DMAIF7 (1 << 7) - -#endif /* _RESET_MANAGER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/scan-manager.h b/arch/arm/mach-socfpga/include/mach/scan-manager.h deleted file mode 100644 index 568bedfde1..0000000000 --- a/arch/arm/mach-socfpga/include/mach/scan-manager.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef _SCAN_MANAGER_H_ -#define _SCAN_MANAGER_H_ - -#include -#include - -/*********************************************************** - * * - * Cyclone5 specific stuff. Get rid of this. 
* - * * - ***********************************************************/ -#define CONFIG_HPS_IOCSR_SCANCHAIN0_LENGTH (764) -#define CONFIG_HPS_IOCSR_SCANCHAIN1_LENGTH (1719) -#define CONFIG_HPS_IOCSR_SCANCHAIN2_LENGTH (955) -#define CONFIG_HPS_IOCSR_SCANCHAIN3_LENGTH (16766) - -typedef unsigned long Scan_mgr_entry_t; - -#define NUM_OF_CHAINS (4) -#define SHIFT_COUNT_32BIT (5) -#define MASK_COUNT_32BIT (0x1F) - -#define SCANMGR_STAT_ADDRESS 0x0 -#define SCANMGR_EN_ADDRESS 0x4 -#define SCANMGR_FIFOSINGLEBYTE_ADDRESS 0x10 -#define SCANMGR_FIFODOUBLEBYTE_ADDRESS 0x14 -#define SCANMGR_FIFOQUADBYTE_ADDRESS 0x1c - -#define SCANMGR_STAT_ACTIVE_GET(x) (((x) & 0x80000000) >> 31) -#define SCANMGR_STAT_WFIFOCNT_GET(x) (((x) & 0x70000000) >> 28) - -enum io_scan_chain { - IO_SCAN_CHAIN_0 = 0, /* EMAC_IO and MIXED2_IO */ - IO_SCAN_CHAIN_1, /* MIXED1_IO and FLASH_IO */ - IO_SCAN_CHAIN_2, /* General IO */ - IO_SCAN_CHAIN_3, /* DDR IO */ - IO_SCAN_CHAIN_UNDEFINED -}; - -#define IO_SCAN_CHAIN_NUM NUM_OF_CHAINS -/* Maximum number of IO scan chains */ - -#define IO_SCAN_CHAIN_128BIT_SHIFT (7) -/* - * Shift count to get number of IO scan chain data in granularity - * of 128-bit ( N / 128 ) - */ - -#define IO_SCAN_CHAIN_128BIT_MASK (0x7F) -/* - * Mask to get residual IO scan chain data in - * granularity of 128-bit ( N mod 128 ) - */ - -#define IO_SCAN_CHAIN_32BIT_SHIFT SHIFT_COUNT_32BIT -/* - * Shift count to get number of IO scan chain - * data in granularity of 32-bit ( N / 32 ) - */ - -#define IO_SCAN_CHAIN_32BIT_MASK MASK_COUNT_32BIT -/* - * Mask to get residual IO scan chain data in - * granularity of 32-bit ( N mod 32 ) - */ - -#define IO_SCAN_CHAIN_BYTE_MASK (0xFF) -/* Byte mask */ - -#define IO_SCAN_CHAIN_PAYLOAD_24BIT (24) -/* 24-bits (3 bytes) IO scan chain payload definition */ - -#define TDI_TDO_MAX_PAYLOAD (127) -/* - * Maximum length of TDI_TDO packet payload is 128 bits, - * represented by (length - 1) in TDI_TDO header - */ - -#define TDI_TDO_HEADER_FIRST_BYTE (0x80) -/* TDI_TDO packet header for IO scan chain program */ - -#define TDI_TDO_HEADER_SECOND_BYTE_SHIFT (8) -/* Position of second command byte for TDI_TDO packet */ - -#define MAX_WAITING_DELAY_IO_SCAN_ENGINE (100) -/* - * Maximum polling loop to wait for IO scan chain engine - * becomes idle to prevent infinite loop - */ - -/* - * scan_mgr_io_scan_chain_prg - * - * Program HPS IO Scan Chain - * - * io_scan_chain_id @ref IOScanChainSelect [in] - IO scan chain ID with - * range of enumIOScanChainSelect * - * io_scan_chain_len_in_bits uint32_t [in] - IO scan chain length in bits - * *iocsr_scan_chain @ref Scan_mgr_entry_t [in] - IO scan chain table - */ -int scan_mgr_io_scan_chain_prg(enum io_scan_chain io_scan_chain_id, - uint32_t io_scan_chain_len_in_bits, - const unsigned long *iocsr_scan_chain); - -struct socfpga_io_config { - unsigned long *pinmux; - unsigned int num_pin; - const unsigned long *iocsr_emac_mixed2; - const unsigned long *iocsr_mixed1_flash; - const unsigned long *iocsr_general; - const unsigned long *iocsr_ddr; -}; - -#endif /* _SCAN_MANAGER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/sdram.h b/arch/arm/mach-socfpga/include/mach/sdram.h deleted file mode 100644 index ebd331e83e..0000000000 --- a/arch/arm/mach-socfpga/include/mach/sdram.h +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 
of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef _SDRAM_H_ -#define _SDRAM_H_ - -/* Group: sdr.phygrp.sccgrp */ -#define SDR_PHYGRP_SCCGRP_ADDRESS 0x0 -/* Group: sdr.phygrp.phymgrgrp */ -#define SDR_PHYGRP_PHYMGRGRP_ADDRESS 0x1000 -/* Group: sdr.phygrp.rwmgrgrp */ -#define SDR_PHYGRP_RWMGRGRP_ADDRESS 0x2000 -/* Group: sdr.phygrp.datamgrgrp */ -#define SDR_PHYGRP_DATAMGRGRP_ADDRESS 0x4000 -/* Group: sdr.phygrp.regfilegrp */ -#define SDR_PHYGRP_REGFILEGRP_ADDRESS 0x4800 -/* Group: sdr.ctrlgrp */ -#define SDR_CTRLGRP_ADDRESS 0x5000 -/* Register: sdr.ctrlgrp.ctrlcfg */ -#define SDR_CTRLGRP_CTRLCFG_ADDRESS 0x5000 -/* Register: sdr.ctrlgrp.dramtiming1 */ -#define SDR_CTRLGRP_DRAMTIMING1_ADDRESS 0x5004 -/* Register: sdr.ctrlgrp.dramtiming2 */ -#define SDR_CTRLGRP_DRAMTIMING2_ADDRESS 0x5008 -/* Register: sdr.ctrlgrp.dramtiming3 */ -#define SDR_CTRLGRP_DRAMTIMING3_ADDRESS 0x500c -/* Register: sdr.ctrlgrp.dramtiming4 */ -#define SDR_CTRLGRP_DRAMTIMING4_ADDRESS 0x5010 -/* Register: sdr.ctrlgrp.lowpwrtiming */ -#define SDR_CTRLGRP_LOWPWRTIMING_ADDRESS 0x5014 -/* Register: sdr.ctrlgrp.dramodt */ -#define SDR_CTRLGRP_DRAMODT_ADDRESS 0x5018 -/* Register: sdr.ctrlgrp.dramaddrw */ -#define SDR_CTRLGRP_DRAMADDRW_ADDRESS 0x502c -/* Register: sdr.ctrlgrp.dramifwidth */ -#define SDR_CTRLGRP_DRAMIFWIDTH_ADDRESS 0x5030 -/* Register: sdr.ctrlgrp.dramdevwidth */ -#define SDR_CTRLGRP_DRAMDEVWIDTH_ADDRESS 0x5034 -/* Register: sdr.ctrlgrp.dramsts */ -#define SDR_CTRLGRP_DRAMSTS_ADDRESS 0x5038 -/* Register: sdr.ctrlgrp.dramintr */ -#define SDR_CTRLGRP_DRAMINTR_ADDRESS 0x503c -/* Register: sdr.ctrlgrp.sbecount */ -#define SDR_CTRLGRP_SBECOUNT_ADDRESS 0x5040 -/* Register: sdr.ctrlgrp.dbecount */ -#define SDR_CTRLGRP_DBECOUNT_ADDRESS 0x5044 -/* Register: sdr.ctrlgrp.erraddr */ -#define SDR_CTRLGRP_ERRADDR_ADDRESS 0x5048 -/* Register: sdr.ctrlgrp.dropcount */ -#define SDR_CTRLGRP_DROPCOUNT_ADDRESS 0x504c -/* Register: sdr.ctrlgrp.dropaddr */ -#define SDR_CTRLGRP_DROPADDR_ADDRESS 0x5050 -/* Register: sdr.ctrlgrp.staticcfg */ -#define SDR_CTRLGRP_STATICCFG_ADDRESS 0x505c -/* Register: sdr.ctrlgrp.ctrlwidth */ -#define SDR_CTRLGRP_CTRLWIDTH_ADDRESS 0x5060 -/* Register: sdr.ctrlgrp.cportwidth */ -#define SDR_CTRLGRP_CPORTWIDTH_ADDRESS 0x5064 -/* Register: sdr.ctrlgrp.cportwmap */ -#define SDR_CTRLGRP_CPORTWMAP_ADDRESS 0x5068 -/* Register: sdr.ctrlgrp.cportrmap */ -#define SDR_CTRLGRP_CPORTRMAP_ADDRESS 0x506c -/* Register: sdr.ctrlgrp.rfifocmap */ -#define SDR_CTRLGRP_RFIFOCMAP_ADDRESS 0x5070 -/* Register: sdr.ctrlgrp.wfifocmap */ -#define SDR_CTRLGRP_WFIFOCMAP_ADDRESS 0x5074 -/* Register: sdr.ctrlgrp.cportrdwr */ -#define SDR_CTRLGRP_CPORTRDWR_ADDRESS 0x5078 -/* Register: sdr.ctrlgrp.portcfg */ -#define SDR_CTRLGRP_PORTCFG_ADDRESS 0x507c -/* Register: sdr.ctrlgrp.fpgaportrst */ -#define SDR_CTRLGRP_FPGAPORTRST_ADDRESS 0x5080 -/* Register: sdr.ctrlgrp.fifocfg */ -#define SDR_CTRLGRP_FIFOCFG_ADDRESS 0x5088 -/* Register: sdr.ctrlgrp.mppriority */ -#define SDR_CTRLGRP_MPPRIORITY_ADDRESS 0x50ac -/* Wide Register: sdr.ctrlgrp.mpweight */ -#define SDR_CTRLGRP_MPWEIGHT_ADDRESS 0x50b0 -/* Register: sdr.ctrlgrp.mpweight.mpweight_0 */ -#define 
SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_ADDRESS 0x50b0 -/* Register: sdr.ctrlgrp.mpweight.mpweight_1 */ -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_ADDRESS 0x50b4 -/* Register: sdr.ctrlgrp.mpweight.mpweight_2 */ -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_ADDRESS 0x50b8 -/* Register: sdr.ctrlgrp.mpweight.mpweight_3 */ -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_ADDRESS 0x50bc -/* Register: sdr.ctrlgrp.mppacing.mppacing_0 */ -#define SDR_CTRLGRP_MPPACING_MPPACING_0_ADDRESS 0x50c0 -/* Register: sdr.ctrlgrp.mppacing.mppacing_1 */ -#define SDR_CTRLGRP_MPPACING_MPPACING_1_ADDRESS 0x50c4 -/* Register: sdr.ctrlgrp.mppacing.mppacing_2 */ -#define SDR_CTRLGRP_MPPACING_MPPACING_2_ADDRESS 0x50c8 -/* Register: sdr.ctrlgrp.mppacing.mppacing_3 */ -#define SDR_CTRLGRP_MPPACING_MPPACING_3_ADDRESS 0x50cc -/* Register: sdr.ctrlgrp.mpthresholdrst.mpthresholdrst_0 */ -#define SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_ADDRESS 0x50d0 -/* Register: sdr.ctrlgrp.mpthresholdrst.mpthresholdrst_1 */ -#define SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_ADDRESS 0x50d4 -/* Register: sdr.ctrlgrp.mpthresholdrst.mpthresholdrst_2 */ -#define SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_ADDRESS 0x50d8 -/* Wide Register: sdr.ctrlgrp.phyctrl */ -#define SDR_CTRLGRP_PHYCTRL_ADDRESS 0x5150 -/* Register: sdr.ctrlgrp.phyctrl.phyctrl_0 */ -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDRESS 0x5150 -/* Register: sdr.ctrlgrp.phyctrl.phyctrl_1 */ -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_ADDRESS 0x5154 -/* Register: sdr.ctrlgrp.phyctrl.phyctrl_2 */ -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_ADDRESS 0x5158 -/* Register instance: sdr::ctrlgrp::phyctrl.phyctrl_0 */ -/* Register template referenced: sdr::ctrlgrp::phyctrl::phyctrl_0 */ -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_OFFSET 0x150 -/* Register instance: sdr::ctrlgrp::phyctrl.phyctrl_1 */ -/* Register template referenced: sdr::ctrlgrp::phyctrl::phyctrl_1 */ -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_OFFSET 0x154 -/* Register instance: sdr::ctrlgrp::phyctrl.phyctrl_2 */ -/* Register template referenced: sdr::ctrlgrp::phyctrl::phyctrl_2 */ -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_OFFSET 0x158 - -/* Register template: sdr::ctrlgrp::ctrlcfg */ -#define SDR_CTRLGRP_CTRLCFG_OUTPUTREG_LSB 26 -#define SDR_CTRLGRP_CTRLCFG_OUTPUTREG_MASK 0x04000000 -#define SDR_CTRLGRP_CTRLCFG_BURSTTERMEN_LSB 25 -#define SDR_CTRLGRP_CTRLCFG_BURSTTERMEN_MASK 0x02000000 -#define SDR_CTRLGRP_CTRLCFG_BURSTINTREN_LSB 24 -#define SDR_CTRLGRP_CTRLCFG_BURSTINTREN_MASK 0x01000000 -#define SDR_CTRLGRP_CTRLCFG_NODMPINS_LSB 23 -#define SDR_CTRLGRP_CTRLCFG_NODMPINS_MASK 0x00800000 -#define SDR_CTRLGRP_CTRLCFG_DQSTRKEN_LSB 22 -#define SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK 0x00400000 -#define SDR_CTRLGRP_CTRLCFG_STARVELIMIT_LSB 16 -#define SDR_CTRLGRP_CTRLCFG_STARVELIMIT_MASK 0x003f0000 -#define SDR_CTRLGRP_CTRLCFG_REORDEREN_LSB 15 -#define SDR_CTRLGRP_CTRLCFG_REORDEREN_MASK 0x00008000 -#define SDR_CTRLGRP_CTRLCFG_GENDBE_LSB 14 -#define SDR_CTRLGRP_CTRLCFG_GENDBE_MASK 0x00004000 -#define SDR_CTRLGRP_CTRLCFG_GENSBE_LSB 13 -#define SDR_CTRLGRP_CTRLCFG_GENSBE_MASK 0x00002000 -#define SDR_CTRLGRP_CTRLCFG_CFG_ENABLE_ECC_CODE_OVERWRITES_LSB 12 -#define SDR_CTRLGRP_CTRLCFG_CFG_ENABLE_ECC_CODE_OVERWRITES_MASK 0x00001000 -#define SDR_CTRLGRP_CTRLCFG_ECCCORREN_LSB 11 -#define SDR_CTRLGRP_CTRLCFG_ECCCORREN_MASK 0x00000800 -#define SDR_CTRLGRP_CTRLCFG_ECCEN_LSB 10 -#define SDR_CTRLGRP_CTRLCFG_ECCEN_MASK 0x00000400 -#define SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB 8 -#define SDR_CTRLGRP_CTRLCFG_ADDRORDER_MASK 0x00000300 -#define SDR_CTRLGRP_CTRLCFG_MEMBL_LSB 3 -#define 
SDR_CTRLGRP_CTRLCFG_MEMBL_MASK 0x000000f8 -#define SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB 0 -#define SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK 0x00000007 -/* Register template: sdr::ctrlgrp::dramtiming1 */ -#define SDR_CTRLGRP_DRAMTIMING1_TRFC_LSB 24 -#define SDR_CTRLGRP_DRAMTIMING1_TRFC_MASK 0xff000000 -#define SDR_CTRLGRP_DRAMTIMING1_TFAW_LSB 18 -#define SDR_CTRLGRP_DRAMTIMING1_TFAW_MASK 0x00fc0000 -#define SDR_CTRLGRP_DRAMTIMING1_TRRD_LSB 14 -#define SDR_CTRLGRP_DRAMTIMING1_TRRD_MASK 0x0003c000 -#define SDR_CTRLGRP_DRAMTIMING1_TCL_LSB 9 -#define SDR_CTRLGRP_DRAMTIMING1_TCL_MASK 0x00003e00 -#define SDR_CTRLGRP_DRAMTIMING1_TAL_LSB 4 -#define SDR_CTRLGRP_DRAMTIMING1_TAL_MASK 0x000001f0 -#define SDR_CTRLGRP_DRAMTIMING1_TCWL_LSB 0 -#define SDR_CTRLGRP_DRAMTIMING1_TCWL_MASK 0x0000000f -/* Register template: sdr::ctrlgrp::dramtiming2 */ -#define SDR_CTRLGRP_DRAMTIMING2_TWTR_LSB 25 -#define SDR_CTRLGRP_DRAMTIMING2_TWTR_MASK 0x1e000000 -#define SDR_CTRLGRP_DRAMTIMING2_TWR_LSB 21 -#define SDR_CTRLGRP_DRAMTIMING2_TWR_MASK 0x01e00000 -#define SDR_CTRLGRP_DRAMTIMING2_TRP_LSB 17 -#define SDR_CTRLGRP_DRAMTIMING2_TRP_MASK 0x001e0000 -#define SDR_CTRLGRP_DRAMTIMING2_TRCD_LSB 13 -#define SDR_CTRLGRP_DRAMTIMING2_TRCD_MASK 0x0001e000 -#define SDR_CTRLGRP_DRAMTIMING2_TREFI_LSB 0 -#define SDR_CTRLGRP_DRAMTIMING2_TREFI_MASK 0x00001fff -/* Register template: sdr::ctrlgrp::dramtiming3 */ -#define SDR_CTRLGRP_DRAMTIMING3_TCCD_LSB 19 -#define SDR_CTRLGRP_DRAMTIMING3_TCCD_MASK 0x00780000 -#define SDR_CTRLGRP_DRAMTIMING3_TMRD_LSB 15 -#define SDR_CTRLGRP_DRAMTIMING3_TMRD_MASK 0x00078000 -#define SDR_CTRLGRP_DRAMTIMING3_TRC_LSB 9 -#define SDR_CTRLGRP_DRAMTIMING3_TRC_MASK 0x00007e00 -#define SDR_CTRLGRP_DRAMTIMING3_TRAS_LSB 4 -#define SDR_CTRLGRP_DRAMTIMING3_TRAS_MASK 0x000001f0 -#define SDR_CTRLGRP_DRAMTIMING3_TRTP_LSB 0 -#define SDR_CTRLGRP_DRAMTIMING3_TRTP_MASK 0x0000000f -/* Register template: sdr::ctrlgrp::dramtiming4 */ -#define SDR_CTRLGRP_DRAMTIMING4_MINPWRSAVECYCLES_LSB 20 -#define SDR_CTRLGRP_DRAMTIMING4_MINPWRSAVECYCLES_MASK 0x00f00000 -#define SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_LSB 10 -#define SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_MASK 0x000ffc00 -#define SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_LSB 0 -#define SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_MASK 0x000003ff -/* Register template: sdr::ctrlgrp::lowpwrtiming */ -#define SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_LSB 16 -#define SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_MASK 0x000f0000 -#define SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_LSB 0 -#define SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_MASK 0x0000ffff -/* Register template: sdr::ctrlgrp::dramaddrw */ -#define SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB 13 -#define SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK 0x0000e000 -#define SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB 10 -#define SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK 0x00001c00 -#define SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB 5 -#define SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK 0x000003e0 -#define SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB 0 -#define SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK 0x0000001f -/* Register template: sdr::ctrlgrp::dramifwidth */ -#define SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_LSB 0 -#define SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_MASK 0x000000ff -/* Register template: sdr::ctrlgrp::dramdevwidth */ -#define SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_LSB 0 -#define SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_MASK 0x0000000f -/* Register template: sdr::ctrlgrp::dramintr */ -#define SDR_CTRLGRP_DRAMINTR_INTRCLR_LSB 4 -#define SDR_CTRLGRP_DRAMINTR_INTRCLR_MASK 0x00000010 -#define SDR_CTRLGRP_DRAMINTR_CORRDROPMASK_LSB 3 -#define 
SDR_CTRLGRP_DRAMINTR_CORRDROPMASK_MASK 0x00000008 -#define SDR_CTRLGRP_DRAMINTR_DBEMASK_LSB 2 -#define SDR_CTRLGRP_DRAMINTR_DBEMASK_MASK 0x00000004 -#define SDR_CTRLGRP_DRAMINTR_SBEMASK_LSB 1 -#define SDR_CTRLGRP_DRAMINTR_SBEMASK_MASK 0x00000002 -#define SDR_CTRLGRP_DRAMINTR_INTREN_LSB 0 -#define SDR_CTRLGRP_DRAMINTR_INTREN_MASK 0x00000001 -/* Register template: sdr::ctrlgrp::sbecount */ -#define SDR_CTRLGRP_SBECOUNT_COUNT_LSB 0 -#define SDR_CTRLGRP_SBECOUNT_COUNT_MASK 0x000000ff -/* Register template: sdr::ctrlgrp::dbecount */ -#define SDR_CTRLGRP_DBECOUNT_COUNT_LSB 0 -#define SDR_CTRLGRP_DBECOUNT_COUNT_MASK 0x000000ff -/* Register template: sdr::ctrlgrp::staticcfg */ -#define SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB 3 -#define SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK 0x00000008 -#define SDR_CTRLGRP_STATICCFG_USEECCASDATA_LSB 2 -#define SDR_CTRLGRP_STATICCFG_USEECCASDATA_MASK 0x00000004 -#define SDR_CTRLGRP_STATICCFG_MEMBL_LSB 0 -#define SDR_CTRLGRP_STATICCFG_MEMBL_MASK 0x00000003 -/* Register template: sdr::ctrlgrp::ctrlwidth */ -#define SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_LSB 0 -#define SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_MASK 0x00000003 -/* Register template: sdr::ctrlgrp::cportwidth */ -#define SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_LSB 0 -#define SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_MASK 0x000fffff -/* Register template: sdr::ctrlgrp::cportwmap */ -#define SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_LSB 0 -#define SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_MASK 0x3fffffff -/* Register template: sdr::ctrlgrp::cportrmap */ -#define SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_LSB 0 -#define SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_MASK 0x3fffffff -/* Register template: sdr::ctrlgrp::rfifocmap */ -#define SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_LSB 0 -#define SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_MASK 0x00ffffff -/* Register template: sdr::ctrlgrp::wfifocmap */ -#define SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_LSB 0 -#define SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_MASK 0x00ffffff -/* Register template: sdr::ctrlgrp::cportrdwr */ -#define SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_LSB 0 -#define SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_MASK 0x000fffff -/* Register template: sdr::ctrlgrp::portcfg */ -#define SDR_CTRLGRP_PORTCFG_AUTOPCHEN_LSB 10 -#define SDR_CTRLGRP_PORTCFG_AUTOPCHEN_MASK 0x000ffc00 -#define SDR_CTRLGRP_PORTCFG_PORTPROTOCOL_LSB 0 -#define SDR_CTRLGRP_PORTCFG_PORTPROTOCOL_MASK 0x000003ff -/* Register template: sdr::ctrlgrp::fifocfg */ -#define SDR_CTRLGRP_FIFOCFG_INCSYNC_LSB 10 -#define SDR_CTRLGRP_FIFOCFG_INCSYNC_MASK 0x00000400 -#define SDR_CTRLGRP_FIFOCFG_SYNCMODE_LSB 0 -#define SDR_CTRLGRP_FIFOCFG_SYNCMODE_MASK 0x000003ff -/* Register template: sdr::ctrlgrp::mppriority */ -#define SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_LSB 0 -#define SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_MASK 0x3fffffff -/* Wide Register template: sdr::ctrlgrp::mpweight */ -/* Register template: sdr::ctrlgrp::mpweight::mpweight_0 */ -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_LSB 0 -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_MASK 0xffffffff -/* Register template: sdr::ctrlgrp::mpweight::mpweight_1 */ -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_LSB 18 -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_MASK 0xfffc0000 -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_LSB 0 -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_MASK 0x0003ffff -/* Register template: sdr::ctrlgrp::mpweight::mpweight_2 */ -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_LSB 0 -#define 
SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_MASK 0xffffffff -/* Register template: sdr::ctrlgrp::mpweight::mpweight_3 */ -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_LSB 0 -#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_MASK 0x0003ffff -/* Wide Register template: sdr::ctrlgrp::mppacing */ -/* Register template: sdr::ctrlgrp::mppacing::mppacing_0 */ -#define SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_LSB 0 -#define SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_MASK 0xffffffff -/* Register template: sdr::ctrlgrp::mppacing::mppacing_1 */ -#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_LSB 28 -#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_MASK 0xf0000000 -#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_LSB 0 -#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_MASK 0x0fffffff -/* Register template: sdr::ctrlgrp::mppacing::mppacing_2 */ -#define SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_LSB 0 -#define SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_MASK 0xffffffff -/* Register template: sdr::ctrlgrp::mppacing::mppacing_3 */ -#define SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_LSB 0 -#define SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_MASK 0x00ffffff -/* Wide Register template: sdr::ctrlgrp::mpthresholdrst */ -/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_0 */ -#define \ -SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB 0 -#define \ -SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_MASK \ -0xffffffff -/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_1 */ -#define \ -SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB 0 -#define \ -SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_MASK \ -0xffffffff -/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_2 */ -#define \ -SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB 0 -#define \ -SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_MASK \ -0x0000ffff -/* Register template: sdr::ctrlgrp::remappriority */ -#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_LSB 0 -#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_MASK 0x000000ff -/* Wide Register template: sdr::ctrlgrp::phyctrl */ -/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_0 */ -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_LSB 12 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH 20 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_MASK 0xfffff000 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(x) \ - (((x) << 12) & 0xfffff000) -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_LSB 10 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_MASK 0x00000c00 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(x) \ - (((x) << 10) & 0x00000c00) -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_LSB 9 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_MASK 0x00000200 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(x) \ - (((x) << 9) & 0x00000200) -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_LSB 8 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_MASK 0x00000100 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(x) \ - (((x) << 8) & 0x00000100) -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_LSB 6 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_MASK 0x000000c0 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(x) \ - (((x) << 6) & 0x000000c0) -#define 
SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_LSB 4 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_MASK 0x00000030 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(x) \ - (((x) << 4) & 0x00000030) -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_LSB 2 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_MASK 0x0000000c -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(x) \ - (((x) << 2) & 0x0000000c) -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_LSB 0 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_MASK 0x00000003 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(x) \ - (((x) << 0) & 0x00000003) -/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_1 */ -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_LSB 12 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH 20 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_MASK 0xfffff000 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(x) \ - (((x) << 12) & 0xfffff000) -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_LSB 0 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_MASK 0x00000fff -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(x) \ - (((x) << 0) & 0x00000fff) -/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_2 */ -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_LSB 0 -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_MASK 0x00000fff -#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(x) \ - (((x) << 0) & 0x00000fff) -/* Register template: sdr::ctrlgrp::dramodt */ -#define SDR_CTRLGRP_DRAMODT_READ_LSB 4 -#define SDR_CTRLGRP_DRAMODT_READ_MASK 0x000000f0 -#define SDR_CTRLGRP_DRAMODT_WRITE_LSB 0 -#define SDR_CTRLGRP_DRAMODT_WRITE_MASK 0x0000000f -/* Register template: sdr::ctrlgrp::fpgaportrst */ -#define SDR_CTRLGRP_FPGAPORTRST_READ_PORT_0_LSB 0 -#define SDR_CTRLGRP_FPGAPORTRST_WRITE_PORT_0_LSB 4 -#define SDR_CTRLGRP_FPGAPORTRST_COMMAND_PORT_0_LSB 8 -/* Field instance: sdr::ctrlgrp::dramsts */ -#define SDR_CTRLGRP_DRAMSTS_DBEERR_MASK 0x00000008 -#define SDR_CTRLGRP_DRAMSTS_SBEERR_MASK 0x00000004 - -#endif /* _SDRAM_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/sdram_config.h b/arch/arm/mach-socfpga/include/mach/sdram_config.h deleted file mode 100644 index 2af797a920..0000000000 --- a/arch/arm/mach-socfpga/include/mach/sdram_config.h +++ /dev/null @@ -1,161 +0,0 @@ -#ifndef __MACH_SDRAM_CONFIG_H -#define __MACH_SDRAM_CONFIG_H - -#include -#include -#include - -static inline void sdram_write(unsigned register_offset, unsigned val) -{ - debug("0x%08x Data 0x%08x\n", - (CYCLONE5_SDR_ADDRESS + register_offset), val); - /* Write to register */ - writel(val, (CYCLONE5_SDR_ADDRESS + register_offset)); -} - -static inline void socfpga_sdram_mmr_init(void) -{ - uint32_t val; - - val = CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMTYPE << SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB | - CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMBL << SDR_CTRLGRP_CTRLCFG_MEMBL_LSB | - CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER << SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB | - CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCEN << SDR_CTRLGRP_CTRLCFG_ECCEN_LSB | - CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCCORREN << SDR_CTRLGRP_CTRLCFG_ECCCORREN_LSB | - CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_REORDEREN << SDR_CTRLGRP_CTRLCFG_REORDEREN_LSB | - CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_STARVELIMIT << SDR_CTRLGRP_CTRLCFG_STARVELIMIT_LSB | - CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_DQSTRKEN << SDR_CTRLGRP_CTRLCFG_DQSTRKEN_LSB | - CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_NODMPINS << SDR_CTRLGRP_CTRLCFG_NODMPINS_LSB; - 
sdram_write(SDR_CTRLGRP_CTRLCFG_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCWL << SDR_CTRLGRP_DRAMTIMING1_TCWL_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_AL << SDR_CTRLGRP_DRAMTIMING1_TAL_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCL << SDR_CTRLGRP_DRAMTIMING1_TCL_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD << SDR_CTRLGRP_DRAMTIMING1_TRRD_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW << SDR_CTRLGRP_DRAMTIMING1_TFAW_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC << SDR_CTRLGRP_DRAMTIMING1_TRFC_LSB; - sdram_write(SDR_CTRLGRP_DRAMTIMING1_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI << SDR_CTRLGRP_DRAMTIMING2_TREFI_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD << SDR_CTRLGRP_DRAMTIMING2_TRCD_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP << SDR_CTRLGRP_DRAMTIMING2_TRP_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR << SDR_CTRLGRP_DRAMTIMING2_TWR_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR << SDR_CTRLGRP_DRAMTIMING2_TWTR_LSB; - sdram_write(SDR_CTRLGRP_DRAMTIMING2_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP << SDR_CTRLGRP_DRAMTIMING3_TRTP_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS << SDR_CTRLGRP_DRAMTIMING3_TRAS_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC << SDR_CTRLGRP_DRAMTIMING3_TRC_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TMRD << SDR_CTRLGRP_DRAMTIMING3_TMRD_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TCCD << SDR_CTRLGRP_DRAMTIMING3_TCCD_LSB; - sdram_write(SDR_CTRLGRP_DRAMTIMING3_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_SELFRFSHEXIT << SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_PWRDOWNEXIT << SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_LSB; - sdram_write(SDR_CTRLGRP_DRAMTIMING4_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_AUTOPDCYCLES << SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_LSB; - sdram_write(SDR_CTRLGRP_LOWPWRTIMING_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS << SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS << SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS << SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS << SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB; - sdram_write(SDR_CTRLGRP_DRAMADDRW_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_DRAMIFWIDTH_IFWIDTH << SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_LSB; - sdram_write(SDR_CTRLGRP_DRAMIFWIDTH_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_DRAMDEVWIDTH_DEVWIDTH << SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_LSB; - sdram_write(SDR_CTRLGRP_DRAMDEVWIDTH_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_DRAMINTR_INTREN << SDR_CTRLGRP_DRAMINTR_INTREN_LSB; - sdram_write(SDR_CTRLGRP_DRAMINTR_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_STATICCFG_MEMBL << SDR_CTRLGRP_STATICCFG_MEMBL_LSB | - CONFIG_HPS_SDR_CTRLCFG_STATICCFG_USEECCASDATA << SDR_CTRLGRP_STATICCFG_USEECCASDATA_LSB; - sdram_write(SDR_CTRLGRP_STATICCFG_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_CTRLWIDTH_CTRLWIDTH << SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_LSB; - sdram_write(SDR_CTRLGRP_CTRLWIDTH_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_PORTCFG_AUTOPCHEN << SDR_CTRLGRP_PORTCFG_AUTOPCHEN_LSB; - sdram_write(SDR_CTRLGRP_PORTCFG_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_SYNCMODE << SDR_CTRLGRP_FIFOCFG_SYNCMODE_LSB | - CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_INCSYNC << SDR_CTRLGRP_FIFOCFG_INCSYNC_LSB; - sdram_write(SDR_CTRLGRP_FIFOCFG_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPPRIORITY_USERPRIORITY << 
SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_LSB; - sdram_write(SDR_CTRLGRP_MPPRIORITY_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_0_STATICWEIGHT_31_0 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_LSB; - sdram_write(SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_STATICWEIGHT_49_32 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_LSB | - CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_SUMOFWEIGHT_13_0 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_LSB; - sdram_write(SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_2_SUMOFWEIGHT_45_14 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_LSB; - sdram_write(SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_3_SUMOFWEIGHT_63_46 << SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_LSB; - sdram_write(SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPPACING_0_THRESHOLD1_31_0 << SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_LSB; - sdram_write(SDR_CTRLGRP_MPPACING_MPPACING_0_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD1_59_32 << SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_LSB | - CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0 << - SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_LSB; - sdram_write(SDR_CTRLGRP_MPPACING_MPPACING_1_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPPACING_2_THRESHOLD2_35_4 << SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_LSB; - sdram_write(SDR_CTRLGRP_MPPACING_MPPACING_2_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPPACING_3_THRESHOLD2_59_36 << SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_LSB; - sdram_write(SDR_CTRLGRP_MPPACING_MPPACING_3_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0 << - SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB; - sdram_write(SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32 << - SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB; - sdram_write(SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64 << - SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB; - sdram_write(SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_PHYCTRL_PHYCTRL_0; - sdram_write(SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_CPORTWIDTH_CPORTWIDTH << SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_LSB; - sdram_write(SDR_CTRLGRP_CPORTWIDTH_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_CPORTWMAP_CPORTWMAP << SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_LSB; - sdram_write(SDR_CTRLGRP_CPORTWMAP_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_CPORTRMAP_CPORTRMAP << SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_LSB; - sdram_write(SDR_CTRLGRP_CPORTRMAP_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_RFIFOCMAP_RFIFOCMAP << SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_LSB; - sdram_write(SDR_CTRLGRP_RFIFOCMAP_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_WFIFOCMAP_WFIFOCMAP << SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_LSB; - sdram_write(SDR_CTRLGRP_WFIFOCMAP_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_CPORTRDWR_CPORTRDWR << SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_LSB; - sdram_write(SDR_CTRLGRP_CPORTRDWR_ADDRESS, val); - - val = CONFIG_HPS_SDR_CTRLCFG_DRAMODT_READ << SDR_CTRLGRP_DRAMODT_READ_LSB | - CONFIG_HPS_SDR_CTRLCFG_DRAMODT_WRITE 
<< SDR_CTRLGRP_DRAMODT_WRITE_LSB; - sdram_write(SDR_CTRLGRP_DRAMODT_ADDRESS, val); - - val = readl(CYCLONE5_SDR_ADDRESS + SDR_CTRLGRP_STATICCFG_ADDRESS); - val &= ~(SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK); - val |= 1 << SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB; - writel(val, (CYCLONE5_SDR_ADDRESS + SDR_CTRLGRP_STATICCFG_ADDRESS)); -} -#endif /* __MACH_SDRAM_CONFIG_H */ diff --git a/arch/arm/mach-socfpga/include/mach/sdram_io.h b/arch/arm/mach-socfpga/include/mach/sdram_io.h old mode 100755 new mode 100644 index 62698000f6..ef87bdaf63 --- a/arch/arm/mach-socfpga/include/mach/sdram_io.h +++ b/arch/arm/mach-socfpga/include/mach/sdram_io.h @@ -26,7 +26,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include +#include #define MGR_SELECT_MASK 0xf8000 diff --git a/arch/arm/mach-socfpga/include/mach/sequencer.c b/arch/arm/mach-socfpga/include/mach/sequencer.c deleted file mode 100644 index d2338e6406..0000000000 --- a/arch/arm/mach-socfpga/include/mach/sequencer.c +++ /dev/null @@ -1,5243 +0,0 @@ -/* -* Copyright Altera Corporation (C) 2012-2014. All rights reserved -* -* SPDX-License-Identifier: BSD-3-Clause -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above copyright -* notice, this list of conditions and the following disclaimer in the -* documentation and/or other materials provided with the distribution. -* * Neither the name of Altera Corporation nor the -* names of its contributors may be used to endorse or promote products -* derived from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -* DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY -* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#include "sequencer_defines.h" - -#include "system.h" -#include "sdram_io.h" -#include "sequencer.h" -#include "tclrpt.h" - -/****************************************************************************** - ****************************************************************************** - ** NOTE: Special Rules for Globale Variables ** - ** ** - ** All global variables that are explicitly initialized (including ** - ** explicitly initialized to zero), are only initialized once, during ** - ** configuration time, and not again on reset. This means that they ** - ** preserve their current contents across resets, which is needed for some ** - ** special cases involving communication with external modules. In ** - ** addition, this avoids paying the price to have the memory initialized, ** - ** even for zeroed data, provided it is explicitly set to zero in the code, ** - ** and doesn't rely on implicit initialization. 
** - ****************************************************************************** - ******************************************************************************/ - -#ifndef ARMCOMPILER - -// Temporary workaround to place the initial stack pointer at a safe offset from end -#define STRINGIFY(s) STRINGIFY_STR(s) -#define STRINGIFY_STR(s) #s -asm(".global __alt_stack_pointer"); -asm("__alt_stack_pointer = " STRINGIFY(STACK_POINTER)); -#endif - -#include - -#define NEWVERSION_RDDESKEW 1 -#define NEWVERSION_WRDESKEW 1 -#define NEWVERSION_GW 1 -#define NEWVERSION_WL 1 -#define NEWVERSION_DQSEN 1 - -// Just to make the debugging code more uniform - -#define HALF_RATE_MODE 0 - -#define QUARTER_RATE_MODE 0 -#define DELTA_D 1 - -// case:56390 -// VFIFO_CONTROL_WIDTH_PER_DQS is the number of VFIFOs actually instantiated per DQS. This is always one except: -// AV QDRII where it is 2 for x18 and x18w2, and 4 for x36 and x36w2 -// RLDRAMII x36 and x36w2 where it is 2. -// In 12.0sp1 we set this to 4 for all of the special cases above to keep it simple. -// In 12.0sp2 or 12.1 this should get moved to generation and unified with the same constant used in the phy mgr - -#define VFIFO_CONTROL_WIDTH_PER_DQS 1 - -// In order to reduce ROM size, most of the selectable calibration steps are -// decided at compile time based on the user's calibration mode selection, -// as captured by the STATIC_CALIB_STEPS selection below. -// -// However, to support simulation-time selection of fast simulation mode, where -// we skip everything except the bare minimum, we need a few of the steps to -// be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the -// check, which is based on the rtl-supplied value, or we dynamically compute the -// value to use based on the dynamically-chosen calibration mode - -#define BTFLD_FMT "%lx" - -// For HPS running on actual hardware - -#define DLEVEL 0 -#ifdef HPS_HW_SERIAL_SUPPORT -// space around comma is required for varargs macro to remove comma if args is empty -#define DPRINT(level, fmt, args...) if (DLEVEL >= (level)) printf("SEQ.C: " fmt "\n" , ## args) -#define IPRINT(fmt, args...) printf("SEQ.C: " fmt "\n" , ## args) -#else -#define DPRINT(level, fmt, args...) -#define IPRINT(fmt, args...) -#endif -#define BFM_GBL_SET(field,value) -#define BFM_GBL_GET(field) ((long unsigned int)0) -#define BFM_STAGE(stage) -#define BFM_INC_VFIFO -#define COV(label) - -#define TRACE_FUNC(fmt, args...) 
DPRINT(1, "%s[%d]: " fmt, __func__, __LINE__ , ## args) - -#define DYNAMIC_CALIB_STEPS (dyn_calib_steps) - -#define STATIC_IN_RTL_SIM 0 - -#define STATIC_SKIP_DELAY_LOOPS 0 - -#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | STATIC_SKIP_DELAY_LOOPS) - -// calibration steps requested by the rtl -static uint16_t dyn_calib_steps = 0; - -// To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option -// instead of static, we use boolean logic to select between -// non-skip and skip values -// -// The mask is set to include all bits when not-skipping, but is -// zero when skipping - -static uint16_t skip_delay_mask = 0; // mask off bits when skipping/not-skipping - -#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \ - ((non_skip_value) & skip_delay_mask) - -// TODO: The skip group strategy is completely missing - -static gbl_t *gbl = 0; -static param_t *param = 0; - -static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn, uint32_t write_group, - uint32_t use_dm, uint32_t all_correct, - t_btfld * bit_chk, uint32_t all_ranks); - -// This (TEST_SIZE) is used to test handling of large roms, to make -// sure we are sizing things correctly -// Note, the initialized data takes up twice the space in rom, since -// there needs to be a copy with the initial value and a copy that is -// written too, since on soft-reset, it needs to have the initial values -// without reloading the memory from external sources - -// #define TEST_SIZE (6*1024) - -#ifdef TEST_SIZE - -#define PRE_POST_TEST_SIZE 3 - -static unsigned int pre_test_size_mem[PRE_POST_TEST_SIZE] = { 1, 2, 3 }; - -static unsigned int test_size_mem[TEST_SIZE / sizeof(unsigned int)] = { 100, 200, 300 }; - -static unsigned int post_test_size_mem[PRE_POST_TEST_SIZE] = { 10, 20, 30 }; - -static void write_test_mem(void) -{ - int i; - - for (i = 0; i < PRE_POST_TEST_SIZE; i++) { - pre_test_size_mem[i] = (i + 1) * 10; - post_test_size_mem[i] = (i + 1); - } - - for (i = 0; i < sizeof(test_size_mem) / sizeof(unsigned int); i++) { - test_size_mem[i] = i; - } - -} - -static int check_test_mem(int start) -{ - int i; - - for (i = 0; i < PRE_POST_TEST_SIZE; i++) { - if (start) { - if (pre_test_size_mem[i] != (i + 1)) { - return 0; - } - if (post_test_size_mem[i] != (i + 1) * 10) { - return 0; - } - } else { - if (pre_test_size_mem[i] != (i + 1) * 10) { - return 0; - } - if (post_test_size_mem[i] != (i + 1)) { - return 0; - } - } - } - - for (i = 0; i < sizeof(test_size_mem) / sizeof(unsigned int); i++) { - if (start) { - if (i < 3) { - if (test_size_mem[i] != (i + 1) * 100) { - return 0; - } - } else { - if (test_size_mem[i] != 0) { - return 0; - } - } - } else { - if (test_size_mem[i] != i) { - return 0; - } - } - } - - return 1; -} - -#endif // TEST_SIZE - -static void set_failing_group_stage(uint32_t group, uint32_t stage, uint32_t substage) -{ - if (gbl->error_stage == CAL_STAGE_NIL) { - gbl->error_substage = substage; - gbl->error_stage = stage; - gbl->error_group = group; - - } - -} - -static inline void reg_file_set_group(uint32_t set_group) -{ - // Read the current group and stage - uint32_t cur_stage_group = IORD_32DIRECT(REG_FILE_CUR_STAGE, 0); - - // Clear the group - cur_stage_group &= 0x0000FFFF; - - // Set the group - cur_stage_group |= (set_group << 16); - - // Write the data back - IOWR_32DIRECT(REG_FILE_CUR_STAGE, 0, cur_stage_group); -} - -static inline void reg_file_set_stage(uint32_t set_stage) -{ - // Read the current group and stage - uint32_t cur_stage_group = IORD_32DIRECT(REG_FILE_CUR_STAGE, 0); - - 
// Clear the stage and substage - cur_stage_group &= 0xFFFF0000; - - // Set the stage - cur_stage_group |= (set_stage & 0x000000FF); - - // Write the data back - IOWR_32DIRECT(REG_FILE_CUR_STAGE, 0, cur_stage_group); -} - -static inline void reg_file_set_sub_stage(uint32_t set_sub_stage) -{ - // Read the current group and stage - uint32_t cur_stage_group = IORD_32DIRECT(REG_FILE_CUR_STAGE, 0); - - // Clear the substage - cur_stage_group &= 0xFFFF00FF; - - // Set the sub stage - cur_stage_group |= ((set_sub_stage << 8) & 0x0000FF00); - - // Write the data back - IOWR_32DIRECT(REG_FILE_CUR_STAGE, 0, cur_stage_group); -} - -static inline uint32_t is_write_group_enabled_for_dm(uint32_t write_group) -{ - return 1; -} - -static inline void select_curr_shadow_reg_using_rank(uint32_t rank) -{ -} - -static void initialize(void) -{ - IOWR_32DIRECT(PHY_MGR_MUX_SEL, 0, 0x3); - - //USER memory clock is not stable we begin initialization - - IOWR_32DIRECT(PHY_MGR_RESET_MEM_STBL, 0, 0); - - //USER calibration status all set to zero - - IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, 0); - IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, 0); - - if (((DYNAMIC_CALIB_STEPS) & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) { - param->read_correct_mask_vg = - ((t_btfld) 1 << - (RW_MGR_MEM_DQ_PER_READ_DQS / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1; - param->write_correct_mask_vg = - ((t_btfld) 1 << - (RW_MGR_MEM_DQ_PER_READ_DQS / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1; - param->read_correct_mask = ((t_btfld) 1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1; - param->write_correct_mask = ((t_btfld) 1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1; - param->dm_correct_mask = - ((t_btfld) 1 << (RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH)) - 1; - } -} - -static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode) -{ - uint32_t odt_mask_0 = 0; - uint32_t odt_mask_1 = 0; - uint32_t cs_and_odt_mask; - - if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) { - - if (LRDIMM) { - // USER LRDIMMs have two cases to consider: single-slot and dual-slot. - // USER In single-slot, assert ODT for write only. - // USER In dual-slot, assert ODT for both slots for write, - // USER and on the opposite slot only for reads. - // USER - // USER Further complicating this is that both DIMMs have either 1 or 2 ODT - // USER inputs, which do the same thing (only one is actually required). - if ((RW_MGR_MEM_CHIP_SELECT_WIDTH / RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM) == 1) { - // USER Single-slot case - if (RW_MGR_MEM_ODT_WIDTH == 1) { - // USER Read = 0, Write = 1 - odt_mask_0 = 0x0; - odt_mask_1 = 0x1; - } else if (RW_MGR_MEM_ODT_WIDTH == 2) { - // USER Read = 00, Write = 11 - odt_mask_0 = 0x0; - odt_mask_1 = 0x3; - } - } else if ((RW_MGR_MEM_CHIP_SELECT_WIDTH / RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM) - == 2) { - // USER Dual-slot case - if (RW_MGR_MEM_ODT_WIDTH == 2) { - // USER Read: asserted for opposite slot, Write: asserted for both - odt_mask_0 = (rank < 2) ? 0x2 : 0x1; - odt_mask_1 = 0x3; - } else if (RW_MGR_MEM_ODT_WIDTH == 4) { - // USER Read: asserted for opposite slot, Write: asserted for both - odt_mask_0 = (rank < 2) ? 
0xC : 0x3; - odt_mask_1 = 0xF; - } - } - } else if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) { - //USER 1 Rank - //USER Read: ODT = 0 - //USER Write: ODT = 1 - odt_mask_0 = 0x0; - odt_mask_1 = 0x1; - } else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) { - //USER 2 Ranks - if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1 || - (RDIMM && RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 2 - && RW_MGR_MEM_CHIP_SELECT_WIDTH == 4)) { - //USER - Dual-Slot , Single-Rank (1 chip-select per DIMM) - //USER OR - //USER - RDIMM, 4 total CS (2 CS per DIMM) means 2 DIMM - //USER Since MEM_NUMBER_OF_RANKS is 2 they are both single rank - //USER with 2 CS each (special for RDIMM) - //USER Read: Turn on ODT on the opposite rank - //USER Write: Turn on ODT on all ranks - odt_mask_0 = 0x3 & ~(1 << rank); - odt_mask_1 = 0x3; - } else { - //USER - Single-Slot , Dual-rank DIMMs (2 chip-selects per DIMM) - //USER Read: Turn on ODT off on all ranks - //USER Write: Turn on ODT on active rank - odt_mask_0 = 0x0; - odt_mask_1 = 0x3 & (1 << rank); - } - } else { - //USER 4 Ranks - //USER Read: - //USER ----------+-----------------------+ - //USER | | - //USER | ODT | - //USER Read From +-----------------------+ - //USER Rank | 3 | 2 | 1 | 0 | - //USER ----------+-----+-----+-----+-----+ - //USER 0 | 0 | 1 | 0 | 0 | - //USER 1 | 1 | 0 | 0 | 0 | - //USER 2 | 0 | 0 | 0 | 1 | - //USER 3 | 0 | 0 | 1 | 0 | - //USER ----------+-----+-----+-----+-----+ - //USER - //USER Write: - //USER ----------+-----------------------+ - //USER | | - //USER | ODT | - //USER Write To +-----------------------+ - //USER Rank | 3 | 2 | 1 | 0 | - //USER ----------+-----+-----+-----+-----+ - //USER 0 | 0 | 1 | 0 | 1 | - //USER 1 | 1 | 0 | 1 | 0 | - //USER 2 | 0 | 1 | 0 | 1 | - //USER 3 | 1 | 0 | 1 | 0 | - //USER ----------+-----+-----+-----+-----+ - switch (rank) { - case 0: - odt_mask_0 = 0x4; - odt_mask_1 = 0x5; - break; - case 1: - odt_mask_0 = 0x8; - odt_mask_1 = 0xA; - break; - case 2: - odt_mask_0 = 0x1; - odt_mask_1 = 0x5; - break; - case 3: - odt_mask_0 = 0x2; - odt_mask_1 = 0xA; - break; - } - } - } else { - odt_mask_0 = 0x0; - odt_mask_1 = 0x0; - } - - if (RDIMM && RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 2 - && RW_MGR_MEM_CHIP_SELECT_WIDTH == 4 && RW_MGR_MEM_NUMBER_OF_RANKS == 2) { - //USER See RDIMM special case above - cs_and_odt_mask = - (0xFF & ~(1 << (2 * rank))) | - ((0xFF & odt_mask_0) << 8) | ((0xFF & odt_mask_1) << 16); - } else if (LRDIMM) { - } else { - cs_and_odt_mask = - (0xFF & ~(1 << rank)) | - ((0xFF & odt_mask_0) << 8) | ((0xFF & odt_mask_1) << 16); - } - - IOWR_32DIRECT(RW_MGR_SET_CS_AND_ODT_MASK, 0, cs_and_odt_mask); -} - -//USER Given a rank, select the set of shadow registers that is responsible for the -//USER delays of such rank, so that subsequent SCC updates will go to those shadow -//USER registers. 
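// (Editor's illustrative note on set_rank_and_odt_mask above -- not part of the
//  original sequencer.)  The word written to RW_MGR_SET_CS_AND_ODT_MASK packs
//  three byte-wide fields:
//      bits [ 7: 0]  chip-select mask (the selected rank's bit is cleared)
//      bits [15: 8]  odt_mask_0  (ODT setting used for reads)
//      bits [23:16]  odt_mask_1  (ODT setting used for writes)
//  Worked example, dual-slot / single-rank-per-DIMM case with rank == 1:
//      odt_mask_0 = 0x3 & ~(1 << 1) = 0x1   (read: ODT on the opposite rank)
//      odt_mask_1 = 0x3                     (write: ODT on both ranks)
//      cs_and_odt_mask = (0xFF & ~(1 << 1)) | (0x1 << 8) | (0x3 << 16)
//                      = 0xFD | 0x100 | 0x30000 = 0x0301FD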
-static void select_shadow_regs_for_update(uint32_t rank, uint32_t group, - uint32_t update_scan_chains) -{ -} - -static void scc_mgr_initialize(void) -{ - // Clear register file for HPS - // 16 (2^4) is the size of the full register file in the scc mgr: - // RFILE_DEPTH = log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS + MEM_IF_READ_DQS_WIDTH - 1) + 1; - uint32_t i; - for (i = 0; i < 16; i++) { - DPRINT(1, "Clearing SCC RFILE index %lu", i); - IOWR_32DIRECT(SCC_MGR_HHP_RFILE, i << 2, 0); - } -} - -static inline void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay) -{ - WRITE_SCC_DQS_IN_DELAY(read_group, delay); - -} - -static inline void scc_mgr_set_dqs_io_in_delay(uint32_t write_group, uint32_t delay) -{ - WRITE_SCC_DQS_IO_IN_DELAY(delay); - -} - -static inline void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase) -{ - WRITE_SCC_DQS_EN_PHASE(read_group, phase); - -} - -static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group, uint32_t phase) -{ - uint32_t r; - uint32_t update_scan_chains; - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - //USER although the h/w doesn't support different phases per shadow register, - //USER for simplicity our scc manager modeling keeps different phase settings per - //USER shadow reg, and it's important for us to keep them in sync to match h/w. - //USER for efficiency, the scan chain update should occur only once to sr0. - update_scan_chains = (r == 0) ? 1 : 0; - - select_shadow_regs_for_update(r, read_group, update_scan_chains); - scc_mgr_set_dqs_en_phase(read_group, phase); - - if (update_scan_chains) { - IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, read_group); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } - } -} - -static inline void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase) -{ - WRITE_SCC_DQDQS_OUT_PHASE(write_group, phase); - -} - -static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group, uint32_t phase) -{ - uint32_t r; - uint32_t update_scan_chains; - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - //USER although the h/w doesn't support different phases per shadow register, - //USER for simplicity our scc manager modeling keeps different phase settings per - //USER shadow reg, and it's important for us to keep them in sync to match h/w. - //USER for efficiency, the scan chain update should occur only once to sr0. - update_scan_chains = (r == 0) ? 1 : 0; - - select_shadow_regs_for_update(r, write_group, update_scan_chains); - scc_mgr_set_dqdqs_output_phase(write_group, phase); - - if (update_scan_chains) { - IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, write_group); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } - } -} - -static inline void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay) -{ - WRITE_SCC_DQS_EN_DELAY(read_group, delay); - -} - -static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group, uint32_t delay) -{ - uint32_t r; - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - - select_shadow_regs_for_update(r, read_group, 0); - - scc_mgr_set_dqs_en_delay(read_group, delay); - - IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, read_group); - - // In shadow register mode, the T11 settings are stored in registers - // in the core, which are updated by the DQS_ENA signals. Not issuing - // the SCC_MGR_UPD command allows us to save lots of rank switching - // overhead, by calling select_shadow_regs_for_update with update_scan_chains - // set to 0. 
- IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } -} - -static void scc_mgr_set_oct_out1_delay(uint32_t write_group, uint32_t delay) -{ - uint32_t read_group; - - // Load the setting in the SCC manager - // Although OCT affects only write data, the OCT delay is controlled by the DQS logic block - // which is instantiated once per read group. For protocols where a write group consists - // of multiple read groups, the setting must be set multiple times. - for (read_group = - write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; - read_group < - (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; - ++read_group) { - - WRITE_SCC_OCT_OUT1_DELAY(read_group, delay); - } - -} - -static void scc_mgr_set_oct_out2_delay(uint32_t write_group, uint32_t delay) -{ - uint32_t read_group; - - // Load the setting in the SCC manager - // Although OCT affects only write data, the OCT delay is controlled by the DQS logic block - // which is instantiated once per read group. For protocols where a write group consists - // of multiple read groups, the setting must be set multiple times. - for (read_group = - write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; - read_group < - (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; - ++read_group) { - - WRITE_SCC_OCT_OUT2_DELAY(read_group, delay); - } - -} - -static inline void scc_mgr_set_dqs_bypass(uint32_t write_group, uint32_t bypass) -{ - // Load the setting in the SCC manager - WRITE_SCC_DQS_BYPASS(write_group, bypass); -} - -static inline void scc_mgr_set_dq_out1_delay(uint32_t write_group, uint32_t dq_in_group, - uint32_t delay) -{ - - // Load the setting in the SCC manager - WRITE_SCC_DQ_OUT1_DELAY(dq_in_group, delay); - -} - -static inline void scc_mgr_set_dq_out2_delay(uint32_t write_group, uint32_t dq_in_group, - uint32_t delay) -{ - - // Load the setting in the SCC manager - WRITE_SCC_DQ_OUT2_DELAY(dq_in_group, delay); - -} - -static inline void scc_mgr_set_dq_in_delay(uint32_t write_group, uint32_t dq_in_group, - uint32_t delay) -{ - - // Load the setting in the SCC manager - WRITE_SCC_DQ_IN_DELAY(dq_in_group, delay); - -} - -static inline void scc_mgr_set_dq_bypass(uint32_t write_group, uint32_t dq_in_group, - uint32_t bypass) -{ - // Load the setting in the SCC manager - WRITE_SCC_DQ_BYPASS(dq_in_group, bypass); -} - -static inline void scc_mgr_set_rfifo_mode(uint32_t write_group, uint32_t dq_in_group, uint32_t mode) -{ - // Load the setting in the SCC manager - WRITE_SCC_RFIFO_MODE(dq_in_group, mode); -} - -static inline void scc_mgr_set_hhp_extras(void) -{ - // Load the fixed setting in the SCC manager - // bits: 0:0 = 1'b1 - dqs bypass - // bits: 1:1 = 1'b1 - dq bypass - // bits: 4:2 = 3'b001 - rfifo_mode - // bits: 6:5 = 2'b01 - rfifo clock_select - // bits: 7:7 = 1'b0 - separate gating from ungating setting - // bits: 8:8 = 1'b0 - separate OE from Output delay setting - uint32_t value = (0 << 8) | (0 << 7) | (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0); - WRITE_SCC_HHP_EXTRAS(value); -} - -static inline void scc_mgr_set_hhp_dqse_map(void) -{ - // Load the fixed setting in the SCC manager - WRITE_SCC_HHP_DQSE_MAP(0); -} - -static inline void scc_mgr_set_dqs_out1_delay(uint32_t write_group, uint32_t delay) -{ - WRITE_SCC_DQS_IO_OUT1_DELAY(delay); - -} - -static inline void scc_mgr_set_dqs_out2_delay(uint32_t write_group, uint32_t delay) -{ - WRITE_SCC_DQS_IO_OUT2_DELAY(delay); - -} - -static inline void scc_mgr_set_dm_out1_delay(uint32_t write_group, 
uint32_t dm, uint32_t delay) -{ - WRITE_SCC_DM_IO_OUT1_DELAY(dm, delay); -} - -static inline void scc_mgr_set_dm_out2_delay(uint32_t write_group, uint32_t dm, uint32_t delay) -{ - WRITE_SCC_DM_IO_OUT2_DELAY(dm, delay); -} - -static inline void scc_mgr_set_dm_in_delay(uint32_t write_group, uint32_t dm, uint32_t delay) -{ - WRITE_SCC_DM_IO_IN_DELAY(dm, delay); -} - -static inline void scc_mgr_set_dm_bypass(uint32_t write_group, uint32_t dm, uint32_t bypass) -{ - // Load the setting in the SCC manager - WRITE_SCC_DM_BYPASS(dm, bypass); -} - -//USER Zero all DQS config -// TODO: maybe rename to scc_mgr_zero_dqs_config (or something) -static void scc_mgr_zero_all(void) -{ - uint32_t i, r; - - //USER Zero all DQS config settings, across all groups and all shadow registers - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - - // Strictly speaking this should be called once per group to make - // sure each group's delay chain is refreshed from the SCC register file, - // but since we're resetting all delay chains anyway, we can save some - // runtime by calling select_shadow_regs_for_update just once to switch - // rank. - select_shadow_regs_for_update(r, 0, 1); - - for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { - // The phases actually don't exist on a per-rank basis, but there's - // no harm updating them several times, so let's keep the code simple. - scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE); - scc_mgr_set_dqs_en_phase(i, 0); - scc_mgr_set_dqs_en_delay(i, 0); - } - - for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { - scc_mgr_set_dqdqs_output_phase(i, 0); - // av/cv don't have out2 - scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE); - } - - //USER multicast to all DQS group enables - IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, 0xff); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } -} - -static void scc_set_bypass_mode(uint32_t write_group, uint32_t mode) -{ - // mode = 0 : Do NOT bypass - Half Rate Mode - // mode = 1 : Bypass - Full Rate Mode - - // only need to set once for all groups, pins, dq, dqs, dm - if (write_group == 0) { - DPRINT(1, "Setting HHP Extras"); - scc_mgr_set_hhp_extras(); - DPRINT(1, "Done Setting HHP Extras"); - } - - //USER multicast to all DQ enables - IOWR_32DIRECT(SCC_MGR_DQ_ENA, 0, 0xff); - - IOWR_32DIRECT(SCC_MGR_DM_ENA, 0, 0xff); - - //USER update current DQS IO enable - IOWR_32DIRECT(SCC_MGR_DQS_IO_ENA, 0, 0); - - //USER update the DQS logic - IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, write_group); - - //USER hit update - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); -} - -// Moving up to avoid warnings -static void scc_mgr_load_dqs_for_write_group(uint32_t write_group) -{ - uint32_t read_group; - - // Although OCT affects only write data, the OCT delay is controlled by the DQS logic block - // which is instantiated once per read group. For protocols where a write group consists - // of multiple read groups, the setting must be scanned multiple times. 
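// (Editor's illustrative note -- not part of the original sequencer.)  The loop
// below walks the read groups that belong to this write group.  Assuming, purely
// for illustration, RW_MGR_MEM_IF_READ_DQS_WIDTH == 8 and
// RW_MGR_MEM_IF_WRITE_DQS_WIDTH == 4 (a 2:1 ratio), write_group 1 covers
// read_group 1*8/4 = 2 up to (but not including) 2*8/4 = 4, i.e. read groups
// 2 and 3, so the DQS scan is issued twice.  With a 1:1 ratio the loop
// collapses to the single read group equal to write_group.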
- for (read_group = - write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; - read_group < - (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; - ++read_group) { - - IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, read_group); - } -} - -static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin, int32_t out_only) -{ - uint32_t i, r; - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - - select_shadow_regs_for_update(r, write_group, 1); - - //USER Zero all DQ config settings - for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - scc_mgr_set_dq_out1_delay(write_group, i, 0); - scc_mgr_set_dq_out2_delay(write_group, i, IO_DQ_OUT_RESERVE); - if (!out_only) { - scc_mgr_set_dq_in_delay(write_group, i, 0); - } - } - - //USER multicast to all DQ enables - IOWR_32DIRECT(SCC_MGR_DQ_ENA, 0, 0xff); - - //USER Zero all DM config settings - for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { - if (!out_only) { - // Do we really need this? - scc_mgr_set_dm_in_delay(write_group, i, 0); - } - scc_mgr_set_dm_out1_delay(write_group, i, 0); - scc_mgr_set_dm_out2_delay(write_group, i, IO_DM_OUT_RESERVE); - } - - //USER multicast to all DM enables - IOWR_32DIRECT(SCC_MGR_DM_ENA, 0, 0xff); - - //USER zero all DQS io settings - if (!out_only) { - scc_mgr_set_dqs_io_in_delay(write_group, 0); - } - // av/cv don't have out2 - scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE); - scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE); - scc_mgr_load_dqs_for_write_group(write_group); - - //USER multicast to all DQS IO enables (only 1) - IOWR_32DIRECT(SCC_MGR_DQS_IO_ENA, 0, 0); - - //USER hit update to zero everything - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } -} - -//USER load up dqs config settings - -static void scc_mgr_load_dqs(uint32_t dqs) -{ - IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, dqs); -} - -//USER load up dqs io config settings - -static void scc_mgr_load_dqs_io(void) -{ - IOWR_32DIRECT(SCC_MGR_DQS_IO_ENA, 0, 0); -} - -//USER load up dq config settings - -static void scc_mgr_load_dq(uint32_t dq_in_group) -{ - IOWR_32DIRECT(SCC_MGR_DQ_ENA, 0, dq_in_group); -} - -//USER load up dm config settings - -static void scc_mgr_load_dm(uint32_t dm) -{ - IOWR_32DIRECT(SCC_MGR_DM_ENA, 0, dm); -} - -//USER apply and load a particular input delay for the DQ pins in a group -//USER group_bgn is the index of the first dq pin (in the write group) - -static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group, uint32_t group_bgn, - uint32_t delay) -{ - uint32_t i, p; - - for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { - scc_mgr_set_dq_in_delay(write_group, p, delay); - scc_mgr_load_dq(p); - } -} - -//USER apply and load a particular output delay for the DQ pins in a group - -static void scc_mgr_apply_group_dq_out1_delay(uint32_t write_group, uint32_t group_bgn, - uint32_t delay1) -{ - uint32_t i, p; - - for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { - scc_mgr_set_dq_out1_delay(write_group, i, delay1); - scc_mgr_load_dq(i); - } -} - -//USER apply and load a particular output delay for the DM pins in a group - -static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group, uint32_t delay1) -{ - uint32_t i; - - for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { - scc_mgr_set_dm_out1_delay(write_group, i, delay1); - scc_mgr_load_dm(i); - } -} - -//USER apply and load delay on both DQS and OCT out1 -static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group, uint32_t 
delay) -{ - scc_mgr_set_dqs_out1_delay(write_group, delay); - scc_mgr_load_dqs_io(); - - scc_mgr_set_oct_out1_delay(write_group, delay); - scc_mgr_load_dqs_for_write_group(write_group); -} - -//USER set delay on both DQS and OCT out1 by incrementally changing -//USER the settings one dtap at a time towards the target value, to avoid -//USER breaking the lock of the DLL/PLL on the memory device. -static void scc_mgr_set_group_dqs_io_and_oct_out1_gradual(uint32_t write_group, uint32_t delay) -{ - uint32_t d = READ_SCC_DQS_IO_OUT1_DELAY(); - - while (d > delay) { - --d; - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - if (QDRII) { - rw_mgr_mem_dll_lock_wait(); - } - } - while (d < delay) { - ++d; - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - if (QDRII) { - rw_mgr_mem_dll_lock_wait(); - } - } -} - -//USER apply a delay to the entire output side: DQ, DM, DQS, OCT - -static void scc_mgr_apply_group_all_out_delay(uint32_t write_group, uint32_t group_bgn, - uint32_t delay) -{ - //USER dq shift - - scc_mgr_apply_group_dq_out1_delay(write_group, group_bgn, delay); - - //USER dm shift - - scc_mgr_apply_group_dm_out1_delay(write_group, delay); - - //USER dqs and oct shift - - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, delay); -} - -//USER apply a delay to the entire output side (DQ, DM, DQS, OCT) and to all ranks -static void scc_mgr_apply_group_all_out_delay_all_ranks(uint32_t write_group, uint32_t group_bgn, - uint32_t delay) -{ - uint32_t r; - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - - select_shadow_regs_for_update(r, write_group, 1); - - scc_mgr_apply_group_all_out_delay(write_group, group_bgn, delay); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } -} - -//USER apply a delay to the entire output side: DQ, DM, DQS, OCT - -static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group, uint32_t group_bgn, - uint32_t delay) -{ - uint32_t i, p, new_delay; - - //USER dq shift - - for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { - - new_delay = READ_SCC_DQ_OUT2_DELAY(i); - new_delay += delay; - - if (new_delay > IO_IO_OUT2_DELAY_MAX) { - DPRINT(1, "%s(%lu, %lu, %lu) DQ[%lu,%lu]: %lu > %lu => %lu", - __func__, write_group, group_bgn, delay, i, p, - new_delay, (long unsigned int)IO_IO_OUT2_DELAY_MAX, - (long unsigned int)IO_IO_OUT2_DELAY_MAX); - new_delay = IO_IO_OUT2_DELAY_MAX; - } - - scc_mgr_set_dq_out2_delay(write_group, i, new_delay); - scc_mgr_load_dq(i); - } - - //USER dm shift - - for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { - new_delay = READ_SCC_DM_IO_OUT2_DELAY(i); - new_delay += delay; - - if (new_delay > IO_IO_OUT2_DELAY_MAX) { - DPRINT(1, "%s(%lu, %lu, %lu) DM[%lu]: %lu > %lu => %lu", - __func__, write_group, group_bgn, delay, i, - new_delay, (long unsigned int)IO_IO_OUT2_DELAY_MAX, - (long unsigned int)IO_IO_OUT2_DELAY_MAX); - new_delay = IO_IO_OUT2_DELAY_MAX; - } - - scc_mgr_set_dm_out2_delay(write_group, i, new_delay); - scc_mgr_load_dm(i); - } - - //USER dqs shift - - new_delay = READ_SCC_DQS_IO_OUT2_DELAY(); - new_delay += delay; - - if (new_delay > IO_IO_OUT2_DELAY_MAX) { - DPRINT(1, "%s(%lu, %lu, %lu) DQS: %lu > %d => %d; adding %lu to OUT1", - __func__, write_group, group_bgn, delay, - new_delay, IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX, - new_delay - IO_IO_OUT2_DELAY_MAX); - scc_mgr_set_dqs_out1_delay(write_group, new_delay - IO_IO_OUT2_DELAY_MAX); - new_delay = IO_IO_OUT2_DELAY_MAX; - } - - 
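// (Editor's illustrative note -- not part of the original sequencer.)  When the
// requested shift overflows the OUT2 chain, the excess is moved onto OUT1 and
// OUT2 is clamped.  For example, assuming IO_IO_OUT2_DELAY_MAX == 7 purely for
// illustration, a computed new_delay of 10 ends up as OUT1 = 10 - 7 = 3 and
// OUT2 = 7 before the write below.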
scc_mgr_set_dqs_out2_delay(write_group, new_delay); - scc_mgr_load_dqs_io(); - - //USER oct shift - - new_delay = READ_SCC_OCT_OUT2_DELAY(write_group); - new_delay += delay; - - if (new_delay > IO_IO_OUT2_DELAY_MAX) { - DPRINT(1, "%s(%lu, %lu, %lu) DQS: %lu > %d => %d; adding %lu to OUT1", - __func__, write_group, group_bgn, delay, - new_delay, IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX, - new_delay - IO_IO_OUT2_DELAY_MAX); - scc_mgr_set_oct_out1_delay(write_group, new_delay - IO_IO_OUT2_DELAY_MAX); - new_delay = IO_IO_OUT2_DELAY_MAX; - } - - scc_mgr_set_oct_out2_delay(write_group, new_delay); - scc_mgr_load_dqs_for_write_group(write_group); -} - -//USER apply a delay to the entire output side (DQ, DM, DQS, OCT) and to all ranks -static void scc_mgr_apply_group_all_out_delay_add_all_ranks(uint32_t write_group, - uint32_t group_bgn, uint32_t delay) -{ - uint32_t r; - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - - select_shadow_regs_for_update(r, write_group, 1); - - scc_mgr_apply_group_all_out_delay_add(write_group, group_bgn, delay); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } -} - -static inline void scc_mgr_spread_out2_delay_all_ranks(uint32_t write_group, uint32_t test_bgn) -{ -} - -// optimization used to recover some slots in ddr3 inst_rom -// could be applied to other protocols if we wanted to -static void set_jump_as_return(void) -{ - // to save space, we replace return with jump to special shared RETURN instruction - // so we set the counter to large value so that we always jump - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0xFF); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_RETURN); - -} - -// should always use constants as argument to ensure all computations are performed at compile time -static inline void delay_for_n_mem_clocks(const uint32_t clocks) -{ - uint32_t afi_clocks; - uint8_t inner; - uint8_t outer; - uint16_t c_loop; - - afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO; /* scale (rounding up) to get afi clocks */ - - // Note, we don't bother accounting for being off a little bit because of a few extra instructions in outer loops - // Note, the loops have a test at the end, and do the test before the decrement, and so always perform the loop - // 1 time more than the counter value - if (afi_clocks == 0) { - inner = outer = c_loop = 0; - } else if (afi_clocks <= 0x100) { - inner = afi_clocks - 1; - outer = 0; - c_loop = 0; - } else if (afi_clocks <= 0x10000) { - inner = 0xff; - outer = (afi_clocks - 1) >> 8; - c_loop = 0; - } else { - inner = 0xff; - outer = 0xff; - c_loop = (afi_clocks - 1) >> 16; - } - - // rom instructions are structured as follows: - // - // IDLE_LOOP2: jnz cntr0, TARGET_A - // IDLE_LOOP1: jnz cntr1, TARGET_B - // return - // - // so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and TARGET_B is - // set to IDLE_LOOP2 as well - // - // if we have no outer loop, though, then we can use IDLE_LOOP1 only, and set - // TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely - // - // a little confusing, but it helps save precious space in the inst_rom and sequencer rom - // and keeps the delays more accurate and reduces overhead - if (afi_clocks <= 0x100) { - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner)); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_IDLE_LOOP1); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_IDLE_LOOP1); - - } else { - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner)); - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 
SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer)); - - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_IDLE_LOOP2); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_IDLE_LOOP2); - - // hack to get around compiler not being smart enough - if (afi_clocks <= 0x10000) { - // only need to run once - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_IDLE_LOOP2); - } else { - do { - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_IDLE_LOOP2); - } while (c_loop-- != 0); - } - } -} - -// should always use constants as argument to ensure all computations are performed at compile time -static inline void delay_for_n_ns(const uint32_t nanoseconds) -{ - delay_for_n_mem_clocks((1000 * nanoseconds) / (1000000 / AFI_CLK_FREQ) * AFI_RATE_RATIO); -} - -// Special routine to recover memory device from illegal state after -// ck/dqs relationship is violated. -static inline void recover_mem_device_after_ck_dqs_violation(void) -{ - // Current protocol doesn't require any special recovery -} - -static void rw_mgr_rdimm_initialize(void) -{ -} - -static void rw_mgr_mem_initialize(void) -{ - uint32_t r; - - //USER The reset / cke part of initialization is broadcasted to all ranks - IOWR_32DIRECT(RW_MGR_SET_CS_AND_ODT_MASK, 0, RW_MGR_RANK_ALL); - - // Here's how you load register for a loop - //USER Counters are located @ 0x800 - //USER Jump address are located @ 0xC00 - //USER For both, registers 0 to 3 are selected using bits 3 and 2, like in - //USER 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C - // I know this ain't pretty, but Avalon bus throws away the 2 least significant bits - - //USER start with memory RESET activated - - //USER tINIT is typically 200us (but can be adjusted in the GUI) - //USER The total number of cycles required for this nested counter structure to - //USER complete is defined by: - //USER num_cycles = (CTR2 + 1) * [(CTR1 + 1) * (2 * (CTR0 + 1) + 1) + 1] + 1 - - //USER Load counters - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL)); - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL)); - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL)); - - //USER Load jump address - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_INIT_RESET_0_CKE_0); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_INIT_RESET_0_CKE_0); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_INIT_RESET_0_CKE_0); - - //USER Execute count instruction - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_INIT_RESET_0_CKE_0); - - //USER indicate that memory is stable - IOWR_32DIRECT(PHY_MGR_RESET_MEM_STBL, 0, 1); - - //USER transition the RESET to high - //USER Wait for 500us - //USER num_cycles = (CTR2 + 1) * [(CTR1 + 1) * (2 * (CTR0 + 1) + 1) + 1] + 1 - //USER Load counters - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL)); - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL)); - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL)); - - //USER Load jump address - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_INIT_RESET_1_CKE_0); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_INIT_RESET_1_CKE_0); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_INIT_RESET_1_CKE_0); - - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_INIT_RESET_1_CKE_0); - - //USER bring up clock enable - - //USER tXRP < 250 ck cycles - delay_for_n_mem_clocks(250); - - // USER initialize RDIMM buffer so 
MRS and RZQ Calibrate commands will be - // USER propagated to discrete memory devices - rw_mgr_rdimm_initialize(); - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { - if (param->skip_ranks[r]) { - //USER request to skip the rank - - continue; - } - - //USER set rank - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); - - //USER Use Mirror-ed commands for odd ranks if address mirrorring is on - if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) { - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS2_MIRR); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS3_MIRR); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS1_MIRR); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS0_DLL_RESET_MIRR); - } else { - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS2); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS3); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS1); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS0_DLL_RESET); - } - - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_ZQCL); - - //USER tZQinit = tDLLK = 512 ck cycles - delay_for_n_mem_clocks(512); - } -} - -static void rw_mgr_mem_dll_lock_wait(void) -{ -} - -//USER At the end of calibration we have to program the user settings in, and -//USER hand off the memory to the user. - -static void rw_mgr_mem_handoff(void) -{ - uint32_t r; - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { - if (param->skip_ranks[r]) { - //USER request to skip the rank - - continue; - } - //USER set rank - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); - - //USER precharge all banks ... - - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_PRECHARGE_ALL); - - //USER load up MR settings specified by user - - //USER Use Mirror-ed commands for odd ranks if address mirrorring is on - if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) { - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS2_MIRR); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS3_MIRR); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS1_MIRR); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS0_USER_MIRR); - } else { - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS2); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS3); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS1); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_MRS0_USER); - } - //USER need to wait tMOD (12CK or 15ns) time before issuing other commands, - //USER but we will have plenty of NIOS cycles before actual handoff so its okay. 
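// (Editor's illustrative note -- not part of the original sequencer.)  tMOD is
// the larger of 12 clock cycles and 15 ns.  Assuming, purely for illustration,
// a 400 MHz memory clock (2.5 ns period), 12 CK = 30 ns, so the 12-CK term
// dominates; the 15 ns term only takes over for clock periods under 1.25 ns.
// The code relies on the NIOS/HPS instruction overhead before handoff exceeding
// this bound rather than inserting an explicit delay_for_n_mem_clocks() here.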
- } - -} - -//USER performs a guaranteed read on the patterns we are going to use during a read test to ensure memory works -static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn, uint32_t group, - uint32_t num_tries, t_btfld * bit_chk, - uint32_t all_ranks) -{ - uint32_t r, vg; - t_btfld correct_mask_vg; - t_btfld tmp_bit_chk; - uint32_t rank_end = - all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); - - *bit_chk = param->read_correct_mask; - correct_mask_vg = param->read_correct_mask_vg; - - for (r = rank_bgn; r < rank_end; r++) { - if (param->skip_ranks[r]) { - //USER request to skip the rank - - continue; - } - //USER set rank - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); - - //USER Load up a constant bursts of read commands - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x20); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_GUARANTEED_READ); - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x20); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_GUARANTEED_READ_CONT); - - tmp_bit_chk = 0; - for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;; vg--) { - //USER reset the fifos to get pointers to known state - - IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); - IOWR_32DIRECT(RW_MGR_RESET_READ_DATAPATH, 0, 0); - - tmp_bit_chk = - tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS / - RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS); - - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, - ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS + vg) << 2), - __RW_MGR_GUARANTEED_READ); - tmp_bit_chk = - tmp_bit_chk | (correct_mask_vg & ~(IORD_32DIRECT(BASE_RW_MGR, 0))); - - if (vg == 0) { - break; - } - } - *bit_chk &= tmp_bit_chk; - } - - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, (group << 2), __RW_MGR_CLEAR_DQS_ENABLE); - - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); - DPRINT(2, "test_load_patterns(%lu,ALL) => (%lu == %lu) => %lu", group, *bit_chk, - param->read_correct_mask, (long unsigned int)(*bit_chk == param->read_correct_mask)); - return (*bit_chk == param->read_correct_mask); -} - -static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks(uint32_t group, - uint32_t num_tries, - t_btfld * bit_chk) -{ - if (rw_mgr_mem_calibrate_read_test_patterns(0, group, num_tries, bit_chk, 1)) { - return 1; - } else { - // case:139851 - if guaranteed read fails, we can retry using different dqs enable phases. - // It is possible that with the initial phase, dqs enable is asserted/deasserted too close - // to an dqs edge, truncating the read burst. - uint32_t p; - for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++) { - scc_mgr_set_dqs_en_phase_all_ranks(group, p); - if (rw_mgr_mem_calibrate_read_test_patterns - (0, group, num_tries, bit_chk, 1)) { - return 1; - } - } - return 0; - } -} - -//USER load up the patterns we are going to use during a read test -static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn, uint32_t all_ranks) -{ - uint32_t r; - uint32_t rank_end = - all_ranks ? 
RW_MGR_MEM_NUMBER_OF_RANKS : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); - - for (r = rank_bgn; r < rank_end; r++) { - if (param->skip_ranks[r]) { - //USER request to skip the rank - - continue; - } - //USER set rank - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); - - //USER Load up a constant bursts - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x20); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_GUARANTEED_WRITE_WAIT0); - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x20); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_GUARANTEED_WRITE_WAIT1); - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0x04); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_GUARANTEED_WRITE_WAIT2); - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_3, 0, 0x04); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_GUARANTEED_WRITE_WAIT3); - - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_GUARANTEED_WRITE); - } - - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); -} - -static inline void rw_mgr_mem_calibrate_read_load_patterns_all_ranks(void) -{ - rw_mgr_mem_calibrate_read_load_patterns(0, 1); -} - -// pe checkout pattern for harden managers -//void pe_checkout_pattern (void) -//{ -// // test RW manager -// -// // do some reads to check load buffer -// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_1, 0, 0x0); -// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_READ_B2B_WAIT1); -// -// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_2, 0, 0x0); -// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_READ_B2B_WAIT2); -// -// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_0, 0, 0x0); -// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_READ_B2B); -// -// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_3, 0, 0x0); -// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_READ_B2B); -// -// // clear error word -// IOWR_32DIRECT (RW_MGR_RESET_READ_DATAPATH, 0, 0); -// -// IOWR_32DIRECT (RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_READ_B2B); -// -// uint32_t readdata; -// -// // read error word -// readdata = IORD_32DIRECT(BASE_RW_MGR, 0); -// -// // read DI buffer -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 0*4, 0); -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 1*4, 0); -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 2*4, 0); -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 3*4, 0); -// -// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_1, 0, 0x0); -// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_READ_B2B_WAIT1); -// -// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_2, 0, 0x0); -// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_READ_B2B_WAIT2); -// -// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_0, 0, 0x0); -// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_READ_B2B); -// -// IOWR_32DIRECT (RW_MGR_LOAD_CNTR_3, 0, 0x0); -// IOWR_32DIRECT (RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_READ_B2B); -// -// // clear error word -// IOWR_32DIRECT (RW_MGR_RESET_READ_DATAPATH, 0, 0); -// -// // do read -// IOWR_32DIRECT (RW_MGR_LOOPBACK_MODE, 0, __RW_MGR_READ_B2B); -// -// // read error word -// readdata = IORD_32DIRECT(BASE_RW_MGR, 0); -// -// // error word should be 0x00 -// -// // read DI buffer -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 0*4, 0); -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 1*4, 0); -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 2*4, 0); -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 3*4, 0); -// -// // clear error word -// IOWR_32DIRECT (RW_MGR_RESET_READ_DATAPATH, 0, 0); -// -// // do dm read -// IOWR_32DIRECT (RW_MGR_LOOPBACK_MODE, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1); -// -// // read error word -// readdata = IORD_32DIRECT(BASE_RW_MGR, 0); -// -// // error word should be ff -// -// // read DI buffer -// readdata = 
IORD_32DIRECT(RW_MGR_DI_BASE + 0*4, 0); -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 1*4, 0); -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 2*4, 0); -// readdata = IORD_32DIRECT(RW_MGR_DI_BASE + 3*4, 0); -// -// // exit loopback mode -// IOWR_32DIRECT (BASE_RW_MGR, 0, __RW_MGR_IDLE_LOOP2); -// -// // start of phy manager access -// -// readdata = IORD_32DIRECT (PHY_MGR_MAX_RLAT_WIDTH, 0); -// readdata = IORD_32DIRECT (PHY_MGR_MAX_AFI_WLAT_WIDTH, 0); -// readdata = IORD_32DIRECT (PHY_MGR_MAX_AFI_RLAT_WIDTH, 0); -// readdata = IORD_32DIRECT (PHY_MGR_CALIB_SKIP_STEPS, 0); -// readdata = IORD_32DIRECT (PHY_MGR_CALIB_VFIFO_OFFSET, 0); -// readdata = IORD_32DIRECT (PHY_MGR_CALIB_LFIFO_OFFSET, 0); -// -// // start of data manager test -// -// readdata = IORD_32DIRECT (DATA_MGR_DRAM_CFG , 0); -// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_WL , 0); -// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_ADD , 0); -// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_RL , 0); -// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_RFC , 0); -// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_REFI , 0); -// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_WR , 0); -// readdata = IORD_32DIRECT (DATA_MGR_MEM_T_MRD , 0); -// readdata = IORD_32DIRECT (DATA_MGR_COL_WIDTH , 0); -// readdata = IORD_32DIRECT (DATA_MGR_ROW_WIDTH , 0); -// readdata = IORD_32DIRECT (DATA_MGR_BANK_WIDTH , 0); -// readdata = IORD_32DIRECT (DATA_MGR_CS_WIDTH , 0); -// readdata = IORD_32DIRECT (DATA_MGR_ITF_WIDTH , 0); -// readdata = IORD_32DIRECT (DATA_MGR_DVC_WIDTH , 0); -// -//} - -//USER try a read and see if it returns correct data back. has dummy reads inserted into the mix -//USER used to align dqs enable. has more thorough checks than the regular read test. - -static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group, - uint32_t num_tries, uint32_t all_correct, - t_btfld * bit_chk, uint32_t all_groups, - uint32_t all_ranks) -{ - uint32_t r, vg; - t_btfld correct_mask_vg; - t_btfld tmp_bit_chk; - uint32_t rank_end = - all_ranks ? 
RW_MGR_MEM_NUMBER_OF_RANKS : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); - uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_DELAY_SWEEPS) - && ENABLE_SUPER_QUICK_CALIBRATION) || BFM_MODE; - - *bit_chk = param->read_correct_mask; - correct_mask_vg = param->read_correct_mask_vg; - - for (r = rank_bgn; r < rank_end; r++) { - if (param->skip_ranks[r]) { - //USER request to skip the rank - - continue; - } - //USER set rank - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x10); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_READ_B2B_WAIT1); - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0x10); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_READ_B2B_WAIT2); - - if (quick_read_mode) { - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x1); /* need at least two (1+1) reads to capture failures */ - } else if (all_groups) { - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x06); - } else { - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x32); - } - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_READ_B2B); - if (all_groups) { - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_3, 0, - RW_MGR_MEM_IF_READ_DQS_WIDTH * - RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1); - } else { - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_3, 0, 0x0); - } - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_READ_B2B); - - tmp_bit_chk = 0; - for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;; vg--) { - //USER reset the fifos to get pointers to known state - - IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); - IOWR_32DIRECT(RW_MGR_RESET_READ_DATAPATH, 0, 0); - - tmp_bit_chk = - tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS / - RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS); - - IOWR_32DIRECT(all_groups ? RW_MGR_RUN_ALL_GROUPS : RW_MGR_RUN_SINGLE_GROUP, - ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS + vg) << 2), - __RW_MGR_READ_B2B); - tmp_bit_chk = - tmp_bit_chk | (correct_mask_vg & ~(IORD_32DIRECT(BASE_RW_MGR, 0))); - - if (vg == 0) { - break; - } - } - *bit_chk &= tmp_bit_chk; - } - - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, (group << 2), __RW_MGR_CLEAR_DQS_ENABLE); - - if (all_correct) { - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); - DPRINT(2, "read_test(%lu,ALL,%lu) => (%lu == %lu) => %lu", group, all_groups, - *bit_chk, param->read_correct_mask, - (long unsigned int)(*bit_chk == param->read_correct_mask)); - return (*bit_chk == param->read_correct_mask); - } else { - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); - DPRINT(2, "read_test(%lu,ONE,%lu) => (%lu != %lu) => %lu", group, all_groups, - *bit_chk, (long unsigned int)0, (long unsigned int)(*bit_chk != 0x00)); - return (*bit_chk != 0x00); - } -} - -static inline uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group, uint32_t num_tries, - uint32_t all_correct, - t_btfld * bit_chk, - uint32_t all_groups) -{ - return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct, bit_chk, all_groups, - 1); -} - -static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t * v) -{ - //USER fiddle with FIFO - if (HARD_PHY) { - IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_HARD_PHY, 0, grp); - } else if (QUARTER_RATE_MODE && !HARD_VFIFO) { - if ((*v & 3) == 3) { - IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_QR, 0, grp); - } else if ((*v & 2) == 2) { - IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR_HR, 0, grp); - } else if ((*v & 1) == 1) { - IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_HR, 0, grp); - } else { - IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR, 0, grp); - } - } else if (HARD_VFIFO) { - // Arria V & Cyclone V have a hard full-rate VFIFO that only has a single incr signal - 
IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR, 0, grp); - } else { - if (!HALF_RATE_MODE || (*v & 1) == 1) { - IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_HR, 0, grp); - } else { - IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR, 0, grp); - } - } - - (*v)++; - BFM_INC_VFIFO; -} - -//Used in quick cal to properly loop through the duplicated VFIFOs in AV QDRII/RLDRAM -static inline void rw_mgr_incr_vfifo_all(uint32_t grp, uint32_t * v) -{ -#if VFIFO_CONTROL_WIDTH_PER_DQS == 1 - rw_mgr_incr_vfifo(grp, v); -#else - uint32_t i; - for (i = 0; i < VFIFO_CONTROL_WIDTH_PER_DQS; i++) { - rw_mgr_incr_vfifo(grp * VFIFO_CONTROL_WIDTH_PER_DQS + i, v); - if (i != 0) { - (*v)--; - } - } -#endif -} - -static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t * v) -{ - - uint32_t i; - - for (i = 0; i < VFIFO_SIZE - 1; i++) { - rw_mgr_incr_vfifo(grp, v); - } -} - -//USER find a good dqs enable to use - -#if NEWVERSION_DQSEN - -// Navid's version - -static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp) -{ - uint32_t i, d, v, p; - uint32_t max_working_cnt; - uint32_t fail_cnt; - t_btfld bit_chk; - uint32_t dtaps_per_ptap; - uint32_t found_begin, found_end; - uint32_t work_bgn, work_mid, work_end, tmp_delay; - uint32_t test_status; - uint32_t found_passing_read, found_failing_read, initial_failing_dtap; - - reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); - - scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); - scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); - - fail_cnt = 0; - - //USER ************************************************************** - //USER * Step 0 : Determine number of delay taps for each phase tap * - - dtaps_per_ptap = 0; - tmp_delay = 0; - while (tmp_delay < IO_DELAY_PER_OPA_TAP) { - dtaps_per_ptap++; - tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP; - } - dtaps_per_ptap--; - tmp_delay = 0; - - // VFIFO sweep - - //USER ********************************************************* - //USER * Step 1 : First push vfifo until we get a failing read * - for (v = 0; v < VFIFO_SIZE;) { - DPRINT(2, "find_dqs_en_phase: vfifo %lu", BFM_GBL_GET(vfifo_idx)); - test_status = - rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0); - if (!test_status) { - fail_cnt++; - - if (fail_cnt == 2) { - break; - } - } - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - } - - if (v >= VFIFO_SIZE) { - //USER no failing read found!! 
Something must have gone wrong - DPRINT(2, "find_dqs_en_phase: vfifo failed"); - return 0; - } - - max_working_cnt = 0; - - //USER ******************************************************** - //USER * step 2: find first working phase, increment in ptaps * - found_begin = 0; - work_bgn = 0; - for (d = 0; d <= dtaps_per_ptap; d++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { - work_bgn = tmp_delay; - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - for (i = 0; i < VFIFO_SIZE; i++) { - for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++, work_bgn += IO_DELAY_PER_OPA_TAP) { - DPRINT(2, "find_dqs_en_phase: begin: vfifo=%lu ptap=%lu dtap=%lu", - BFM_GBL_GET(vfifo_idx), p, d); - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - - test_status = - rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, - &bit_chk, 0); - - if (test_status) { - max_working_cnt = 1; - found_begin = 1; - break; - } - } - - if (found_begin) { - break; - } - - if (p > IO_DQS_EN_PHASE_MAX) { - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - } - } - - if (found_begin) { - break; - } - } - - if (i >= VFIFO_SIZE) { - //USER cannot find working solution - DPRINT(2, "find_dqs_en_phase: no vfifo/ptap/dtap"); - return 0; - } - - work_end = work_bgn; - - //USER If d is 0 then the working window covers a phase tap and we can follow the old procedure - //USER otherwise, we've found the beginning, and we need to increment the dtaps until we find the end - if (d == 0) { - //USER ******************************************************************** - //USER * step 3a: if we have room, back off by one and increment in dtaps * - COV(EN_PHASE_PTAP_OVERLAP); - - //USER Special case code for backing up a phase - if (p == 0) { - p = IO_DQS_EN_PHASE_MAX; - rw_mgr_decr_vfifo(grp, &v); - } else { - p = p - 1; - } - tmp_delay = work_bgn - IO_DELAY_PER_OPA_TAP; - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - - found_begin = 0; - for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < work_bgn; - d++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { - - DPRINT(2, "find_dqs_en_phase: begin-2: vfifo=%lu ptap=%lu dtap=%lu", - BFM_GBL_GET(vfifo_idx), p, d); - - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - if (rw_mgr_mem_calibrate_read_test_all_ranks - (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - found_begin = 1; - work_bgn = tmp_delay; - break; - } - } - - //USER We have found a working dtap before the ptap found above - if (found_begin == 1) { - max_working_cnt++; - } - //USER Restore VFIFO to old state before we decremented it (if needed) - p = p + 1; - if (p > IO_DQS_EN_PHASE_MAX) { - p = 0; - rw_mgr_incr_vfifo(grp, &v); - } - - scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); - - //USER *********************************************************************************** - //USER * step 4a: go forward from working phase to non working phase, increment in ptaps * - p = p + 1; - work_end += IO_DELAY_PER_OPA_TAP; - if (p > IO_DQS_EN_PHASE_MAX) { - //USER fiddle with FIFO - p = 0; - rw_mgr_incr_vfifo(grp, &v); - } - - found_end = 0; - for (; i < VFIFO_SIZE + 1; i++) { - for (; p <= IO_DQS_EN_PHASE_MAX; p++, work_end += IO_DELAY_PER_OPA_TAP) { - DPRINT(2, "find_dqs_en_phase: end: vfifo=%lu ptap=%lu dtap=%lu", - BFM_GBL_GET(vfifo_idx), p, (long unsigned int)0); - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - - if (!rw_mgr_mem_calibrate_read_test_all_ranks - (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - found_end = 1; - break; - } else { - max_working_cnt++; - } - } - - if (found_end) { - break; - } - - if (p > IO_DQS_EN_PHASE_MAX) { - //USER fiddle with FIFO - 
rw_mgr_incr_vfifo(grp, &v); - p = 0; - } - } - - if (i >= VFIFO_SIZE + 1) { - //USER cannot see edge of failing read - DPRINT(2, "find_dqs_en_phase: end: failed"); - return 0; - } - //USER ********************************************************* - //USER * step 5a: back off one from last, increment in dtaps * - - //USER Special case code for backing up a phase - if (p == 0) { - p = IO_DQS_EN_PHASE_MAX; - rw_mgr_decr_vfifo(grp, &v); - } else { - p = p - 1; - } - - work_end -= IO_DELAY_PER_OPA_TAP; - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - - //USER * The actual increment of dtaps is done outside of the if/else loop to share code - d = 0; - - DPRINT(2, "find_dqs_en_phase: found end v/p: vfifo=%lu ptap=%lu", - BFM_GBL_GET(vfifo_idx), p); - } else { - - //USER ******************************************************************** - //USER * step 3-5b: Find the right edge of the window using delay taps * - COV(EN_PHASE_PTAP_NO_OVERLAP); - - DPRINT(2, "find_dqs_en_phase: begin found: vfifo=%lu ptap=%lu dtap=%lu begin=%lu", - BFM_GBL_GET(vfifo_idx), p, d, work_bgn); - BFM_GBL_SET(dqs_enable_left_edge[grp].v, BFM_GBL_GET(vfifo_idx)); - BFM_GBL_SET(dqs_enable_left_edge[grp].p, p); - BFM_GBL_SET(dqs_enable_left_edge[grp].d, d); - BFM_GBL_SET(dqs_enable_left_edge[grp].ps, work_bgn); - - work_end = work_bgn; - - //USER * The actual increment of dtaps is done outside of the if/else loop to share code - - //USER Only here to counterbalance a subtract later on which is not needed if this branch - //USER of the algorithm is taken - max_working_cnt++; - } - - //USER The dtap increment to find the failing edge is done here - for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) { - - DPRINT(2, "find_dqs_en_phase: end-2: dtap=%lu", d); - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - break; - } - } - - //USER Go back to working dtap - if (d != 0) { - work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP; - } - - DPRINT(2, "find_dqs_en_phase: found end v/p/d: vfifo=%lu ptap=%lu dtap=%lu end=%lu", - BFM_GBL_GET(vfifo_idx), p, d - 1, work_end); - BFM_GBL_SET(dqs_enable_right_edge[grp].v, BFM_GBL_GET(vfifo_idx)); - BFM_GBL_SET(dqs_enable_right_edge[grp].p, p); - BFM_GBL_SET(dqs_enable_right_edge[grp].d, d - 1); - BFM_GBL_SET(dqs_enable_right_edge[grp].ps, work_end); - - if (work_end >= work_bgn) { - //USER we have a working range - } else { - //USER nil range - DPRINT(2, "find_dqs_en_phase: end-2: failed"); - return 0; - } - - DPRINT(2, "find_dqs_en_phase: found range [%lu,%lu]", work_bgn, work_end); - - // *************************************************************** - //USER * We need to calculate the number of dtaps that equal a ptap - //USER * To do that we'll back up a ptap and re-find the edge of the - //USER * window using dtaps - - DPRINT(2, "find_dqs_en_phase: calculate dtaps_per_ptap for tracking"); - - //USER Special case code for backing up a phase - if (p == 0) { - p = IO_DQS_EN_PHASE_MAX; - rw_mgr_decr_vfifo(grp, &v); - DPRINT(2, "find_dqs_en_phase: backed up cycle/phase: v=%lu p=%lu", - BFM_GBL_GET(vfifo_idx), p); - } else { - p = p - 1; - DPRINT(2, "find_dqs_en_phase: backed up phase only: v=%lu p=%lu", - BFM_GBL_GET(vfifo_idx), p); - } - - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - - //USER Increase dtap until we first see a passing read (in case the window is smaller than a ptap), - //USER and then a failing read to mark the edge of the window again - - //USER Find a passing read - 
DPRINT(2, "find_dqs_en_phase: find passing read"); - found_passing_read = 0; - found_failing_read = 0; - initial_failing_dtap = d; - for (; d <= IO_DQS_EN_DELAY_MAX; d++) { - DPRINT(2, "find_dqs_en_phase: testing read d=%lu", d); - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - found_passing_read = 1; - break; - } - } - - if (found_passing_read) { - //USER Find a failing read - DPRINT(2, "find_dqs_en_phase: find failing read"); - for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) { - DPRINT(2, "find_dqs_en_phase: testing read d=%lu", d); - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - if (!rw_mgr_mem_calibrate_read_test_all_ranks - (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - found_failing_read = 1; - break; - } - } - } else { - DPRINT(1, - "find_dqs_en_phase: failed to calculate dtaps per ptap. Fall back on static value"); - } - - //USER The dynamically calculated dtaps_per_ptap is only valid if we found a passing/failing read - //USER If we didn't, it means d hit the max (IO_DQS_EN_DELAY_MAX). - //USER Otherwise, dtaps_per_ptap retains its statically calculated value. - if (found_passing_read && found_failing_read) { - dtaps_per_ptap = d - initial_failing_dtap; - } - - IOWR_32DIRECT(REG_FILE_DTAPS_PER_PTAP, 0, dtaps_per_ptap); - - DPRINT(2, "find_dqs_en_phase: dtaps_per_ptap=%lu - %lu = %lu", d, initial_failing_dtap, - dtaps_per_ptap); - - //USER ******************************************** - //USER * step 6: Find the centre of the window * - - work_mid = (work_bgn + work_end) / 2; - tmp_delay = 0; - - DPRINT(2, "work_bgn=%ld work_end=%ld work_mid=%ld", work_bgn, work_end, work_mid); - //USER Get the middle delay to be less than a VFIFO delay - for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++, tmp_delay += IO_DELAY_PER_OPA_TAP) ; - DPRINT(2, "vfifo ptap delay %ld", tmp_delay); - while (work_mid > tmp_delay) - work_mid -= tmp_delay; - DPRINT(2, "new work_mid %ld", work_mid); - tmp_delay = 0; - for (p = 0; p <= IO_DQS_EN_PHASE_MAX && tmp_delay < work_mid; - p++, tmp_delay += IO_DELAY_PER_OPA_TAP) ; - tmp_delay -= IO_DELAY_PER_OPA_TAP; - DPRINT(2, "new p %ld, tmp_delay=%ld", p - 1, tmp_delay); - for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < work_mid; - d++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) ; - DPRINT(2, "new d %ld, tmp_delay=%ld", d, tmp_delay); - - scc_mgr_set_dqs_en_phase_all_ranks(grp, p - 1); - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - //USER push vfifo until we can successfully calibrate. 
We can do this because - //USER the largest possible margin in 1 VFIFO cycle - - for (i = 0; i < VFIFO_SIZE; i++) { - DPRINT(2, "find_dqs_en_phase: center: vfifo=%lu", BFM_GBL_GET(vfifo_idx)); - if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - break; - } - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - } - - if (i >= VFIFO_SIZE) { - DPRINT(2, "find_dqs_en_phase: center: failed"); - return 0; - } - DPRINT(2, "find_dqs_en_phase: center found: vfifo=%li ptap=%lu dtap=%lu", - BFM_GBL_GET(vfifo_idx), p - 1, d); - BFM_GBL_SET(dqs_enable_mid[grp].v, BFM_GBL_GET(vfifo_idx)); - BFM_GBL_SET(dqs_enable_mid[grp].p, p - 1); - BFM_GBL_SET(dqs_enable_mid[grp].d, d); - BFM_GBL_SET(dqs_enable_mid[grp].ps, work_mid); - return 1; -} - -#if 0 -// Ryan's algorithm - -static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp) -{ - uint32_t i, d, v, p; - uint32_t min_working_p, max_working_p, min_working_d, max_working_d, max_working_cnt; - uint32_t fail_cnt; - t_btfld bit_chk; - uint32_t dtaps_per_ptap; - uint32_t found_begin, found_end; - uint32_t tmp_delay; - - TRACE_FUNC("%lu", grp); - - reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); - - scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); - scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); - - fail_cnt = 0; - - //USER ************************************************************** - //USER * Step 0 : Determine number of delay taps for each phase tap * - - dtaps_per_ptap = 0; - tmp_delay = 0; - while (tmp_delay < IO_DELAY_PER_OPA_TAP) { - dtaps_per_ptap++; - tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP; - } - dtaps_per_ptap--; - - //USER ********************************************************* - //USER * Step 1 : First push vfifo until we get a failing read * - for (v = 0; v < VFIFO_SIZE;) { - if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - fail_cnt++; - - if (fail_cnt == 2) { - break; - } - } - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - } - - if (i >= VFIFO_SIZE) { - //USER no failing read found!! 
Something must have gone wrong - return 0; - } - - max_working_cnt = 0; - min_working_p = 0; - - //USER ******************************************************** - //USER * step 2: find first working phase, increment in ptaps * - found_begin = 0; - for (d = 0; d <= dtaps_per_ptap; d++) { - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - for (i = 0; i < VFIFO_SIZE; i++) { - for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++) { - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - - if (rw_mgr_mem_calibrate_read_test_all_ranks - (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - max_working_cnt = 1; - found_begin = 1; - break; - } - } - - if (found_begin) { - break; - } - - if (p > IO_DQS_EN_PHASE_MAX) { - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - } - } - - if (found_begin) { - break; - } - } - - if (i >= VFIFO_SIZE) { - //USER cannot find working solution - return 0; - } - - min_working_p = p; - - //USER If d is 0 then the working window covers a phase tap and we can follow the old procedure - //USER otherwise, we've found the beginning, and we need to increment the dtaps until we find the end - if (d == 0) { - //USER ******************************************************************** - //USER * step 3a: if we have room, back off by one and increment in dtaps * - min_working_d = 0; - - //USER Special case code for backing up a phase - if (p == 0) { - p = IO_DQS_EN_PHASE_MAX; - rw_mgr_decr_vfifo(grp, &v); - } else { - p = p - 1; - } - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - - found_begin = 0; - for (d = 0; d <= dtaps_per_ptap; d++) { - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - if (rw_mgr_mem_calibrate_read_test_all_ranks - (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - found_begin = 1; - min_working_d = d; - break; - } - } - - //USER We have found a working dtap before the ptap found above - if (found_begin == 1) { - min_working_p = p; - max_working_cnt++; - } - //USER Restore VFIFO to old state before we decremented it - p = p + 1; - if (p > IO_DQS_EN_PHASE_MAX) { - p = 0; - rw_mgr_incr_vfifo(grp, &v); - } - - scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); - - //USER *********************************************************************************** - //USER * step 4a: go forward from working phase to non working phase, increment in ptaps * - p = p + 1; - if (p > IO_DQS_EN_PHASE_MAX) { - //USER fiddle with FIFO - p = 0; - rw_mgr_incr_vfifo(grp, &v); - } - - found_end = 0; - for (; i < VFIFO_SIZE + 1; i++) { - for (; p <= IO_DQS_EN_PHASE_MAX; p++) { - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - - if (!rw_mgr_mem_calibrate_read_test_all_ranks - (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - found_end = 1; - break; - } else { - max_working_cnt++; - } - } - - if (found_end) { - break; - } - - if (p > IO_DQS_EN_PHASE_MAX) { - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - p = 0; - } - } - - if (i >= VFIFO_SIZE + 1) { - //USER cannot see edge of failing read - return 0; - } - //USER ********************************************************* - //USER * step 5a: back off one from last, increment in dtaps * - max_working_d = 0; - - //USER Special case code for backing up a phase - if (p == 0) { - p = IO_DQS_EN_PHASE_MAX; - rw_mgr_decr_vfifo(grp, &v); - } else { - p = p - 1; - } - - max_working_p = p; - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - - for (d = 0; d <= IO_DQS_EN_DELAY_MAX; d++) { - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - if (!rw_mgr_mem_calibrate_read_test_all_ranks - (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - break; - } - } - - //USER Go back to working dtap - if (d != 0) { - 
max_working_d = d - 1; - } - - } else { - - //USER ******************************************************************** - //USER * step 3-5b: Find the right edge of the window using delay taps * - - max_working_p = min_working_p; - min_working_d = d; - - for (; d <= IO_DQS_EN_DELAY_MAX; d++) { - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - if (!rw_mgr_mem_calibrate_read_test_all_ranks - (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - break; - } - } - - //USER Go back to working dtap - if (d != 0) { - max_working_d = d - 1; - } - //USER Only here to counterbalance a subtract later on which is not needed if this branch - //USER of the algorithm is taken - max_working_cnt++; - } - - //USER ******************************************** - //USER * step 6: Find the centre of the window * - - //USER If the number of working phases is even we will step back a phase and find the - //USER edge with a larger delay chain tap - if ((max_working_cnt & 1) == 0) { - p = min_working_p + (max_working_cnt - 1) / 2; - - //USER Special case code for backing up a phase - if (max_working_p == 0) { - max_working_p = IO_DQS_EN_PHASE_MAX; - rw_mgr_decr_vfifo(grp, &v); - } else { - max_working_p = max_working_p - 1; - } - - scc_mgr_set_dqs_en_phase_all_ranks(grp, max_working_p); - - //USER Code to determine at which dtap we should start searching again for a failure - //USER If we've moved back such that the max and min p are the same, we should start searching - //USER from where the window actually exists - if (max_working_p == min_working_p) { - d = min_working_d; - } else { - d = max_working_d; - } - - for (; d <= IO_DQS_EN_DELAY_MAX; d++) { - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - if (!rw_mgr_mem_calibrate_read_test_all_ranks - (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - break; - } - } - - //USER Go back to working dtap - if (d != 0) { - max_working_d = d - 1; - } - } else { - p = min_working_p + (max_working_cnt) / 2; - } - - while (p > IO_DQS_EN_PHASE_MAX) { - p -= (IO_DQS_EN_PHASE_MAX + 1); - } - - d = (min_working_d + max_working_d) / 2; - - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); - - //USER push vfifo until we can successfully calibrate - - for (i = 0; i < VFIFO_SIZE; i++) { - if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - break; - } - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - } - - if (i >= VFIFO_SIZE) { - return 0; - } - - return 1; -} - -#endif - -#else -// Val's original version - -static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp) -{ - uint32_t i, j, v, d; - uint32_t min_working_d, max_working_cnt; - uint32_t fail_cnt; - t_btfld bit_chk; - uint32_t delay_per_ptap_mid; - - reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); - - scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); - scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); - - fail_cnt = 0; - - //USER first push vfifo until we get a failing read - v = 0; - for (i = 0; i < VFIFO_SIZE; i++) { - if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) { - fail_cnt++; - - if (fail_cnt == 2) { - break; - } - } - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - } - - if (v >= VFIFO_SIZE) { - //USER no failing read found!! 
Something must have gone wrong - - return 0; - } - - max_working_cnt = 0; - min_working_d = 0; - - for (i = 0; i < VFIFO_SIZE + 1; i++) { - for (d = 0; d <= IO_DQS_EN_PHASE_MAX; d++) { - scc_mgr_set_dqs_en_phase_all_ranks(grp, d); - - rw_mgr_mem_calibrate_read_test_all_ranks(grp, NUM_READ_PB_TESTS, - PASS_ONE_BIT, &bit_chk, 0); - if (bit_chk) { - //USER passing read - - if (max_working_cnt == 0) { - min_working_d = d; - } - - max_working_cnt++; - } else { - if (max_working_cnt > 0) { - //USER already have one working value - break; - } - } - } - - if (d > IO_DQS_EN_PHASE_MAX) { - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - } else { - //USER found working solution! - - d = min_working_d + (max_working_cnt - 1) / 2; - - while (d > IO_DQS_EN_PHASE_MAX) { - d -= (IO_DQS_EN_PHASE_MAX + 1); - } - - break; - } - } - - if (i >= VFIFO_SIZE + 1) { - //USER cannot find working solution or cannot see edge of failing read - - return 0; - } - //USER in the case the number of working steps is even, use 50ps taps to further center the window - - if ((max_working_cnt & 1) == 0) { - delay_per_ptap_mid = IO_DELAY_PER_OPA_TAP / 2; - - //USER increment in 50ps taps until we reach the required amount - - for (i = 0, j = 0; i <= IO_DQS_EN_DELAY_MAX && j < delay_per_ptap_mid; - i++, j += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) ; - - scc_mgr_set_dqs_en_delay_all_ranks(grp, i - 1); - } - - scc_mgr_set_dqs_en_phase_all_ranks(grp, d); - - //USER push vfifo until we can successfully calibrate - - for (i = 0; i < VFIFO_SIZE; i++) { - if (rw_mgr_mem_calibrate_read_test_all_ranks - (grp, NUM_READ_PB_TESTS, PASS_ONE_BIT, &bit_chk, 0)) { - break; - } - //USER fiddle with FIFO - rw_mgr_incr_vfifo(grp, &v); - } - - if (i >= VFIFO_SIZE) { - return 0; - } - - return 1; -} - -#endif - -// Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different dq_in_delay values -static inline uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay(uint32_t - write_group, - uint32_t - read_group, - uint32_t - test_bgn) -{ - uint32_t found; - uint32_t i; - uint32_t p; - uint32_t d; - uint32_t r; - - const uint32_t delay_step = IO_IO_IN_DELAY_MAX / (RW_MGR_MEM_DQ_PER_READ_DQS - 1); - - // try different dq_in_delays since the dq path is shorter than dqs - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - select_shadow_regs_for_update(r, write_group, 1); - for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; - i++, p++, d += delay_step) { - DPRINT(1, - "rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%lu/%lu r=%lu, i=%lu p=%lu d=%lu", - write_group, read_group, r, i, p, d); - scc_mgr_set_dq_in_delay(write_group, p, d); - scc_mgr_load_dq(p); - } - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } - - found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group); - - DPRINT(1, - "rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%lu/%lu found=%lu; Reseting delay chain to zero", - write_group, read_group, found); - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - select_shadow_regs_for_update(r, write_group, 1); - for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { - scc_mgr_set_dq_in_delay(write_group, p, 0); - scc_mgr_load_dq(p); - } - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } - - return found; -} - -//USER per-bit deskew DQ and center - -#if NEWVERSION_RDDESKEW - -static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn, uint32_t write_group, - uint32_t read_group, uint32_t test_bgn, - uint32_t 
use_read_test, uint32_t update_fom) -{ - uint32_t i, p, d, min_index; - //USER Store these as signed since there are comparisons with signed numbers - t_btfld bit_chk; - t_btfld sticky_bit_chk; - int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS]; - int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS]; - int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS]; - int32_t mid; - int32_t orig_mid_min, mid_min; - int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs, final_dqs_en; - int32_t dq_margin, dqs_margin; - uint32_t stop; - - start_dqs = READ_SCC_DQS_IN_DELAY(read_group); - if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { - start_dqs_en = READ_SCC_DQS_EN_DELAY(read_group); - } - - select_curr_shadow_reg_using_rank(rank_bgn); - - //USER per-bit deskew - - //USER set the left and right edge of each bit to an illegal value - //USER use (IO_IO_IN_DELAY_MAX + 1) as an illegal value - sticky_bit_chk = 0; - for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { - left_edge[i] = IO_IO_IN_DELAY_MAX + 1; - right_edge[i] = IO_IO_IN_DELAY_MAX + 1; - } - - //USER Search for the left edge of the window for each bit - for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) { - scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - //USER Stop searching when the read test doesn't pass AND when we've seen a passing read on every bit - if (use_read_test) { - stop = - !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group, NUM_READ_PB_TESTS, - PASS_ONE_BIT, &bit_chk, 0, 0); - } else { - rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, PASS_ONE_BIT, - &bit_chk, 0); - bit_chk = - bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS * - (read_group - - (write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / - RW_MGR_MEM_IF_WRITE_DQS_WIDTH))); - stop = (bit_chk == 0); - } - sticky_bit_chk = sticky_bit_chk | bit_chk; - stop = stop && (sticky_bit_chk == param->read_correct_mask); - DPRINT(2, "vfifo_center(left): dtap=%lu => " BTFLD_FMT " == " BTFLD_FMT " && %lu", - d, sticky_bit_chk, param->read_correct_mask, stop); - - if (stop == 1) { - break; - } else { - for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { - if (bit_chk & 1) { - //USER Remember a passing test as the left_edge - left_edge[i] = d; - } else { - //USER If a left edge has not been seen yet, then a future passing test will mark this edge as the right edge - if (left_edge[i] == IO_IO_IN_DELAY_MAX + 1) { - right_edge[i] = -(d + 1); - } - } - DPRINT(2, - "vfifo_center[l,d=%lu]: bit_chk_test=%d left_edge[%lu]: %ld right_edge[%lu]: %ld", - d, (int)(bit_chk & 1), i, left_edge[i], i, right_edge[i]); - bit_chk = bit_chk >> 1; - } - } - } - - //USER Reset DQ delay chains to 0 - scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0); - sticky_bit_chk = 0; - for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) { - - DPRINT(2, "vfifo_center: left_edge[%lu]: %ld right_edge[%lu]: %ld", i, left_edge[i], - i, right_edge[i]); - - //USER Check for cases where we haven't found the left edge, which makes our assignment of the the - //USER right edge invalid. Reset it to the illegal value. 
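/*
 * Standalone sketch (not part of this patch) of the per-bit edge bookkeeping
 * used above: each passing bit updates its left edge to the current delay,
 * a bit that fails before it has ever passed gets a provisional negative
 * right edge, and the sweep stops once the test fails while every bit has
 * passed at least once. NBITS and the read_test() callback are illustrative.
 */
#include <stdint.h>

#define NBITS 8

static void sweep_left_edges(int32_t left[NBITS], int32_t right[NBITS],
			     uint32_t dmax, int32_t illegal,
			     uint32_t (*read_test)(uint32_t d) /* per-bit pass mask */)
{
	uint32_t sticky = 0, full = (1u << NBITS) - 1;
	uint32_t d, i;

	for (i = 0; i < NBITS; i++)
		left[i] = right[i] = illegal;	/* "edge not seen yet" marker */

	for (d = 0; d <= dmax; d++) {
		uint32_t mask = read_test(d);

		sticky |= mask;
		if (mask == 0 && sticky == full)
			break;			/* every bit has passed once and now all fail */

		for (i = 0; i < NBITS; i++) {
			if (mask & (1u << i))
				left[i] = d;	/* remember latest passing delay */
			else if (left[i] == illegal)
				right[i] = -(int32_t)(d + 1);	/* failed before ever passing */
		}
	}
}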
- if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) - && (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { - right_edge[i] = IO_IO_IN_DELAY_MAX + 1; - DPRINT(2, "vfifo_center: reset right_edge[%lu]: %ld", i, right_edge[i]); - } - //USER Reset sticky bit (except for bits where we have seen both the left and right edge) - sticky_bit_chk = sticky_bit_chk << 1; - if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) - && (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) { - sticky_bit_chk = sticky_bit_chk | 1; - } - - if (i == 0) { - break; - } - } - - //USER Search for the right edge of the window for each bit - for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) { - scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs); - if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { - uint32_t delay = d + start_dqs_en; - if (delay > IO_DQS_EN_DELAY_MAX) { - delay = IO_DQS_EN_DELAY_MAX; - } - scc_mgr_set_dqs_en_delay(read_group, delay); - } - scc_mgr_load_dqs(read_group); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - //USER Stop searching when the read test doesn't pass AND when we've seen a passing read on every bit - if (use_read_test) { - stop = - !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group, NUM_READ_PB_TESTS, - PASS_ONE_BIT, &bit_chk, 0, 0); - } else { - rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, PASS_ONE_BIT, - &bit_chk, 0); - bit_chk = - bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS * - (read_group - - (write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / - RW_MGR_MEM_IF_WRITE_DQS_WIDTH))); - stop = (bit_chk == 0); - } - sticky_bit_chk = sticky_bit_chk | bit_chk; - stop = stop && (sticky_bit_chk == param->read_correct_mask); - - DPRINT(2, "vfifo_center(right): dtap=%lu => " BTFLD_FMT " == " BTFLD_FMT " && %lu", - d, sticky_bit_chk, param->read_correct_mask, stop); - - if (stop == 1) { - break; - } else { - for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { - if (bit_chk & 1) { - //USER Remember a passing test as the right_edge - right_edge[i] = d; - } else { - if (d != 0) { - //USER If a right edge has not been seen yet, then a future passing test will mark this edge as the left edge - if (right_edge[i] == IO_IO_IN_DELAY_MAX + 1) { - left_edge[i] = -(d + 1); - } - } else { - //USER d = 0 failed, but it passed when testing the left edge, so it must be marginal, set it to -1 - if (right_edge[i] == IO_IO_IN_DELAY_MAX + 1 - && left_edge[i] != IO_IO_IN_DELAY_MAX + 1) { - right_edge[i] = -1; - } - //USER If a right edge has not been seen yet, then a future passing test will mark this edge as the left edge - else if (right_edge[i] == IO_IO_IN_DELAY_MAX + 1) { - left_edge[i] = -(d + 1); - } - - } - } - - DPRINT(2, - "vfifo_center[r,d=%lu]: bit_chk_test=%d left_edge[%lu]: %ld right_edge[%lu]: %ld", - d, (int)(bit_chk & 1), i, left_edge[i], i, right_edge[i]); - bit_chk = bit_chk >> 1; - } - } - } - - // Store all observed margins - - //USER Check that all bits have a window - for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { - DPRINT(2, "vfifo_center: left_edge[%lu]: %ld right_edge[%lu]: %ld", i, left_edge[i], - i, right_edge[i]); - BFM_GBL_SET(dq_read_left_edge[read_group][i], left_edge[i]); - BFM_GBL_SET(dq_read_right_edge[read_group][i], right_edge[i]); - if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) - || (right_edge[i] == IO_IO_IN_DELAY_MAX + 1)) { - - //USER Restore delay chain settings before letting the loop in - //USER rw_mgr_mem_calibrate_vfifo to retry different dqs/ck relationships - scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs); - if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { - scc_mgr_set_dqs_en_delay(read_group, 
start_dqs_en); - } - scc_mgr_load_dqs(read_group); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - DPRINT(1, "vfifo_center: failed to find edge [%lu]: %ld %ld", i, - left_edge[i], right_edge[i]); - if (use_read_test) { - set_failing_group_stage(read_group * RW_MGR_MEM_DQ_PER_READ_DQS + i, - CAL_STAGE_VFIFO, CAL_SUBSTAGE_VFIFO_CENTER); - } else { - set_failing_group_stage(read_group * RW_MGR_MEM_DQ_PER_READ_DQS + i, - CAL_STAGE_VFIFO_AFTER_WRITES, - CAL_SUBSTAGE_VFIFO_CENTER); - } - return 0; - } - } - - //USER Find middle of window for each DQ bit - mid_min = left_edge[0] - right_edge[0]; - min_index = 0; - for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { - mid = left_edge[i] - right_edge[i]; - if (mid < mid_min) { - mid_min = mid; - min_index = i; - } - } - - //USER -mid_min/2 represents the amount that we need to move DQS. If mid_min is odd and positive we'll need to add one to - //USER make sure the rounding in further calculations is correct (always bias to the right), so just add 1 for all positive values - if (mid_min > 0) { - mid_min++; - } - mid_min = mid_min / 2; - - DPRINT(1, "vfifo_center: mid_min=%ld (index=%lu)", mid_min, min_index); - - //USER Determine the amount we can change DQS (which is -mid_min) - orig_mid_min = mid_min; - new_dqs = start_dqs - mid_min; - if (new_dqs > IO_DQS_IN_DELAY_MAX) { - new_dqs = IO_DQS_IN_DELAY_MAX; - } else if (new_dqs < 0) { - new_dqs = 0; - } - mid_min = start_dqs - new_dqs; - DPRINT(1, "vfifo_center: new mid_min=%ld new_dqs=%ld", mid_min, new_dqs); - - if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { - if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX) { - mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX; - } else if (start_dqs_en - mid_min < 0) { - mid_min += start_dqs_en - mid_min; - } - } - new_dqs = start_dqs - mid_min; - - DPRINT(1, "vfifo_center: start_dqs=%ld start_dqs_en=%ld new_dqs=%ld mid_min=%ld", - start_dqs, IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? 
start_dqs_en : -1, new_dqs, mid_min); - - //USER Initialize data for export structures - dqs_margin = IO_IO_IN_DELAY_MAX + 1; - dq_margin = IO_IO_IN_DELAY_MAX + 1; - - //USER add delay to bring centre of all DQ windows to the same "level" - for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { - //USER Use values before divide by 2 to reduce round off error - shift_dq = - (left_edge[i] - right_edge[i] - - (left_edge[min_index] - right_edge[min_index])) / 2 + (orig_mid_min - mid_min); - - DPRINT(2, "vfifo_center: before: shift_dq[%lu]=%ld", i, shift_dq); - - if (shift_dq + (int32_t) READ_SCC_DQ_IN_DELAY(p) > (int32_t) IO_IO_IN_DELAY_MAX) { - shift_dq = (int32_t) IO_IO_IN_DELAY_MAX - READ_SCC_DQ_IN_DELAY(i); - } else if (shift_dq + (int32_t) READ_SCC_DQ_IN_DELAY(p) < 0) { - shift_dq = -(int32_t) READ_SCC_DQ_IN_DELAY(p); - } - DPRINT(2, "vfifo_center: after: shift_dq[%lu]=%ld", i, shift_dq); - final_dq[i] = READ_SCC_DQ_IN_DELAY(p) + shift_dq; - scc_mgr_set_dq_in_delay(write_group, p, final_dq[i]); - scc_mgr_load_dq(p); - - DPRINT(2, "vfifo_center: margin[%lu]=[%ld,%ld]", i, - left_edge[i] - shift_dq + (-mid_min), right_edge[i] + shift_dq - (-mid_min)); - //USER To determine values for export structures - if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) { - dq_margin = left_edge[i] - shift_dq + (-mid_min); - } - if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) { - dqs_margin = right_edge[i] + shift_dq - (-mid_min); - } - } - - final_dqs = new_dqs; - if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { - final_dqs_en = start_dqs_en - mid_min; - } - //USER Move DQS-en - if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) { - scc_mgr_set_dqs_en_delay(read_group, final_dqs_en); - scc_mgr_load_dqs(read_group); - } - //USER Move DQS - scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs); - scc_mgr_load_dqs(read_group); - - if (update_fom) { - //USER Export values - gbl->fom_in += - (dq_margin + - dqs_margin) / (RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH); - } - - DPRINT(2, "vfifo_center: dq_margin=%ld dqs_margin=%ld", dq_margin, dqs_margin); - - //USER Do not remove this line as it makes sure all of our decisions have been applied - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - return (dq_margin >= 0) && (dqs_margin >= 0); -} - -#else - -static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn, uint32_t grp, - uint32_t test_bgn, uint32_t use_read_test) -{ - uint32_t i, p, d; - uint32_t mid; - t_btfld bit_chk; - uint32_t max_working_dq[RW_MGR_MEM_DQ_PER_READ_DQS]; - uint32_t dq_margin, dqs_margin; - uint32_t start_dqs; - - //USER per-bit deskew. - //USER start of the per-bit sweep with the minimum working delay setting for - //USER all bits. 
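/*
 * Standalone sketch (not part of this patch) of the per-bit centering shift
 * computed above: each bit is moved by half of how much wider its window is
 * than the narrowest window, corrected by the amount DQS itself moved, and
 * clamped so the resulting delay-chain setting stays within [0, max].
 * The names are illustrative, not the real sequencer API.
 */
#include <stdint.h>

static int32_t clamp_shift(int32_t shift, int32_t current, int32_t max)
{
	if (shift + current > max)
		return max - current;	/* would run off the end of the chain */
	if (shift + current < 0)
		return -current;	/* would go below zero delay */
	return shift;
}

static int32_t centering_shift(int32_t left, int32_t right,
			       int32_t min_left, int32_t min_right,
			       int32_t dqs_correction,	/* orig_mid_min - mid_min */
			       int32_t current, int32_t max)
{
	int32_t shift = (left - right - (min_left - min_right)) / 2 + dqs_correction;

	return clamp_shift(shift, current, max);
}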
- - for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { - max_working_dq[i] = 0; - } - - for (d = 1; d <= IO_IO_IN_DELAY_MAX; d++) { - scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - if (!rw_mgr_mem_calibrate_read_test - (rank_bgn, grp, NUM_READ_PB_TESTS, PASS_ONE_BIT, &bit_chk, 0, 0)) { - break; - } else { - for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { - if (bit_chk & 1) { - max_working_dq[i] = d; - } - bit_chk = bit_chk >> 1; - } - } - } - - //USER determine minimum working value for DQ - - dq_margin = IO_IO_IN_DELAY_MAX; - - for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) { - if (max_working_dq[i] < dq_margin) { - dq_margin = max_working_dq[i]; - } - } - - //USER add delay to bring all DQ windows to the same "level" - - for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { - if (max_working_dq[i] > dq_margin) { - scc_mgr_set_dq_in_delay(write_group, i, max_working_dq[i] - dq_margin); - } else { - scc_mgr_set_dq_in_delay(write_group, i, 0); - } - - scc_mgr_load_dq(p, p); - } - - //USER sweep DQS window, may potentially have more window due to per-bit-deskew that was done - //USER in the previous step. - - start_dqs = READ_SCC_DQS_IN_DELAY(grp); - - for (d = start_dqs + 1; d <= IO_DQS_IN_DELAY_MAX; d++) { - scc_mgr_set_dqs_bus_in_delay(grp, d); - scc_mgr_load_dqs(grp); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - if (!rw_mgr_mem_calibrate_read_test - (rank_bgn, grp, NUM_READ_TESTS, PASS_ALL_BITS, &bit_chk, 0, 0)) { - break; - } - } - - scc_mgr_set_dqs_bus_in_delay(grp, start_dqs); - - //USER margin on the DQS pin - - dqs_margin = d - start_dqs - 1; - - //USER find mid point, +1 so that we don't go crazy pushing DQ - - mid = (dq_margin + dqs_margin + 1) / 2; - - gbl->fom_in += dq_margin + dqs_margin; -// TCLRPT_SET(debug_summary_report->fom_in, debug_summary_report->fom_in + (dq_margin + dqs_margin)); -// TCLRPT_SET(debug_cal_report->cal_status_per_group[grp].fom_in, (dq_margin + dqs_margin)); - - //USER center DQS ... if the headroom is setup properly we shouldn't need to - - if (dqs_margin > mid) { - scc_mgr_set_dqs_bus_in_delay(grp, READ_SCC_DQS_IN_DELAY(grp) + dqs_margin - mid); - - if (DDRX) { - uint32_t delay = READ_SCC_DQS_EN_DELAY(grp) + dqs_margin - mid; - - if (delay > IO_DQS_EN_DELAY_MAX) { - delay = IO_DQS_EN_DELAY_MAX; - } - - scc_mgr_set_dqs_en_delay(grp, delay); - } - } - - scc_mgr_load_dqs(grp); - - //USER center DQ - - if (dq_margin > mid) { - for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) { - scc_mgr_set_dq_in_delay(write_group, i, - READ_SCC_DQ_IN_DELAY(i) + dq_margin - mid); - scc_mgr_load_dq(p, p); - } - - dqs_margin += dq_margin - mid; - dq_margin -= dq_margin - mid; - } - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - return (dq_margin + dqs_margin) > 0; -} - -#endif - -//USER calibrate the read valid prediction FIFO. -//USER -//USER - read valid prediction will consist of finding a good DQS enable phase, DQS enable delay, DQS input phase, and DQS input delay. -//USER - we also do a per-bit deskew on the DQ lines. 
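/*
 * Standalone sketch (not part of this patch) of the simpler levelling scheme
 * in the fallback path above: take the smallest per-bit "maximum working
 * delay" as the common margin and pad every other bit up to it so all DQ
 * windows line up at the same level. NBITS is an illustrative placeholder.
 */
#include <stdint.h>

#define NBITS 8

static uint32_t level_dq_windows(const uint32_t max_working_dq[NBITS],
				 uint32_t extra_delay[NBITS] /* out: per-bit padding */)
{
	uint32_t i, dq_margin = max_working_dq[0];

	for (i = 1; i < NBITS; i++)		/* the weakest bit sets the margin */
		if (max_working_dq[i] < dq_margin)
			dq_margin = max_working_dq[i];

	for (i = 0; i < NBITS; i++)		/* pad the stronger bits down to it */
		extra_delay[i] = max_working_dq[i] - dq_margin;

	return dq_margin;
}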
- -#if NEWVERSION_GW - -//USER VFIFO Calibration -- Full Calibration -static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group, uint32_t test_bgn) -{ - uint32_t p, d, rank_bgn, sr; - uint32_t dtaps_per_ptap; - uint32_t tmp_delay; - t_btfld bit_chk; - uint32_t grp_calibrated; - uint32_t write_group, write_test_bgn; - uint32_t failed_substage; - uint32_t dqs_in_dtaps, orig_start_dqs; - - //USER update info for sims - - reg_file_set_stage(CAL_STAGE_VFIFO); - - if (DDRX) { - write_group = read_group; - write_test_bgn = test_bgn; - } else { - write_group = - read_group / (RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH); - write_test_bgn = read_group * RW_MGR_MEM_DQ_PER_READ_DQS; - } - - // USER Determine number of delay taps for each phase tap - dtaps_per_ptap = 0; - tmp_delay = 0; - if (!QDRII) { - while (tmp_delay < IO_DELAY_PER_OPA_TAP) { - dtaps_per_ptap++; - tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP; - } - dtaps_per_ptap--; - tmp_delay = 0; - } - //USER update info for sims - - reg_file_set_group(read_group); - - grp_calibrated = 0; - - reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ); - failed_substage = CAL_SUBSTAGE_GUARANTEED_READ; - - for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) { - - if (DDRX || RLDRAMX) { - // In RLDRAMX we may be messing the delay of pins in the same write group but outside of - // the current read group, but that's ok because we haven't calibrated the output side yet. - if (d > 0) { - scc_mgr_apply_group_all_out_delay_add_all_ranks(write_group, - write_test_bgn, d); - } - } - - for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0; p++) { - //USER set a particular dqdqs phase - if (DDRX) { - scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p); - } - //USER Previous iteration may have failed as a result of ck/dqs or ck/dk violation, - //USER in which case the device may require special recovery. 
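/*
 * Standalone sketch (not part of this patch) of the dtaps-per-ptap
 * computation done at the top of the VFIFO calibration above: count how
 * many fine delay-chain taps fit inside one coarse phase tap, then drop the
 * tap that crossed the boundary. Picosecond units are purely illustrative.
 */
#include <stdint.h>

static uint32_t dtaps_per_ptap(uint32_t ps_per_ptap, uint32_t ps_per_dtap)
{
	uint32_t taps = 0, acc = 0;

	while (acc < ps_per_ptap) {	/* accumulate fine taps until a coarse tap is covered */
		taps++;
		acc += ps_per_dtap;
	}
	return taps - 1;		/* the last tap overshot the phase tap, discard it */
}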
- if (DDRX || RLDRAMX) { - if (d != 0 || p != 0) { - recover_mem_device_after_ck_dqs_violation(); - } - } - - DPRINT(1, "calibrate_vfifo: g=%lu p=%lu d=%lu", read_group, p, d); - BFM_GBL_SET(gwrite_pos[read_group].p, p); - BFM_GBL_SET(gwrite_pos[read_group].d, d); - - //USER Load up the patterns used by read calibration using current DQDQS phase - - rw_mgr_mem_calibrate_read_load_patterns_all_ranks(); - - if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)) { - if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks - (read_group, 1, &bit_chk)) { - DPRINT(1, "Guaranteed read test failed: g=%lu p=%lu d=%lu", - read_group, p, d); - break; - } - } - // Loop over different DQS in delay chains for the purpose of DQS Enable calibration finding one bit working - orig_start_dqs = READ_SCC_DQS_IN_DELAY(read_group); - for (dqs_in_dtaps = orig_start_dqs; - dqs_in_dtaps <= IO_DQS_IN_DELAY_MAX && grp_calibrated == 0; - dqs_in_dtaps++) { - - for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; - rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { - - if (!param->skip_shadow_regs[sr]) { - - //USER Select shadow register set - select_shadow_regs_for_update(rank_bgn, read_group, - 1); - - WRITE_SCC_DQS_IN_DELAY(read_group, dqs_in_dtaps); - scc_mgr_load_dqs(read_group); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } - } - -// case:56390 - grp_calibrated = 1; - if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay - (write_group, read_group, test_bgn)) { - // USER Read per-bit deskew can be done on a per shadow register basis - for (rank_bgn = 0, sr = 0; - rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; - rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { - //USER Determine if this set of ranks should be skipped entirely - if (!param->skip_shadow_regs[sr]) { - - //USER Select shadow register set - select_shadow_regs_for_update(rank_bgn, - read_group, - 1); - - // Before doing read deskew, set DQS in back to the reserve value - WRITE_SCC_DQS_IN_DELAY(read_group, - orig_start_dqs); - scc_mgr_load_dqs(read_group); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - // If doing read after write calibration, do not update FOM now - do it then - if (!rw_mgr_mem_calibrate_vfifo_center - (rank_bgn, write_group, read_group, - test_bgn, 1, 0)) { - grp_calibrated = 0; - failed_substage = - CAL_SUBSTAGE_VFIFO_CENTER; - } - } - } - } else { - grp_calibrated = 0; - failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; - } - } - - } - } - - if (grp_calibrated == 0) { - set_failing_group_stage(write_group, CAL_STAGE_VFIFO, failed_substage); - - return 0; - } - //USER Reset the delay chains back to zero if they have moved > 1 (check for > 1 because loop will increase d even when pass in first case) - if (DDRX || RLDRAMII) { - if (d > 2) { - scc_mgr_zero_group(write_group, write_test_bgn, 1); - } - } - - return 1; -} - -#else - -//USER VFIFO Calibration -- Full Calibration -static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t g, uint32_t test_bgn) -{ - uint32_t p, rank_bgn, sr; - uint32_t grp_calibrated; - uint32_t failed_substage; - - //USER update info for sims - - reg_file_set_stage(CAL_STAGE_VFIFO); - - reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ); - - failed_substage = CAL_SUBSTAGE_GUARANTEED_READ; - - //USER update info for sims - - reg_file_set_group(g); - - grp_calibrated = 0; - - for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0; p++) { - //USER set a particular dqdqs phase - if (DDRX) { - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); - } - //USER Load up the patterns used by read calibration using 
current DQDQS phase - - rw_mgr_mem_calibrate_read_load_patterns_all_ranks(); - if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)) { - if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks - (read_group, 1, &bit_chk)) { - break; - } - } - - grp_calibrated = 1; - if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay(g, g, test_bgn)) { - // USER Read per-bit deskew can be done on a per shadow register basis - for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; - rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { - - //USER Determine if this set of ranks should be skipped entirely - if (!param->skip_shadow_regs[sr]) { - - //USER Select shadow register set - select_shadow_regs_for_update(rank_bgn, read_group, 1); - - if (!rw_mgr_mem_calibrate_vfifo_center - (rank_bgn, g, test_bgn, 1)) { - grp_calibrated = 0; - failed_substage = CAL_SUBSTAGE_VFIFO_CENTER; - } - } - } - } else { - grp_calibrated = 0; - failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; - } - } - - if (grp_calibrated == 0) { - set_failing_group_stage(g, CAL_STAGE_VFIFO, failed_substage); - return 0; - } - - return 1; -} - -#endif - -//USER VFIFO Calibration -- Read Deskew Calibration after write deskew -static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group, uint32_t test_bgn) -{ - uint32_t rank_bgn, sr; - uint32_t grp_calibrated; - uint32_t write_group; - - //USER update info for sims - - reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES); - reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); - - if (DDRX) { - write_group = read_group; - } else { - write_group = - read_group / (RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH); - } - - //USER update info for sims - reg_file_set_group(read_group); - - grp_calibrated = 1; - // USER Read per-bit deskew can be done on a per shadow register basis - for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; - rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { - - //USER Determine if this set of ranks should be skipped entirely - if (!param->skip_shadow_regs[sr]) { - - //USER Select shadow register set - select_shadow_regs_for_update(rank_bgn, read_group, 1); - - // This is the last calibration round, update FOM here - if (!rw_mgr_mem_calibrate_vfifo_center - (rank_bgn, write_group, read_group, test_bgn, 0, 1)) { - grp_calibrated = 0; - } - } - } - - if (grp_calibrated == 0) { - set_failing_group_stage(write_group, CAL_STAGE_VFIFO_AFTER_WRITES, - CAL_SUBSTAGE_VFIFO_CENTER); - return 0; - } - - return 1; -} - -//USER Calibrate LFIFO to find smallest read latency - -static uint32_t rw_mgr_mem_calibrate_lfifo(void) -{ - uint32_t found_one; - t_btfld bit_chk; - - //USER update info for sims - - reg_file_set_stage(CAL_STAGE_LFIFO); - reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY); - - //USER Load up the patterns used by read calibration for all ranks - - rw_mgr_mem_calibrate_read_load_patterns_all_ranks(); - - found_one = 0; - - do { - IOWR_32DIRECT(PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat); - DPRINT(2, "lfifo: read_lat=%lu", gbl->curr_read_lat); - - if (!rw_mgr_mem_calibrate_read_test_all_ranks - (0, NUM_READ_TESTS, PASS_ALL_BITS, &bit_chk, 1)) { - break; - } - - found_one = 1; - - //USER reduce read latency and see if things are working - //USER correctly - - gbl->curr_read_lat--; - } while (gbl->curr_read_lat > 0); - - //USER reset the fifos to get pointers to known state - - IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); - - if (found_one) { - //USER add a fudge factor to the read latency that was determined - gbl->curr_read_lat 
+= 2; - IOWR_32DIRECT(PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat); - - DPRINT(2, "lfifo: success: using read_lat=%lu", gbl->curr_read_lat); - - return 1; - } else { - set_failing_group_stage(0xff, CAL_STAGE_LFIFO, CAL_SUBSTAGE_READ_LATENCY); - - DPRINT(2, "lfifo: failed at initial read_lat=%lu", gbl->curr_read_lat); - - return 0; - } -} - -//USER issue write test command. -//USER two variants are provided. one that just tests a write pattern and another that -//USER tests datamask functionality. - -static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group, uint32_t test_dm) -{ - uint32_t mcc_instruction; - uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) - && ENABLE_SUPER_QUICK_CALIBRATION) || BFM_MODE; - uint32_t rw_wl_nop_cycles; - - //USER Set counter and jump addresses for the right - //USER number of NOP cycles. - //USER The number of supported NOP cycles can range from -1 to infinity - //USER Three different cases are handled: - //USER - //USER 1. For a number of NOP cycles greater than 0, the RW Mgr looping - //USER mechanism will be used to insert the right number of NOPs - //USER - //USER 2. For a number of NOP cycles equals to 0, the micro-instruction - //USER issuing the write command will jump straight to the micro-instruction - //USER that turns on DQS (for DDRx), or outputs write data (for RLD), skipping - //USER the NOP micro-instruction all together - //USER - //USER 3. A number of NOP cycles equal to -1 indicates that DQS must be turned - //USER on in the same micro-instruction that issues the write command. Then we need - //USER to directly jump to the micro-instruction that sends out the data - //USER - //USER NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters (2 and 3). One - //USER jump-counter (0) is used to perform multiple write-read operations. - //USER one counter left to issue this command in "multiple-group" mode. - - rw_wl_nop_cycles = gbl->rw_wl_nop_cycles; - - if (rw_wl_nop_cycles == -1) { - //USER CNTR 2 - We want to execute the special write operation that - //USER turns on DQS right away and then skip directly to the instruction that - //USER sends out the data. We set the counter to a large number so that the - //USER jump is always taken - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0xFF); - - //USER CNTR 3 - Not used - if (test_dm) { - mcc_instruction = __RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1; - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, - __RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP); - } else { - mcc_instruction = __RW_MGR_LFSR_WR_RD_BANK_0_WL_1; - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_LFSR_WR_RD_BANK_0_DATA); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_LFSR_WR_RD_BANK_0_NOP); - } - - } else if (rw_wl_nop_cycles == 0) { - //USER CNTR 2 - We want to skip the NOP operation and go straight to - //USER the DQS enable instruction. We set the counter to a large number so that the - //USER jump is always taken - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0xFF); - - //USER CNTR 3 - Not used - if (test_dm) { - mcc_instruction = __RW_MGR_LFSR_WR_RD_DM_BANK_0; - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS); - } else { - mcc_instruction = __RW_MGR_LFSR_WR_RD_BANK_0; - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, __RW_MGR_LFSR_WR_RD_BANK_0_DQS); - } - - } else { - //USER CNTR 2 - In this case we want to execute the next instruction and NOT - //USER take the jump. So we set the counter to 0. 
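/*
 * Standalone sketch (not part of this patch) of the LFIFO idea above: keep
 * lowering the read latency while the read test still passes, then stop at
 * the first failure and add the same fudge factor of 2 used in the code.
 * The read_ok() callback and everything else here are illustrative.
 */
#include <stdint.h>

static uint32_t minimise_read_latency(uint32_t start_lat,
				      int (*read_ok)(uint32_t lat),
				      uint32_t *final_lat)
{
	uint32_t lat = start_lat;
	int found_one = 0;

	while (lat > 0 && read_ok(lat)) {
		found_one = 1;
		lat--;			/* tighten until the first failure (or zero) */
	}

	if (!found_one)
		return 0;		/* even the initial latency did not work */

	*final_lat = lat + 2;		/* back off over the failing value plus margin */
	return 1;
}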
The jump address doesn't count - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_2, 0, 0x0); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_2, 0, 0x0); - - //USER CNTR 3 - Set the nop counter to the number of cycles we need to loop for, minus 1 - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_3, 0, rw_wl_nop_cycles - 1); - if (test_dm) { - mcc_instruction = __RW_MGR_LFSR_WR_RD_DM_BANK_0; - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP); - } else { - mcc_instruction = __RW_MGR_LFSR_WR_RD_BANK_0; - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_3, 0, __RW_MGR_LFSR_WR_RD_BANK_0_NOP); - } - } - - IOWR_32DIRECT(RW_MGR_RESET_READ_DATAPATH, 0, 0); - - if (quick_write_mode) { - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x08); - } else { - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x40); - } - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, mcc_instruction); - - //USER CNTR 1 - This is used to ensure enough time elapses for read data to come back. - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x30); - - if (test_dm) { - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT); - } else { - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_LFSR_WR_RD_BANK_0_WAIT); - } - - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, (group << 2), mcc_instruction); - -} - -//USER Test writes, can check for a single bit pass or multiple bit pass - -static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn, uint32_t write_group, - uint32_t use_dm, uint32_t all_correct, - t_btfld * bit_chk, uint32_t all_ranks) -{ - uint32_t r; - t_btfld correct_mask_vg; - t_btfld tmp_bit_chk; - uint32_t vg; - uint32_t rank_end = - all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); - - *bit_chk = param->write_correct_mask; - correct_mask_vg = param->write_correct_mask_vg; - - for (r = rank_bgn; r < rank_end; r++) { - if (param->skip_ranks[r]) { - //USER request to skip the rank - - continue; - } - //USER set rank - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); - - tmp_bit_chk = 0; - for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1;; vg--) { - - //USER reset the fifos to get pointers to known state - IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); - - tmp_bit_chk = - tmp_bit_chk << (RW_MGR_MEM_DQ_PER_WRITE_DQS / - RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS); - rw_mgr_mem_calibrate_write_test_issue(write_group * - RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - + vg, use_dm); - - tmp_bit_chk = - tmp_bit_chk | (correct_mask_vg & ~(IORD_32DIRECT(BASE_RW_MGR, 0))); - DPRINT(2, - "write_test(%lu,%lu,%lu) :[%lu,%lu] " BTFLD_FMT " & ~%x => " - BTFLD_FMT " => " BTFLD_FMT, write_group, use_dm, all_correct, r, vg, - correct_mask_vg, IORD_32DIRECT(BASE_RW_MGR, 0), - correct_mask_vg & ~IORD_32DIRECT(BASE_RW_MGR, 0), tmp_bit_chk); - - if (vg == 0) { - break; - } - } - *bit_chk &= tmp_bit_chk; - } - - if (all_correct) { - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); - DPRINT(2, "write_test(%lu,%lu,ALL) : " BTFLD_FMT " == " BTFLD_FMT " => %lu", - write_group, use_dm, *bit_chk, param->write_correct_mask, - (long unsigned int)(*bit_chk == param->write_correct_mask)); - return (*bit_chk == param->write_correct_mask); - } else { - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); - DPRINT(2, "write_test(%lu,%lu,ONE) : " BTFLD_FMT " != " BTFLD_FMT " => %lu", - write_group, use_dm, *bit_chk, (long unsigned int)0, - (long unsigned int)(*bit_chk != 0)); - return (*bit_chk != 0x00); - } -} - -static inline uint32_t rw_mgr_mem_calibrate_write_test_all_ranks(uint32_t write_group, - uint32_t use_dm, - uint32_t all_correct, - t_btfld * bit_chk) -{ - return 
rw_mgr_mem_calibrate_write_test(0, write_group, use_dm, all_correct, bit_chk, 1); -} - -//USER level the write operations - -#if NEWVERSION_WL - -//USER Write Levelling -- Full Calibration -static uint32_t rw_mgr_mem_calibrate_wlevel(uint32_t g, uint32_t test_bgn) -{ - uint32_t p, d; - - uint32_t num_additional_fr_cycles = 0; - - t_btfld bit_chk; - uint32_t work_bgn, work_end, work_mid; - uint32_t tmp_delay; - uint32_t found_begin; - uint32_t dtaps_per_ptap; - - //USER update info for sims - - reg_file_set_stage(CAL_STAGE_WLEVEL); - reg_file_set_sub_stage(CAL_SUBSTAGE_WORKING_DELAY); - - //USER maximum phases for the sweep - - dtaps_per_ptap = IORD_32DIRECT(REG_FILE_DTAPS_PER_PTAP, 0); - - //USER starting phases - - //USER update info for sims - - reg_file_set_group(g); - - //USER starting and end range where writes work - - scc_mgr_spread_out2_delay_all_ranks(g, test_bgn); - - work_bgn = 0; - work_end = 0; - - //USER step 1: find first working phase, increment in ptaps, and then in dtaps if ptaps doesn't find a working phase - found_begin = 0; - tmp_delay = 0; - for (d = 0; d <= dtaps_per_ptap; d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) { - scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); - - work_bgn = tmp_delay; - - for (p = 0; - p <= IO_DQDQS_OUT_PHASE_MAX + num_additional_fr_cycles * IO_DLL_CHAIN_LENGTH; - p++, work_bgn += IO_DELAY_PER_OPA_TAP) { - DPRINT(2, "wlevel: begin-1: p=%lu d=%lu", p, d); - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); - - if (rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { - found_begin = 1; - break; - } - } - - if (found_begin) { - break; - } - } - - if (p > IO_DQDQS_OUT_PHASE_MAX + num_additional_fr_cycles * IO_DLL_CHAIN_LENGTH) { - //USER fail, cannot find first working phase - - set_failing_group_stage(g, CAL_STAGE_WLEVEL, CAL_SUBSTAGE_WORKING_DELAY); - - return 0; - } - - DPRINT(2, "wlevel: first valid p=%lu d=%lu", p, d); - - reg_file_set_sub_stage(CAL_SUBSTAGE_LAST_WORKING_DELAY); - - //USER If d is 0 then the working window covers a phase tap and we can follow the old procedure - //USER otherwise, we've found the beginning, and we need to increment the dtaps until we find the end - if (d == 0) { - COV(WLEVEL_PHASE_PTAP_OVERLAP); - work_end = work_bgn + IO_DELAY_PER_OPA_TAP; - - //USER step 2: if we have room, back off by one and increment in dtaps - - if (p > 0) { - int found = 0; - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p - 1); - - tmp_delay = work_bgn - IO_DELAY_PER_OPA_TAP; - - for (d = 0; d <= IO_IO_OUT1_DELAY_MAX && tmp_delay < work_bgn; - d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) { - DPRINT(2, "wlevel: begin-2: p=%lu d=%lu", (p - 1), d); - scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); - - if (rw_mgr_mem_calibrate_write_test_all_ranks - (g, 0, PASS_ONE_BIT, &bit_chk)) { - found = 1; - work_bgn = tmp_delay; - break; - } - } - - { - uint32_t d2; - uint32_t p2; - if (found) { - d2 = d; - p2 = p - 1; - } else { - d2 = 0; - p2 = p; - } - - DPRINT(2, "wlevel: found begin-A: p=%lu d=%lu ps=%lu", p2, d2, - work_bgn); - - BFM_GBL_SET(dqs_wlevel_left_edge[g].p, p2); - BFM_GBL_SET(dqs_wlevel_left_edge[g].d, d2); - BFM_GBL_SET(dqs_wlevel_left_edge[g].ps, work_bgn); - } - - scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, 0); - } else { - DPRINT(2, "wlevel: found begin-B: p=%lu d=%lu ps=%lu", p, d, work_bgn); - - BFM_GBL_SET(dqs_wlevel_left_edge[g].p, p); - BFM_GBL_SET(dqs_wlevel_left_edge[g].d, d); - BFM_GBL_SET(dqs_wlevel_left_edge[g].ps, work_bgn); - } - - //USER step 3: go forward from 
working phase to non working phase, increment in ptaps - - for (p = p + 1; - p <= IO_DQDQS_OUT_PHASE_MAX + num_additional_fr_cycles * IO_DLL_CHAIN_LENGTH; - p++, work_end += IO_DELAY_PER_OPA_TAP) { - DPRINT(2, "wlevel: end-0: p=%lu d=%lu", p, (long unsigned int)0); - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); - - if (!rw_mgr_mem_calibrate_write_test_all_ranks - (g, 0, PASS_ONE_BIT, &bit_chk)) { - break; - } - } - - //USER step 4: back off one from last, increment in dtaps - //USER The actual increment is done outside the if/else statement since it is shared with other code - - p = p - 1; - - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); - - work_end -= IO_DELAY_PER_OPA_TAP; - d = 0; - - } else { - //USER step 5: Window doesn't cover phase tap, just increment dtaps until failure - //USER The actual increment is done outside the if/else statement since it is shared with other code - COV(WLEVEL_PHASE_PTAP_NO_OVERLAP); - work_end = work_bgn; - DPRINT(2, "wlevel: found begin-C: p=%lu d=%lu ps=%lu", p, d, work_bgn); - BFM_GBL_SET(dqs_wlevel_left_edge[g].p, p); - BFM_GBL_SET(dqs_wlevel_left_edge[g].d, d); - BFM_GBL_SET(dqs_wlevel_left_edge[g].ps, work_bgn); - - } - - //USER The actual increment until failure - for (; d <= IO_IO_OUT1_DELAY_MAX; d++, work_end += IO_DELAY_PER_DCHAIN_TAP) { - DPRINT(2, "wlevel: end: p=%lu d=%lu", p, d); - scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); - - if (!rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { - break; - } - } - scc_mgr_zero_group(g, test_bgn, 1); - - work_end -= IO_DELAY_PER_DCHAIN_TAP; - - if (work_end >= work_bgn) { - //USER we have a working range - } else { - //USER nil range - - set_failing_group_stage(g, CAL_STAGE_WLEVEL, CAL_SUBSTAGE_LAST_WORKING_DELAY); - - return 0; - } - - DPRINT(2, "wlevel: found end: p=%lu d=%lu; range: [%lu,%lu]", p, d - 1, work_bgn, work_end); - BFM_GBL_SET(dqs_wlevel_right_edge[g].p, p); - BFM_GBL_SET(dqs_wlevel_right_edge[g].d, d - 1); - BFM_GBL_SET(dqs_wlevel_right_edge[g].ps, work_end); - - //USER center - - work_mid = (work_bgn + work_end) / 2; - - DPRINT(2, "wlevel: work_mid=%ld", work_mid); - - tmp_delay = 0; - - for (p = 0; - p <= IO_DQDQS_OUT_PHASE_MAX + num_additional_fr_cycles * IO_DLL_CHAIN_LENGTH - && tmp_delay < work_mid; p++, tmp_delay += IO_DELAY_PER_OPA_TAP) ; - - if (tmp_delay > work_mid) { - tmp_delay -= IO_DELAY_PER_OPA_TAP; - p--; - } - - while (p > IO_DQDQS_OUT_PHASE_MAX) { - tmp_delay -= IO_DELAY_PER_OPA_TAP; - p--; - } - - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); - - DPRINT(2, "wlevel: p=%lu tmp_delay=%lu left=%lu", p, tmp_delay, work_mid - tmp_delay); - - for (d = 0; d <= IO_IO_OUT1_DELAY_MAX && tmp_delay < work_mid; - d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) ; - - if (tmp_delay > work_mid) { - tmp_delay -= IO_DELAY_PER_DCHAIN_TAP; - d--; - } - - DPRINT(2, "wlevel: p=%lu d=%lu tmp_delay=%lu left=%lu", p, d, tmp_delay, - work_mid - tmp_delay); - - scc_mgr_apply_group_all_out_delay_add_all_ranks(g, test_bgn, d); - - DPRINT(2, "wlevel: found middle: p=%lu d=%lu", p, d); - BFM_GBL_SET(dqs_wlevel_mid[g].p, p); - BFM_GBL_SET(dqs_wlevel_mid[g].d, d); - BFM_GBL_SET(dqs_wlevel_mid[g].ps, work_mid); - - return 1; -} - -#else - -//USER Write Levelling -- Full Calibration -static uint32_t rw_mgr_mem_calibrate_wlevel(uint32_t g, uint32_t test_bgn) -{ - uint32_t p, d; - t_btfld bit_chk; - uint32_t work_bgn, work_end, work_mid; - uint32_t tmp_delay; - - //USER update info for sims - - reg_file_set_stage(CAL_STAGE_WLEVEL); - 
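/*
 * Standalone sketch (not part of this patch) of the write-levelling step-1
 * search above: sweep extra output delay-chain taps in the outer loop and
 * DQ/DQS output phases in the inner loop, tracking the accumulated delay in
 * picoseconds, until the first passing write test is found. All names and
 * the write_ok() callback are illustrative placeholders.
 */
#include <stdint.h>

struct wl_begin { uint32_t p, d, ps; int found; };

static struct wl_begin find_first_working(uint32_t ptap_max, uint32_t ps_per_ptap,
					  uint32_t dtaps_per_ptap, uint32_t ps_per_dtap,
					  int (*write_ok)(uint32_t p, uint32_t d))
{
	struct wl_begin r = { 0, 0, 0, 0 };
	uint32_t p, d;

	for (d = 0; d <= dtaps_per_ptap; d++) {
		uint32_t ps = d * ps_per_dtap;	/* delay contributed by the extra dtaps */

		for (p = 0; p <= ptap_max; p++, ps += ps_per_ptap) {
			if (write_ok(p, d)) {
				r.p = p;
				r.d = d;
				r.ps = ps;
				r.found = 1;
				return r;
			}
		}
	}
	return r;	/* found == 0: no working phase/delay combination */
}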
reg_file_set_sub_stage(CAL_SUBSTAGE_WORKING_DELAY); - - //USER maximum phases for the sweep - - //USER starting phases - - //USER update info for sims - - reg_file_set_group(g); - - //USER starting and end range where writes work - - work_bgn = 0; - work_end = 0; - - //USER step 1: find first working phase, increment in ptaps - - for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++, work_bgn += IO_DELAY_PER_OPA_TAP) { - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); - - if (rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { - break; - } - } - - if (p > IO_DQDQS_OUT_PHASE_MAX) { - //USER fail, cannot find first working phase - - set_failing_group_stage(g, CAL_STAGE_WLEVEL, CAL_SUBSTAGE_WORKING_DELAY); - - return 0; - } - - work_end = work_bgn + IO_DELAY_PER_OPA_TAP; - - reg_file_set_sub_stage(CAL_SUBSTAGE_LAST_WORKING_DELAY); - - //USER step 2: if we have room, back off by one and increment in dtaps - - if (p > 0) { - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p - 1); - - tmp_delay = work_bgn - IO_DELAY_PER_OPA_TAP; - - for (d = 0; d <= IO_IO_OUT1_DELAY_MAX && tmp_delay < work_bgn; - d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) { - scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); - - if (rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { - work_bgn = tmp_delay; - break; - } - } - - scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, 0); - } - //USER step 3: go forward from working phase to non working phase, increment in ptaps - - for (p = p + 1; p <= IO_DQDQS_OUT_PHASE_MAX; p++, work_end += IO_DELAY_PER_OPA_TAP) { - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p); - - if (!rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { - break; - } - } - - //USER step 4: back off one from last, increment in dtaps - - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p - 1); - - work_end -= IO_DELAY_PER_OPA_TAP; - - for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++, work_end += IO_DELAY_PER_DCHAIN_TAP) { - scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, d); - - if (!rw_mgr_mem_calibrate_write_test_all_ranks(g, 0, PASS_ONE_BIT, &bit_chk)) { - break; - } - } - - scc_mgr_apply_group_all_out_delay_all_ranks(g, test_bgn, 0); - - if (work_end > work_bgn) { - //USER we have a working range - } else { - //USER nil range - - set_failing_group_stage(g, CAL_STAGE_WLEVEL, CAL_SUBSTAGE_LAST_WORKING_DELAY); - - return 0; - } - - //USER center - - work_mid = (work_bgn + work_end) / 2; - - tmp_delay = 0; - - for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && tmp_delay < work_mid; - p++, tmp_delay += IO_DELAY_PER_OPA_TAP) ; - - tmp_delay -= IO_DELAY_PER_OPA_TAP; - - scc_mgr_set_dqdqs_output_phase_all_ranks(g, p - 1); - - for (d = 0; d <= IO_IO_OUT1_DELAY_MAX && tmp_delay < work_mid; - d++, tmp_delay += IO_DELAY_PER_DCHAIN_TAP) ; - - scc_mgr_apply_group_all_out_delay_add_all_ranks(g, test_bgn, d - 1); - - return 1; -} - -#endif - -//USER center all windows. 
do per-bit-deskew to possibly increase size of certain windows - -#if NEWVERSION_WRDESKEW - -static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn, uint32_t write_group, - uint32_t test_bgn) -{ - uint32_t i, p, min_index; - int32_t d; - //USER Store these as signed since there are comparisons with signed numbers - t_btfld bit_chk; - t_btfld sticky_bit_chk; - int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; - int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS]; - int32_t mid; - int32_t mid_min, orig_mid_min; - int32_t new_dqs, start_dqs, shift_dq; - int32_t dq_margin, dqs_margin, dm_margin; - uint32_t stop; - int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; - int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1; - int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1; - int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1; - int32_t win_best = 0; - - dm_margin = 0; - - start_dqs = READ_SCC_DQS_IO_OUT1_DELAY(); - - select_curr_shadow_reg_using_rank(rank_bgn); - - //USER per-bit deskew - - //USER set the left and right edge of each bit to an illegal value - //USER use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value - sticky_bit_chk = 0; - for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; - right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; - } - - //USER Search for the left edge of the window for each bit - for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) { - scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - //USER Stop searching when the read test doesn't pass AND when we've seen a passing read on every bit - stop = - !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, PASS_ONE_BIT, - &bit_chk, 0); - sticky_bit_chk = sticky_bit_chk | bit_chk; - stop = stop && (sticky_bit_chk == param->write_correct_mask); - DPRINT(2, - "write_center(left): dtap=%lu => " BTFLD_FMT " == " BTFLD_FMT - " && %lu [bit_chk=" BTFLD_FMT "]", d, sticky_bit_chk, - param->write_correct_mask, stop, bit_chk); - - if (stop == 1) { - break; - } else { - for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - if (bit_chk & 1) { - //USER Remember a passing test as the left_edge - left_edge[i] = d; - } else { - //USER If a left edge has not been seen yet, then a future passing test will mark this edge as the right edge - if (left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) { - right_edge[i] = -(d + 1); - } - } - DPRINT(2, - "write_center[l,d=%lu): bit_chk_test=%d left_edge[%lu]: %ld right_edge[%lu]: %ld", - d, (int)(bit_chk & 1), i, left_edge[i], i, right_edge[i]); - bit_chk = bit_chk >> 1; - } - } - } - - //USER Reset DQ delay chains to 0 - scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0); - sticky_bit_chk = 0; - for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) { - - DPRINT(2, "write_center: left_edge[%lu]: %ld right_edge[%lu]: %ld", i, left_edge[i], - i, right_edge[i]); - - //USER Check for cases where we haven't found the left edge, which makes our assignment of the the - //USER right edge invalid. Reset it to the illegal value. 
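/*
 * Standalone sketch (not part of this patch) of the sentinel handling just
 * above: (max delay + 1) serves as the "edge not seen" marker, and a
 * provisional right edge recorded before any left edge was ever found is
 * thrown away, since it cannot describe a real window. NBITS is an
 * illustrative placeholder.
 */
#include <stdint.h>

#define NBITS 8

static void drop_orphaned_right_edges(int32_t left[NBITS], int32_t right[NBITS],
				      int32_t illegal /* max delay + 1 */)
{
	uint32_t i;

	for (i = 0; i < NBITS; i++) {
		if (left[i] == illegal && right[i] != illegal)
			right[i] = illegal;	/* a right edge without a left edge is meaningless */
	}
}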
- if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) - && (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) { - right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1; - DPRINT(2, "write_center: reset right_edge[%lu]: %ld", i, right_edge[i]); - } - //USER Reset sticky bit (except for bits where we have seen the left edge) - sticky_bit_chk = sticky_bit_chk << 1; - if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) { - sticky_bit_chk = sticky_bit_chk | 1; - } - - if (i == 0) { - break; - } - } - - //USER Search for the right edge of the window for each bit - for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) { - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d + start_dqs); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - if (QDRII) { - rw_mgr_mem_dll_lock_wait(); - } - //USER Stop searching when the read test doesn't pass AND when we've seen a passing read on every bit - stop = - !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, PASS_ONE_BIT, - &bit_chk, 0); - if (stop) { - recover_mem_device_after_ck_dqs_violation(); - } - sticky_bit_chk = sticky_bit_chk | bit_chk; - stop = stop && (sticky_bit_chk == param->write_correct_mask); - - DPRINT(2, "write_center (right): dtap=%lu => " BTFLD_FMT " == " BTFLD_FMT " && %lu", - d, sticky_bit_chk, param->write_correct_mask, stop); - - if (stop == 1) { - if (d == 0) { - for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - //USER d = 0 failed, but it passed when testing the left edge, so it must be marginal, set it to -1 - if (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1 - && left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1) { - right_edge[i] = -1; - } - } - } - break; - } else { - for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - if (bit_chk & 1) { - //USER Remember a passing test as the right_edge - right_edge[i] = d; - } else { - if (d != 0) { - //USER If a right edge has not been seen yet, then a future passing test will mark this edge as the left edge - if (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) { - left_edge[i] = -(d + 1); - } - } else { - //USER d = 0 failed, but it passed when testing the left edge, so it must be marginal, set it to -1 - if (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1 - && left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1) { - right_edge[i] = -1; - } - //USER If a right edge has not been seen yet, then a future passing test will mark this edge as the left edge - else if (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) { - left_edge[i] = -(d + 1); - } - } - } - DPRINT(2, - "write_center[r,d=%lu): bit_chk_test=%d left_edge[%lu]: %ld right_edge[%lu]: %ld", - d, (int)(bit_chk & 1), i, left_edge[i], i, right_edge[i]); - bit_chk = bit_chk >> 1; - } - } - } - - //USER Check that all bits have a window - for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - DPRINT(2, "write_center: left_edge[%lu]: %ld right_edge[%lu]: %ld", i, left_edge[i], - i, right_edge[i]); - BFM_GBL_SET(dq_write_left_edge[write_group][i], left_edge[i]); - BFM_GBL_SET(dq_write_right_edge[write_group][i], right_edge[i]); - if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) - || (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) { - set_failing_group_stage(test_bgn + i, CAL_STAGE_WRITES, - CAL_SUBSTAGE_WRITES_CENTER); - return 0; - } - } - - //USER Find middle of window for each DQ bit - mid_min = left_edge[0] - right_edge[0]; - min_index = 0; - for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - mid = left_edge[i] - right_edge[i]; - if (mid < mid_min) { - mid_min = mid; - min_index = i; - } - } - - //USER -mid_min/2 represents the amount that we need to move DQS. 
If mid_min is odd and positive we'll need to add one to - //USER make sure the rounding in further calculations is correct (always bias to the right), so just add 1 for all positive values - if (mid_min > 0) { - mid_min++; - } - mid_min = mid_min / 2; - - DPRINT(1, "write_center: mid_min=%ld", mid_min); - - //USER Determine the amount we can change DQS (which is -mid_min) - orig_mid_min = mid_min; - new_dqs = start_dqs; - mid_min = 0; - - DPRINT(1, "write_center: start_dqs=%ld new_dqs=%ld mid_min=%ld", start_dqs, new_dqs, - mid_min); - - //USER Initialize data for export structures - dqs_margin = IO_IO_OUT1_DELAY_MAX + 1; - dq_margin = IO_IO_OUT1_DELAY_MAX + 1; - - //USER add delay to bring centre of all DQ windows to the same "level" - for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { - //USER Use values before divide by 2 to reduce round off error - shift_dq = - (left_edge[i] - right_edge[i] - - (left_edge[min_index] - right_edge[min_index])) / 2 + (orig_mid_min - mid_min); - - DPRINT(2, "write_center: before: shift_dq[%lu]=%ld", i, shift_dq); - - if (shift_dq + (int32_t) READ_SCC_DQ_OUT1_DELAY(i) > (int32_t) IO_IO_OUT1_DELAY_MAX) { - shift_dq = (int32_t) IO_IO_OUT1_DELAY_MAX - READ_SCC_DQ_OUT1_DELAY(i); - } else if (shift_dq + (int32_t) READ_SCC_DQ_OUT1_DELAY(i) < 0) { - shift_dq = -(int32_t) READ_SCC_DQ_OUT1_DELAY(i); - } - DPRINT(2, "write_center: after: shift_dq[%lu]=%ld", i, shift_dq); - scc_mgr_set_dq_out1_delay(write_group, i, READ_SCC_DQ_OUT1_DELAY(i) + shift_dq); - scc_mgr_load_dq(i); - - DPRINT(2, "write_center: margin[%lu]=[%ld,%ld]", i, - left_edge[i] - shift_dq + (-mid_min), right_edge[i] + shift_dq - (-mid_min)); - //USER To determine values for export structures - if (left_edge[i] - shift_dq + (-mid_min) < dq_margin) { - dq_margin = left_edge[i] - shift_dq + (-mid_min); - } - if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin) { - dqs_margin = right_edge[i] + shift_dq - (-mid_min); - } - } - - //USER Move DQS - if (QDRII) { - scc_mgr_set_group_dqs_io_and_oct_out1_gradual(write_group, new_dqs); - } else { - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } - - DPRINT(2, "write_center: DM"); - - //USER set the left and right edge of each bit to an illegal value - //USER use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value - left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; - right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1; - - //USER Search for the/part of the window with DM shift - for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) { - scc_mgr_apply_group_dm_out1_delay(write_group, d); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - if (rw_mgr_mem_calibrate_write_test - (rank_bgn, write_group, 1, PASS_ALL_BITS, &bit_chk, 0)) { - - //USE Set current end of the window - end_curr = -d; - //USER If a starting edge of our window has not been seen this is our current start of the DM window - if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1) { - bgn_curr = -d; - } - //USER If current window is bigger than best seen. Set best seen to be current window - if ((end_curr - bgn_curr + 1) > win_best) { - win_best = end_curr - bgn_curr + 1; - bgn_best = bgn_curr; - end_best = end_curr; - } - } else { - //USER We just saw a failing test. Reset temp edge - bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; - end_curr = IO_IO_OUT1_DELAY_MAX + 1; - } - - } - - //USER Reset DM delay chains to 0 - scc_mgr_apply_group_dm_out1_delay(write_group, 0); - - //USER Check to see if the current window nudges up aganist 0 delay. 
If so we need to continue the search by shifting DQS otherwise DQS search begins as a new search - if (end_curr != 0) { - bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; - end_curr = IO_IO_OUT1_DELAY_MAX + 1; - } - //USER Search for the/part of the window with DQS shifts - for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) { - // Note: This only shifts DQS, so are we limiting ourselve to - // width of DQ unnecessarily - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d + new_dqs); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - if (rw_mgr_mem_calibrate_write_test - (rank_bgn, write_group, 1, PASS_ALL_BITS, &bit_chk, 0)) { - - //USE Set current end of the window - end_curr = d; - //USER If a beginning edge of our window has not been seen this is our current begin of the DM window - if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1) { - bgn_curr = d; - } - //USER If current window is bigger than best seen. Set best seen to be current window - if ((end_curr - bgn_curr + 1) > win_best) { - win_best = end_curr - bgn_curr + 1; - bgn_best = bgn_curr; - end_best = end_curr; - } - } else { - //USER We just saw a failing test. Reset temp edge - recover_mem_device_after_ck_dqs_violation(); - bgn_curr = IO_IO_OUT1_DELAY_MAX + 1; - end_curr = IO_IO_OUT1_DELAY_MAX + 1; - - //USER Early exit optimization: if ther remaining delay chain space is less than already seen largest window we can exit - if ((win_best - 1) > (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) { - break; - } - - } - } - - //USER assign left and right edge for cal and reporting; - left_edge[0] = -1 * bgn_best; - right_edge[0] = end_best; - - DPRINT(2, "dm_calib: left=%ld right=%ld", left_edge[0], right_edge[0]); - BFM_GBL_SET(dm_left_edge[write_group][0], left_edge[0]); - BFM_GBL_SET(dm_right_edge[write_group][0], right_edge[0]); - - //USER Move DQS (back to orig) - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); - - //USER Move DM - - //USER Find middle of window for the DM bit - mid = (left_edge[0] - right_edge[0]) / 2; - - //USER only move right, since we are not moving DQS/DQ - if (mid < 0) { - mid = 0; - } - //dm_marign should fail if we never find a window - if (win_best == 0) { - dm_margin = -1; - } else { - dm_margin = left_edge[0] - mid; - } - - scc_mgr_apply_group_dm_out1_delay(write_group, mid); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - DPRINT(2, "dm_calib: left=%ld right=%ld mid=%ld dm_margin=%ld", - left_edge[0], right_edge[0], mid, dm_margin); - - //USER Export values - gbl->fom_out += dq_margin + dqs_margin; - - DPRINT(2, "write_center: dq_margin=%ld dqs_margin=%ld dm_margin=%ld", dq_margin, dqs_margin, - dm_margin); - - //USER Do not remove this line as it makes sure all of our decisions have been applied - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0); -} - -#else // !NEWVERSION_WRDESKEW - -static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn, uint32_t write_group, - uint32_t test_bgn) -{ - uint32_t i, p, d; - uint32_t mid; - t_btfld bit_chk, sticky_bit_chk; - uint32_t max_working_dq[RW_MGR_MEM_DQ_PER_WRITE_DQS]; - uint32_t max_working_dm[RW_MGR_MEM_DATA_MASK_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH]; - uint32_t dq_margin, dqs_margin, dm_margin; - uint32_t start_dqs; - uint32_t stop; - - //USER per-bit deskew - - for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - max_working_dq[i] = 0; - } - - for (d = 1; d <= IO_IO_OUT1_DELAY_MAX; d++) { - scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - if 
(!rw_mgr_mem_calibrate_write_test - (rank_bgn, write_group, 0, PASS_ONE_BIT, &bit_chk, 0)) { - break; - } else { - for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - if (bit_chk & 1) { - max_working_dq[i] = d; - } - bit_chk = bit_chk >> 1; - } - } - } - - scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0); - - //USER determine minimum of maximums - - dq_margin = IO_IO_OUT1_DELAY_MAX; - - for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) { - if (max_working_dq[i] < dq_margin) { - dq_margin = max_working_dq[i]; - } - } - - //USER add delay to center DQ windows - - for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { - if (max_working_dq[i] > dq_margin) { - scc_mgr_set_dq_out1_delay(write_group, i, max_working_dq[i] - dq_margin); - } else { - scc_mgr_set_dq_out1_delay(write_group, i, 0); - } - - scc_mgr_load_dq(p, i); - } - - //USER sweep DQS window, may potentially have more window due to per-bit-deskew - - start_dqs = READ_SCC_DQS_IO_OUT1_DELAY(); - - for (d = start_dqs + 1; d <= IO_IO_OUT1_DELAY_MAX; d++) { - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, d); - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - if (QDRII) { - rw_mgr_mem_dll_lock_wait(); - } - - if (!rw_mgr_mem_calibrate_write_test - (rank_bgn, write_group, 0, PASS_ALL_BITS, &bit_chk, 0)) { - break; - } - } - - scc_mgr_set_dqs_out1_delay(write_group, start_dqs); - scc_mgr_set_oct_out1_delay(write_group, start_dqs); - - dqs_margin = d - start_dqs - 1; - - //USER time to center, +1 so that we don't go crazy centering DQ - - mid = (dq_margin + dqs_margin + 1) / 2; - - gbl->fom_out += dq_margin + dqs_margin; - - scc_mgr_load_dqs_io(); - scc_mgr_load_dqs_for_write_group(write_group); - - //USER center dq - - if (dq_margin > mid) { - for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) { - scc_mgr_set_dq_out1_delay(write_group, i, - READ_SCC_DQ_OUT1_DELAY(i) + dq_margin - mid); - scc_mgr_load_dq(p, i); - } - dqs_margin += dq_margin - mid; - dq_margin -= dq_margin - mid; - } - //USER do dm centering - - if (!RLDRAMX) { - dm_margin = IO_IO_OUT1_DELAY_MAX; - - if (QDRII) { - sticky_bit_chk = 0; - for (i = 0; i < RW_MGR_MEM_DATA_MASK_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH; - i++) { - max_working_dm[i] = 0; - } - } - - for (d = 1; d <= IO_IO_OUT1_DELAY_MAX; d++) { - scc_mgr_apply_group_dm_out1_delay(write_group, d); - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - if (DDRX) { - if (rw_mgr_mem_calibrate_write_test - (rank_bgn, write_group, 1, PASS_ALL_BITS, &bit_chk, 0)) { - max_working_dm[0] = d; - } else { - break; - } - } else { - stop = - !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1, - PASS_ALL_BITS, &bit_chk, 0); - sticky_bit_chk = sticky_bit_chk | bit_chk; - stop = stop && (sticky_bit_chk == param->read_correct_mask); - - if (stop == 1) { - break; - } else { - for (i = 0; - i < - RW_MGR_MEM_DATA_MASK_WIDTH / - RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { - if ((bit_chk & param->dm_correct_mask) == - param->dm_correct_mask) { - max_working_dm[i] = d; - } - bit_chk = - bit_chk >> (RW_MGR_MEM_DATA_WIDTH / - RW_MGR_MEM_DATA_MASK_WIDTH); - } - } - } - } - - i = 0; - for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { - if (max_working_dm[i] > mid) { - scc_mgr_set_dm_out1_delay(write_group, i, max_working_dm[i] - mid); - } else { - scc_mgr_set_dm_out1_delay(write_group, i, 0); - } - - scc_mgr_load_dm(i); - - if (max_working_dm[i] < dm_margin) { - dm_margin = max_working_dm[i]; - } - } - } else { - dm_margin = 0; - } - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - return (dq_margin + dqs_margin) > 0; -} 
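Both versions of rw_mgr_mem_calibrate_writes_center() above reduce to two window searches over the output delay chain: an edge search that records, for each DQ bit, the first and last passing tap and then centers the delay inside the per-bit window, and a best-run search (used for the DM pin) that keeps the widest contiguous run of passing taps. The sketch below restates those two ideas in isolation. It is an illustrative addition, not code from the sequencer: test_at_delay() and DELAY_MAX are hypothetical stand-ins for rw_mgr_mem_calibrate_write_test() and IO_IO_OUT1_DELAY_MAX.

#include <stdint.h>
#include <stdbool.h>

#define DELAY_MAX 31	/* assumed length of the output delay chain */

/* hypothetical pass/fail probe of one bit at a given output delay */
extern bool test_at_delay(int32_t delay);

/* Edge search: return the centered tap, or -1 if the bit never passes. */
static int32_t center_one_bit(void)
{
	int32_t left = -1, right = -1, d;

	for (d = 0; d <= DELAY_MAX; d++) {
		if (test_at_delay(d)) {
			if (left < 0)
				left = d;	/* first passing tap is the left edge */
			right = d;		/* last passing tap seen so far */
		}
	}
	if (left < 0)
		return -1;			/* no passing window at all */

	/* bias the midpoint to the right on odd-sized windows */
	return left + (right - left + 1) / 2;
}

/* Best-run search: width of the widest contiguous passing window. */
static int32_t widest_window(int32_t *bgn_best, int32_t *end_best)
{
	int32_t d, bgn_curr = -1, win_best = 0;

	for (d = 0; d <= DELAY_MAX; d++) {
		if (!test_at_delay(d)) {
			bgn_curr = -1;		/* a failing tap ends the current run */
			continue;
		}
		if (bgn_curr < 0)
			bgn_curr = d;		/* a new run starts here */
		if (d - bgn_curr + 1 > win_best) {
			win_best = d - bgn_curr + 1;
			*bgn_best = bgn_curr;
			*end_best = d;
		}
	}
	return win_best;			/* 0 means no window was found */
}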
- -#endif - -//USER calibrate the write operations - -static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g, uint32_t test_bgn) -{ - - reg_file_set_stage(CAL_STAGE_WRITES); - reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER); - - //USER starting phases - - //USER update info for sims - - reg_file_set_group(g); - - if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) { - set_failing_group_stage(g, CAL_STAGE_WRITES, CAL_SUBSTAGE_WRITES_CENTER); - return 0; - } - - return 1; -} - -//USER precharge all banks and activate row 0 in bank "000..." and bank "111..." -static void mem_precharge_and_activate(void) -{ - uint32_t r; - - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) { - if (param->skip_ranks[r]) { - //USER request to skip the rank - - continue; - } - //USER set rank - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); - - //USER precharge all banks ... - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_PRECHARGE_ALL); - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_0, 0, 0x0F); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_0, 0, __RW_MGR_ACTIVATE_0_AND_1_WAIT1); - - IOWR_32DIRECT(RW_MGR_LOAD_CNTR_1, 0, 0x0F); - IOWR_32DIRECT(RW_MGR_LOAD_JUMP_ADD_1, 0, __RW_MGR_ACTIVATE_0_AND_1_WAIT2); - - //USER activate rows - IOWR_32DIRECT(RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_ACTIVATE_0_AND_1); - } -} - -//USER perform all refreshes necessary over all ranks - -//USER Configure various memory related parameters. - -static void mem_config(void) -{ - uint32_t rlat, wlat; - uint32_t rw_wl_nop_cycles; - uint32_t max_latency; - - //USER read in write and read latency - - wlat = IORD_32DIRECT(MEM_T_WL_ADD, 0); - wlat += IORD_32DIRECT(DATA_MGR_MEM_T_ADD, 0); /* WL for hard phy does not include additive latency */ - - // YYONG: add addtional write latency to offset the address/command extra clock cycle - // YYONG: We change the AC mux setting causing AC to be delayed by one mem clock cycle - // YYONG: only do this for DDR3 - wlat = wlat + 1; - - rlat = IORD_32DIRECT(MEM_T_RL_ADD, 0); - - if (QUARTER_RATE_MODE) { - //USER In Quarter-Rate the WL-to-nop-cycles works like this - //USER 0,1 -> 0 - //USER 2,3,4,5 -> 1 - //USER 6,7,8,9 -> 2 - //USER etc... - rw_wl_nop_cycles = (wlat + 6) / 4 - 1; - } else if (HALF_RATE_MODE) { - //USER In Half-Rate the WL-to-nop-cycles works like this - //USER 0,1 -> -1 - //USER 2,3 -> 0 - //USER 4,5 -> 1 - //USER etc... 
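	/*
	 * Illustrative worked examples (added for clarity, not part of the
	 * original sequencer source): the quarter-rate branch above uses
	 * (wlat + 6) / 4 - 1, so wlat = 5 gives 11/4 - 1 = 1 and wlat = 6
	 * gives 12/4 - 1 = 2, matching the 2,3,4,5 -> 1 and 6,7,8,9 -> 2
	 * table.  The half-rate branch below uses ((wlat - 1) / 2) - 1 for
	 * odd wlat and (wlat / 2) - 1 for even wlat, so wlat = 1 -> -1,
	 * wlat = 2 -> 0 and wlat = 5 -> 1, matching its table.  Full rate
	 * simply uses wlat - 2.
	 */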
- if (wlat % 2) { - rw_wl_nop_cycles = ((wlat - 1) / 2) - 1; - } else { - rw_wl_nop_cycles = (wlat / 2) - 1; - } - } else { - rw_wl_nop_cycles = wlat - 2; - } - gbl->rw_wl_nop_cycles = rw_wl_nop_cycles; - - //USER For AV/CV, lfifo is hardened and always runs at full rate - //USER so max latency in AFI clocks, used here, is correspondingly smaller - if (QUARTER_RATE_MODE) { - max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) / 4 - 1; - } else if (HALF_RATE_MODE) { - max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) / 2 - 1; - } else { - max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) / 1 - 1; - } - //USER configure for a burst length of 8 - - if (QUARTER_RATE_MODE) { - //USER write latency - wlat = (wlat + 5) / 4 + 1; - - //USER set a pretty high read latency initially - gbl->curr_read_lat = (rlat + 1) / 4 + 8; - } else if (HALF_RATE_MODE) { - //USER write latency - wlat = (wlat - 1) / 2 + 1; - - //USER set a pretty high read latency initially - gbl->curr_read_lat = (rlat + 1) / 2 + 8; - } else { - //USER write latency - // Adjust Write Latency for Hard PHY - wlat = wlat + 1; - - //USER set a pretty high read latency initially - gbl->curr_read_lat = rlat + 16; - } - - if (gbl->curr_read_lat > max_latency) { - gbl->curr_read_lat = max_latency; - } - IOWR_32DIRECT(PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat); - - //USER advertise write latency - gbl->curr_write_lat = wlat; - IOWR_32DIRECT(PHY_MGR_AFI_WLAT, 0, wlat - 2); - - //USER initialize bit slips - - mem_precharge_and_activate(); -} - -//USER Set VFIFO and LFIFO to instant-on settings in skip calibration mode - -static void mem_skip_calibrate(void) -{ - uint32_t vfifo_offset; - uint32_t i, j, r; - - // Need to update every shadow register set used by the interface - for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r += NUM_RANKS_PER_SHADOW_REG) { - - // Strictly speaking this should be called once per group to make - // sure each group's delay chains are refreshed from the SCC register file, - // but since we're resetting all delay chains anyway, we can save some - // runtime by calling select_shadow_regs_for_update just once to switch rank. - select_shadow_regs_for_update(r, 0, 1); - - //USER Set output phase alignment settings appropriate for skip calibration - for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { - - scc_mgr_set_dqs_en_phase(i, 0); - // Case:33398 - // - // Write data arrives to the I/O two cycles before write latency is reached (720 deg). - // -> due to bit-slip in a/c bus - // -> to allow board skew where dqs is longer than ck - // -> how often can this happen!? - // -> can claim back some ptaps for high freq support if we can relax this, but i digress... 
- // - // The write_clk leads mem_ck by 90 deg - // The minimum ptap of the OPA is 180 deg - // Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay - // The write_clk is always delayed by 2 ptaps - // - // Hence, to make DQS aligned to CK, we need to delay DQS by: - // (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH)) - // - // Dividing the above by (360 / IO_DLL_CHAIN_LENGTH) gives us the number of ptaps, which simplies to: - // - // (1.25 * IO_DLL_CHAIN_LENGTH - 2) - scc_mgr_set_dqdqs_output_phase(i, (1.25 * IO_DLL_CHAIN_LENGTH - 2)); - } - - IOWR_32DIRECT(SCC_MGR_DQS_ENA, 0, 0xff); - IOWR_32DIRECT(SCC_MGR_DQS_IO_ENA, 0, 0xff); - - for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) { - IOWR_32DIRECT(SCC_MGR_GROUP_COUNTER, 0, i); - IOWR_32DIRECT(SCC_MGR_DQ_ENA, 0, 0xff); - IOWR_32DIRECT(SCC_MGR_DM_ENA, 0, 0xff); - } - - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - } - - // Compensate for simulation model behaviour - for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { - scc_mgr_set_dqs_bus_in_delay(i, 10); - scc_mgr_load_dqs(i); - } - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - - //ArriaV has hard FIFOs that can only be initialized by incrementing in sequencer - vfifo_offset = CALIB_VFIFO_OFFSET; - for (j = 0; j < vfifo_offset; j++) { - if (HARD_PHY) { - IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_HARD_PHY, 0, 0xff); - } else { - IOWR_32DIRECT(PHY_MGR_CMD_INC_VFIFO_FR, 0, 0xff); - } - } - - IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); - - // For ACV with hard lfifo, we get the skip-cal setting from generation-time constant - gbl->curr_read_lat = CALIB_LFIFO_OFFSET; - IOWR_32DIRECT(PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat); -} - -//USER Memory calibration entry point - -static uint32_t mem_calibrate(void) -{ - uint32_t i; - uint32_t rank_bgn, sr; - uint32_t write_group, write_test_bgn; - uint32_t read_group, read_test_bgn; - uint32_t run_groups, current_run; - uint32_t failing_groups = 0; - uint32_t group_failed = 0; - uint32_t sr_failed = 0; - - // Initialize the data settings - DPRINT(1, "Preparing to init data"); - DPRINT(1, "Init complete"); - - gbl->error_substage = CAL_SUBSTAGE_NIL; - gbl->error_stage = CAL_STAGE_NIL; - gbl->error_group = 0xff; - gbl->fom_in = 0; - gbl->fom_out = 0; - - mem_config(); - - if (ARRIAV || CYCLONEV) { - uint32_t bypass_mode = (HARD_PHY) ? 
0x1 : 0x0; - for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) { - IOWR_32DIRECT(SCC_MGR_GROUP_COUNTER, 0, i); - scc_set_bypass_mode(i, bypass_mode); - } - } - - if (((DYNAMIC_CALIB_STEPS) & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { - //USER Set VFIFO and LFIFO to instant-on settings in skip calibration mode - - mem_skip_calibrate(); - } else { - for (i = 0; i < NUM_CALIB_REPEAT; i++) { - - //USER Zero all delay chain/phase settings for all groups and all shadow register sets - scc_mgr_zero_all(); - - run_groups = ~param->skip_groups; - - for (write_group = 0, write_test_bgn = 0; - write_group < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; - write_group++, write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) { - // Initialized the group failure - group_failed = 0; - - // Mark the group as being attempted for calibration - - BFM_GBL_SET(vfifo_idx, 0); - current_run = - run_groups & ((1 << RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1); - run_groups = run_groups >> RW_MGR_NUM_DQS_PER_WRITE_GROUP; - - if (current_run == 0) { - continue; - } - - IOWR_32DIRECT(SCC_MGR_GROUP_COUNTER, 0, write_group); - scc_mgr_zero_group(write_group, write_test_bgn, 0); - - for (read_group = - write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / - RW_MGR_MEM_IF_WRITE_DQS_WIDTH, read_test_bgn = 0; - read_group < - (write_group + - 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / - RW_MGR_MEM_IF_WRITE_DQS_WIDTH && group_failed == 0; - read_group++, read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) { - - //USER Calibrate the VFIFO - if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_VFIFO)) { - if (!rw_mgr_mem_calibrate_vfifo - (read_group, read_test_bgn)) { - group_failed = 1; - - if (! - (gbl-> - phy_debug_mode_flags & - PHY_DEBUG_SWEEP_ALL_GROUPS)) { - return 0; - } - } - } - } - - //USER level writes (or align DK with CK for RLDRAMX) - if (group_failed == 0) { - if ((DDRX || RLDRAMII) && !(ARRIAV || CYCLONEV)) { - if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_WLEVEL)) { - if (!rw_mgr_mem_calibrate_wlevel - (write_group, write_test_bgn)) { - group_failed = 1; - - if (! - (gbl-> - phy_debug_mode_flags & - PHY_DEBUG_SWEEP_ALL_GROUPS)) { - return 0; - } - } - } - } - } - //USER Calibrate the output side - if (group_failed == 0) { - for (rank_bgn = 0, sr = 0; - rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS; - rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) { - sr_failed = 0; - if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES)) { - if ((STATIC_CALIB_STEPS) & - CALIB_SKIP_DELAY_SWEEPS) { - //USER not needed in quick mode! - } else { - //USER Determine if this set of ranks should be skipped entirely - if (!param->skip_shadow_regs[sr]) { - - //USER Select shadow register set - select_shadow_regs_for_update - (rank_bgn, write_group, - 1); - - if (!rw_mgr_mem_calibrate_writes(rank_bgn, write_group, write_test_bgn)) { - sr_failed = 1; - if (! - (gbl-> - phy_debug_mode_flags - & - PHY_DEBUG_SWEEP_ALL_GROUPS)) - { - return 0; - } - } - } - } - } - if (sr_failed == 0) { - } else { - group_failed = 1; - } - } - } - - if (group_failed == 0) { - for (read_group = - write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH / - RW_MGR_MEM_IF_WRITE_DQS_WIDTH, read_test_bgn = 0; - read_group < - (write_group + - 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH / - RW_MGR_MEM_IF_WRITE_DQS_WIDTH && group_failed == 0; - read_group++, read_test_bgn += - RW_MGR_MEM_DQ_PER_READ_DQS) { - - if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES)) { - if (!rw_mgr_mem_calibrate_vfifo_end - (read_group, read_test_bgn)) { - group_failed = 1; - - if (! 
- (gbl-> - phy_debug_mode_flags & - PHY_DEBUG_SWEEP_ALL_GROUPS)) { - return 0; - } - } - } - } - } - - if (group_failed == 0) { - -#if STATIC_IN_RTL_SIM -#else -#endif - } - - if (group_failed != 0) { - failing_groups++; - } - - } - - // USER If there are any failing groups then report the failure - if (failing_groups != 0) { - return 0; - } - //USER Calibrate the LFIFO - if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) { - //USER If we're skipping groups as part of debug, don't calibrate LFIFO - if (param->skip_groups == 0) { - if (!rw_mgr_mem_calibrate_lfifo()) { - return 0; - } - } - } - } - } - - //USER Do not remove this line as it makes sure all of our decisions have been applied - IOWR_32DIRECT(SCC_MGR_UPD, 0, 0); - return 1; -} - -static uint32_t run_mem_calibrate(void) -{ - - uint32_t pass; - uint32_t debug_info; - uint32_t ctrlcfg = IORD_32DIRECT(CTRL_CONFIG_REG, 0); - - // Initialize the debug status to show that calibration has started. - // This should occur before anything else - // Reset pass/fail status shown on afi_cal_success/fail - IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_RESET); - //stop tracking manger - - IOWR_32DIRECT(CTRL_CONFIG_REG, 0, ctrlcfg & 0xFFBFFFFF); - - initialize(); - - rw_mgr_mem_initialize(); - - pass = mem_calibrate(); - - mem_precharge_and_activate(); - - //pe_checkout_pattern(); - - IOWR_32DIRECT(PHY_MGR_CMD_FIFO_RESET, 0, 0); - - if (pass) { -#ifdef TEST_SIZE - if (!check_test_mem(0)) { - gbl->error_stage = 0x92; - gbl->error_group = 0x92; - } -#endif - } - - //USER Handoff - - //USER Don't return control of the PHY back to AFI when in debug mode - if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) { - rw_mgr_mem_handoff(); - - // In Hard PHY this is a 2-bit control: - // 0: AFI Mux Select - // 1: DDIO Mux Select - IOWR_32DIRECT(PHY_MGR_MUX_SEL, 0, 0x2); - } - IOWR_32DIRECT(CTRL_CONFIG_REG, 0, ctrlcfg); - - if (pass) { - IPRINT("CALIBRATION PASSED"); - - gbl->fom_in /= 2; - gbl->fom_out /= 2; - - if (gbl->fom_in > 0xff) { - gbl->fom_in = 0xff; - } - - if (gbl->fom_out > 0xff) { - gbl->fom_out = 0xff; - } - - // Update the FOM in the register file - debug_info = gbl->fom_in; - debug_info |= gbl->fom_out << 8; - IOWR_32DIRECT(REG_FILE_FOM, 0, debug_info); - - IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, debug_info); - IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_SUCCESS); - - } else { - - IPRINT("CALIBRATION FAILED"); - - debug_info = gbl->error_stage; - debug_info |= gbl->error_substage << 8; - debug_info |= gbl->error_group << 16; - - IOWR_32DIRECT(REG_FILE_FAILING_STAGE, 0, debug_info); - IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, debug_info); - IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_FAIL); - - // Update the failing group/stage in the register file - debug_info = gbl->error_stage; - debug_info |= gbl->error_substage << 8; - debug_info |= gbl->error_group << 16; - IOWR_32DIRECT(REG_FILE_FAILING_STAGE, 0, debug_info); - - } - - // Set the debug status to show that calibration has ended. 
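run_mem_calibrate() packs its result words from byte-wide fields: on success REG_FILE_FOM carries fom_in in bits 7:0 and fom_out in bits 15:8, and on failure REG_FILE_FAILING_STAGE (and PHY_MGR_CAL_DEBUG_INFO) carries the stage, substage and group in bits 7:0, 15:8 and 23:16. A minimal sketch of how a host-side helper might unpack those words; the function name and the printf reporting are illustrative, not part of the sequencer.

#include <stdint.h>
#include <stdio.h>

/* illustrative decoder for the packed calibration debug words */
static void decode_cal_debug_words(uint32_t fom_word, uint32_t failing_word)
{
	/* REG_FILE_FOM layout used above: fom_in | (fom_out << 8) */
	printf("fom_in=%u fom_out=%u\n",
	       (unsigned)(fom_word & 0xff), (unsigned)((fom_word >> 8) & 0xff));

	/* REG_FILE_FAILING_STAGE layout: stage | (substage << 8) | (group << 16) */
	printf("stage=%u substage=%u group=%u\n",
	       (unsigned)(failing_word & 0xff),
	       (unsigned)((failing_word >> 8) & 0xff),
	       (unsigned)((failing_word >> 16) & 0xff));
}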
- // This should occur after everything else - return pass; - -} - -static void hc_initialize_rom_data(void) -{ - uint32_t i; - - for (i = 0; i < inst_rom_init_size; i++) { - uint32_t data = inst_rom_init[i]; - IOWR_32DIRECT(RW_MGR_INST_ROM_WRITE, (i << 2), data); - } - - for (i = 0; i < ac_rom_init_size; i++) { - uint32_t data = ac_rom_init[i]; - IOWR_32DIRECT(RW_MGR_AC_ROM_WRITE, (i << 2), data); - } -} - -static void initialize_reg_file(void) -{ - // Initialize the register file with the correct data - IOWR_32DIRECT(REG_FILE_SIGNATURE, 0, REG_FILE_INIT_SEQ_SIGNATURE); - IOWR_32DIRECT(REG_FILE_DEBUG_DATA_ADDR, 0, 0); - IOWR_32DIRECT(REG_FILE_CUR_STAGE, 0, 0); - IOWR_32DIRECT(REG_FILE_FOM, 0, 0); - IOWR_32DIRECT(REG_FILE_FAILING_STAGE, 0, 0); - IOWR_32DIRECT(REG_FILE_DEBUG1, 0, 0); - IOWR_32DIRECT(REG_FILE_DEBUG2, 0, 0); -} - -static void initialize_hps_phy(void) -{ - // These may need to be included also: - // wrap_back_en (false) - // atpg_en (false) - // pipelineglobalenable (true) - - uint32_t reg; - // Tracking also gets configured here because it's in the same register - uint32_t trk_sample_count = 7500; - uint32_t trk_long_idle_sample_count = (10 << 16) | 100; // Format is number of outer loops in the 16 MSB, sample count in 16 LSB. - - reg = 0; - reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2); - reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1); - reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1); - reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1); - reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0); - reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1); - // Fix for long latency VFIFO - // This field selects the intrinsic latency to RDATA_EN/FULL path. 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles. 
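The tracking parameters programmed by initialize_hps_phy() are packed words: trk_long_idle_sample_count holds the outer-loop count in its upper 16 bits and the sample count in the lower 16, and the PHYCTRL writes just below split the 32-bit trk_sample_count across PHYCTRL_0 (bits 19:0) and PHYCTRL_1 (bits 31:20). A minimal sketch of that packing, assuming a 20-bit low field as the _19_0/_31_20 macro names suggest; the real field macros live in the SDRAM controller headers and are not reproduced here.

#include <stdint.h>

#define SAMPLECOUNT_LOW_WIDTH 20	/* assumed from the _19_0/_31_20 names */

static void pack_tracking_words(uint32_t sample_count,
				uint32_t idle_outer_loops, uint32_t idle_samples,
				uint32_t *phyctrl0_bits, uint32_t *phyctrl1_bits,
				uint32_t *long_idle_word)
{
	/* outer-loop count in the 16 MSBs, sample count in the 16 LSBs */
	*long_idle_word = (idle_outer_loops << 16) | (idle_samples & 0xffff);

	/* sample count bits 19:0 go to PHYCTRL_0, bits 31:20 to PHYCTRL_1 */
	*phyctrl0_bits = sample_count & ((1u << SAMPLECOUNT_LOW_WIDTH) - 1);
	*phyctrl1_bits = sample_count >> SAMPLECOUNT_LOW_WIDTH;
}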
- reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0); - reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(trk_sample_count); - IOWR_32DIRECT(BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_OFFSET, reg); - - reg = 0; - reg |= - SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(trk_sample_count >> - SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH); - reg |= - SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(trk_long_idle_sample_count); - IOWR_32DIRECT(BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_OFFSET, reg); - - reg = 0; - reg |= - SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(trk_long_idle_sample_count - >> - SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH); - IOWR_32DIRECT(BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_OFFSET, reg); -} - -static void initialize_tracking(void) -{ - uint32_t concatenated_longidle = 0x0; - uint32_t concatenated_delays = 0x0; - uint32_t concatenated_rw_addr = 0x0; - uint32_t concatenated_refresh = 0x0; - uint32_t dtaps_per_ptap; - uint32_t tmp_delay; - - // compute usable version of value in case we skip full computation later - dtaps_per_ptap = 0; - tmp_delay = 0; - while (tmp_delay < IO_DELAY_PER_OPA_TAP) { - dtaps_per_ptap++; - tmp_delay += IO_DELAY_PER_DCHAIN_TAP; - } - dtaps_per_ptap--; - - concatenated_longidle = concatenated_longidle ^ 10; //longidle outer loop - concatenated_longidle = concatenated_longidle << 16; - concatenated_longidle = concatenated_longidle ^ 100; //longidle sample count - - concatenated_delays = concatenated_delays ^ 243; // trfc, worst case of 933Mhz 4Gb - concatenated_delays = concatenated_delays << 8; - concatenated_delays = concatenated_delays ^ 14; // trcd, worst case - concatenated_delays = concatenated_delays << 8; - concatenated_delays = concatenated_delays ^ 10; // vfifo wait - concatenated_delays = concatenated_delays << 8; - concatenated_delays = concatenated_delays ^ 4; // mux delay - - concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_IDLE; - concatenated_rw_addr = concatenated_rw_addr << 8; - concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_ACTIVATE_1; - concatenated_rw_addr = concatenated_rw_addr << 8; - concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_SGLE_READ; - concatenated_rw_addr = concatenated_rw_addr << 8; - concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_PRECHARGE_ALL; - - concatenated_refresh = concatenated_refresh ^ __RW_MGR_REFRESH_ALL; - concatenated_refresh = concatenated_refresh << 24; - concatenated_refresh = concatenated_refresh ^ 1000; // trefi - - // Initialize the register file with the correct data - IOWR_32DIRECT(REG_FILE_DTAPS_PER_PTAP, 0, dtaps_per_ptap); - IOWR_32DIRECT(REG_FILE_TRK_SAMPLE_COUNT, 0, 7500); - IOWR_32DIRECT(REG_FILE_TRK_LONGIDLE, 0, concatenated_longidle); - IOWR_32DIRECT(REG_FILE_DELAYS, 0, concatenated_delays); - IOWR_32DIRECT(REG_FILE_TRK_RW_MGR_ADDR, 0, concatenated_rw_addr); - IOWR_32DIRECT(REG_FILE_TRK_READ_DQS_WIDTH, 0, RW_MGR_MEM_IF_READ_DQS_WIDTH); - IOWR_32DIRECT(REG_FILE_TRK_RFSH, 0, concatenated_refresh); -} - -static int socfpga_mem_calibration(void) -{ - param_t my_param; - gbl_t my_gbl; - uint32_t pass; - uint32_t i; - - param = &my_param; - gbl = &my_gbl; - - // Initialize the debug mode flags - gbl->phy_debug_mode_flags = 0; - // Set the calibration enabled by default - gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; - // Only enable margining by default if requested - // Only sweep all groups (regardless of fail state) by default if requested - //Set enabled read test by default - - // 
Initialize the register file - initialize_reg_file(); - - // Initialize any PHY CSR - initialize_hps_phy(); - - scc_mgr_initialize(); - - initialize_tracking(); - - // Initialize the TCL report. This must occur before any printf - // but after the debug mode flags and register file - - // USER Enable all ranks, groups - for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++) { - param->skip_ranks[i] = 0; - } - for (i = 0; i < NUM_SHADOW_REGS; ++i) { - param->skip_shadow_regs[i] = 0; - } - param->skip_groups = 0; - - IPRINT("Preparing to start memory calibration"); - - DPRINT(1, - "%s%s %s ranks=%lu cs/dimm=%lu dq/dqs=%lu,%lu vg/dqs=%lu,%lu dqs=%lu,%lu dq=%lu dm=%lu " - "ptap_delay=%lu dtap_delay=%lu dtap_dqsen_delay=%lu, dll=%lu", - RDIMM ? "r" : (LRDIMM ? "l" : ""), - DDR2 ? "DDR2" : (DDR3 ? "DDR3" - : (QDRII ? "QDRII" - : (RLDRAMII ? "RLDRAMII" - : (RLDRAM3 ? "RLDRAM3" : "??PROTO??")))), - FULL_RATE ? "FR" : (HALF_RATE ? "HR" : (QUARTER_RATE ? "QR" : "??RATE??")), - (long unsigned int)RW_MGR_MEM_NUMBER_OF_RANKS, - (long unsigned int)RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM, - (long unsigned int)RW_MGR_MEM_DQ_PER_READ_DQS, - (long unsigned int)RW_MGR_MEM_DQ_PER_WRITE_DQS, - (long unsigned int)RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS, - (long unsigned int)RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS, - (long unsigned int)RW_MGR_MEM_IF_READ_DQS_WIDTH, - (long unsigned int)RW_MGR_MEM_IF_WRITE_DQS_WIDTH, - (long unsigned int)RW_MGR_MEM_DATA_WIDTH, - (long unsigned int)RW_MGR_MEM_DATA_MASK_WIDTH, - (long unsigned int)IO_DELAY_PER_OPA_TAP, (long unsigned int)IO_DELAY_PER_DCHAIN_TAP, - (long unsigned int)IO_DELAY_PER_DQS_EN_DCHAIN_TAP, - (long unsigned int)IO_DLL_CHAIN_LENGTH); - DPRINT(1, - "max values: en_p=%lu dqdqs_p=%lu en_d=%lu dqs_in_d=%lu io_in_d=%lu io_out1_d=%lu io_out2_d=%lu" - "dqs_in_reserve=%lu dqs_out_reserve=%lu", (long unsigned int)IO_DQS_EN_PHASE_MAX, - (long unsigned int)IO_DQDQS_OUT_PHASE_MAX, (long unsigned int)IO_DQS_EN_DELAY_MAX, - (long unsigned int)IO_DQS_IN_DELAY_MAX, (long unsigned int)IO_IO_IN_DELAY_MAX, - (long unsigned int)IO_IO_OUT1_DELAY_MAX, (long unsigned int)IO_IO_OUT2_DELAY_MAX, - (long unsigned int)IO_DQS_IN_RESERVE, (long unsigned int)IO_DQS_OUT_RESERVE); - - hc_initialize_rom_data(); - - //USER update info for sims - reg_file_set_stage(CAL_STAGE_NIL); - reg_file_set_group(0); - - // Load global needed for those actions that require - // some dynamic calibration support - dyn_calib_steps = STATIC_CALIB_STEPS; - - // Load global to allow dynamic selection of delay loop settings - // based on calibration mode - if (!((DYNAMIC_CALIB_STEPS) & CALIB_SKIP_DELAY_LOOPS)) { - skip_delay_mask = 0xff; - } else { - skip_delay_mask = 0x0; - } - -#ifdef TEST_SIZE - if (!check_test_mem(1)) { - IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, 0x9090); - IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_FAIL); - } - write_test_mem(); - if (!check_test_mem(0)) { - IOWR_32DIRECT(PHY_MGR_CAL_DEBUG_INFO, 0, 0x9191); - IOWR_32DIRECT(PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_FAIL); - } -#endif - - pass = run_mem_calibrate(); - - // EMPTY - - return pass; -} diff --git a/arch/arm/mach-socfpga/include/mach/sequencer.h b/arch/arm/mach-socfpga/include/mach/sequencer.h deleted file mode 100644 index dd0378af34..0000000000 --- a/arch/arm/mach-socfpga/include/mach/sequencer.h +++ /dev/null @@ -1,447 +0,0 @@ -#ifndef _SEQUENCER_H_ -#define _SEQUENCER_H_ - -/* -* Copyright Altera Corporation (C) 2012-2014. 
All rights reserved -* -* SPDX-License-Identifier: BSD-3-Clause -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above copyright -* notice, this list of conditions and the following disclaimer in the -* documentation and/or other materials provided with the distribution. -* * Neither the name of Altera Corporation nor the -* names of its contributors may be used to endorse or promote products -* derived from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -* DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY -* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#define ALTERA_ASSERT(condition) -#define ALTERA_INFO_ASSERT(condition,text) - -#define RW_MGR_NUM_DM_PER_WRITE_GROUP (RW_MGR_MEM_DATA_MASK_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH) -#define RW_MGR_NUM_TRUE_DM_PER_WRITE_GROUP (RW_MGR_TRUE_MEM_DATA_MASK_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH) - -#define RW_MGR_NUM_DQS_PER_WRITE_GROUP (RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH) -#define NUM_RANKS_PER_SHADOW_REG (RW_MGR_MEM_NUMBER_OF_RANKS / NUM_SHADOW_REGS) - -#define RW_MGR_RUN_SINGLE_GROUP BASE_RW_MGR -#define RW_MGR_RUN_ALL_GROUPS BASE_RW_MGR + 0x0400 - -#define RW_MGR_DI_BASE (BASE_RW_MGR + 0x0020) - -#define DDR3_MR1_ODT_MASK 0xFFFFFD99 -#define DDR3_MR2_ODT_MASK 0xFFFFF9FF -#define DDR3_AC_MIRR_MASK 0x020A8 - -#define RW_MGR_LOAD_CNTR_0 BASE_RW_MGR + 0x0800 -#define RW_MGR_LOAD_CNTR_1 BASE_RW_MGR + 0x0804 -#define RW_MGR_LOAD_CNTR_2 BASE_RW_MGR + 0x0808 -#define RW_MGR_LOAD_CNTR_3 BASE_RW_MGR + 0x080C - -#define RW_MGR_LOAD_JUMP_ADD_0 BASE_RW_MGR + 0x0C00 -#define RW_MGR_LOAD_JUMP_ADD_1 BASE_RW_MGR + 0x0C04 -#define RW_MGR_LOAD_JUMP_ADD_2 BASE_RW_MGR + 0x0C08 -#define RW_MGR_LOAD_JUMP_ADD_3 BASE_RW_MGR + 0x0C0C - -#define RW_MGR_RESET_READ_DATAPATH BASE_RW_MGR + 0x1000 -#define RW_MGR_SOFT_RESET BASE_RW_MGR + 0x2000 - -#define RW_MGR_SET_CS_AND_ODT_MASK BASE_RW_MGR + 0x1400 -#define RW_MGR_SET_ACTIVE_RANK BASE_RW_MGR + 0x2400 - -#define RW_MGR_LOOPBACK_MODE BASE_RW_MGR + 0x0200 - -#define RW_MGR_ENABLE_REFRESH BASE_RW_MGR + 0x3000 - -#define RW_MGR_RANK_NONE 0xFF -#define RW_MGR_RANK_ALL 0x00 - -#define RW_MGR_ODT_MODE_OFF 0 -#define RW_MGR_ODT_MODE_READ_WRITE 1 - -#define NUM_CALIB_REPEAT 1 - -#define NUM_READ_TESTS 7 -#define NUM_READ_PB_TESTS 7 -#define NUM_WRITE_TESTS 15 -#define NUM_WRITE_PB_TESTS 31 - -#define PASS_ALL_BITS 1 -#define PASS_ONE_BIT 0 - -/* calibration stages */ - -#define CAL_STAGE_NIL 0 -#define CAL_STAGE_VFIFO 1 -#define CAL_STAGE_WLEVEL 2 -#define CAL_STAGE_LFIFO 3 -#define CAL_STAGE_WRITES 4 -#define CAL_STAGE_FULLTEST 5 -#define 
CAL_STAGE_REFRESH 6 -#define CAL_STAGE_CAL_SKIPPED 7 -#define CAL_STAGE_CAL_ABORTED 8 -#define CAL_STAGE_VFIFO_AFTER_WRITES 9 - -/* calibration substages */ - -#define CAL_SUBSTAGE_NIL 0 -#define CAL_SUBSTAGE_GUARANTEED_READ 1 -#define CAL_SUBSTAGE_DQS_EN_PHASE 2 -#define CAL_SUBSTAGE_VFIFO_CENTER 3 -#define CAL_SUBSTAGE_WORKING_DELAY 1 -#define CAL_SUBSTAGE_LAST_WORKING_DELAY 2 -#define CAL_SUBSTAGE_WLEVEL_COPY 3 -#define CAL_SUBSTAGE_WRITES_CENTER 1 -#define CAL_SUBSTAGE_READ_LATENCY 1 -#define CAL_SUBSTAGE_REFRESH 1 - -#define MAX_RANKS (RW_MGR_MEM_NUMBER_OF_RANKS) -#define MAX_DQS (RW_MGR_MEM_IF_WRITE_DQS_WIDTH > RW_MGR_MEM_IF_READ_DQS_WIDTH ? RW_MGR_MEM_IF_WRITE_DQS_WIDTH : RW_MGR_MEM_IF_READ_DQS_WIDTH) -#define MAX_DQ (RW_MGR_MEM_DATA_WIDTH) -#define MAX_DM (RW_MGR_MEM_DATA_MASK_WIDTH) - -/* length of VFIFO, from SW_MACROS */ -#define VFIFO_SIZE (READ_VALID_FIFO_SIZE) - -/* Memory for data transfer between TCL scripts and NIOS. - * - * - First word is a command request. - * - The remaining words are part of the transfer. - */ - -/* Define the base address of each manager. */ - -/* MarkW: how should these base addresses be done for A-V? */ -#define BASE_PTR_MGR SEQUENCER_PTR_MGR_INST_BASE -#define BASE_PHY_MGR (0x00088000) -#define BASE_RW_MGR (0x00090000) -#define BASE_DATA_MGR (0x00098000) -#define BASE_SCC_MGR SEQUENCER_SCC_MGR_INST_BASE -#define BASE_REG_FILE SEQUENCER_REG_FILE_INST_BASE -#define BASE_TIMER SEQUENCER_TIMER_INST_BASE -#define BASE_MMR (0x000C0000) -#define BASE_TRK_MGR (0x000D0000) - -/* Register file addresses. */ -#define REG_FILE_SIGNATURE (BASE_REG_FILE + 0x0000) -#define REG_FILE_DEBUG_DATA_ADDR (BASE_REG_FILE + 0x0004) -#define REG_FILE_CUR_STAGE (BASE_REG_FILE + 0x0008) -#define REG_FILE_FOM (BASE_REG_FILE + 0x000C) -#define REG_FILE_FAILING_STAGE (BASE_REG_FILE + 0x0010) -#define REG_FILE_DEBUG1 (BASE_REG_FILE + 0x0014) -#define REG_FILE_DEBUG2 (BASE_REG_FILE + 0x0018) - -#define REG_FILE_DTAPS_PER_PTAP (BASE_REG_FILE + 0x001C) -#define REG_FILE_TRK_SAMPLE_COUNT (BASE_REG_FILE + 0x0020) -#define REG_FILE_TRK_LONGIDLE (BASE_REG_FILE + 0x0024) -#define REG_FILE_DELAYS (BASE_REG_FILE + 0x0028) -#define REG_FILE_TRK_RW_MGR_ADDR (BASE_REG_FILE + 0x002C) -#define REG_FILE_TRK_READ_DQS_WIDTH (BASE_REG_FILE + 0x0030) -#define REG_FILE_TRK_RFSH (BASE_REG_FILE + 0x0034) -#define CTRL_CONFIG_REG (BASE_MMR + 0x0000) - -/* PHY manager configuration registers. */ - -#define PHY_MGR_PHY_RLAT (BASE_PHY_MGR + 0x4000) -#define PHY_MGR_RESET_MEM_STBL (BASE_PHY_MGR + 0x4004) -#define PHY_MGR_MUX_SEL (BASE_PHY_MGR + 0x4008) -#define PHY_MGR_CAL_STATUS (BASE_PHY_MGR + 0x400c) -#define PHY_MGR_CAL_DEBUG_INFO (BASE_PHY_MGR + 0x4010) -#define PHY_MGR_VFIFO_RD_EN_OVRD (BASE_PHY_MGR + 0x4014) -#define PHY_MGR_AFI_WLAT (BASE_PHY_MGR + 0x4018) -#define PHY_MGR_AFI_RLAT (BASE_PHY_MGR + 0x401c) - -#define PHY_MGR_CAL_RESET (0) -#define PHY_MGR_CAL_SUCCESS (1) -#define PHY_MGR_CAL_FAIL (2) - -/* PHY manager command addresses. */ - -#define PHY_MGR_CMD_INC_VFIFO_FR (BASE_PHY_MGR + 0x0000) -#define PHY_MGR_CMD_INC_VFIFO_HR (BASE_PHY_MGR + 0x0004) -#define PHY_MGR_CMD_INC_VFIFO_HARD_PHY (BASE_PHY_MGR + 0x0004) -#define PHY_MGR_CMD_FIFO_RESET (BASE_PHY_MGR + 0x0008) -#define PHY_MGR_CMD_INC_VFIFO_FR_HR (BASE_PHY_MGR + 0x000C) -#define PHY_MGR_CMD_INC_VFIFO_QR (BASE_PHY_MGR + 0x0010) - -/* PHY manager parameters. 
*/ - -#define PHY_MGR_MAX_RLAT_WIDTH (BASE_PHY_MGR + 0x0000) -#define PHY_MGR_MAX_AFI_WLAT_WIDTH (BASE_PHY_MGR + 0x0004) -#define PHY_MGR_MAX_AFI_RLAT_WIDTH (BASE_PHY_MGR + 0x0008) -#define PHY_MGR_CALIB_SKIP_STEPS (BASE_PHY_MGR + 0x000c) -#define PHY_MGR_CALIB_VFIFO_OFFSET (BASE_PHY_MGR + 0x0010) -#define PHY_MGR_CALIB_LFIFO_OFFSET (BASE_PHY_MGR + 0x0014) -#define PHY_MGR_RDIMM (BASE_PHY_MGR + 0x0018) -#define PHY_MGR_MEM_T_WL (BASE_PHY_MGR + 0x001c) -#define PHY_MGR_MEM_T_RL (BASE_PHY_MGR + 0x0020) - -/* Data Manager */ -#define DATA_MGR_DRAM_CFG (BASE_DATA_MGR + 0x0000) -#define DATA_MGR_MEM_T_WL (BASE_DATA_MGR + 0x0004) -#define DATA_MGR_MEM_T_ADD (BASE_DATA_MGR + 0x0008) -#define DATA_MGR_MEM_T_RL (BASE_DATA_MGR + 0x000C) -#define DATA_MGR_MEM_T_RFC (BASE_DATA_MGR + 0x0010) -#define DATA_MGR_MEM_T_REFI (BASE_DATA_MGR + 0x0014) -#define DATA_MGR_MEM_T_WR (BASE_DATA_MGR + 0x0018) -#define DATA_MGR_MEM_T_MRD (BASE_DATA_MGR + 0x001C) -#define DATA_MGR_COL_WIDTH (BASE_DATA_MGR + 0x0020) -#define DATA_MGR_ROW_WIDTH (BASE_DATA_MGR + 0x0024) -#define DATA_MGR_BANK_WIDTH (BASE_DATA_MGR + 0x0028) -#define DATA_MGR_CS_WIDTH (BASE_DATA_MGR + 0x002C) -#define DATA_MGR_ITF_WIDTH (BASE_DATA_MGR + 0x0030) -#define DATA_MGR_DVC_WIDTH (BASE_DATA_MGR + 0x0034) - -#define MEM_T_WL_ADD DATA_MGR_MEM_T_WL -#define MEM_T_RL_ADD DATA_MGR_MEM_T_RL - -#define CALIB_SKIP_DELAY_LOOPS (1 << 0) -#define CALIB_SKIP_ALL_BITS_CHK (1 << 1) -#define CALIB_SKIP_DELAY_SWEEPS (1 << 2) -#define CALIB_SKIP_VFIFO (1 << 3) -#define CALIB_SKIP_LFIFO (1 << 4) -#define CALIB_SKIP_WLEVEL (1 << 5) -#define CALIB_SKIP_WRITES (1 << 6) -#define CALIB_SKIP_FULL_TEST (1 << 7) -#define CALIB_SKIP_ALL (CALIB_SKIP_VFIFO | CALIB_SKIP_LFIFO | CALIB_SKIP_WLEVEL | CALIB_SKIP_WRITES | CALIB_SKIP_FULL_TEST) -#define CALIB_IN_RTL_SIM (1 << 8) - -/* Scan chain manager command addresses */ - -#define WRITE_SCC_DQS_IN_DELAY(group, delay) IOWR_32DIRECT(SCC_MGR_DQS_IN_DELAY, (group) << 2, delay) -#define WRITE_SCC_DQS_EN_DELAY(group, delay) IOWR_32DIRECT(SCC_MGR_DQS_EN_DELAY, (group) << 2, (delay) + IO_DQS_EN_DELAY_OFFSET) -#define WRITE_SCC_DQS_EN_PHASE(group, phase) IOWR_32DIRECT(SCC_MGR_DQS_EN_PHASE, (group) << 2, phase) -#define WRITE_SCC_DQDQS_OUT_PHASE(group, phase) IOWR_32DIRECT(SCC_MGR_DQDQS_OUT_PHASE, (group) << 2, phase) -#define WRITE_SCC_OCT_OUT1_DELAY(group, delay) IOWR_32DIRECT(SCC_MGR_OCT_OUT1_DELAY, (group) << 2, delay) -#define WRITE_SCC_OCT_OUT2_DELAY(group, delay) -#define WRITE_SCC_DQS_BYPASS(group, bypass) - -#define WRITE_SCC_DQ_OUT1_DELAY(pin, delay) IOWR_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (pin) << 2, delay) - -#define WRITE_SCC_DQ_OUT2_DELAY(pin, delay) - -#define WRITE_SCC_DQ_IN_DELAY(pin, delay) IOWR_32DIRECT(SCC_MGR_IO_IN_DELAY, (pin) << 2, delay) - -#define WRITE_SCC_DQ_BYPASS(pin, bypass) - -#define WRITE_SCC_RFIFO_MODE(pin, mode) - -#define WRITE_SCC_HHP_EXTRAS(value) IOWR_32DIRECT(SCC_MGR_HHP_GLOBALS, SCC_MGR_HHP_EXTRAS_OFFSET, value) -#define WRITE_SCC_HHP_DQSE_MAP(value) IOWR_32DIRECT(SCC_MGR_HHP_GLOBALS, SCC_MGR_HHP_DQSE_MAP_OFFSET, value) - -#define WRITE_SCC_DQS_IO_OUT1_DELAY(delay) IOWR_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS) << 2, delay) - -#define WRITE_SCC_DQS_IO_OUT2_DELAY(delay) - -#define WRITE_SCC_DQS_IO_IN_DELAY(delay) IOWR_32DIRECT(SCC_MGR_IO_IN_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS) << 2, delay) - -#define WRITE_SCC_DM_IO_OUT1_DELAY(pin, delay) IOWR_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + pin) << 2, delay) - -#define WRITE_SCC_DM_IO_OUT2_DELAY(pin, delay) - 
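The WRITE_SCC_*/READ_SCC_* accessors in this header address one scan-chain-control register per group or pin at a four-byte stride, (index) << 2, and a new setting reaches the I/O only after the pin or group is loaded and SCC_MGR_UPD is written, which is why the calibration code above pokes SCC_MGR_UPD after every batch of changes. A short usage sketch, assuming this header and its companion register definitions are included; the bump-one-tap policy is purely illustrative.

/* illustrative: nudge one DQ pin's output delay by a single tap */
static void bump_dq_out1_delay(uint32_t pin)
{
	uint32_t cur = READ_SCC_DQ_OUT1_DELAY(pin);

	if (cur < IO_IO_OUT1_DELAY_MAX)
		WRITE_SCC_DQ_OUT1_DELAY(pin, cur + 1);

	scc_mgr_load_dq(pin);			/* queue the pin, as the calibration code does */
	IOWR_32DIRECT(SCC_MGR_UPD, 0, 0);	/* then commit the queued settings to the I/O */
}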
-#define WRITE_SCC_DM_IO_IN_DELAY(pin, delay) IOWR_32DIRECT(SCC_MGR_IO_IN_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + pin) << 2, delay) - -#define WRITE_SCC_DM_BYPASS(pin, bypass) - -#define READ_SCC_DQS_IN_DELAY(group) IORD_32DIRECT(SCC_MGR_DQS_IN_DELAY, (group) << 2) -#define READ_SCC_DQS_EN_DELAY(group) (IORD_32DIRECT(SCC_MGR_DQS_EN_DELAY, (group) << 2) - IO_DQS_EN_DELAY_OFFSET) -#define READ_SCC_DQS_EN_PHASE(group) IORD_32DIRECT(SCC_MGR_DQS_EN_PHASE, (group) << 2) -#define READ_SCC_DQDQS_OUT_PHASE(group) IORD_32DIRECT(SCC_MGR_DQDQS_OUT_PHASE, (group) << 2) -#define READ_SCC_OCT_OUT1_DELAY(group) IORD_32DIRECT(SCC_MGR_OCT_OUT1_DELAY, (group * RW_MGR_MEM_IF_READ_DQS_WIDTH / RW_MGR_MEM_IF_WRITE_DQS_WIDTH) << 2) -#define READ_SCC_OCT_OUT2_DELAY(group) 0 -#define READ_SCC_DQS_BYPASS(group) 0 -#define READ_SCC_DQS_BYPASS(group) 0 - -#define READ_SCC_DQ_OUT1_DELAY(pin) IORD_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (pin) << 2) -#define READ_SCC_DQ_OUT2_DELAY(pin) 0 -#define READ_SCC_DQ_IN_DELAY(pin) IORD_32DIRECT(SCC_MGR_IO_IN_DELAY, (pin) << 2) -#define READ_SCC_DQ_BYPASS(pin) 0 -#define READ_SCC_RFIFO_MODE(pin) 0 - -#define READ_SCC_DQS_IO_OUT1_DELAY() IORD_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS) << 2) -#define READ_SCC_DQS_IO_OUT2_DELAY() 0 -#define READ_SCC_DQS_IO_IN_DELAY() IORD_32DIRECT(SCC_MGR_IO_IN_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS) << 2) - -#define READ_SCC_DM_IO_OUT1_DELAY(pin) IORD_32DIRECT(SCC_MGR_IO_OUT1_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + pin) << 2) -#define READ_SCC_DM_IO_OUT2_DELAY(pin) 0 -#define READ_SCC_DM_IO_IN_DELAY(pin) IORD_32DIRECT(SCC_MGR_IO_IN_DELAY, (RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + pin) << 2) -#define READ_SCC_DM_BYPASS(pin) 0 - -#define SCC_MGR_GROUP_COUNTER (BASE_SCC_MGR + 0x0000) -#define SCC_MGR_DQS_IN_DELAY (BASE_SCC_MGR + 0x0100) -#define SCC_MGR_DQS_EN_PHASE (BASE_SCC_MGR + 0x0200) -#define SCC_MGR_DQS_EN_DELAY (BASE_SCC_MGR + 0x0300) -#define SCC_MGR_DQDQS_OUT_PHASE (BASE_SCC_MGR + 0x0400) -#define SCC_MGR_OCT_OUT1_DELAY (BASE_SCC_MGR + 0x0500) -#define SCC_MGR_IO_OUT1_DELAY (BASE_SCC_MGR + 0x0700) -#define SCC_MGR_IO_IN_DELAY (BASE_SCC_MGR + 0x0900) - -/* HHP-HPS-specific versions of some commands */ -#define SCC_MGR_DQS_EN_DELAY_GATE (BASE_SCC_MGR + 0x0600) -#define SCC_MGR_IO_OE_DELAY (BASE_SCC_MGR + 0x0800) -#define SCC_MGR_HHP_GLOBALS (BASE_SCC_MGR + 0x0A00) -#define SCC_MGR_HHP_RFILE (BASE_SCC_MGR + 0x0B00) - -/* HHP-HPS-specific values */ -#define SCC_MGR_HHP_EXTRAS_OFFSET 0 -#define SCC_MGR_HHP_DQSE_MAP_OFFSET 1 - -#define SCC_MGR_DQS_ENA (BASE_SCC_MGR + 0x0E00) -#define SCC_MGR_DQS_IO_ENA (BASE_SCC_MGR + 0x0E04) -#define SCC_MGR_DQ_ENA (BASE_SCC_MGR + 0x0E08) -#define SCC_MGR_DM_ENA (BASE_SCC_MGR + 0x0E0C) -#define SCC_MGR_UPD (BASE_SCC_MGR + 0x0E20) -#define SCC_MGR_ACTIVE_RANK (BASE_SCC_MGR + 0x0E40) -#define SCC_MGR_AFI_CAL_INIT (BASE_SCC_MGR + 0x0D00) - -// PHY Debug mode flag constants -#define PHY_DEBUG_IN_DEBUG_MODE 0x00000001 -#define PHY_DEBUG_ENABLE_CAL_RPT 0x00000002 -#define PHY_DEBUG_ENABLE_MARGIN_RPT 0x00000004 -#define PHY_DEBUG_SWEEP_ALL_GROUPS 0x00000008 -#define PHY_DEBUG_DISABLE_GUARANTEED_READ 0x00000010 -#define PHY_DEBUG_ENABLE_NON_DESTRUCTIVE_CALIBRATION 0x00000020 - -// Init and Reset delay constants - Only use if defined by sequencer_defines.h, -// otherwise, revert to defaults -// Default for Tinit = (0+1) * ((202+1) * (2 * 131 + 1) + 1) = 53532 = 200.75us @ 266MHz -#ifdef TINIT_CNTR0_VAL -#define SEQ_TINIT_CNTR0_VAL TINIT_CNTR0_VAL -#else -#define SEQ_TINIT_CNTR0_VAL 0 -#endif - -#ifdef TINIT_CNTR1_VAL 
-#define SEQ_TINIT_CNTR1_VAL TINIT_CNTR1_VAL -#else -#define SEQ_TINIT_CNTR1_VAL 202 -#endif - -#ifdef TINIT_CNTR2_VAL -#define SEQ_TINIT_CNTR2_VAL TINIT_CNTR2_VAL -#else -#define SEQ_TINIT_CNTR2_VAL 131 -#endif - -// Default for Treset = (2+1) * ((252+1) * (2 * 131 + 1) + 1) = 133563 = 500.86us @ 266MHz -#ifdef TRESET_CNTR0_VAL -#define SEQ_TRESET_CNTR0_VAL TRESET_CNTR0_VAL -#else -#define SEQ_TRESET_CNTR0_VAL 2 -#endif - -#ifdef TRESET_CNTR1_VAL -#define SEQ_TRESET_CNTR1_VAL TRESET_CNTR1_VAL -#else -#define SEQ_TRESET_CNTR1_VAL 252 -#endif - -#ifdef TRESET_CNTR2_VAL -#define SEQ_TRESET_CNTR2_VAL TRESET_CNTR2_VAL -#else -#define SEQ_TRESET_CNTR2_VAL 131 -#endif - -/* Bitfield type changes depending on protocol */ -typedef uint32_t t_btfld; - -#define RW_MGR_INST_ROM_WRITE BASE_RW_MGR + 0x1800 -#define RW_MGR_AC_ROM_WRITE BASE_RW_MGR + 0x1C00 - -static const uint32_t inst_rom_init_size; -static const uint32_t inst_rom_init[]; -static const uint32_t ac_rom_init_size; -static const uint32_t ac_rom_init[]; - -/* parameter variable holder */ - -typedef struct param_type { - t_btfld dm_correct_mask; - t_btfld read_correct_mask; - t_btfld read_correct_mask_vg; - t_btfld write_correct_mask; - t_btfld write_correct_mask_vg; - - /* set a particular entry to 1 if we need to skip a particular rank */ - - uint32_t skip_ranks[MAX_RANKS]; - - /* set a particular entry to 1 if we need to skip a particular group */ - - uint32_t skip_groups; - - /* set a particular entry to 1 if the shadow register (which represents a set of ranks) needs to be skipped */ - - uint32_t skip_shadow_regs[NUM_SHADOW_REGS]; - -} param_t; - -/* global variable holder */ - -typedef struct gbl_type { - - uint32_t phy_debug_mode_flags; - - /* current read latency */ - - uint32_t curr_read_lat; - - /* current write latency */ - - uint32_t curr_write_lat; - - /* error code */ - - uint32_t error_substage; - uint32_t error_stage; - uint32_t error_group; - - /* figure-of-merit in, figure-of-merit out */ - - uint32_t fom_in; - uint32_t fom_out; - - //USER Number of RW Mgr NOP cycles between write command and write data - uint32_t rw_wl_nop_cycles; -} gbl_t; - -// External global variables -static gbl_t *gbl; -static param_t *param; - -// External functions -static uint32_t run_mem_calibrate(void); -static void rw_mgr_mem_initialize(void); -static void rw_mgr_mem_dll_lock_wait(void); -static inline void scc_mgr_set_dq_in_delay(uint32_t write_group, uint32_t dq_in_group, - uint32_t delay); -static inline void scc_mgr_set_dq_out1_delay(uint32_t write_group, uint32_t dq_in_group, - uint32_t delay); -static inline void scc_mgr_set_dq_out2_delay(uint32_t write_group, uint32_t dq_in_group, - uint32_t delay); -static inline void scc_mgr_load_dq(uint32_t dq_in_group); -static inline void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay); -static inline void scc_mgr_load_dqs(uint32_t dqs); -static void scc_mgr_set_group_dqs_io_and_oct_out1_gradual(uint32_t write_group, uint32_t delay); -static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group, uint32_t delay); -static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group, uint32_t phase); -static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group, uint32_t phase); -static inline void scc_mgr_set_dm_out1_delay(uint32_t write_group, uint32_t dm, uint32_t delay); -static inline void scc_mgr_set_dm_out2_delay(uint32_t write_group, uint32_t dm, uint32_t delay); -static inline void scc_mgr_load_dm(uint32_t dm); -int sdram_calibration(void); -#endif diff 
--git a/arch/arm/mach-socfpga/include/mach/sequencer_defines.h b/arch/arm/mach-socfpga/include/mach/sequencer_defines.h deleted file mode 100644 index 5059844106..0000000000 --- a/arch/arm/mach-socfpga/include/mach/sequencer_defines.h +++ /dev/null @@ -1,6 +0,0 @@ -#define TINIT_CNTR1_VAL 32 -#define TINIT_CNTR2_VAL 32 -#define TINIT_CNTR0_VAL 99 -#define TRESET_CNTR1_VAL 99 -#define TRESET_CNTR2_VAL 10 -#define TRESET_CNTR0_VAL 99 diff --git a/arch/arm/mach-socfpga/include/mach/socfpga-regs.h b/arch/arm/mach-socfpga/include/mach/socfpga-regs.h deleted file mode 100644 index e88daf7189..0000000000 --- a/arch/arm/mach-socfpga/include/mach/socfpga-regs.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __MACH_SOCFPGA_REGS_H -#define __MACH_SOCFPGA_REGS_H - -#define CYCLONE5_SDMMC_ADDRESS 0xff704000 -#define CYCLONE5_QSPI_CTRL_ADDRESS 0xff705000 -#define CYCLONE5_QSPI_DATA_ADDRESS 0xffa00000 -#define CYCLONE5_FPGAMGRREGS_ADDRESS 0xff706000 -#define CYCLONE5_GPIO0_BASE 0xff708000 -#define CYCLONE5_GPIO1_BASE 0xff709000 -#define CYCLONE5_GPIO2_BASE 0xff70A000 -#define CYCLONE5_L3REGS_ADDRESS 0xff800000 -#define CYCLONE5_FPGAMGRDATA_ADDRESS 0xffb90000 -#define CYCLONE5_UART0_ADDRESS 0xffc02000 -#define CYCLONE5_UART1_ADDRESS 0xffc03000 -#define CYCLONE5_SDR_ADDRESS 0xffc20000 -#define CYCLONE5_CLKMGR_ADDRESS 0xffd04000 -#define CYCLONE5_RSTMGR_ADDRESS 0xffd05000 -#define CYCLONE5_SYSMGR_ADDRESS 0xffd08000 -#define CYCLONE5_SCANMGR_ADDRESS 0xfff02000 -#define CYCLONE5_SMP_TWD_ADDRESS 0xfffec600 - -#endif /* __MACH_SOCFPGA_REGS_H */ diff --git a/arch/arm/mach-socfpga/include/mach/system-manager.h b/arch/arm/mach-socfpga/include/mach/system-manager.h deleted file mode 100644 index 9efc37a4dc..0000000000 --- a/arch/arm/mach-socfpga/include/mach/system-manager.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef _SYSTEM_MANAGER_H_ -#define _SYSTEM_MANAGER_H_ - -void socfpga_sysmgr_pinmux_init(unsigned long *sys_mgr_init_table, int num); - -/* address */ -#define CONFIG_SYSMGR_ROMCODEGRP_CTRL (CYCLONE5_SYSMGR_ADDRESS + 0xc0) - -/* FPGA interface group */ -#define SYSMGR_FPGAINTF_MODULE (CYCLONE5_SYSMGR_ADDRESS + 0x28) -/* EMAC interface selection */ -#define CONFIG_SYSMGR_EMAC_CTRL (CYCLONE5_SYSMGR_ADDRESS + 0x60) - -#define ISWGRP_HANDOFF_AXIBRIDGE SYSMGR_ISWGRP_HANDOFF0 -#define ISWGRP_HANDOFF_L3REMAP SYSMGR_ISWGRP_HANDOFF1 -#define ISWGRP_HANDOFF_FPGAINTF SYSMGR_ISWGRP_HANDOFF2 -#define ISWGRP_HANDOFF_FPGA2SDR SYSMGR_ISWGRP_HANDOFF3 - -/* pin mux */ -#define SYSMGR_PINMUXGRP (CYCLONE5_SYSMGR_ADDRESS + 0x400) -#define SYSMGR_PINMUXGRP_NANDUSEFPGA (SYSMGR_PINMUXGRP + 0x2F0) -#define SYSMGR_PINMUXGRP_EMAC1USEFPGA (SYSMGR_PINMUXGRP + 0x2F8) -#define SYSMGR_PINMUXGRP_SDMMCUSEFPGA (SYSMGR_PINMUXGRP + 0x308) -#define SYSMGR_PINMUXGRP_EMAC0USEFPGA (SYSMGR_PINMUXGRP + 0x314) -#define SYSMGR_PINMUXGRP_SPIM1USEFPGA (SYSMGR_PINMUXGRP + 0x330) -#define SYSMGR_PINMUXGRP_SPIM0USEFPGA (SYSMGR_PINMUXGRP + 0x338) - -/* bit fields */ -#define CONFIG_SYSMGR_PINMUXGRP_OFFSET (0x400) -#define SYSMGR_ROMCODEGRP_CTRL_WARMRSTCFGPINMUX (1<<0) -#define SYSMGR_ROMCODEGRP_CTRL_WARMRSTCFGIO (1<<1) -#define SYSMGR_ECC_OCRAM_EN (1<<0) -#define SYSMGR_ECC_OCRAM_SERR (1<<3) -#define SYSMGR_ECC_OCRAM_DERR (1<<4) -#define SYSMGR_FPGAINTF_USEFPGA 0x1 -#define SYSMGR_FPGAINTF_SPIM0 (1<<0) -#define SYSMGR_FPGAINTF_SPIM1 (1<<1) -#define SYSMGR_FPGAINTF_EMAC0 (1<<2) -#define SYSMGR_FPGAINTF_EMAC1 (1<<3) -#define SYSMGR_FPGAINTF_NAND (1<<4) -#define SYSMGR_FPGAINTF_SDMMC (1<<5) - -/* Enumeration: sysmgr::emacgrp::ctrl::physel::enum */ -#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 -#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 -#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 -#define SYSMGR_EMACGRP_CTRL_PHYSEL0_LSB 0 -#define SYSMGR_EMACGRP_CTRL_PHYSEL1_LSB 2 -#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 - -#endif /* _SYSTEM_MANAGER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/system.h b/arch/arm/mach-socfpga/include/mach/system.h old mode 100755 new mode 100644 diff --git a/arch/arm/mach-socfpga/include/mach/tclrpt.h b/arch/arm/mach-socfpga/include/mach/tclrpt.h old mode 100755 new mode 100644 index 4345b23ba6..6b332c8754 --- a/arch/arm/mach-socfpga/include/mach/tclrpt.h +++ b/arch/arm/mach-socfpga/include/mach/tclrpt.h @@ -28,7 +28,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#include "sequencer.h" +#include "cyclone5-sequencer.h" #define TCLRPT_SET(item, value) diff --git a/arch/arm/mach-socfpga/init.c b/arch/arm/mach-socfpga/init.c deleted file mode 100644 index 0c679e3d2d..0000000000 --- a/arch/arm/mach-socfpga/init.c +++ /dev/null @@ -1,58 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -void socfpga_lowlevel_init(struct socfpga_cm_config *cm_config, - struct socfpga_io_config *io_config) -{ - uint32_t val; - - val = 0xffffffff; - val &= ~(1 << RSTMGR_PERMODRST_L4WD0_LSB); - val &= ~(1 << RSTMGR_PERMODRST_OSC1TIMER0_LSB); - writel(val, CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); - - /* freeze all IO banks */ - sys_mgr_frzctrl_freeze_req(FREEZE_CHANNEL_0); - sys_mgr_frzctrl_freeze_req(FREEZE_CHANNEL_1); - sys_mgr_frzctrl_freeze_req(FREEZE_CHANNEL_2); - sys_mgr_frzctrl_freeze_req(FREEZE_CHANNEL_3); - - writel(~0, CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_BRG_MOD_RESET_OFS); - - debug("Reconfigure Clock Manager\n"); - - /* reconfigure the PLLs */ - socfpga_cm_basic_init(cm_config); - - debug("Configure IOCSR\n"); - /* configure the IOCSR through scan chain */ - scan_mgr_io_scan_chain_prg(IO_SCAN_CHAIN_0, CONFIG_HPS_IOCSR_SCANCHAIN0_LENGTH, io_config->iocsr_emac_mixed2); - scan_mgr_io_scan_chain_prg(IO_SCAN_CHAIN_1, CONFIG_HPS_IOCSR_SCANCHAIN1_LENGTH, io_config->iocsr_mixed1_flash); - scan_mgr_io_scan_chain_prg(IO_SCAN_CHAIN_2, CONFIG_HPS_IOCSR_SCANCHAIN2_LENGTH, io_config->iocsr_general); - scan_mgr_io_scan_chain_prg(IO_SCAN_CHAIN_3, CONFIG_HPS_IOCSR_SCANCHAIN3_LENGTH, io_config->iocsr_ddr); - - /* configure the pin muxing through system manager */ - socfpga_sysmgr_pinmux_init(io_config->pinmux, io_config->num_pin); - - writel(RSTMGR_PERMODRST_L4WD0 | RSTMGR_PERMODRST_L4WD1, - CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); - - /* unfreeze / thaw all IO banks */ - sys_mgr_frzctrl_thaw_req(FREEZE_CHANNEL_0); - sys_mgr_frzctrl_thaw_req(FREEZE_CHANNEL_1); - sys_mgr_frzctrl_thaw_req(FREEZE_CHANNEL_2); - sys_mgr_frzctrl_thaw_req(FREEZE_CHANNEL_3); - - writel(0x18, CYCLONE5_L3REGS_ADDRESS); - writel(0x1, 0xfffefc00); - - INIT_LL(); -} diff --git a/arch/arm/mach-socfpga/nic301.c b/arch/arm/mach-socfpga/nic301.c index 206dd48ff9..7069c6e5b9 100644 --- a/arch/arm/mach-socfpga/nic301.c +++ b/arch/arm/mach-socfpga/nic301.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include /* * Convert all slave from secure to non secure diff --git a/arch/arm/mach-socfpga/reset-manager.c b/arch/arm/mach-socfpga/reset-manager.c deleted file mode 100644 index 04522da4d1..0000000000 --- a/arch/arm/mach-socfpga/reset-manager.c +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include -#include -#include -#include - -/* Disable the watchdog (toggle reset to watchdog) */ -void watchdog_disable(void) -{ - void __iomem *rm = (void *)CYCLONE5_RSTMGR_ADDRESS; - uint32_t val; - - /* assert reset for watchdog */ - val = readl(rm + RESET_MGR_PER_MOD_RESET_OFS); - val |= 1 << RSTMGR_PERMODRST_L4WD0_LSB; - writel(val, rm + RESET_MGR_PER_MOD_RESET_OFS); - - /* deassert watchdog from reset (watchdog in not running state) */ - val = readl(rm + RESET_MGR_PER_MOD_RESET_OFS); - val &= ~(1 << RSTMGR_PERMODRST_L4WD0_LSB); - writel(val, rm + RESET_MGR_PER_MOD_RESET_OFS); -} - -/* Write the reset manager register to cause reset */ -static void __noreturn socfpga_restart_soc(struct restart_handler *rst) -{ - /* request a warm reset */ - writel((1 << RSTMGR_CTRL_SWWARMRSTREQ_LSB), - CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_CTRL_OFS); - /* - * infinite loop here as watchdog will trigger and reset - * the processor - */ - hang(); -} - -static int restart_register_feature(void) -{ - restart_handler_register_fn(socfpga_restart_soc); - - return 0; -} -coredevice_initcall(restart_register_feature); diff --git a/arch/arm/mach-socfpga/scan-manager.c b/arch/arm/mach-socfpga/scan-manager.c deleted file mode 100644 index 57979b90a2..0000000000 --- a/arch/arm/mach-socfpga/scan-manager.c +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include -#include - -/* - * @fn scan_mgr_io_scan_chain_engine_is_idle - * - * @brief function to check IO scan chain engine status and wait if the - * engine is active. Poll the IO scan chain engine till maximum iteration - * reached. 
- * - * @param max_iter uint32_t [in] - maximum polling loop to revent infinite loop - */ -static int scan_mgr_io_scan_chain_engine_is_idle(uint32_t max_iter) -{ - uint32_t scanmgr_status; - - scanmgr_status = readl(SCANMGR_STAT_ADDRESS + - CYCLONE5_SCANMGR_ADDRESS); - - /* Poll the engine until the scan engine is inactive */ - while (SCANMGR_STAT_ACTIVE_GET(scanmgr_status) - || (SCANMGR_STAT_WFIFOCNT_GET(scanmgr_status) > 0)) { - - max_iter--; - - if (max_iter > 0) { - scanmgr_status = readl( - CYCLONE5_SCANMGR_ADDRESS + - SCANMGR_STAT_ADDRESS); - } else { - return 0; - } - } - return 1; -} - -/* - * scan_mgr_io_scan_chain_prg - * Program HPS IO Scan Chain - */ -int scan_mgr_io_scan_chain_prg(enum io_scan_chain io_scan_chain_id, - uint32_t io_scan_chain_len_in_bits, - const unsigned long *iocsr_scan_chain) -{ - uint16_t tdi_tdo_header; - uint32_t io_program_iter; - uint32_t io_scan_chain_data_residual; - uint32_t residual; - uint32_t i; - uint32_t index = 0; - uint32_t val; - int ret; - void __iomem *sysmgr = (void *)CYCLONE5_SYSMGR_ADDRESS; - void __iomem *scanmgr = (void *)CYCLONE5_SCANMGR_ADDRESS; - - /* De-assert reinit if the IO scan chain is intended for HIO */ - if (io_scan_chain_id == IO_SCAN_CHAIN_3) { - val = readl(sysmgr + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - val &= ~SYSMGR_FRZCTRL_HIOCTRL_DLLRST_MASK; - writel(val, sysmgr + SYSMGR_FRZCTRL_HIOCTRL_ADDRESS); - } /* if (HIO) */ - - /* - * Check if the scan chain engine is inactive and the - * WFIFO is empty before enabling the IO scan chain - */ - if (!scan_mgr_io_scan_chain_engine_is_idle(MAX_WAITING_DELAY_IO_SCAN_ENGINE)) - return -EBUSY; - - /* - * Enable IO Scan chain based on scan chain id - * Note: only one chain can be enabled at a time - */ - val = readl(scanmgr + SCANMGR_EN_ADDRESS); - val |= 1 << io_scan_chain_id; - writel(val, scanmgr + SCANMGR_EN_ADDRESS); - - /* - * Calculate number of iteration needed for - * full 128-bit (4 x32-bits) bits shifting. 
- * Each TDI_TDO packet can shift in maximum 128-bits - */ - io_program_iter = io_scan_chain_len_in_bits >> IO_SCAN_CHAIN_128BIT_SHIFT; - io_scan_chain_data_residual = io_scan_chain_len_in_bits & IO_SCAN_CHAIN_128BIT_MASK; - - /* - * Construct TDI_TDO packet for - * 128-bit IO scan chain (2 bytes) - */ - tdi_tdo_header = TDI_TDO_HEADER_FIRST_BYTE | - (TDI_TDO_MAX_PAYLOAD << TDI_TDO_HEADER_SECOND_BYTE_SHIFT); - - /* Program IO scan chain in 128-bit iteration */ - for (i = 0; i < io_program_iter; i++) { - - /* write TDI_TDO packet header to scan manager */ - writel(tdi_tdo_header, (scanmgr + SCANMGR_FIFODOUBLEBYTE_ADDRESS)); - - /* calculate array index */ - index = i * 4; - - /* - * write 4 successive 32-bit IO scan - * chain data into WFIFO - */ - writel(iocsr_scan_chain[index], (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); - writel(iocsr_scan_chain[index + 1], (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); - writel(iocsr_scan_chain[index + 2], (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); - writel(iocsr_scan_chain[index + 3], (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); - - /* - * Check if the scan chain engine has completed the - * IO scan chain data shifting - */ - if (!scan_mgr_io_scan_chain_engine_is_idle(MAX_WAITING_DELAY_IO_SCAN_ENGINE)) { - ret = -EBUSY; - goto out_disable; - } - } - - /* Calculate array index for final TDI_TDO packet */ - index = io_program_iter * 4; - - /* Final TDI_TDO packet if any */ - if (0 != io_scan_chain_data_residual) { - /* - * Calculate number of quad bytes FIFO write - * needed for the final TDI_TDO packet - */ - io_program_iter = io_scan_chain_data_residual >> IO_SCAN_CHAIN_32BIT_SHIFT; - - /* - * Construct TDI_TDO packet for remaining IO - * scan chain (2 bytes) - */ - tdi_tdo_header = TDI_TDO_HEADER_FIRST_BYTE | - ((io_scan_chain_data_residual - 1) << TDI_TDO_HEADER_SECOND_BYTE_SHIFT); - - /* - * Program the last part of IO scan chain - * write TDI_TDO packet header (2 bytes) to - * scan manager - */ - writel(tdi_tdo_header, (scanmgr + SCANMGR_FIFODOUBLEBYTE_ADDRESS)); - - for (i = 0; i < io_program_iter; i++) { - - /* - * write remaining scan chain data into scan - * manager WFIFO with 4 bytes write - */ - writel(iocsr_scan_chain[index + i], - (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); - } - - index += io_program_iter; - residual = io_scan_chain_data_residual & IO_SCAN_CHAIN_32BIT_MASK; - - if (IO_SCAN_CHAIN_PAYLOAD_24BIT < residual) { - /* - * write the last 4B scan chain data - * into scan manager WFIFO - */ - writel(iocsr_scan_chain[index], - (scanmgr + SCANMGR_FIFOQUADBYTE_ADDRESS)); - } else { - /* - * write the remaining 1 - 3 bytes scan chain - * data into scan manager WFIFO byte by byte - * to prevent JTAG engine shifting unused data - * from the FIFO and mistaken the data as a - * valid command (even though unused bits are - * set to 0, but just to prevent hardware - * glitch) - */ - for (i = 0; i < residual; i += 8) { - writel(((iocsr_scan_chain[index] >> i) & IO_SCAN_CHAIN_BYTE_MASK), - (scanmgr + SCANMGR_FIFOSINGLEBYTE_ADDRESS)); - } - } - - /* - * Check if the scan chain engine has completed the - * IO scan chain data shifting - */ - if (!scan_mgr_io_scan_chain_engine_is_idle(MAX_WAITING_DELAY_IO_SCAN_ENGINE)) { - ret = -EBUSY; - goto out_disable; - } - } /* if (io_scan_chain_data_residual) */ - - ret = 0; - -out_disable: - /* Disable IO Scan chain when configuration done*/ - val = readl(scanmgr + SCANMGR_EN_ADDRESS); - val &= ~(1 << io_scan_chain_id); - writel(val, scanmgr + SCANMGR_EN_ADDRESS); - - return ret; -} diff --git 
a/arch/arm/mach-socfpga/system-manager.c b/arch/arm/mach-socfpga/system-manager.c deleted file mode 100644 index 45db921f1a..0000000000 --- a/arch/arm/mach-socfpga/system-manager.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (C) 2012 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include -#include - -void socfpga_sysmgr_pinmux_init(unsigned long *sys_mgr_init_table, int num) -{ - unsigned long offset = CONFIG_SYSMGR_PINMUXGRP_OFFSET; - const unsigned long *pval = sys_mgr_init_table; - unsigned long i; - - for (i = 0; i < num; i++) { - writel(*pval++, CYCLONE5_SYSMGR_ADDRESS + offset); - offset += sizeof(uint32_t); - } -} diff --git a/arch/arm/mach-socfpga/xload.c b/arch/arm/mach-socfpga/xload.c index d24944bbb8..5d47bb9d3e 100644 --- a/arch/arm/mach-socfpga/xload.c +++ b/arch/arm/mach-socfpga/xload.c @@ -17,8 +17,8 @@ #include #include -#include -#include +#include +#include static struct socfpga_barebox_part default_parts[] = { { diff --git a/drivers/firmware/socfpga.c b/drivers/firmware/socfpga.c index a0cd2011cb..c1eae98acc 100644 --- a/drivers/firmware/socfpga.c +++ b/drivers/firmware/socfpga.c @@ -34,10 +34,10 @@ #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #define FPGAMGRREGS_STAT 0x0 #define FPGAMGRREGS_CTRL 0x4 diff --git a/scripts/socfpga_import_preloader b/scripts/socfpga_import_preloader index b08262cb6c..63ff30ec2a 100755 --- a/scripts/socfpga_import_preloader +++ b/scripts/socfpga_import_preloader @@ -46,7 +46,7 @@ copy_source() { echo " Fixing include pathes..." # Fix include pathes - sed -i 's/#include /#include /g' $tgt + sed -i 's/#include /#include /g' $tgt sed -i 's/#include /#include /g' $tgt sed -i 's/#include "sequencer_auto.h"//g' $tgt sed -i 's/#include "sequencer_defines.h"//g' $tgt -- cgit v1.2.3 From beac095f7b0682853cd2ba6ad2e4bed6c6268f82 Mon Sep 17 00:00:00 2001 From: Steffen Trumtrar Date: Fri, 28 Apr 2017 16:41:38 +0200 Subject: clk: socfpga: move driver to subdirectory Prepare for Arria10 clock driver. 
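The move is content-neutral: the new drivers/clk/socfpga/clk.c below carries the same content (same blob id 6af0632caf) as the removed drivers/clk/socfpga.c, so only the Makefile entry changes from an object to a subdirectory. For reference, the divf/divq arithmetic this driver implements (vco = parent * (divf + 1), output = vco / (1 + divq)) can be exercised with the small stand-alone sketch below; pll_rate() and the sample register value are illustrative only and are not part of the patch, while the mask and shift constants are the ones used in clk.c.

  #include <stdio.h>
  #include <stdint.h>

  /* Illustrative sketch, not part of the patch: mirrors the divf/divq
   * arithmetic of clk_pll_recalc_rate() in clk.c (bypass handling left
   * out). Mask/shift constants are copied from the driver; the sample
   * register value and the 25 MHz parent are made up for the example. */
  #define SOCFPGA_PLL_DIVF_MASK   0x0000FFF8
  #define SOCFPGA_PLL_DIVF_SHIFT  3
  #define SOCFPGA_PLL_DIVQ_MASK   0x003F0000
  #define SOCFPGA_PLL_DIVQ_SHIFT  16

  static unsigned long pll_rate(uint32_t reg, unsigned long parent_rate)
  {
          unsigned long divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT;
          unsigned long divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT;
          unsigned long vco = parent_rate * (divf + 1);

          return vco / (1 + divq);
  }

  int main(void)
  {
          /* divf = 63, divq = 1, 25 MHz parent -> 1.6 GHz VCO, 800 MHz out */
          uint32_t reg = (63 << SOCFPGA_PLL_DIVF_SHIFT) | (1 << SOCFPGA_PLL_DIVQ_SHIFT);

          printf("%lu Hz\n", pll_rate(reg, 25000000));
          return 0;
  }

Keeping this per-SoC code in its own file under drivers/clk/socfpga/ is what lets an Arria10 clock driver be added next to it later in the series without touching the Cyclone5 paths.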
Signed-off-by: Steffen Trumtrar Signed-off-by: Sascha Hauer --- drivers/clk/Makefile | 2 +- drivers/clk/socfpga.c | 434 ------------------------------------------- drivers/clk/socfpga/Makefile | 1 + drivers/clk/socfpga/clk.c | 434 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 436 insertions(+), 435 deletions(-) delete mode 100644 drivers/clk/socfpga.c create mode 100644 drivers/clk/socfpga/Makefile create mode 100644 drivers/clk/socfpga/clk.c diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index d75b954a4e..b5abe1cdf5 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_ARCH_MVEBU) += mvebu/ obj-$(CONFIG_ARCH_MXS) += mxs/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ -obj-$(CONFIG_CLK_SOCFPGA) += socfpga.o +obj-$(CONFIG_CLK_SOCFPGA) += socfpga/ obj-$(CONFIG_MACH_MIPS_ATH79) += clk-ar933x.o obj-$(CONFIG_ARCH_IMX) += imx/ obj-$(CONFIG_COMMON_CLK_AT91) += at91/ diff --git a/drivers/clk/socfpga.c b/drivers/clk/socfpga.c deleted file mode 100644 index 6af0632caf..0000000000 --- a/drivers/clk/socfpga.c +++ /dev/null @@ -1,434 +0,0 @@ -/* - * Copyright (C) 2013 Sascha Hauer , Pengutronix - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* Clock Manager offsets */ -#define CLKMGR_CTRL 0x0 -#define CLKMGR_BYPASS 0x4 -#define CLKMGR_L4SRC 0x70 -#define CLKMGR_PERPLL_SRC 0xAC - -/* Clock bypass bits */ -#define MAINPLL_BYPASS (1<<0) -#define SDRAMPLL_BYPASS (1<<1) -#define SDRAMPLL_SRC_BYPASS (1<<2) -#define PERPLL_BYPASS (1<<3) -#define PERPLL_SRC_BYPASS (1<<4) - -#define SOCFPGA_PLL_BG_PWRDWN 0 -#define SOCFPGA_PLL_EXT_ENA 1 -#define SOCFPGA_PLL_PWR_DOWN 2 -#define SOCFPGA_PLL_DIVF_MASK 0x0000FFF8 -#define SOCFPGA_PLL_DIVF_SHIFT 3 -#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000 -#define SOCFPGA_PLL_DIVQ_SHIFT 16 -#define SOCFGPA_MAX_PARENTS 3 - -#define SOCFPGA_L4_MP_CLK "l4_mp_clk" -#define SOCFPGA_L4_SP_CLK "l4_sp_clk" -#define SOCFPGA_NAND_CLK "nand_clk" -#define SOCFPGA_NAND_X_CLK "nand_x_clk" -#define SOCFPGA_MMC_CLK "sdmmc_clk" -#define SOCFPGA_DB_CLK "gpio_db_clk" - -#define div_mask(width) ((1 << (width)) - 1) -#define streq(a, b) (strcmp((a), (b)) == 0) - -static void __iomem *clk_mgr_base_addr; - -struct clk_pll { - struct clk clk; - const char *parent; - unsigned regofs; -}; - -static unsigned long clk_pll_recalc_rate(struct clk *clk, - unsigned long parent_rate) -{ - struct clk_pll *pll = container_of(clk, struct clk_pll, clk); - unsigned long divf, divq, vco_freq, reg; - unsigned long bypass; - - reg = readl(clk_mgr_base_addr + pll->regofs); - - bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS); - if (bypass & MAINPLL_BYPASS) - return parent_rate; - - divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT; - divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT; - vco_freq = parent_rate * (divf + 1); - - return vco_freq / (1 + divq); -} - -static struct clk_ops clk_pll_ops = { - .recalc_rate = clk_pll_recalc_rate, -}; - -static struct clk *socfpga_pll_clk(struct device_node *node) -{ - struct clk_pll *pll; 
- int ret; - - pll = xzalloc(sizeof(*pll)); - - pll->parent = of_clk_get_parent_name(node, 0); - if (!pll->parent) - return ERR_PTR(-EINVAL); - - pll->clk.parent_names = &pll->parent; - pll->clk.num_parents = 1; - pll->clk.name = xstrdup(node->name); - pll->clk.ops = &clk_pll_ops; - - of_property_read_u32(node, "reg", &pll->regofs); - - ret = clk_register(&pll->clk); - if (ret) { - free(pll); - return ERR_PTR(ret); - } - - return &pll->clk; -} - -struct clk_periph { - struct clk clk; - const char *parent; - unsigned regofs; - unsigned int fixed_div; - void __iomem *div_reg; - unsigned int width; - unsigned int shift; -}; - -static unsigned long clk_periph_recalc_rate(struct clk *clk, - unsigned long parent_rate) -{ - struct clk_periph *periph = container_of(clk, struct clk_periph, clk); - u32 div, val; - - if (periph->fixed_div) { - div = periph->fixed_div; - } else { - if (periph->div_reg) { - val = readl(periph->div_reg) >> periph->shift; - val &= div_mask(periph->width); - parent_rate /= (val + 1); - } - div = ((readl(clk_mgr_base_addr + periph->regofs) & 0x1ff) + 1); - } - - return parent_rate / div; -} - -static struct clk_ops clk_periph_ops = { - .recalc_rate = clk_periph_recalc_rate, -}; - -static struct clk *socfpga_periph_clk(struct device_node *node) -{ - struct clk_periph *periph; - int ret; - u32 div_reg[3]; - - periph = xzalloc(sizeof(*periph)); - - periph->parent = of_clk_get_parent_name(node, 0); - if (!periph->parent) - return ERR_PTR(-EINVAL); - - periph->clk.parent_names = &periph->parent; - periph->clk.num_parents = 1; - periph->clk.name = xstrdup(node->name); - periph->clk.ops = &clk_periph_ops; - - ret = of_property_read_u32_array(node, "div-reg", div_reg, 3); - if (!ret) { - periph->div_reg = clk_mgr_base_addr + div_reg[0]; - periph->shift = div_reg[1]; - periph->width = div_reg[2]; - } else { - periph->div_reg = 0; - } - - of_property_read_u32(node, "reg", &periph->regofs); - of_property_read_u32(node, "fixed-divider", &periph->fixed_div); - - ret = clk_register(&periph->clk); - if (ret) { - free(periph); - return ERR_PTR(ret); - } - - return &periph->clk; -} - -struct clk_socfpga { - struct clk clk; - const char *parent; - void __iomem *reg; - void __iomem *div_reg; - unsigned int fixed_div; - unsigned int bit_idx; - unsigned int shift; - unsigned int width; - const char *parent_names[SOCFGPA_MAX_PARENTS]; -}; - -static int clk_socfpga_enable(struct clk *clk) -{ - struct clk_socfpga *cs = container_of(clk, struct clk_socfpga, clk); - u32 val; - - val = readl(cs->reg); - val |= 1 << cs->shift; - writel(val, cs->reg); - - return 0; -} - -static void clk_socfpga_disable(struct clk *clk) -{ - struct clk_socfpga *cs = container_of(clk, struct clk_socfpga, clk); - u32 val; - - val = readl(cs->reg); - val &= ~(1 << cs->shift); - writel(val, cs->reg); -} - -static int clk_socfpga_is_enabled(struct clk *clk) -{ - struct clk_socfpga *cs = container_of(clk, struct clk_socfpga, clk); - u32 val; - - val = readl(cs->reg); - - if (val & (1 << cs->shift)) - return 1; - else - return 0; -} - -static unsigned long clk_socfpga_recalc_rate(struct clk *clk, - unsigned long parent_rate) -{ - struct clk_socfpga *cs = container_of(clk, struct clk_socfpga, clk); - u32 div = 1, val; - - if (cs->fixed_div) { - div = cs->fixed_div; - } else if (cs->div_reg) { - val = readl(cs->div_reg) >> cs->shift; - val &= div_mask(cs->width); - if (streq(clk->name, SOCFPGA_DB_CLK)) - div = val + 1; - else - div = (1 << val); - } - - return parent_rate / div; -} - -static int clk_socfpga_get_parent(struct 
clk *clk) -{ - u32 perpll_src; - u32 l4_src; - - if (streq(clk->name, SOCFPGA_L4_MP_CLK)) { - l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); - return l4_src &= 0x1; - } - if (streq(clk->name, SOCFPGA_L4_SP_CLK)) { - l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); - return !!(l4_src & 2); - } - - perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); - if (streq(clk->name, SOCFPGA_MMC_CLK)) - return perpll_src &= 0x3; - if (streq(clk->name, SOCFPGA_NAND_CLK) || - streq(clk->name, SOCFPGA_NAND_X_CLK)) - return (perpll_src >> 2) & 3; - - /* QSPI clock */ - return (perpll_src >> 4) & 3; -} - -static int clk_socfpga_set_parent(struct clk *clk, u8 parent) -{ - u32 src_reg; - - if (streq(clk->name, SOCFPGA_L4_MP_CLK)) { - src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); - src_reg &= ~0x1; - src_reg |= parent; - writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); - } else if (streq(clk->name, SOCFPGA_L4_SP_CLK)) { - src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); - src_reg &= ~0x2; - src_reg |= (parent << 1); - writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); - } else { - src_reg = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); - if (streq(clk->name, SOCFPGA_MMC_CLK)) { - src_reg &= ~0x3; - src_reg |= parent; - } else if (streq(clk->name, SOCFPGA_NAND_CLK) || - streq(clk->name, SOCFPGA_NAND_X_CLK)) { - src_reg &= ~0xC; - src_reg |= (parent << 2); - } else {/* QSPI clock */ - src_reg &= ~0x30; - src_reg |= (parent << 4); - } - writel(src_reg, clk_mgr_base_addr + CLKMGR_PERPLL_SRC); - } - - return 0; -} - -static struct clk_ops clk_socfpga_ops = { - .recalc_rate = clk_socfpga_recalc_rate, - .enable = clk_socfpga_enable, - .disable = clk_socfpga_disable, - .is_enabled = clk_socfpga_is_enabled, - .get_parent = clk_socfpga_get_parent, - .set_parent = clk_socfpga_set_parent, -}; - -static struct clk *socfpga_gate_clk(struct device_node *node) -{ - u32 clk_gate[2]; - u32 div_reg[3]; - u32 fixed_div; - struct clk_socfpga *cs; - int ret; - int i = 0; - - cs = xzalloc(sizeof(*cs)); - - ret = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); - if (ret) - clk_gate[0] = 0; - - if (clk_gate[0]) { - cs->reg = clk_mgr_base_addr + clk_gate[0]; - cs->bit_idx = clk_gate[1]; - } - - ret = of_property_read_u32(node, "fixed-divider", &fixed_div); - if (ret) - cs->fixed_div = 0; - else - cs->fixed_div = fixed_div; - - ret = of_property_read_u32_array(node, "div-reg", div_reg, 3); - if (!ret) { - cs->div_reg = clk_mgr_base_addr + div_reg[0]; - cs->shift = div_reg[1]; - cs->width = div_reg[2]; - } - - for (i = 0; i < SOCFGPA_MAX_PARENTS; i++) { - cs->parent_names[i] = of_clk_get_parent_name(node, i); - if (!cs->parent_names[i]) - break; - } - - cs->clk.parent_names = cs->parent_names; - cs->clk.num_parents = i; - cs->clk.name = xstrdup(node->name); - cs->clk.ops = &clk_socfpga_ops; - - ret = clk_register(&cs->clk); - if (ret) { - free(cs); - return ERR_PTR(ret); - } - - return &cs->clk; -} - -static void socfpga_register_clocks(struct device_d *dev, struct device_node *node) -{ - struct device_node *child; - struct clk *clk; - - for_each_child_of_node(node, child) { - socfpga_register_clocks(dev, child); - } - - if (of_device_is_compatible(node, "altr,socfpga-pll-clock")) - clk = socfpga_pll_clk(node); - else if (of_device_is_compatible(node, "altr,socfpga-perip-clk")) - clk = socfpga_periph_clk(node); - else if (of_device_is_compatible(node, "altr,socfpga-gate-clk")) - clk = socfpga_gate_clk(node); - else - return; - - of_clk_add_provider(node, of_clk_src_simple_get, clk); -} - -static int 
socfpga_ccm_probe(struct device_d *dev) -{ - struct resource *iores; - void __iomem *regs; - struct device_node *clknode; - - iores = dev_request_mem_resource(dev, 0); - if (IS_ERR(iores)) - return PTR_ERR(iores); - regs = IOMEM(iores->start); - - clk_mgr_base_addr = regs; - - clknode = of_get_child_by_name(dev->device_node, "clocks"); - if (!clknode) - return -EINVAL; - - socfpga_register_clocks(dev, clknode); - - return 0; -} - -static __maybe_unused struct of_device_id socfpga_ccm_dt_ids[] = { - { - .compatible = "altr,clk-mgr", - }, { - /* sentinel */ - } -}; - -static struct driver_d socfpga_ccm_driver = { - .probe = socfpga_ccm_probe, - .name = "socfpga-ccm", - .of_compatible = DRV_OF_COMPAT(socfpga_ccm_dt_ids), -}; - -static int socfpga_ccm_init(void) -{ - return platform_driver_register(&socfpga_ccm_driver); -} -core_initcall(socfpga_ccm_init); diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile new file mode 100644 index 0000000000..fc216adb74 --- /dev/null +++ b/drivers/clk/socfpga/Makefile @@ -0,0 +1 @@ +obj-y += clk.o diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c new file mode 100644 index 0000000000..6af0632caf --- /dev/null +++ b/drivers/clk/socfpga/clk.c @@ -0,0 +1,434 @@ +/* + * Copyright (C) 2013 Sascha Hauer , Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Clock Manager offsets */ +#define CLKMGR_CTRL 0x0 +#define CLKMGR_BYPASS 0x4 +#define CLKMGR_L4SRC 0x70 +#define CLKMGR_PERPLL_SRC 0xAC + +/* Clock bypass bits */ +#define MAINPLL_BYPASS (1<<0) +#define SDRAMPLL_BYPASS (1<<1) +#define SDRAMPLL_SRC_BYPASS (1<<2) +#define PERPLL_BYPASS (1<<3) +#define PERPLL_SRC_BYPASS (1<<4) + +#define SOCFPGA_PLL_BG_PWRDWN 0 +#define SOCFPGA_PLL_EXT_ENA 1 +#define SOCFPGA_PLL_PWR_DOWN 2 +#define SOCFPGA_PLL_DIVF_MASK 0x0000FFF8 +#define SOCFPGA_PLL_DIVF_SHIFT 3 +#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000 +#define SOCFPGA_PLL_DIVQ_SHIFT 16 +#define SOCFGPA_MAX_PARENTS 3 + +#define SOCFPGA_L4_MP_CLK "l4_mp_clk" +#define SOCFPGA_L4_SP_CLK "l4_sp_clk" +#define SOCFPGA_NAND_CLK "nand_clk" +#define SOCFPGA_NAND_X_CLK "nand_x_clk" +#define SOCFPGA_MMC_CLK "sdmmc_clk" +#define SOCFPGA_DB_CLK "gpio_db_clk" + +#define div_mask(width) ((1 << (width)) - 1) +#define streq(a, b) (strcmp((a), (b)) == 0) + +static void __iomem *clk_mgr_base_addr; + +struct clk_pll { + struct clk clk; + const char *parent; + unsigned regofs; +}; + +static unsigned long clk_pll_recalc_rate(struct clk *clk, + unsigned long parent_rate) +{ + struct clk_pll *pll = container_of(clk, struct clk_pll, clk); + unsigned long divf, divq, vco_freq, reg; + unsigned long bypass; + + reg = readl(clk_mgr_base_addr + pll->regofs); + + bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS); + if (bypass & MAINPLL_BYPASS) + return parent_rate; + + divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT; + divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT; + vco_freq = parent_rate * (divf + 1); + + return vco_freq / (1 + divq); +} + +static struct clk_ops clk_pll_ops = { + .recalc_rate = 
clk_pll_recalc_rate, +}; + +static struct clk *socfpga_pll_clk(struct device_node *node) +{ + struct clk_pll *pll; + int ret; + + pll = xzalloc(sizeof(*pll)); + + pll->parent = of_clk_get_parent_name(node, 0); + if (!pll->parent) + return ERR_PTR(-EINVAL); + + pll->clk.parent_names = &pll->parent; + pll->clk.num_parents = 1; + pll->clk.name = xstrdup(node->name); + pll->clk.ops = &clk_pll_ops; + + of_property_read_u32(node, "reg", &pll->regofs); + + ret = clk_register(&pll->clk); + if (ret) { + free(pll); + return ERR_PTR(ret); + } + + return &pll->clk; +} + +struct clk_periph { + struct clk clk; + const char *parent; + unsigned regofs; + unsigned int fixed_div; + void __iomem *div_reg; + unsigned int width; + unsigned int shift; +}; + +static unsigned long clk_periph_recalc_rate(struct clk *clk, + unsigned long parent_rate) +{ + struct clk_periph *periph = container_of(clk, struct clk_periph, clk); + u32 div, val; + + if (periph->fixed_div) { + div = periph->fixed_div; + } else { + if (periph->div_reg) { + val = readl(periph->div_reg) >> periph->shift; + val &= div_mask(periph->width); + parent_rate /= (val + 1); + } + div = ((readl(clk_mgr_base_addr + periph->regofs) & 0x1ff) + 1); + } + + return parent_rate / div; +} + +static struct clk_ops clk_periph_ops = { + .recalc_rate = clk_periph_recalc_rate, +}; + +static struct clk *socfpga_periph_clk(struct device_node *node) +{ + struct clk_periph *periph; + int ret; + u32 div_reg[3]; + + periph = xzalloc(sizeof(*periph)); + + periph->parent = of_clk_get_parent_name(node, 0); + if (!periph->parent) + return ERR_PTR(-EINVAL); + + periph->clk.parent_names = &periph->parent; + periph->clk.num_parents = 1; + periph->clk.name = xstrdup(node->name); + periph->clk.ops = &clk_periph_ops; + + ret = of_property_read_u32_array(node, "div-reg", div_reg, 3); + if (!ret) { + periph->div_reg = clk_mgr_base_addr + div_reg[0]; + periph->shift = div_reg[1]; + periph->width = div_reg[2]; + } else { + periph->div_reg = 0; + } + + of_property_read_u32(node, "reg", &periph->regofs); + of_property_read_u32(node, "fixed-divider", &periph->fixed_div); + + ret = clk_register(&periph->clk); + if (ret) { + free(periph); + return ERR_PTR(ret); + } + + return &periph->clk; +} + +struct clk_socfpga { + struct clk clk; + const char *parent; + void __iomem *reg; + void __iomem *div_reg; + unsigned int fixed_div; + unsigned int bit_idx; + unsigned int shift; + unsigned int width; + const char *parent_names[SOCFGPA_MAX_PARENTS]; +}; + +static int clk_socfpga_enable(struct clk *clk) +{ + struct clk_socfpga *cs = container_of(clk, struct clk_socfpga, clk); + u32 val; + + val = readl(cs->reg); + val |= 1 << cs->shift; + writel(val, cs->reg); + + return 0; +} + +static void clk_socfpga_disable(struct clk *clk) +{ + struct clk_socfpga *cs = container_of(clk, struct clk_socfpga, clk); + u32 val; + + val = readl(cs->reg); + val &= ~(1 << cs->shift); + writel(val, cs->reg); +} + +static int clk_socfpga_is_enabled(struct clk *clk) +{ + struct clk_socfpga *cs = container_of(clk, struct clk_socfpga, clk); + u32 val; + + val = readl(cs->reg); + + if (val & (1 << cs->shift)) + return 1; + else + return 0; +} + +static unsigned long clk_socfpga_recalc_rate(struct clk *clk, + unsigned long parent_rate) +{ + struct clk_socfpga *cs = container_of(clk, struct clk_socfpga, clk); + u32 div = 1, val; + + if (cs->fixed_div) { + div = cs->fixed_div; + } else if (cs->div_reg) { + val = readl(cs->div_reg) >> cs->shift; + val &= div_mask(cs->width); + if (streq(clk->name, SOCFPGA_DB_CLK)) + div = val 
+ 1; + else + div = (1 << val); + } + + return parent_rate / div; +} + +static int clk_socfpga_get_parent(struct clk *clk) +{ + u32 perpll_src; + u32 l4_src; + + if (streq(clk->name, SOCFPGA_L4_MP_CLK)) { + l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); + return l4_src &= 0x1; + } + if (streq(clk->name, SOCFPGA_L4_SP_CLK)) { + l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); + return !!(l4_src & 2); + } + + perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); + if (streq(clk->name, SOCFPGA_MMC_CLK)) + return perpll_src &= 0x3; + if (streq(clk->name, SOCFPGA_NAND_CLK) || + streq(clk->name, SOCFPGA_NAND_X_CLK)) + return (perpll_src >> 2) & 3; + + /* QSPI clock */ + return (perpll_src >> 4) & 3; +} + +static int clk_socfpga_set_parent(struct clk *clk, u8 parent) +{ + u32 src_reg; + + if (streq(clk->name, SOCFPGA_L4_MP_CLK)) { + src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); + src_reg &= ~0x1; + src_reg |= parent; + writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); + } else if (streq(clk->name, SOCFPGA_L4_SP_CLK)) { + src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); + src_reg &= ~0x2; + src_reg |= (parent << 1); + writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); + } else { + src_reg = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); + if (streq(clk->name, SOCFPGA_MMC_CLK)) { + src_reg &= ~0x3; + src_reg |= parent; + } else if (streq(clk->name, SOCFPGA_NAND_CLK) || + streq(clk->name, SOCFPGA_NAND_X_CLK)) { + src_reg &= ~0xC; + src_reg |= (parent << 2); + } else {/* QSPI clock */ + src_reg &= ~0x30; + src_reg |= (parent << 4); + } + writel(src_reg, clk_mgr_base_addr + CLKMGR_PERPLL_SRC); + } + + return 0; +} + +static struct clk_ops clk_socfpga_ops = { + .recalc_rate = clk_socfpga_recalc_rate, + .enable = clk_socfpga_enable, + .disable = clk_socfpga_disable, + .is_enabled = clk_socfpga_is_enabled, + .get_parent = clk_socfpga_get_parent, + .set_parent = clk_socfpga_set_parent, +}; + +static struct clk *socfpga_gate_clk(struct device_node *node) +{ + u32 clk_gate[2]; + u32 div_reg[3]; + u32 fixed_div; + struct clk_socfpga *cs; + int ret; + int i = 0; + + cs = xzalloc(sizeof(*cs)); + + ret = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); + if (ret) + clk_gate[0] = 0; + + if (clk_gate[0]) { + cs->reg = clk_mgr_base_addr + clk_gate[0]; + cs->bit_idx = clk_gate[1]; + } + + ret = of_property_read_u32(node, "fixed-divider", &fixed_div); + if (ret) + cs->fixed_div = 0; + else + cs->fixed_div = fixed_div; + + ret = of_property_read_u32_array(node, "div-reg", div_reg, 3); + if (!ret) { + cs->div_reg = clk_mgr_base_addr + div_reg[0]; + cs->shift = div_reg[1]; + cs->width = div_reg[2]; + } + + for (i = 0; i < SOCFGPA_MAX_PARENTS; i++) { + cs->parent_names[i] = of_clk_get_parent_name(node, i); + if (!cs->parent_names[i]) + break; + } + + cs->clk.parent_names = cs->parent_names; + cs->clk.num_parents = i; + cs->clk.name = xstrdup(node->name); + cs->clk.ops = &clk_socfpga_ops; + + ret = clk_register(&cs->clk); + if (ret) { + free(cs); + return ERR_PTR(ret); + } + + return &cs->clk; +} + +static void socfpga_register_clocks(struct device_d *dev, struct device_node *node) +{ + struct device_node *child; + struct clk *clk; + + for_each_child_of_node(node, child) { + socfpga_register_clocks(dev, child); + } + + if (of_device_is_compatible(node, "altr,socfpga-pll-clock")) + clk = socfpga_pll_clk(node); + else if (of_device_is_compatible(node, "altr,socfpga-perip-clk")) + clk = socfpga_periph_clk(node); + else if (of_device_is_compatible(node, "altr,socfpga-gate-clk")) + clk = 
socfpga_gate_clk(node); + else + return; + + of_clk_add_provider(node, of_clk_src_simple_get, clk); +} + +static int socfpga_ccm_probe(struct device_d *dev) +{ + struct resource *iores; + void __iomem *regs; + struct device_node *clknode; + + iores = dev_request_mem_resource(dev, 0); + if (IS_ERR(iores)) + return PTR_ERR(iores); + regs = IOMEM(iores->start); + + clk_mgr_base_addr = regs; + + clknode = of_get_child_by_name(dev->device_node, "clocks"); + if (!clknode) + return -EINVAL; + + socfpga_register_clocks(dev, clknode); + + return 0; +} + +static __maybe_unused struct of_device_id socfpga_ccm_dt_ids[] = { + { + .compatible = "altr,clk-mgr", + }, { + /* sentinel */ + } +}; + +static struct driver_d socfpga_ccm_driver = { + .probe = socfpga_ccm_probe, + .name = "socfpga-ccm", + .of_compatible = DRV_OF_COMPAT(socfpga_ccm_dt_ids), +}; + +static int socfpga_ccm_init(void) +{ + return platform_driver_register(&socfpga_ccm_driver); +} +core_initcall(socfpga_ccm_init); -- cgit v1.2.3 From 0df547e24e44dec8615f89dd21b946e881e608a9 Mon Sep 17 00:00:00 2001 From: Steffen Trumtrar Date: Fri, 28 Apr 2017 16:41:39 +0200 Subject: net: designware: add dwmac-3.72a compatible This compatible will be needed for Arria10. Signed-off-by: Steffen Trumtrar Signed-off-by: Sascha Hauer --- drivers/net/designware.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/designware.c b/drivers/net/designware.c index bd20a8793a..1d3a68384e 100644 --- a/drivers/net/designware.c +++ b/drivers/net/designware.c @@ -519,6 +519,9 @@ static __maybe_unused struct of_device_id dwc_ether_compatible[] = { { .compatible = "snps,dwmac-3.70a", .data = &dwmac_370a_drvdata, + }, { + .compatible = "snps,dwmac-3.72a", + .data = &dwmac_370a_drvdata, }, { /* sentinel */ } -- cgit v1.2.3 From db3feb61d19060a0589f3906a8a081bebd934ace Mon Sep 17 00:00:00 2001 From: Steffen Trumtrar Date: Fri, 28 Apr 2017 16:41:40 +0200 Subject: ARM: socfpga: make debug_ll configurable Allow configuring the serial port and clock rate instead of hardcoding it. 
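The only SoC-specific inputs the low-level UART code needs are the port base address and its input clock, which this patch moves into Kconfig; the divisor written to DLL/DLM then follows the usual NS16550 16x oversampling rule, divisor = uart_clk / (16 * baudrate). A minimal sketch of that calculation, assuming the 100000000 Hz default and the fixed 115200 baud used below; calc_divisor() is an illustrative stand-in for the ns16550_calc_divisor() helper, not the helper itself.

  #include <stdio.h>

  /* Illustrative only: same arithmetic as the open-coded
   * "clk / 16 / 115200" that this patch replaces with a
   * Kconfig-driven divisor calculation. */
  static unsigned int calc_divisor(unsigned int clk, unsigned int baudrate)
  {
          return clk / 16 / baudrate;
  }

  int main(void)
  {
          unsigned int div = calc_divisor(100000000, 115200);

          /* 100 MHz / (16 * 115200) = 54, so DLL = 0x36 and DLM = 0x00 */
          printf("divisor = %u (DLL = 0x%02x, DLM = 0x%02x)\n",
                 div, div & 0xff, (div >> 8) & 0xff);
          return 0;
  }

A board whose debug UART runs from a different root clock only has to adjust CONFIG_DEBUG_SOCFPGA_UART_CLOCK; the register programming in INIT_LL() stays the same.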
Signed-off-by: Steffen Trumtrar Signed-off-by: Sascha Hauer --- arch/arm/mach-socfpga/include/mach/debug_ll.h | 69 +++++++++++++++++---------- common/Kconfig | 20 ++++++++ 2 files changed, 64 insertions(+), 25 deletions(-) diff --git a/arch/arm/mach-socfpga/include/mach/debug_ll.h b/arch/arm/mach-socfpga/include/mach/debug_ll.h index f378435970..4e906ea66e 100644 --- a/arch/arm/mach-socfpga/include/mach/debug_ll.h +++ b/arch/arm/mach-socfpga/include/mach/debug_ll.h @@ -2,24 +2,33 @@ #define __MACH_DEBUG_LL_H__ #include +#include -#define UART_BASE 0xffc02000 +#ifdef CONFIG_DEBUG_LL +#define UART_BASE CONFIG_DEBUG_SOCFPGA_UART_PHYS_ADDR +#endif #define LSR_THRE 0x20 /* Xmit holding register empty */ -#define LSR (5 << 2) -#define THR (0 << 2) +#define LSR_TEMT 0x40 #define LCR_BKSE 0x80 /* Bank select enable */ -#define LSR (5 << 2) -#define THR (0 << 2) -#define DLL (0 << 2) -#define IER (1 << 2) -#define DLM (1 << 2) -#define FCR (2 << 2) -#define LCR (3 << 2) -#define MCR (4 << 2) -#define MDR (8 << 2) +#define LCRVAL 0x3 +#define MCRVAL 0x3 +#define FCRVAL 0xc1 + +#define RBR 0x0 +#define DLL 0x0 +#define IER 0x4 +#define DLM 0x4 +#define FCR 0x8 +#define LCR 0xc +#define MCR 0x10 +#define LSR 0x14 +#define MSR 0x18 +#define SCR 0x1c +#define THR 0x30 +#ifdef CONFIG_DEBUG_LL static inline unsigned int ns16550_calc_divisor(unsigned int clk, unsigned int baudrate) { @@ -28,19 +37,20 @@ static inline unsigned int ns16550_calc_divisor(unsigned int clk, static inline void INIT_LL(void) { - unsigned int clk = 100000000; - unsigned int divisor = clk / 16 / 115200; - - writeb(0x00, UART_BASE + LCR); - writeb(0x00, UART_BASE + IER); - writeb(0x07, UART_BASE + MDR); - writeb(LCR_BKSE, UART_BASE + LCR); - writeb(divisor & 0xff, UART_BASE + DLL); - writeb(divisor >> 8, UART_BASE + DLM); - writeb(0x03, UART_BASE + LCR); - writeb(0x03, UART_BASE + MCR); - writeb(0x07, UART_BASE + FCR); - writeb(0x00, UART_BASE + MDR); + unsigned int div = ns16550_calc_divisor(CONFIG_DEBUG_SOCFPGA_UART_CLOCK, + 115200); + + while ((readl(UART_BASE + LSR) & LSR_TEMT) == 0); + + writel(0x00, UART_BASE + IER); + + writel(LCR_BKSE, UART_BASE + LCR); + writel(div & 0xff, UART_BASE + DLL); + writel((div >> 8) & 0xff, UART_BASE + DLM); + writel(LCRVAL, UART_BASE + LCR); + + writel(MCRVAL, UART_BASE + MCR); + writel(FCRVAL, UART_BASE + FCR); } static inline void PUTC_LL(char c) @@ -52,4 +62,13 @@ static inline void PUTC_LL(char c) /* Wait to make sure it hits the line, in case we die too soon. */ while ((readb(UART_BASE + LSR) & LSR_THRE) == 0); } + +#else +static inline unsigned int ns16550_calc_divisor(unsigned int clk, + unsigned int baudrate) { + return -ENOSYS; +} +static inline void INIT_LL(void) {} +static inline void PUTC_LL(char c) {} +#endif #endif diff --git a/common/Kconfig b/common/Kconfig index 8dd40db2c0..4c7a2d2679 100644 --- a/common/Kconfig +++ b/common/Kconfig @@ -1122,6 +1122,14 @@ config DEBUG_ROCKCHIP_UART Say Y here if you want kernel low-level debugging support on RK3XXX. +config DEBUG_SOCFPGA_UART0 + bool "Use SOCFPGA UART0 for low-level debug" + depends on ARCH_SOCFPGA + help + Say Y here if you want kernel low-level debugging support + on SOCFPGA(Cyclone 5 and Arria 5) based platforms. + + endchoice config DEBUG_IMX_UART_PORT @@ -1164,6 +1172,18 @@ config DEBUG_ROCKCHIP_UART_PORT Choose UART port on which kernel low-level debug messages should be output. 
+config DEBUG_SOCFPGA_UART_PHYS_ADDR + hex "Physical base address of debug UART" if DEBUG_LL + default 0xffc02000 if DEBUG_SOCFPGA_UART0 + depends on ARCH_SOCFPGA + +config DEBUG_SOCFPGA_UART_CLOCK + int "SoCFPGA UART debug clock" if DEBUG_LL + default 100000000 + depends on ARCH_SOCFPGA + help + Choose UART root clock. + config DEBUG_INITCALLS bool "Trace initcalls" help -- cgit v1.2.3 From d5c8bc3ff1a795cb9ef44abd518f5dae6f9000fa Mon Sep 17 00:00:00 2001 From: Steffen Trumtrar Date: Fri, 28 Apr 2017 16:41:41 +0200 Subject: ARM: socfpga: add arria10 support Arria10 is a SoC + FPGA like the Cyclone5 SoCFPGA that is already supported in barebox. Both are the same in some parts, but totally different in others. Most of the hardware blocks are the same in the SoC parts. The OCRAM is larger on the Arria10 and the SDRAM controller is different. The serial core only supports 32bit accesses (different to the 8bit accesses on the Cyclone5). As Arria10 has 256KB of OCRAM, it is possible to fit a larger barebox (and/or use PBL) instead of the two-stage boot process used on the Cyclone5 and its 64KB OCRAM. Signed-off-by: Steffen Trumtrar Signed-off-by: Sascha Hauer --- arch/arm/Kconfig | 6 +- arch/arm/mach-socfpga/Kconfig | 19 + arch/arm/mach-socfpga/Makefile | 10 +- arch/arm/mach-socfpga/arria10-bootsource.c | 53 ++ arch/arm/mach-socfpga/arria10-clock-manager.c | 815 +++++++++++++++++++++ arch/arm/mach-socfpga/arria10-generic.c | 85 +++ arch/arm/mach-socfpga/arria10-init.c | 193 +++++ arch/arm/mach-socfpga/arria10-reset-manager.c | 398 ++++++++++ arch/arm/mach-socfpga/arria10-sdram.c | 535 ++++++++++++++ arch/arm/mach-socfpga/cyclone5-bootsource.c | 45 +- arch/arm/mach-socfpga/generic.c | 104 --- .../include/mach/arria10-clock-manager.h | 249 +++++++ .../arm/mach-socfpga/include/mach/arria10-pinmux.h | 250 +++++++ arch/arm/mach-socfpga/include/mach/arria10-regs.h | 114 +++ .../include/mach/arria10-reset-manager.h | 114 +++ arch/arm/mach-socfpga/include/mach/arria10-sdram.h | 353 +++++++++ .../include/mach/arria10-system-manager.h | 97 +++ .../mach-socfpga/include/mach/barebox-arm-head.h | 42 ++ arch/arm/mach-socfpga/include/mach/debug_ll.h | 12 + arch/arm/mach-socfpga/include/mach/generic.h | 36 + arch/arm/mach-socfpga/xload.c | 92 +-- common/Kconfig | 11 +- images/Makefile.socfpga | 5 +- scripts/socfpga_xml_to_config.sh | 117 +++ 24 files changed, 3554 insertions(+), 201 deletions(-) create mode 100644 arch/arm/mach-socfpga/arria10-bootsource.c create mode 100644 arch/arm/mach-socfpga/arria10-clock-manager.c create mode 100644 arch/arm/mach-socfpga/arria10-generic.c create mode 100644 arch/arm/mach-socfpga/arria10-init.c create mode 100644 arch/arm/mach-socfpga/arria10-reset-manager.c create mode 100644 arch/arm/mach-socfpga/arria10-sdram.c delete mode 100644 arch/arm/mach-socfpga/generic.c create mode 100644 arch/arm/mach-socfpga/include/mach/arria10-clock-manager.h create mode 100644 arch/arm/mach-socfpga/include/mach/arria10-pinmux.h create mode 100644 arch/arm/mach-socfpga/include/mach/arria10-regs.h create mode 100644 arch/arm/mach-socfpga/include/mach/arria10-reset-manager.h create mode 100644 arch/arm/mach-socfpga/include/mach/arria10-sdram.h create mode 100644 arch/arm/mach-socfpga/include/mach/arria10-system-manager.h create mode 100644 arch/arm/mach-socfpga/include/mach/barebox-arm-head.h create mode 100755 scripts/socfpga_xml_to_config.sh diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2583e9dd1b..e7edc2ad44 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -169,7 +169,7 @@
config ARCH_ROCKCHIP select ARCH_HAS_L2X0 config ARCH_SOCFPGA - bool "Altera SOCFPGA cyclone5" + bool "Altera SOCFPGA" select HAS_DEBUG_LL select ARM_SMP_TWD select CPU_V7 @@ -177,8 +177,8 @@ config ARCH_SOCFPGA select CLKDEV_LOOKUP select GPIOLIB select HAVE_PBL_MULTI_IMAGES - select OFDEVICE if !ARCH_SOCFPGA_XLOAD - select OFTREE if !ARCH_SOCFPGA_XLOAD + select OFDEVICE if !(ARCH_SOCFPGA_XLOAD && ARCH_SOCFPGA_CYCLONE5) + select OFTREE if !(ARCH_SOCFPGA_XLOAD && ARCH_SOCFPGA_CYCLONE5) config ARCH_S3C24xx bool "Samsung S3C2410, S3C2440" diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index 04b5416b64..0a33e88644 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -12,16 +12,35 @@ config ARCH_TEXT_BASE hex default 0x00100000 if MACH_SOCFPGA_CYCLONE5 +comment "Altera SoCFPGA System-on-Chip" + +config ARCH_SOCFPGA_CYCLONE5 + bool + select CPU_V7 + +config ARCH_SOCFPGA_ARRIA10 + bool + select CPU_V7 + select HAVE_MACH_ARM_HEAD + config MACH_SOCFPGA_ALTERA_SOCDK + select HAVE_DEFAULT_ENVIRONMENT_NEW + select ARCH_SOCFPGA_CYCLONE5 bool "Altera SoCFPGA Development Kit" config MACH_SOCFPGA_EBV_SOCRATES + select HAVE_DEFAULT_ENVIRONMENT_NEW + select ARCH_SOCFPGA_CYCLONE5 bool "EBV Socrates" config MACH_SOCFPGA_TERASIC_DE0_NANO_SOC + select HAVE_DEFAULT_ENVIRONMENT_NEW + select ARCH_SOCFPGA_CYCLONE5 bool "Terasic DE0-NANO-SoC aka Atlas" config MACH_SOCFPGA_TERASIC_SOCKIT + select HAVE_DEFAULT_ENVIRONMENT_NEW + select ARCH_SOCFPGA_CYCLONE5 bool "Terasic SoCKit" endif diff --git a/arch/arm/mach-socfpga/Makefile b/arch/arm/mach-socfpga/Makefile index 30b796dd3b..cbb47fa206 100644 --- a/arch/arm/mach-socfpga/Makefile +++ b/arch/arm/mach-socfpga/Makefile @@ -1,4 +1,8 @@ -pbl-y += cyclone5-init.o cyclone5-freeze-controller.o cyclone5-scan-manager.o cyclone5-system-manager.o -pbl-y += cyclone5-clock-manager.o -obj-y += cyclone5-generic.o nic301.o cyclone5-bootsource.o cyclone5-reset-manager.o +pbl-$(CONFIG_ARCH_SOCFPGA_CYCLONE5) += cyclone5-init.o cyclone5-freeze-controller.o cyclone5-scan-manager.o cyclone5-system-manager.o +pbl-$(CONFIG_ARCH_SOCFPGA_CYCLONE5) += cyclone5-clock-manager.o +obj-$(CONFIG_ARCH_SOCFPGA_CYCLONE5) += cyclone5-generic.o nic301.o cyclone5-bootsource.o cyclone5-reset-manager.o + +pbl-$(CONFIG_ARCH_SOCFPGA_ARRIA10) += arria10-init.o arria10-clock-manager.o arria10-sdram.o arria10-reset-manager.o arria10-bootsource.o +obj-$(CONFIG_ARCH_SOCFPGA_ARRIA10) += arria10-bootsource.o arria10-generic.o arria10-reset-manager.o + obj-$(CONFIG_ARCH_SOCFPGA_XLOAD) += xload.o diff --git a/arch/arm/mach-socfpga/arria10-bootsource.c b/arch/arm/mach-socfpga/arria10-bootsource.c new file mode 100644 index 0000000000..26af64a1a4 --- /dev/null +++ b/arch/arm/mach-socfpga/arria10-bootsource.c @@ -0,0 +1,53 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +static int arria10_boot_save_loc(void) +{ + enum bootsource src = BOOTSOURCE_UNKNOWN; + uint32_t val; + + val = readl(ARRIA10_SYSMGR_BOOTINFO); + + switch ((val & 0x7000) >> 12) { + case 0: + /* reserved */ + break; + case 1: + /* FPGA, currently not decoded */ + break; + case 2: + case 3: + src = BOOTSOURCE_NAND; + break; + case 4: + case 5: + src = BOOTSOURCE_MMC; + break; + case 6: + case 7: + src = BOOTSOURCE_SPI; + break; + } + + bootsource_set(src); + bootsource_set_instance(0); + + return 0; +} +core_initcall(arria10_boot_save_loc); diff --git a/arch/arm/mach-socfpga/arria10-clock-manager.c b/arch/arm/mach-socfpga/arria10-clock-manager.c new file mode 100644 index 0000000000..8052afe2d8 --- /dev/null +++ b/arch/arm/mach-socfpga/arria10-clock-manager.c @@ -0,0 +1,815 @@ +/* + * Copyright (C) 2014 Altera Corporation + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include +#include +#include +#include +#include + +static const struct arria10_clock_manager *arria10_clkmgr_base = + (void *)ARRIA10_CLKMGR_ADDR; + +static uint32_t eosc1_hz; +static uint32_t cb_intosc_hz; +static uint32_t f2s_free_hz; +#define LOCKED_MASK (ARRIA10_CLKMGR_CLKMGR_STAT_MAINPLLLOCKED_SET_MSK | \ + ARRIA10_CLKMGR_CLKMGR_STAT_PERPLLLOCKED_SET_MSK) + +static inline void arria10_cm_wait_for_lock(uint32_t mask) +{ + register uint32_t inter_val; + + do { + inter_val = readl(&arria10_clkmgr_base->stat) & mask; + } while (inter_val != mask); +} + +/* function to poll in the fsm busy bit */ +static inline void arria10_cm_wait4fsm(void) +{ + register uint32_t inter_val; + + do { + inter_val = readl(&arria10_clkmgr_base->stat) & + ARRIA10_CLKMGR_CLKMGR_STAT_BUSY_SET_MSK; + } while (inter_val); +} + +static uint32_t arria10_cm_get_main_vco(void) +{ + uint32_t vco1, src_hz, numer, denom, vco; + uint32_t clk_src = readl(&arria10_clkmgr_base->main_pll_vco0); + + clk_src = (clk_src >> ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_LSB) & + ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_MSK; + + switch (clk_src) { + case ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_EOSC: + src_hz = eosc1_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_E_INTOSC: + src_hz = cb_intosc_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_F2S: + src_hz = f2s_free_hz; + break; + default: + pr_err("arria10_cm_get_main_vco invalid clk_src %d\n", clk_src); + return 0; + } + + vco1 = readl(&arria10_clkmgr_base->main_pll_vco1); + numer = vco1 & ARRIA10_CLKMGR_MAINPLL_VCO1_NUMER_MSK; + denom = (vco1 >> ARRIA10_CLKMGR_MAINPLL_VCO1_DENOM_LSB) & + ARRIA10_CLKMGR_MAINPLL_VCO1_DENOM_MSK; + vco = src_hz * (1 + numer); + vco /= 1 + denom; + + return vco; +} + +static uint32_t arria10_cm_get_peri_vco(void) +{ + uint32_t vco1, src_hz, numer, denom, vco; + uint32_t clk_src = readl(&arria10_clkmgr_base->per_pll_vco0); + + clk_src = (clk_src >> ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_LSB) & + ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_MSK; + + switch (clk_src) { + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_EOSC: + src_hz = eosc1_hz; + break; + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_E_INTOSC: + src_hz = cb_intosc_hz; + break; + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_F2S: + src_hz = f2s_free_hz; + break; + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_MAIN: + src_hz = arria10_cm_get_main_vco(); + src_hz /= (readl(&arria10_clkmgr_base->main_pll_cntr15clk) & + ARRIA10_CLKMGR_MAINPLL_CNTRCLK_MSK) + 1; + break; + default: + pr_err("arria10_cm_get_peri_vco invalid clk_src %d\n", clk_src); + return 0; + } + + vco1 = readl(&arria10_clkmgr_base->per_pll_vco1); + numer = vco1 & 
ARRIA10_CLKMGR_PERPLL_VCO1_NUMER_MSK; + denom = (vco1 >> ARRIA10_CLKMGR_PERPLL_VCO1_DENOM_LSB) & + ARRIA10_CLKMGR_PERPLL_VCO1_DENOM_MSK; + vco = src_hz * (1 + numer); + vco /= 1 + denom; + + return vco; +} + +unsigned int arria10_cm_get_mmc_controller_clk_hz(void) +{ + uint32_t clk_hz = 0; + uint32_t clk_input = readl(&arria10_clkmgr_base->per_pll_cntr6clk); + clk_input = (clk_input >> ARRIA10_CLKMGR_PERPLL_CNTR6CLK_SRC_LSB) & + ARRIA10_CLKMGR_PERPLLGRP_SRC_MSK; + + switch (clk_input) { + case ARRIA10_CLKMGR_PERPLLGRP_SRC_MAIN: + clk_hz = arria10_cm_get_main_vco(); + clk_hz /= 1 + (readl(&arria10_clkmgr_base->main_pll_cntr6clk) & + ARRIA10_CLKMGR_MAINPLL_CNTRCLK_MSK); + break; + + case ARRIA10_CLKMGR_PERPLLGRP_SRC_PERI: + clk_hz = arria10_cm_get_peri_vco(); + clk_hz /= 1 + (readl(&arria10_clkmgr_base->per_pll_cntr6clk) & + ARRIA10_CLKMGR_PERPLL_CNTRCLK_MSK); + break; + + case ARRIA10_CLKMGR_PERPLLGRP_SRC_OSC1: + clk_hz = eosc1_hz; + break; + + case ARRIA10_CLKMGR_PERPLLGRP_SRC_INTOSC: + clk_hz = cb_intosc_hz; + break; + + case ARRIA10_CLKMGR_PERPLLGRP_SRC_FPGA: + clk_hz = f2s_free_hz; + break; + } + + return clk_hz/4; +} + +/* calculate the intended main VCO frequency based on handoff */ +static uint32_t arria10_cm_calc_handoff_main_vco_clk_hz(struct arria10_mainpll_cfg *main_cfg) +{ + uint32_t clk_hz; + + /* Check main VCO clock source: eosc, intosc or f2s? */ + switch (main_cfg->vco0_psrc) { + case ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_EOSC: + clk_hz = eosc1_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_E_INTOSC: + clk_hz = cb_intosc_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_F2S: + clk_hz = f2s_free_hz; + break; + default: + return 0; + } + + /* calculate the VCO frequency */ + clk_hz *= 1 + main_cfg->vco1_numer; + clk_hz /= 1 + main_cfg->vco1_denom; + + return clk_hz; +} + +/* calculate the intended periph VCO frequency based on handoff */ +static uint32_t arria10_cm_calc_handoff_periph_vco_clk_hz(struct arria10_mainpll_cfg *main_cfg, + struct arria10_perpll_cfg *per_cfg) +{ + uint32_t clk_hz; + + /* Check periph VCO clock source: eosc, intosc, f2s or mainpll? */ + switch (per_cfg->vco0_psrc) { + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_EOSC: + clk_hz = eosc1_hz; + break; + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_E_INTOSC: + clk_hz = cb_intosc_hz; + break; + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_F2S: + clk_hz = f2s_free_hz; + break; + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_MAIN: + clk_hz = arria10_cm_calc_handoff_main_vco_clk_hz(main_cfg); + clk_hz /= main_cfg->cntr15clk_cnt; + break; + default: + return 0; + } + + /* calculate the VCO frequency */ + clk_hz *= 1 + per_cfg->vco1_numer; + clk_hz /= 1 + per_cfg->vco1_denom; + + return clk_hz; +} + +/* calculate the intended MPU clock frequency based on handoff */ +static uint32_t arria10_cm_calc_handoff_mpu_clk_hz(struct arria10_mainpll_cfg *main_cfg, + struct arria10_perpll_cfg *per_cfg) +{ + uint32_t clk_hz; + + /* Check MPU clock source: main, periph, osc1, intosc or f2s? 
*/ + switch (main_cfg->mpuclk_src) { + case ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_MAIN: + clk_hz = arria10_cm_calc_handoff_main_vco_clk_hz(main_cfg); + clk_hz /= ((main_cfg->mpuclk & ARRIA10_CLKMGR_MAINPLL_MPUCLK_CNT_MSK) + + 1); + break; + case ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_PERI: + clk_hz = arria10_cm_calc_handoff_periph_vco_clk_hz(main_cfg, per_cfg); + clk_hz /= (((main_cfg->mpuclk >> + ARRIA10_CLKMGR_MAINPLL_MPUCLK_PERICNT_LSB) & + ARRIA10_CLKMGR_MAINPLL_MPUCLK_CNT_MSK) + 1); + break; + case ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_OSC1: + clk_hz = eosc1_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_INTOSC: + clk_hz = cb_intosc_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_FPGA: + clk_hz = f2s_free_hz; + break; + default: + return 0; + } + + clk_hz /= (main_cfg->mpuclk_cnt + 1); + + return clk_hz; +} + +/* calculate the intended NOC clock frequency based on handoff */ +static uint32_t arria10_cm_calc_handoff_noc_clk_hz(struct arria10_mainpll_cfg *main_cfg, + struct arria10_perpll_cfg *per_cfg) +{ + uint32_t clk_hz; + + /* Check MPU clock source: main, periph, osc1, intosc or f2s? */ + switch (main_cfg->nocclk_src) { + case ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_MAIN: + clk_hz = arria10_cm_calc_handoff_main_vco_clk_hz(main_cfg); + clk_hz /= ((main_cfg->nocclk & ARRIA10_CLKMGR_MAINPLL_NOCCLK_CNT_MSK) + + 1); + break; + case ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_PERI: + clk_hz = arria10_cm_calc_handoff_periph_vco_clk_hz(main_cfg, per_cfg); + clk_hz /= (((main_cfg->nocclk >> + ARRIA10_CLKMGR_MAINPLL_NOCCLK_PERICNT_LSB) & + ARRIA10_CLKMGR_MAINPLL_NOCCLK_CNT_MSK) + 1); + break; + case ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_OSC1: + clk_hz = eosc1_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_INTOSC: + clk_hz = cb_intosc_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_FPGA: + clk_hz = f2s_free_hz; + break; + default: + return 0; + } + + clk_hz /= (main_cfg->nocclk_cnt + 1); + + return clk_hz; +} + +/* return 1 if PLL ramp is required */ +static int arria10_cm_is_pll_ramp_required(int main0periph1, + struct arria10_mainpll_cfg *main_cfg, + struct arria10_perpll_cfg *per_cfg) +{ + + /* Check for main PLL */ + if (main0periph1 == 0) { + /* + * PLL ramp is not required if both MPU clock and NOC clock are + * not sourced from main PLL + */ + if (main_cfg->mpuclk_src != ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_MAIN && + main_cfg->nocclk_src != ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_MAIN) + return 0; + + /* + * PLL ramp is required if MPU clock is sourced from main PLL + * and MPU clock is over 900MHz (as advised by HW team) + */ + if (main_cfg->mpuclk_src == ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_MAIN && + (arria10_cm_calc_handoff_mpu_clk_hz(main_cfg, per_cfg) > + ARRIA10_CLKMGR_PLL_RAMP_MPUCLK_THRESHOLD_HZ)) + return 1; + + /* + * PLL ramp is required if NOC clock is sourced from main PLL + * and NOC clock is over 300MHz (as advised by HW team) + */ + if (main_cfg->nocclk_src == ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_MAIN && + (arria10_cm_calc_handoff_noc_clk_hz(main_cfg, per_cfg) > + ARRIA10_CLKMGR_PLL_RAMP_NOCCLK_THRESHOLD_HZ)) + return 1; + + } else if (main0periph1 == 1) { + /* + * PLL ramp is not required if both MPU clock and NOC clock are + * not sourced from periph PLL + */ + if (main_cfg->mpuclk_src != ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_PERI && + main_cfg->nocclk_src != ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_PERI) + return 0; + + /* + * PLL ramp is required if MPU clock are source from periph PLL + * and MPU clock is over 900MHz (as advised by HW team) + */ + if (main_cfg->mpuclk_src == 
ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_PERI && + (arria10_cm_calc_handoff_mpu_clk_hz(main_cfg, per_cfg) > + ARRIA10_CLKMGR_PLL_RAMP_MPUCLK_THRESHOLD_HZ)) + return 1; + + /* + * PLL ramp is required if NOC clock are source from periph PLL + * and NOC clock is over 300MHz (as advised by HW team) + */ + if (main_cfg->nocclk_src == ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_PERI && + (arria10_cm_calc_handoff_noc_clk_hz(main_cfg, per_cfg) > + ARRIA10_CLKMGR_PLL_RAMP_NOCCLK_THRESHOLD_HZ)) + return 1; + } + + return 0; +} + +/* + * Calculate the new PLL numerator which is based on existing DTS hand off and + * intended safe frequency (safe_hz). Note that PLL ramp is only modifying the + * numerator while maintaining denominator as denominator will influence the + * jitter condition. Please refer A10 HPS TRM for the jitter guide. Note final + * value for numerator is minus with 1 to cater our register value + * representation. + */ +static uint32_t arria10_cm_calc_safe_pll_numer(int main0periph1, + struct arria10_mainpll_cfg *main_cfg, + struct arria10_perpll_cfg *per_cfg, + uint32_t safe_hz) +{ + uint32_t clk_hz = 0; + + /* Check for main PLL */ + if (main0periph1 == 0) { + /* Check main VCO clock source: eosc, intosc or f2s? */ + switch (main_cfg->vco0_psrc) { + case ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_EOSC: + clk_hz = eosc1_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_E_INTOSC: + clk_hz = cb_intosc_hz; + break; + case ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_F2S: + clk_hz = f2s_free_hz; + break; + default: + return 0; + } + + /* Applicable if MPU clock is from main PLL */ + if (main_cfg->mpuclk_src == ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_MAIN) { + /* calculate the safe numer value */ + clk_hz = (safe_hz / clk_hz) * + (main_cfg->mpuclk_cnt + 1) * + ((main_cfg->mpuclk & + ARRIA10_CLKMGR_MAINPLL_MPUCLK_CNT_MSK) + 1) * + (1 + main_cfg->vco1_denom) - 1; + } + /* Reach here if MPU clk not from main PLL but NOC clk is */ + else if (main_cfg->nocclk_src == + ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_MAIN) { + /* calculate the safe numer value */ + clk_hz = (safe_hz / clk_hz) * + (main_cfg->nocclk_cnt + 1) * + ((main_cfg->nocclk & + ARRIA10_CLKMGR_MAINPLL_NOCCLK_CNT_MSK) + 1) * + (1 + main_cfg->vco1_denom) - 1; + } else { + clk_hz = 0; + } + + } else if (main0periph1 == 1) { + /* Check periph VCO clock source: eosc, intosc, f2s, mainpll */ + switch (per_cfg->vco0_psrc) { + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_EOSC: + clk_hz = eosc1_hz; + break; + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_E_INTOSC: + clk_hz = cb_intosc_hz; + break; + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_F2S: + clk_hz = f2s_free_hz; + break; + case ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_MAIN: + clk_hz = arria10_cm_calc_handoff_main_vco_clk_hz( + main_cfg); + clk_hz /= main_cfg->cntr15clk_cnt; + break; + default: + return 0; + } + /* Applicable if MPU clock is from periph PLL */ + if (main_cfg->mpuclk_src == ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_PERI) { + /* calculate the safe numer value */ + clk_hz = (safe_hz / clk_hz) * + (main_cfg->mpuclk_cnt + 1) * + (((main_cfg->mpuclk >> + ARRIA10_CLKMGR_MAINPLL_MPUCLK_PERICNT_LSB) & + ARRIA10_CLKMGR_MAINPLL_MPUCLK_CNT_MSK) + 1) * + (1 + per_cfg->vco1_denom) - 1; + } + /* Reach here if MPU clk not from periph PLL but NOC clk is */ + else if (main_cfg->nocclk_src == + ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_PERI) { + /* calculate the safe numer value */ + clk_hz = (safe_hz / clk_hz) * + (main_cfg->nocclk_cnt + 1) * + (((main_cfg->nocclk >> + ARRIA10_CLKMGR_MAINPLL_NOCCLK_PERICNT_LSB) & + ARRIA10_CLKMGR_MAINPLL_NOCCLK_CNT_MSK) + 1) * + (1 + 
per_cfg->vco1_denom) - 1; + } else { + clk_hz = 0; + } + } + + return clk_hz; +} + +/* ramping the main PLL to final value */ +static void arria10_cm_pll_ramp_main(struct arria10_mainpll_cfg *main_cfg, + struct arria10_perpll_cfg *per_cfg, + uint32_t pll_ramp_main_hz) +{ + uint32_t clk_hz = 0; + uint32_t clk_incr_hz = 0; + uint32_t clk_final_hz = 0; + + /* find out the increment value */ + if (main_cfg->mpuclk_src == ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_MAIN) { + clk_incr_hz = ARRIA10_CLKMGR_PLL_RAMP_MPUCLK_INCREMENT_HZ; + clk_final_hz = arria10_cm_calc_handoff_mpu_clk_hz(main_cfg, per_cfg); + } else if (main_cfg->nocclk_src == ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_MAIN) { + clk_incr_hz = ARRIA10_CLKMGR_PLL_RAMP_NOCCLK_INCREMENT_HZ; + clk_final_hz = arria10_cm_calc_handoff_noc_clk_hz(main_cfg, per_cfg); + } + + /* execute the ramping here */ + for (clk_hz = pll_ramp_main_hz + clk_incr_hz; + clk_hz < clk_final_hz; clk_hz += clk_incr_hz) { + writel((main_cfg->vco1_denom << ARRIA10_CLKMGR_MAINPLL_VCO1_DENOM_LSB) | + arria10_cm_calc_safe_pll_numer(0, main_cfg, per_cfg, clk_hz), + &arria10_clkmgr_base->main_pll_vco1); + __udelay(1000); + arria10_cm_wait_for_lock(LOCKED_MASK); + } + + writel((main_cfg->vco1_denom << ARRIA10_CLKMGR_MAINPLL_VCO1_DENOM_LSB) | + main_cfg->vco1_numer, &arria10_clkmgr_base->main_pll_vco1); + + __udelay(1000); + arria10_cm_wait_for_lock(LOCKED_MASK); +} + +/* ramping the periph PLL to final value */ +static void arria10_cm_pll_ramp_periph(struct arria10_mainpll_cfg *main_cfg, + struct arria10_perpll_cfg *per_cfg, + uint32_t pll_ramp_periph_hz) +{ + uint32_t clk_hz = 0; + uint32_t clk_incr_hz = 0; + uint32_t clk_final_hz = 0; + + /* find out the increment value */ + if (main_cfg->mpuclk_src == ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_PERI) { + clk_incr_hz = ARRIA10_CLKMGR_PLL_RAMP_MPUCLK_INCREMENT_HZ; + clk_final_hz = arria10_cm_calc_handoff_mpu_clk_hz(main_cfg, per_cfg); + } else if (main_cfg->nocclk_src == ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_PERI) { + clk_incr_hz = ARRIA10_CLKMGR_PLL_RAMP_NOCCLK_INCREMENT_HZ; + clk_final_hz = arria10_cm_calc_handoff_noc_clk_hz(main_cfg, per_cfg); + } + + /* execute the ramping here */ + for (clk_hz = pll_ramp_periph_hz + clk_incr_hz; + clk_hz < clk_final_hz; clk_hz += clk_incr_hz) { + writel((per_cfg->vco1_denom << ARRIA10_CLKMGR_PERPLL_VCO1_DENOM_LSB) | + arria10_cm_calc_safe_pll_numer(1, main_cfg, per_cfg, clk_hz), + &arria10_clkmgr_base->per_pll_vco1); + __udelay(1000); + arria10_cm_wait_for_lock(LOCKED_MASK); + } + + writel((per_cfg->vco1_denom << ARRIA10_CLKMGR_PERPLL_VCO1_DENOM_LSB) | + per_cfg->vco1_numer, &arria10_clkmgr_base->per_pll_vco1); + __udelay(1000); + arria10_cm_wait_for_lock(LOCKED_MASK); +} + +/* + * Setup clocks while making no assumptions of the + * previous state of the clocks. + * + * - Start by being paranoid and gate all sw managed clocks + * - Put all plls in bypass + * - Put all plls VCO registers back to reset value (bgpwr dwn). + * - Put peripheral and main pll src to reset value to avoid glitch. + * - Delay 5 us. + * - Deassert bg pwr dn and set numerator and denominator + * - Start 7 us timer. + * - set internal dividers + * - Wait for 7 us timer. + * - Enable plls + * - Set external dividers while plls are locking + * - Wait for pll lock + * - Assert/deassert outreset all. 
+ * - Take all pll's out of bypass + * - Clear safe mode + * - set source main and peripheral clocks + * - Ungate clocks + */ +static int arria10_cm_full_cfg(struct arria10_mainpll_cfg *main_cfg, + struct arria10_perpll_cfg *per_cfg) +{ + uint32_t pll_ramp_main_hz = 0; + uint32_t pll_ramp_periph_hz = 0; + + /* gate off all mainpll clock excpet HW managed clock */ + writel(ARRIA10_CLKMGR_MAINPLL_EN_S2FUSER0CLKEN_SET_MSK | + ARRIA10_CLKMGR_MAINPLL_EN_HMCPLLREFCLKEN_SET_MSK, + &arria10_clkmgr_base->main_pll_enr); + + /* now we can gate off the rest of the peripheral clocks */ + writel(0, &arria10_clkmgr_base->per_pll_en); + + /* Put all plls in external bypass */ + writel(ARRIA10_CLKMGR_MAINPLL_BYPASS_RESET, + &arria10_clkmgr_base->main_pll_bypasss); + writel(ARRIA10_CLKMGR_PERPLL_BYPASS_RESET, + &arria10_clkmgr_base->per_pll_bypasss); + + /* + * Put all plls VCO registers back to reset value. + * Some code might have messed with them. At same time set the + * desired clock source + */ + writel(ARRIA10_CLKMGR_MAINPLL_VCO0_RESET | + ARRIA10_CLKMGR_MAINPLL_VCO0_REGEXTSEL_SET_MSK | + (main_cfg->vco0_psrc << ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_LSB), + &arria10_clkmgr_base->main_pll_vco0); + + writel(ARRIA10_CLKMGR_PERPLL_VCO0_RESET | + ARRIA10_CLKMGR_PERPLL_VCO0_REGEXTSEL_SET_MSK | + (per_cfg->vco0_psrc << ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_LSB), + &arria10_clkmgr_base->per_pll_vco0); + + writel(ARRIA10_CLKMGR_MAINPLL_VCO1_RESET, + &arria10_clkmgr_base->main_pll_vco1); + writel(ARRIA10_CLKMGR_PERPLL_VCO1_RESET, + &arria10_clkmgr_base->per_pll_vco1); + + /* clear the interrupt register status register */ + writel(ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLLOST_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLLOST_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLRFSLIP_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLRFSLIP_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLFBSLIP_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLFBSLIP_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLACHIEVED_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLACHIEVED_SET_MSK, + &arria10_clkmgr_base->intr); + + /* Program VCO “Numerator” and “Denominator” for main PLL */ + if (arria10_cm_is_pll_ramp_required(0, main_cfg, per_cfg)) { + /* set main PLL to safe starting threshold frequency */ + if (main_cfg->mpuclk_src == ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_MAIN) + pll_ramp_main_hz = ARRIA10_CLKMGR_PLL_RAMP_MPUCLK_THRESHOLD_HZ; + else if (main_cfg->nocclk_src == ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_MAIN) + pll_ramp_main_hz = ARRIA10_CLKMGR_PLL_RAMP_NOCCLK_THRESHOLD_HZ; + + writel((main_cfg->vco1_denom << ARRIA10_CLKMGR_MAINPLL_VCO1_DENOM_LSB) | + arria10_cm_calc_safe_pll_numer(0, main_cfg, per_cfg, + pll_ramp_main_hz), + &arria10_clkmgr_base->main_pll_vco1); + } else { + writel((main_cfg->vco1_denom << ARRIA10_CLKMGR_MAINPLL_VCO1_DENOM_LSB) | + main_cfg->vco1_numer, + &arria10_clkmgr_base->main_pll_vco1); + } + + /* Program VCO “Numerator” and “Denominator” for periph PLL */ + if (arria10_cm_is_pll_ramp_required(1, main_cfg, per_cfg)) { + /* set periph PLL to safe starting threshold frequency */ + if (main_cfg->mpuclk_src == ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_PERI) + pll_ramp_periph_hz = ARRIA10_CLKMGR_PLL_RAMP_MPUCLK_THRESHOLD_HZ; + else if (main_cfg->nocclk_src == ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_PERI) + pll_ramp_periph_hz = ARRIA10_CLKMGR_PLL_RAMP_NOCCLK_THRESHOLD_HZ; + + writel((per_cfg->vco1_denom << ARRIA10_CLKMGR_PERPLL_VCO1_DENOM_LSB) | + arria10_cm_calc_safe_pll_numer(1, main_cfg, per_cfg, + pll_ramp_periph_hz), + &arria10_clkmgr_base->per_pll_vco1); + } 
else { + writel((per_cfg->vco1_denom << ARRIA10_CLKMGR_PERPLL_VCO1_DENOM_LSB) | + per_cfg->vco1_numer, &arria10_clkmgr_base->per_pll_vco1); + } + + /* Wait for at least 5 us */ + __udelay(5); + + /* Now deassert BGPWRDN and PWRDN */ + clrbits_le32(&arria10_clkmgr_base->main_pll_vco0, + ARRIA10_CLKMGR_MAINPLL_VCO0_BGPWRDN_SET_MSK | + ARRIA10_CLKMGR_MAINPLL_VCO0_PWRDN_SET_MSK); + clrbits_le32(&arria10_clkmgr_base->per_pll_vco0, + ARRIA10_CLKMGR_PERPLL_VCO0_BGPWRDN_SET_MSK | + ARRIA10_CLKMGR_PERPLL_VCO0_PWRDN_SET_MSK); + + /* Wait for at least 7 us */ + __udelay(7); + + /* enable the VCO and disable the external regulator to PLL */ + writel((readl(&arria10_clkmgr_base->main_pll_vco0) & + ~ARRIA10_CLKMGR_MAINPLL_VCO0_REGEXTSEL_SET_MSK) | + ARRIA10_CLKMGR_MAINPLL_VCO0_EN_SET_MSK, + &arria10_clkmgr_base->main_pll_vco0); + writel((readl(&arria10_clkmgr_base->per_pll_vco0) & + ~ARRIA10_CLKMGR_PERPLL_VCO0_REGEXTSEL_SET_MSK) | + ARRIA10_CLKMGR_PERPLL_VCO0_EN_SET_MSK, + &arria10_clkmgr_base->per_pll_vco0); + + /* setup all the main PLL counter and clock source */ + writel(main_cfg->nocclk, + ARRIA10_CLKMGR_ADDR + ARRIA10_CLKMGR_MAINPLL_NOC_CLK_OFFSET); + writel(main_cfg->mpuclk, + ARRIA10_CLKMGR_ADDR + ARRIA10_CLKMGR_ALTERAGRP_MPU_CLK_OFFSET); + + /* main_emaca_clk divider */ + writel(main_cfg->cntr2clk_cnt, &arria10_clkmgr_base->main_pll_cntr2clk); + /* main_emacb_clk divider */ + writel(main_cfg->cntr3clk_cnt, &arria10_clkmgr_base->main_pll_cntr3clk); + /* main_emac_ptp_clk divider */ + writel(main_cfg->cntr4clk_cnt, &arria10_clkmgr_base->main_pll_cntr4clk); + /* main_gpio_db_clk divider */ + writel(main_cfg->cntr5clk_cnt, &arria10_clkmgr_base->main_pll_cntr5clk); + /* main_sdmmc_clk divider */ + writel(main_cfg->cntr6clk_cnt, &arria10_clkmgr_base->main_pll_cntr6clk); + /* main_s2f_user0_clk divider */ + writel(main_cfg->cntr7clk_cnt | + (main_cfg->cntr7clk_src << ARRIA10_CLKMGR_MAINPLL_CNTR7CLK_SRC_LSB), + &arria10_clkmgr_base->main_pll_cntr7clk); + /* main_s2f_user1_clk divider */ + writel(main_cfg->cntr8clk_cnt, &arria10_clkmgr_base->main_pll_cntr8clk); + /* main_hmc_pll_clk divider */ + writel(main_cfg->cntr9clk_cnt | + (main_cfg->cntr9clk_src << ARRIA10_CLKMGR_MAINPLL_CNTR9CLK_SRC_LSB), + &arria10_clkmgr_base->main_pll_cntr9clk); + /* main_periph_ref_clk divider */ + writel(main_cfg->cntr15clk_cnt, &arria10_clkmgr_base->main_pll_cntr15clk); + + /* setup all the peripheral PLL counter and clock source */ + /* peri_emaca_clk divider */ + writel(per_cfg->cntr2clk_cnt | + (per_cfg->cntr2clk_src << ARRIA10_CLKMGR_PERPLL_CNTR2CLK_SRC_LSB), + &arria10_clkmgr_base->per_pll_cntr2clk); + /* peri_emacb_clk divider */ + writel(per_cfg->cntr3clk_cnt | + (per_cfg->cntr3clk_src << ARRIA10_CLKMGR_PERPLL_CNTR3CLK_SRC_LSB), + &arria10_clkmgr_base->per_pll_cntr3clk); + /* peri_emac_ptp_clk divider */ + writel(per_cfg->cntr4clk_cnt | + (per_cfg->cntr4clk_src << ARRIA10_CLKMGR_PERPLL_CNTR4CLK_SRC_LSB), + &arria10_clkmgr_base->per_pll_cntr4clk); + /* peri_gpio_db_clk divider */ + writel(per_cfg->cntr5clk_cnt | + (per_cfg->cntr5clk_src << ARRIA10_CLKMGR_PERPLL_CNTR5CLK_SRC_LSB), + &arria10_clkmgr_base->per_pll_cntr5clk); + /* peri_sdmmc_clk divider */ + writel(per_cfg->cntr6clk_cnt | + (per_cfg->cntr6clk_src << ARRIA10_CLKMGR_PERPLL_CNTR6CLK_SRC_LSB), + &arria10_clkmgr_base->per_pll_cntr6clk); + /* peri_s2f_user0_clk divider */ + writel(per_cfg->cntr7clk_cnt, &arria10_clkmgr_base->per_pll_cntr7clk); + /* peri_s2f_user1_clk divider */ + writel(per_cfg->cntr8clk_cnt | + (per_cfg->cntr8clk_src << 
ARRIA10_CLKMGR_PERPLL_CNTR8CLK_SRC_LSB), + &arria10_clkmgr_base->per_pll_cntr8clk); + /* peri_hmc_pll_clk divider */ + writel(per_cfg->cntr9clk_cnt, &arria10_clkmgr_base->per_pll_cntr9clk); + + /* setup all the external PLL counter */ + /* mpu wrapper / external divider */ + writel(main_cfg->mpuclk_cnt | + (main_cfg->mpuclk_src << ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_LSB), + &arria10_clkmgr_base->main_pll_mpuclk); + /* NOC wrapper / external divider */ + writel(main_cfg->nocclk_cnt | + (main_cfg->nocclk_src << ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_LSB), + &arria10_clkmgr_base->main_pll_nocclk); + /* NOC subclock divider such as l4 */ + writel(main_cfg->nocdiv_l4mainclk | + (main_cfg->nocdiv_l4mpclk << ARRIA10_CLKMGR_MAINPLL_NOCDIV_L4MPCLK_LSB) | + (main_cfg->nocdiv_l4spclk << ARRIA10_CLKMGR_MAINPLL_NOCDIV_L4SPCLK_LSB) | + (main_cfg->nocdiv_csatclk << ARRIA10_CLKMGR_MAINPLL_NOCDIV_CSATCLK_LSB) | + (main_cfg->nocdiv_cstraceclk << ARRIA10_CLKMGR_MAINPLL_NOCDIV_CSTRACECLK_LSB) | + (main_cfg->nocdiv_cspdbgclk << ARRIA10_CLKMGR_MAINPLL_NOCDIV_CSPDBGCLK_LSB), + &arria10_clkmgr_base->main_pll_nocdiv); + /* gpio_db external divider */ + writel(per_cfg->gpiodiv_gpiodbclk, &arria10_clkmgr_base->per_pll_gpiodiv); + + /* setup the EMAC clock mux select */ + writel((per_cfg->emacctl_emac0sel << ARRIA10_CLKMGR_PERPLL_EMACCTL_EMAC0SEL_LSB) | + (per_cfg->emacctl_emac1sel << ARRIA10_CLKMGR_PERPLL_EMACCTL_EMAC1SEL_LSB) | + (per_cfg->emacctl_emac2sel << ARRIA10_CLKMGR_PERPLL_EMACCTL_EMAC2SEL_LSB), + &arria10_clkmgr_base->per_pll_emacctl); + + /* at this stage, check for PLL lock status */ + arria10_cm_wait_for_lock(LOCKED_MASK); + + /* + * after locking, but before taking out of bypass, + * assert/deassert outresetall + */ + + /* assert mainpll outresetall */ + setbits_le32(&arria10_clkmgr_base->main_pll_vco0, + ARRIA10_CLKMGR_MAINPLL_VCO0_OUTRSTALL_SET_MSK); + /* assert perpll outresetall */ + setbits_le32(&arria10_clkmgr_base->per_pll_vco0, + ARRIA10_CLKMGR_PERPLL_VCO0_OUTRSTALL_SET_MSK); + /* de-assert mainpll outresetall */ + clrbits_le32(&arria10_clkmgr_base->main_pll_vco0, + ARRIA10_CLKMGR_MAINPLL_VCO0_OUTRSTALL_SET_MSK); + /* de-assert perpll outresetall */ + clrbits_le32(&arria10_clkmgr_base->per_pll_vco0, + ARRIA10_CLKMGR_PERPLL_VCO0_OUTRSTALL_SET_MSK); + + /* + * Take all PLLs out of bypass when boot mode is cleared.
+ * release mainpll from bypass + */ + writel(ARRIA10_CLKMGR_MAINPLL_BYPASS_RESET, + &arria10_clkmgr_base->main_pll_bypassr); + /* wait till Clock Manager is not busy */ + arria10_cm_wait4fsm(); + + /* release perpll from bypass */ + writel(ARRIA10_CLKMGR_PERPLL_BYPASS_RESET, + &arria10_clkmgr_base->per_pll_bypassr); + /* wait till Clock Manager is not busy */ + arria10_cm_wait4fsm(); + + /* clear boot mode */ + clrbits_le32(&arria10_clkmgr_base->ctrl, + ARRIA10_CLKMGR_CLKMGR_CTL_BOOTMOD_SET_MSK); + /* wait till Clock Manager is not busy */ + arria10_cm_wait4fsm(); + + /* At here, we need to ramp to final value if needed */ + if (pll_ramp_main_hz != 0) + arria10_cm_pll_ramp_main(main_cfg, per_cfg, pll_ramp_main_hz); + if (pll_ramp_periph_hz != 0) + arria10_cm_pll_ramp_periph(main_cfg, per_cfg, pll_ramp_periph_hz); + + /* Now ungate non-hw-managed clocks */ + writel(ARRIA10_CLKMGR_MAINPLL_EN_S2FUSER0CLKEN_SET_MSK | + ARRIA10_CLKMGR_MAINPLL_EN_HMCPLLREFCLKEN_SET_MSK, + &arria10_clkmgr_base->main_pll_ens); + writel(ARRIA10_CLKMGR_PERPLL_EN_RESET, + &arria10_clkmgr_base->per_pll_ens); + + /* + * Clear the loss lock and slip bits as they might set during + * clock reconfiguration + */ + writel(ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLLOST_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLLOST_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLRFSLIP_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLRFSLIP_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLFBSLIP_SET_MSK | + ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLFBSLIP_SET_MSK, + &arria10_clkmgr_base->intr); + + return 0; +} + +int arria10_cm_basic_init(struct arria10_mainpll_cfg *mainpll, + struct arria10_perpll_cfg *perpll) +{ + return arria10_cm_full_cfg(mainpll, perpll); +} + +void arria10_cm_use_intosc(void) +{ + setbits_le32(&arria10_clkmgr_base->ctrl, + ARRIA10_CLKMGR_CLKMGR_CTL_BOOTCLK_INTOSC_SET_MSK); +} diff --git a/arch/arm/mach-socfpga/arria10-generic.c b/arch/arm/mach-socfpga/arria10-generic.c new file mode 100644 index 0000000000..b8129eaf23 --- /dev/null +++ b/arch/arm/mach-socfpga/arria10-generic.c @@ -0,0 +1,85 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +/* Some initialization for the EMAC */ +static void arria10_init_emac(void) +{ + uint32_t rst, val; + + /* No need for this without network support, e.g. 
xloader build */ + if (!IS_ENABLED(CONFIG_NET)) + return; + + rst = readl(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST); + rst |= ARRIA10_RSTMGR_PER0MODRST_EMAC0 | + ARRIA10_RSTMGR_PER0MODRST_EMAC1 | + ARRIA10_RSTMGR_PER0MODRST_EMAC2; + + writel(rst, ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST); + val = readl(ARRIA10_SYSMGR_EMAC0); + val &= ~(ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_MASK); + val |= ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; + writel(val, ARRIA10_SYSMGR_EMAC0); + + val = readl(ARRIA10_SYSMGR_EMAC1); + val &= ~(ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_MASK); + val |= ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; + writel(val, ARRIA10_SYSMGR_EMAC1); + + val = readl(ARRIA10_SYSMGR_EMAC2); + val &= ~(ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_MASK); + val |= ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; + writel(val, ARRIA10_SYSMGR_EMAC2); + + val = readl(ARRIA10_SYSMGR_FPGAINTF_EN_3); + val &= ~(ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC0 | + ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC0_SW | + ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC1 | + ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC1_SW | + ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC2 | + ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC2_SW); + + rst = readl(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST); + rst &= ~(ARRIA10_RSTMGR_PER0MODRST_EMAC0 | + ARRIA10_RSTMGR_PER0MODRST_EMAC1 | + ARRIA10_RSTMGR_PER0MODRST_EMAC2); + writel(rst, ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST); +} + +/* Write the reset manager register to cause reset */ +static void __noreturn arria10_restart_soc(struct restart_handler *rst) +{ + /* request a warm reset */ + writel(ARRIA10_RSTMGR_CTL_SWWARMRSTREQ, + ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_CTRL); + /* + * infinite loop here as watchdog will trigger and reset + * the processor + */ + hang(); +} + +static int arria10_generic_init(void) +{ + barebox_set_model("SoCFPGA Arria10"); + + pr_debug("Setting SDMMC phase shifts for Arria10\n"); + writel(ARRIA10_SYSMGR_SDMMC_DRVSEL(3) | + ARRIA10_SYSMGR_SDMMC_SMPLSEL(0), + ARRIA10_SYSMGR_SDMMC); + + pr_debug("Initialize EMACs\n"); + arria10_init_emac(); + + pr_debug("Register restart handler\n"); + restart_handler_register_fn(arria10_restart_soc); + + return 0; +} +postcore_initcall(arria10_generic_init); diff --git a/arch/arm/mach-socfpga/arria10-init.c b/arch/arm/mach-socfpga/arria10-init.c new file mode 100644 index 0000000000..07256da1db --- /dev/null +++ b/arch/arm/mach-socfpga/arria10-init.c @@ -0,0 +1,193 @@ +/* + * Copyright (C) 2014 Altera Corporation + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */ +#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26) +#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */ + +static inline void set_auxcr(unsigned int val) +{ + asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR" + : : "r" (val)); + isb(); +} + +static inline unsigned int get_auxcr(void) +{ + unsigned int val; + + asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val)); + return val; +} + +static void l2c310_disable(void __iomem *base) +{ + u32 aux; + int ways = 8; + + aux = readl(base + L2X0_AUX_CTRL); + + /* + * If full-line-of-zeros is enabled, we must first disable it in the + * Cortex-A9 auxiliary control register before disabling the L2 cache. 
+ */ + if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) + set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); + + /* flush all ways */ + writel((1 << ways) - 1, base + L2X0_INV_WAY); + + while (readl(base + L2X0_INV_WAY) & ways) + ; + + /* sync */ + writel(0, base + L2X0_CACHE_SYNC); + + /* disable */ + writel(0, base + L2X0_CTRL); + dsb(); +} + +static void arria10_initialize_security_policies(void) +{ + void __iomem *l2x0_base = (void __iomem *) 0xfffff000; + + /* BootROM leaves the L2X0 in a weird state. Always disable L2X0 for now. */ + l2c310_disable(l2x0_base); + + /* Put OCRAM in non-secure */ + writel(0x003f0000, ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_REGION0); + writel(0x1, ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_EN); + + /* Put DDR in non-secure */ + writel(0xffff0000, ARRIA10_NOC_FW_DDR_L3_DDR_SCR_REGION0); + writel(0x1, ARRIA10_NOC_FW_DDR_L3_DDR_SCR_EN); + + /* Enable priviledge and non priviledge access to L4 peripherals */ + writel(0xffffffff, ARRIA10_NOC_L4_PRIV_L4_PRIV_L4_PRIV); + + /* Enable secure and non secure transaction to bridges */ + writel(0xffffffff, ARRIA10_NOC_FW_SOC2FPGA_SOC2FPGA_SCR_LWSOC2FPGA); + writel(0xffffffff, ARRIA10_NOC_FW_SOC2FPGA_SOC2FPGA_SCR_SOC2FPGA); + + /* allow non-secure and secure transaction from/to all peripherals */ + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_NAND_REG); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_NAND_DATA); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_QSPI_DATA); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_USB0_REG); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_USB1_REG); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_SPIM0); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_SPIM1); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_SPIS0); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_SPIS1); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_EMAC0); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_EMAC1); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_EMAC2); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_EMAC3); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_QSPI); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_SDMMC); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_GPIO0); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_GPIO1); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_GPIO2); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_I2C0); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_I2C1); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_I2C2); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_I2C3); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_I2C4); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_SPTIMER0); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_SPTIMER1); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_UART0); + writel(0xffffffff, ARRIA10_NOC_FW_L4_PER_SCR_UART1); + + /* Return error instead of random data */ + writel(0x1, ARRIA10_NOC_FW_DDR_L3_DDR_SCR_GLOBAL); +} + +static void arria10_mask_ecc_errors(void) +{ + writel(0x0007FFFF, ARRIA10_SYSMGR_ADDR + 0x94); +} + +/* + * First C function to initialize the critical hardware early + */ +void arria10_init(struct arria10_mainpll_cfg *mainpll, + struct arria10_perpll_cfg *perpll, + uint32_t *pinmux) +{ + int i; + + arria10_cm_use_intosc(); + + arria10_initialize_security_policies(); + + arria10_mask_ecc_errors(); + + /* + * Configure the L2 controller to make SDRAM start at 0. 
+ * Set address filtering start to 0x0 (Bits [31:20]), + * Enable address filtering (Bit[0]) + */ + writel(0x00000001, ARRIA10_MPUL2_ADRFLTR_START); + writel(0x00000002, ARRIA10_SYSMGR_NOC_ADDR_REMAP_VALUE); + + arria10_reset_peripherals(); + + /* timer init */ + writel(0xffffffff, ARRIA10_OSC1TIMER0_ADDR); + writel(0xffffffff, ARRIA10_OSC1TIMER0_ADDR + 0x4); + writel(0x00000003, ARRIA10_OSC1TIMER0_ADDR + 0x8); + + /* configuring the clock based on handoff */ + arria10_cm_basic_init(mainpll, perpll); + + /* dedicated pins */ + for (i = arria10_pinmux_dedicated_io_4; + i <= arria10_pinmux_dedicated_io_17; i++) + writel(pinmux[i], ARRIA10_PINMUX_DEDICATED_IO_4_ADDR + + (i - arria10_pinmux_dedicated_io_4) * sizeof(uint32_t)); + + for (i = arria10_pincfg_dedicated_io_bank; + i <= arria10_pincfg_dedicated_io_17; i++) + writel(pinmux[i], ARRIA10_PINCFG_DEDICATED_IO_BANK_ADDR + + (i - arria10_pincfg_dedicated_io_bank) * sizeof(uint32_t)); + + /* deassert peripheral resets */ + arria10_reset_deassert_dedicated_peripherals(); + + /* wait for fpga_usermode */ + while ((readl(0xffd03080) & 0x6) == 0); + + /* shared pins */ + for (i = arria10_pinmux_shared_io_q1_1; + i <= arria10_pinmux_shared_io_q4_12; i++) + writel(pinmux[i], ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + + (i - arria10_pinmux_shared_io_q1_1) * sizeof(uint32_t)); + + arria10_reset_deassert_shared_peripherals(); + + /* usefpga: select source for signals: hps or fpga */ + for (i = arria10_pinmux_rgmii0_usefpga; + i < arria10_pinmux_max; i++) + writel(pinmux[i], ARRIA10_PINMUX_FPGA_INTERFACE_ADDR + + (i - arria10_pinmux_rgmii0_usefpga) * sizeof(uint32_t)); + + arria10_reset_deassert_fpga_peripherals(); + + INIT_LL(); +} diff --git a/arch/arm/mach-socfpga/arria10-reset-manager.c b/arch/arm/mach-socfpga/arria10-reset-manager.c new file mode 100644 index 0000000000..a7e4bd603e --- /dev/null +++ b/arch/arm/mach-socfpga/arria10-reset-manager.c @@ -0,0 +1,398 @@ +/* + * Copyright (C) 2014-2016 Altera Corporation + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include + +void arria10_reset_peripherals(void) +{ + unsigned mask_ecc_ocp = ARRIA10_RSTMGR_PER0MODRST_EMAC0OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC1OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC2OCP | + ARRIA10_RSTMGR_PER0MODRST_USB0OCP | + ARRIA10_RSTMGR_PER0MODRST_USB1OCP | + ARRIA10_RSTMGR_PER0MODRST_NANDOCP | + ARRIA10_RSTMGR_PER0MODRST_QSPIOCP | + ARRIA10_RSTMGR_PER0MODRST_SDMMCOCP; + + /* disable all components except ECC_OCP, L4 Timer0 and L4 WD0 */ + writel(0xffffffff, ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER1MODRST); + setbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, + ~mask_ecc_ocp); + + /* Finally disable the ECC_OCP */ + setbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, + mask_ecc_ocp); +} + +void arria10_reset_deassert_dedicated_peripherals(void) +{ + uint32_t mask; + + mask = ARRIA10_RSTMGR_PER0MODRST_SDMMCOCP | + ARRIA10_RSTMGR_PER0MODRST_QSPIOCP | + ARRIA10_RSTMGR_PER0MODRST_NANDOCP | + ARRIA10_RSTMGR_PER0MODRST_DMAOCP; + + /* enable ECC OCP first */ + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, mask); + + mask = ARRIA10_RSTMGR_PER0MODRST_SDMMC | + ARRIA10_RSTMGR_PER0MODRST_QSPI | + ARRIA10_RSTMGR_PER0MODRST_NAND | + ARRIA10_RSTMGR_PER0MODRST_DMA; + + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, mask); + + mask = ARRIA10_RSTMGR_PER1MODRST_L4SYSTIMER0 | + ARRIA10_RSTMGR_PER1MODRST_UART1 | + ARRIA10_RSTMGR_PER1MODRST_UART0; + + clrbits_le32(ARRIA10_RSTMGR_ADDR + 
ARRIA10_RSTMGR_PER1MODRST, mask); + + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, + ARRIA10_RSTMGR_OCP_MASK); + mask = ARRIA10_RSTMGR_PER0MODRST_EMAC1 | + ARRIA10_RSTMGR_PER0MODRST_EMAC2 | + ARRIA10_RSTMGR_PER0MODRST_EMAC0 | + ARRIA10_RSTMGR_PER0MODRST_SPIS0 | + ARRIA10_RSTMGR_PER0MODRST_SPIM0; + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, mask); + + mask = ARRIA10_RSTMGR_PER1MODRST_I2C3 | + ARRIA10_RSTMGR_PER1MODRST_I2C4 | + ARRIA10_RSTMGR_PER1MODRST_I2C2 | + ARRIA10_RSTMGR_PER1MODRST_UART1 | + ARRIA10_RSTMGR_PER1MODRST_GPIO2; + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER1MODRST, mask); +} + +static const uint32_t per0fpgamasks[] = { + ARRIA10_RSTMGR_PER0MODRST_EMAC0OCP | ARRIA10_RSTMGR_PER0MODRST_EMAC0, + ARRIA10_RSTMGR_PER0MODRST_EMAC1OCP | ARRIA10_RSTMGR_PER0MODRST_EMAC1, + ARRIA10_RSTMGR_PER0MODRST_EMAC2OCP | ARRIA10_RSTMGR_PER0MODRST_EMAC2, + 0, /* i2c0 per0mod */ + 0, /* i2c1 per0mod */ + 0, /* i2c0_emac */ + 0, /* i2c1_emac */ + 0, /* i2c2_emac */ + ARRIA10_RSTMGR_PER0MODRST_NANDOCP | ARRIA10_RSTMGR_PER0MODRST_NAND, + ARRIA10_RSTMGR_PER0MODRST_QSPIOCP | ARRIA10_RSTMGR_PER0MODRST_QSPI, + ARRIA10_RSTMGR_PER0MODRST_SDMMCOCP | ARRIA10_RSTMGR_PER0MODRST_SDMMC, + ARRIA10_RSTMGR_PER0MODRST_SPIM0, + ARRIA10_RSTMGR_PER0MODRST_SPIM1, + ARRIA10_RSTMGR_PER0MODRST_SPIS0, + ARRIA10_RSTMGR_PER0MODRST_SPIS1, + 0, /* uart0 per0mod */ + 0, /* uart1 per0mod */ +}; + +static const uint32_t per1fpgamasks[] = { + 0, /* emac0 per0mod */ + 0, /* emac1 per0mod */ + 0, /* emac2 per0mod */ + ARRIA10_RSTMGR_PER1MODRST_I2C0, + ARRIA10_RSTMGR_PER1MODRST_I2C1, + ARRIA10_RSTMGR_PER1MODRST_I2C2, + ARRIA10_RSTMGR_PER1MODRST_I2C3, + ARRIA10_RSTMGR_PER1MODRST_I2C4, + 0, /* nand per0mod */ + 0, /* qspi per0mod */ + 0, /* sdmmc per0mod */ + 0, /* spim0 per0mod */ + 0, /* spim1 per0mod */ + 0, /* spis0 per0mod */ + 0, /* spis1 per0mod */ + ARRIA10_RSTMGR_PER1MODRST_UART0, + ARRIA10_RSTMGR_PER1MODRST_UART1, +}; + +void arria10_reset_deassert_fpga_peripherals(void) +{ + uint32_t mask0 = 0; + uint32_t mask1 = 0; + uint32_t fpga_pinux_addr = ARRIA10_PINMUX_FPGA_INTERFACE_ADDR; + int i; + + for (i = 0; i < ARRAY_SIZE(per1fpgamasks); i++) { + if (readl(fpga_pinux_addr)) { + mask0 |= per0fpgamasks[i]; + mask1 |= per1fpgamasks[i]; + } + fpga_pinux_addr += sizeof(uint32_t); + } + + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, + mask0 & ARRIA10_RSTMGR_OCP_MASK); + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, mask0); + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER1MODRST, mask1); +} + +void arria10_reset_deassert_shared_peripherals_q1(uint32_t *mask0, + uint32_t *mask1) +{ + uint32_t pinmux_addr = ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR; + int q1; + + for (q1 = 1; q1 <= 12; q1++, pinmux_addr += sizeof(uint32_t)) { + switch (readl(pinmux_addr)) { + case ARRIA10_PINMUX_SHARED_IO_Q1_GPIO: + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_GPIO0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q1_NAND: + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_NANDOCP| + ARRIA10_RSTMGR_PER0MODRST_NAND; + break; + case ARRIA10_PINMUX_SHARED_IO_Q1_UART: + if ((q1 >= 1) && (q1 <= 4)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_UART0; + else if ((q1 >= 5) && (q1 <= 8)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_UART1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q1_QSPI: + if ((q1 >= 5) && (q1 <= 6)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_QSPIOCP | + ARRIA10_RSTMGR_PER0MODRST_QSPI; + break; + case ARRIA10_PINMUX_SHARED_IO_Q1_USB: + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_USB0OCP | + ARRIA10_RSTMGR_PER0MODRST_USB0; 
+ break; + case ARRIA10_PINMUX_SHARED_IO_Q1_SDMMC: + if ((q1 >= 1) && (q1 <= 10)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SDMMCOCP | + ARRIA10_RSTMGR_PER0MODRST_SDMMC; + break; + case ARRIA10_PINMUX_SHARED_IO_Q1_SPIM: + if ((q1 == 1) || ((q1 >= 5) && (q1 <= 8))) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIM0; + else if ((q1 == 2) || ((q1 >= 9) && (q1 <= 12))) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIM1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q1_SPIS: + if ((q1 >= 1) && (q1 <= 4)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIS0; + else if ((q1 >= 9) && (q1 <= 12)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIS1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q1_EMAC: + if ((q1 == 7) || (q1 == 8)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC2OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC2; + else if ((q1 == 9) || (q1 == 10)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC1OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC1; + else if ((q1 == 11) || (q1 == 12)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC0OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q1_I2C: + if ((q1 == 3) || (q1 == 4)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C1; + else if ((q1 == 5) || (q1 == 6)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C0; + else if ((q1 == 7) || (q1 == 8)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C4; + else if ((q1 == 9) || (q1 == 10)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C3; + else if ((q1 == 11) || (q1 == 12)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C2; + break; + } + } +} + +void arria10_reset_deassert_shared_peripherals_q2(uint32_t *mask0, + uint32_t *mask1) +{ + uint32_t pinmux_addr = ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR; + int q2; + + for (q2 = 1; q2 <= 12; q2++, pinmux_addr += sizeof(uint32_t)) { + switch (readl(pinmux_addr)) { + case ARRIA10_PINMUX_SHARED_IO_Q2_GPIO: + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_GPIO0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q2_NAND: + if ((q2 != 4) && (q2 != 5)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_NANDOCP | + ARRIA10_RSTMGR_PER0MODRST_NAND; + break; + case ARRIA10_PINMUX_SHARED_IO_Q2_UART: + if ((q2 >= 9) && (q2 <= 12)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_UART0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q2_USB: + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_USB1OCP | + ARRIA10_RSTMGR_PER0MODRST_USB1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q2_EMAC: + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC0OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q2_SPIM: + if ((q2 >= 8) && (q2 <= 12)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIM1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q2_SPIS: + if ((q2 >= 9) && (q2 <= 12)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIS0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q2_I2C: + if ((q2 == 9) || (q2 == 10)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C1; + else if ((q2 == 11) || (q2 == 12)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C0; + break; + } + } +} + +void arria10_reset_deassert_shared_peripherals_q3(uint32_t *mask0, + uint32_t *mask1) +{ + uint32_t pinmux_addr = ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR; + int q3; + + for (q3 = 1; q3 <= 12; q3++, pinmux_addr += sizeof(uint32_t)) { + switch (readl(pinmux_addr)) { + case ARRIA10_PINMUX_SHARED_IO_Q3_GPIO: + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_GPIO1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q3_NAND: + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_NANDOCP | + ARRIA10_RSTMGR_PER0MODRST_NAND; + break; + case ARRIA10_PINMUX_SHARED_IO_Q3_UART: + if ((q3 >= 1) && (q3 <= 4)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_UART0; + else if ((q3 >= 5) && (q3 <= 8)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_UART1; + break; +
case ARRIA10_PINMUX_SHARED_IO_Q3_EMAC1: + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC1OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q3_SPIM: + if ((q3 >= 1) && (q3 <= 5)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIM1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q3_SPIS: + if ((q3 >= 5) && (q3 <= 8)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIS1; + else if ((q3 >= 9) && (q3 <= 12)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIS0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q3_EMAC0: + if ((q3 == 9) || (q3 == 10)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC2OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC2; + else if ((q3 == 11) || (q3 == 12)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC0OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q3_I2C: + if ((q3 == 7) || (q3 == 8)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C1; + else if ((q3 == 3) || (q3 == 4)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C0; + else if ((q3 == 9) || (q3 == 10)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C4; + else if ((q3 == 11) || (q3 == 12)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C2; + break; + } + } +} + +void arria10_reset_deassert_shared_peripherals_q4(uint32_t *mask0, uint32_t *mask1) +{ + uint32_t pinmux_addr = ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR; + int q4; + + for (q4 = 1; q4 <= 12; q4++, pinmux_addr += sizeof(uint32_t)) { + switch (readl(pinmux_addr)) { + case ARRIA10_PINMUX_SHARED_IO_Q4_GPIO: + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_GPIO1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q4_NAND: + if (q4 != 4) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_NANDOCP | + ARRIA10_RSTMGR_PER0MODRST_NAND; + break; + case ARRIA10_PINMUX_SHARED_IO_Q4_UART: + if ((q4 >= 3) && (q4 <= 6)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_UART1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q4_QSPI: + if ((q4 == 5) || (q4 == 6)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_QSPIOCP | + ARRIA10_RSTMGR_PER0MODRST_QSPI; + break; + case ARRIA10_PINMUX_SHARED_IO_Q4_EMAC1: + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC2OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC2; + break; + case ARRIA10_PINMUX_SHARED_IO_Q4_SDMMC: + if ((q4 >= 1) && (q4 <= 6)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SDMMCOCP | + ARRIA10_RSTMGR_PER0MODRST_SDMMC; + break; + case ARRIA10_PINMUX_SHARED_IO_Q4_SPIM: + if ((q4 >= 6) && (q4 <= 12)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIM0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q4_SPIS: + if ((q4 >= 9) && (q4 <= 12)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_SPIS1; + break; + case ARRIA10_PINMUX_SHARED_IO_Q4_EMAC0: + if ((q4 == 7) || (q4 == 8)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC1OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC1; + else if ((q4 == 11) || (q4 == 12)) + *mask0 |= ARRIA10_RSTMGR_PER0MODRST_EMAC0OCP | + ARRIA10_RSTMGR_PER0MODRST_EMAC0; + break; + case ARRIA10_PINMUX_SHARED_IO_Q4_I2C: + if ((q4 == 1) || (q4 == 2)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C1; + else if ((q4 == 7) || (q4 == 8)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C3; + else if ((q4 == 9) || (q4 == 10)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C4; + else if ((q4 == 11) || (q4 == 12)) + *mask1 |= ARRIA10_RSTMGR_PER1MODRST_I2C2; + break; + } + } +} + +void arria10_reset_deassert_shared_peripherals(void) +{ + uint32_t mask0 = 0; + uint32_t mask1 = 0; + + arria10_reset_deassert_shared_peripherals_q1(&mask0, &mask1); + arria10_reset_deassert_shared_peripherals_q2(&mask0, &mask1); + arria10_reset_deassert_shared_peripherals_q3(&mask0, &mask1); + arria10_reset_deassert_shared_peripherals_q4(&mask0, &mask1); + + mask1 |= ARRIA10_RSTMGR_PER1MODRST_WATCHDOG1 | + 
ARRIA10_RSTMGR_PER1MODRST_L4SYSTIMER1 | + ARRIA10_RSTMGR_PER1MODRST_SPTIMER0 | + ARRIA10_RSTMGR_PER1MODRST_SPTIMER1; + + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, + mask0 & ARRIA10_RSTMGR_OCP_MASK); + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER1MODRST, mask1); + clrbits_le32(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_PER0MODRST, mask0); +} diff --git a/arch/arm/mach-socfpga/arria10-sdram.c b/arch/arm/mach-socfpga/arria10-sdram.c new file mode 100644 index 0000000000..08de0e32cd --- /dev/null +++ b/arch/arm/mach-socfpga/arria10-sdram.c @@ -0,0 +1,535 @@ +/* + * Copyright (C) 2014-2016 Altera Corporation + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include + + +/* FAWBANK - Number of Bank of a given device involved in the FAW period. */ +#define ARRIA10_SDR_ACTIVATE_FAWBANK (0x1) + +#define ARRIA10_EMIF_RST BIT(31) +#define ARRIA10_OCT_CAL_REQ BIT(30) +#define ARRIA10_OCT_CAL_ACK 31 + +#define ARRIA10_NIOS_OCT_DONE BIT(7) +#define ARRIA10_NIOS_OCT_ACK 7 + +/* Engineering sample silicon */ +#define ARRIA10_ES_SILICON_VER 0x00010001 + +#define DDR_REG_SEQ2CORE 0xFFD0507C +#define DDR_REG_CORE2SEQ 0xFFD05078 +#define DDR_REG_GPOUT 0xFFD03010 +#define DDR_REG_GPIN 0xFFD03014 +#define DDR_MAX_TRIES 0x00100000 +#define IO48_MMR_DRAMSTS 0xFFCFA0EC +#define IO48_MMR_NIOS2_RESERVE0 0xFFCFA110 +#define IO48_MMR_NIOS2_RESERVE1 0xFFCFA114 +#define IO48_MMR_NIOS2_RESERVE2 0xFFCFA118 + +#define SEQ2CORE_MASK 0xF +#define CORE2SEQ_INT_REQ 0xF +#define SEQ2CORE_INT_RESP_BIT 3 + +#define DDR_ECC_DMA_SIZE 1500 +#define DDR_READ_LATENCY_DELAY 40 + +#define ARRIA_DDR_CONFIG(A, B, C, R) ((A<<24)|(B<<16)|(C<<8)|R) +/* The followring are the supported configurations */ +uint32_t ddr_config[] = { + /* Chip - Row - Bank - Column Style */ + /* All Types */ + ARRIA_DDR_CONFIG(0, 3, 10, 12), + ARRIA_DDR_CONFIG(0, 3, 10, 13), + ARRIA_DDR_CONFIG(0, 3, 10, 14), + ARRIA_DDR_CONFIG(0, 3, 10, 15), + ARRIA_DDR_CONFIG(0, 3, 10, 16), + ARRIA_DDR_CONFIG(0, 3, 10, 17), + /* LPDDR x16 */ + ARRIA_DDR_CONFIG(0, 3, 11, 14), + ARRIA_DDR_CONFIG(0, 3, 11, 15), + ARRIA_DDR_CONFIG(0, 3, 11, 16), + ARRIA_DDR_CONFIG(0, 3, 12, 15), + /* DDR4 Only */ + ARRIA_DDR_CONFIG(0, 4, 10, 14), + ARRIA_DDR_CONFIG(0, 4, 10, 15), + ARRIA_DDR_CONFIG(0, 4, 10, 16), + ARRIA_DDR_CONFIG(0, 4, 10, 17), /* 14 */ + /* Chip - Bank - Row - Column Style */ + ARRIA_DDR_CONFIG(1, 3, 10, 12), + ARRIA_DDR_CONFIG(1, 3, 10, 13), + ARRIA_DDR_CONFIG(1, 3, 10, 14), + ARRIA_DDR_CONFIG(1, 3, 10, 15), + ARRIA_DDR_CONFIG(1, 3, 10, 16), + ARRIA_DDR_CONFIG(1, 3, 10, 17), + ARRIA_DDR_CONFIG(1, 3, 11, 14), + ARRIA_DDR_CONFIG(1, 3, 11, 15), + ARRIA_DDR_CONFIG(1, 3, 11, 16), + ARRIA_DDR_CONFIG(1, 3, 12, 15), + /* DDR4 Only */ + ARRIA_DDR_CONFIG(1, 4, 10, 14), + ARRIA_DDR_CONFIG(1, 4, 10, 15), + ARRIA_DDR_CONFIG(1, 4, 10, 16), + ARRIA_DDR_CONFIG(1, 4, 10, 17), +}; +#define DDR_CONFIG_ELEMENTS ARRAY_SIZE(ddr_config) + +static int match_ddr_conf(uint32_t ddr_conf) +{ + int i; + + for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) { + if (ddr_conf == ddr_config[i]) + return i; + } + return 0; +} + +/* Check whether SDRAM is successfully Calibrated */ +static int is_sdram_cal_success(void) +{ + return readl(ARRIA10_ECC_HMC_OCP_DDRCALSTAT); +} + +static unsigned char ddr_get_bit(uint32_t ereg, unsigned char bit) +{ + unsigned int reg = readl(ereg); + + return (reg & (1 << bit)) ? 
1 : 0; +} + +static unsigned char ddr_wait_bit(uint32_t ereg, uint32_t bit, + uint32_t expected, uint32_t timeout_usec) +{ + unsigned int tmr; + + for (tmr = 0; tmr < timeout_usec; tmr += 100) { + __udelay(100); + if (ddr_get_bit(ereg, bit) == expected) + return 0; + } + + return 1; +} + +static void ddr_delay(uint32_t delay) +{ + int tmr; + + for (tmr = 0; tmr < delay; tmr++) + __udelay(1000); +} + +/* + * Diagram of OCT Workaround: + * + * EMIF Core HPS Processor OCT FSM + * ================================================================= + * + * seq2core ==============> + * [0x?????????] OCT Request [0xFFD0507C] + * + * core2seq + * [0x?????????] <============== + * OCT Ready [0xFFD05078] + * + * [0xFFD03010] ============> Request + * OCT Request + * + * [0xFFD03014] <============ Ready + * OCT Ready + * Signal definitions: + * + * seq2core[7] - OCT calibration request (act-high) + * core2seq[7] - Signals OCT FSM is ready (active high) + * gpout[31] - EMIF Reset override (active low) + * gpout[30] - OCT calibration request (act-high) + * gpin[31] - OCT calibration ready (act-high) + */ + +static int ddr_calibration_es_workaround(void) +{ + ddr_delay(500); + /* Step 1 - Initiating Reset Sequence */ + clrbits_le32(DDR_REG_GPOUT, ARRIA10_EMIF_RST); + ddr_delay(10); + + /* Step 2 - Clearing registers to EMIF core */ + writel(0, DDR_REG_CORE2SEQ); /*Clear the HPS->NIOS COM reg.*/ + + /* Step 3 - Clearing registers to OCT core */ + clrbits_le32(DDR_REG_GPOUT, ARRIA10_OCT_CAL_REQ); + ddr_delay(5); + + /* Step 4 - Taking EMIF out of reset */ + setbits_le32(DDR_REG_GPOUT, ARRIA10_EMIF_RST); + ddr_delay(10); + + /* Step 5 - Waiting for OCT circuitry to come out of reset */ + if (ddr_wait_bit(DDR_REG_GPIN, ARRIA10_OCT_CAL_ACK, 1, 1000000)) + return -1; + + /* Step 6 - Allowing EMIF to proceed with OCT calibration */ + setbits_le32(DDR_REG_CORE2SEQ, ARRIA10_NIOS_OCT_DONE); + + /* Step 7 - Waiting for EMIF request */ + if (ddr_wait_bit(DDR_REG_SEQ2CORE, ARRIA10_NIOS_OCT_ACK, 1, 2000000)) + return -2; + + /* Step 8 - Acknowledging EMIF OCT request */ + clrbits_le32(DDR_REG_CORE2SEQ, ARRIA10_NIOS_OCT_DONE); + + /* Step 9 - Waiting for EMIF response */ + if (ddr_wait_bit(DDR_REG_SEQ2CORE, ARRIA10_NIOS_OCT_ACK, 0, 2000000)) + return -3; + + /* Step 10 - Triggering OCT Calibration */ + setbits_le32(DDR_REG_GPOUT, ARRIA10_OCT_CAL_REQ); + + /* Step 11 - Waiting for OCT response */ + if (ddr_wait_bit(DDR_REG_GPIN, ARRIA10_OCT_CAL_ACK, 0, 1000)) + return -4; + + /* Step 12 - Clearing OCT Request bit */ + clrbits_le32(DDR_REG_GPOUT, ARRIA10_OCT_CAL_REQ); + + /* Step 13 - Waiting for OCT Engine */ + if (ddr_wait_bit(DDR_REG_GPIN, ARRIA10_OCT_CAL_ACK, 1, 200000)) + return -5; + + /* Step 14 - Proceeding with EMIF calibration */ + setbits_le32(DDR_REG_CORE2SEQ, ARRIA10_NIOS_OCT_DONE); + + ddr_delay(100); + + return 0; +} + +static int emif_clear(void) +{ + uint32_t s2c; + uint32_t i = DDR_MAX_TRIES; + + writel(0, DDR_REG_CORE2SEQ); + do { + ddr_delay(50); + s2c = readl(DDR_REG_SEQ2CORE); + } while ((s2c & SEQ2CORE_MASK) && (--i > 0)); + + return !i; +} +static int emif_reset(void) +{ + uint32_t c2s, s2c; + + c2s = readl(DDR_REG_CORE2SEQ); + s2c = readl(DDR_REG_SEQ2CORE); + + pr_debug("c2s=%08x s2c=%08x nr0=%08x nr1=%08x nr2=%08x dst=%08x\n", + c2s, s2c, readl(IO48_MMR_NIOS2_RESERVE0), + readl(IO48_MMR_NIOS2_RESERVE1), + readl(IO48_MMR_NIOS2_RESERVE2), + readl(IO48_MMR_DRAMSTS)); + + if ((s2c & SEQ2CORE_MASK) && emif_clear()) { + printf("failed emif_clear()\n"); + return -1; + } + + writel(CORE2SEQ_INT_REQ, 
DDR_REG_CORE2SEQ); + + if (ddr_wait_bit(DDR_REG_SEQ2CORE, SEQ2CORE_INT_RESP_BIT, 0, 1000000)) { + printf("emif_reset failed to see interrupt acknowledge\n"); + return -2; + } else { + printf("emif_reset interrupt acknowledged\n"); + } + + if (emif_clear()) { + printf("emif_clear() failed\n"); + return -3; + } + pr_debug("emif_reset interrupt cleared\n"); + + pr_debug("nr0=%08x nr1=%08x nr2=%08x\n", + readl(IO48_MMR_NIOS2_RESERVE0), + readl(IO48_MMR_NIOS2_RESERVE1), + readl(IO48_MMR_NIOS2_RESERVE2)); + + return 0; +} + +static int arria10_ddr_setup(void) +{ + int i, j, retcode, ddr_setup_complete = 0; + int chip_version = readl(ARRIA10_SYSMGR_SILICONID1); + + /* Try 3 times to do a calibration */ + for (i = 0; (i < 3) && !ddr_setup_complete; i++) { + /* Only engineering sample needs calibration workaround */ + if (ARRIA10_ES_SILICON_VER == chip_version) { + retcode = ddr_calibration_es_workaround(); + if (retcode) { + printf("DDRCAL: Failure: %d\n", retcode); + continue; + } + } + + /* A delay to wait for calibration bit to set */ + for (j = 0; (j < 10) && !ddr_setup_complete; j++) { + ddr_delay(500); + ddr_setup_complete = is_sdram_cal_success(); + } + + if (!ddr_setup_complete && + (ARRIA10_ES_SILICON_VER != chip_version)) { + emif_reset(); + } + } + + if (!ddr_setup_complete) { + puts_ll("Error: Could Not Calibrate SDRAM\n"); + return -1; + } + + return 0; +} + +/* Function to startup the SDRAM*/ +static int arria10_sdram_startup(void) +{ + uint32_t val; + + /* Release NOC ddr scheduler from reset */ + val = readl(ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_BRGMODRST); + val &= ~ARRIA10_RSTMGR_BRGMODRST_DDRSCH; + writel(val, ARRIA10_RSTMGR_ADDR + ARRIA10_RSTMGR_BRGMODRST); + + /* Bringup the DDR (calibration and configuration) */ + return arria10_ddr_setup(); +} + +/* Function to initialize SDRAM MMR and NOC DDR scheduler*/ +static void arria10_sdram_mmr_init(void) +{ + uint32_t update_value, io48_value; + union ctrlcfg0_reg ctrlcfg0 = + (union ctrlcfg0_reg)readl(ARRIA10_IO48_HMC_MMR_CTRLCFG0); + union ctrlcfg1_reg ctrlcfg1 = + (union ctrlcfg1_reg)readl(ARRIA10_IO48_HMC_MMR_CTRLCFG1); + union dramaddrw_reg dramaddrw = + (union dramaddrw_reg)readl(ARRIA10_IO48_HMC_MMR_DRAMADDRW); + union caltiming0_reg caltim0 = + (union caltiming0_reg)readl(ARRIA10_IO48_HMC_MMR_CALTIMING0); + union caltiming1_reg caltim1 = + (union caltiming1_reg)readl(ARRIA10_IO48_HMC_MMR_CALTIMING1); + union caltiming2_reg caltim2 = + (union caltiming2_reg)readl(ARRIA10_IO48_HMC_MMR_CALTIMING2); + union caltiming3_reg caltim3 = + (union caltiming3_reg)readl(ARRIA10_IO48_HMC_MMR_CALTIMING3); + union caltiming4_reg caltim4 = + (union caltiming4_reg)readl(ARRIA10_IO48_HMC_MMR_CALTIMING4); + union caltiming9_reg caltim9 = + (union caltiming9_reg)readl(ARRIA10_IO48_HMC_MMR_CALTIMING9); + uint32_t ddrioctl; + + /* + * Configure the DDR IO size [0xFFCFB008] + * niosreserve0: Used to indicate DDR width & + * bit[7:0] = Number of data bits (0x20 for 32bit) + * bit[8] = 1 if user-mode OCT is present + * bit[9] = 1 if warm reset compiled into EMIF Cal Code + * bit[10] = 1 if warm reset is on during generation in EMIF Cal + * niosreserve1: IP ADCDS version encoded as 16 bit value + * bit[2:0] = Variant (0=not special,1=FAE beta, 2=Customer beta, + * 3=EAP, 4-6 are reserved) + * bit[5:3] = Service Pack # (e.g. 
1) + * bit[9:6] = Minor Release # + * bit[14:10] = Major Release # + */ + if ((readl(ARRIA10_IO48_HMC_MMR_NIOSRESERVE1) >> 6) & 0x1FF) { + update_value = readl(ARRIA10_IO48_HMC_MMR_NIOSRESERVE0); + writel(((update_value & 0xFF) >> 5), + ARRIA10_ECC_HMC_OCP_DDRIOCTRL); + } + + ddrioctl = readl(ARRIA10_ECC_HMC_OCP_DDRIOCTRL); + + /* Set the DDR Configuration [0xFFD12400] */ + io48_value = ARRIA_DDR_CONFIG(ctrlcfg1.cfg_addr_order, + (dramaddrw.cfg_bank_addr_width + + dramaddrw.cfg_bank_group_addr_width), + dramaddrw.cfg_col_addr_width, + dramaddrw.cfg_row_addr_width); + + update_value = match_ddr_conf(io48_value); + if (update_value) + writel(update_value, ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_DDRCONF); + + /* + * Configure DDR timing [0xFFD1240C] + * RDTOMISS = tRTP + tRP + tRCD - BL/2 + * WRTOMISS = WL + tWR + tRP + tRCD and + * WL = RL + BL/2 + 2 - rd-to-wr ; tWR = 15ns so... + * First part of equation is in memory clock units so divide by 2 + * for HMC clock units. 1066MHz is close to 1ns so use 15 directly. + * WRTOMISS = ((RL + BL/2 + 2 + tWR) >> 1)- rd-to-wr + tRP + tRCD + */ + update_value = (caltim2.cfg_rd_to_pch + caltim4.cfg_pch_to_valid + + caltim0.cfg_act_to_rdwr - + (ctrlcfg0.cfg_ctrl_burst_len >> 2)); + io48_value = ((((readl(ARRIA10_IO48_HMC_MMR_DRAMTIMING0) & + ARRIA10_IO48_DRAMTIME_MEM_READ_LATENCY) + 2 + 15 + + (ctrlcfg0.cfg_ctrl_burst_len >> 1)) >> 1) - + /* Up to here was in memory cycles so divide by 2 */ + caltim1.cfg_rd_to_wr + caltim0.cfg_act_to_rdwr + + caltim4.cfg_pch_to_valid); + + writel(((caltim0.cfg_act_to_act << + ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_ACTTOACT_LSB) | + (update_value << + ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_RDTOMISS_LSB) | + (io48_value << + ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_WRTOMISS_LSB) | + ((ctrlcfg0.cfg_ctrl_burst_len >> 2) << + ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_BURSTLEN_LSB) | + (caltim1.cfg_rd_to_wr << + ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_RDTOWR_LSB) | + (caltim3.cfg_wr_to_rd << + ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_WRTORD_LSB) | + (((ddrioctl == 1) ? 1 : 0) << + ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_BWRATIO_LSB)), + ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_DDRTIMING); + + /* Configure DDR mode [0xFFD12410] [precharge = 0] */ + writel(((ddrioctl ? 
0 : 1) << + ARRIA10_NOC_MPU_DDR_T_SCHED_DDRMOD_BWRATIOEXTENDED_LSB), + ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_DDRMODE); + + /* Configure the read latency [0xFFD12414] */ + writel(((readl(ARRIA10_IO48_HMC_MMR_DRAMTIMING0) & + ARRIA10_IO48_DRAMTIME_MEM_READ_LATENCY) >> 1) + + DDR_READ_LATENCY_DELAY, + ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_READLATENCY); + + /* + * Configuring timing values concerning activate commands + * [0xFFD12438] [FAWBANK alway 1 because always 4 bank DDR] + */ + writel(((caltim0.cfg_act_to_act_db << + ARRIA10_NOC_MPU_DDR_T_SCHED_ACTIVATE_RRD_LSB) | + (caltim9.cfg_4_act_to_act << + ARRIA10_NOC_MPU_DDR_T_SCHED_ACTIVATE_FAW_LSB) | + (ARRIA10_SDR_ACTIVATE_FAWBANK << + ARRIA10_NOC_MPU_DDR_T_SCHED_ACTIVATE_FAWBANK_LSB)), + ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_ACTIVATE); + + /* + * Configuring timing values concerning device to device data bus + * ownership change [0xFFD1243C] + */ + writel(((caltim1.cfg_rd_to_rd_dc << + ARRIA10_NOC_MPU_DDR_T_SCHED_DEVTODEV_BUSRDTORD_LSB) | + (caltim1.cfg_rd_to_wr_dc << + ARRIA10_NOC_MPU_DDR_T_SCHED_DEVTODEV_BUSRDTOWR_LSB) | + (caltim3.cfg_wr_to_rd_dc << + ARRIA10_NOC_MPU_DDR_T_SCHED_DEVTODEV_BUSWRTORD_LSB)), + ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_DEVTODEV); + + /* Enable or disable the SDRAM ECC */ + if (ctrlcfg1.cfg_ctrl_enable_ecc) { + setbits_le32(ARRIA10_ECC_HMC_OCP_MPR_ECCCTRL1, + (ARRIA10_ECC_HMC_OCP_ECCCTL_AWB_CNT_RST | + ARRIA10_ECC_HMC_OCP_ECCCTL_CNT_RST | + ARRIA10_ECC_HMC_OCP_ECCCTL_ECC_EN)); + clrbits_le32(ARRIA10_ECC_HMC_OCP_MPR_ECCCTRL1, + (ARRIA10_ECC_HMC_OCP_ECCCTL_AWB_CNT_RST | + ARRIA10_ECC_HMC_OCP_ECCCTL_CNT_RST)); + setbits_le32(ARRIA10_ECC_HMC_OCP_MPR_ECCCTRL2, + (ARRIA10_ECC_HMC_OCP_ECCCTL2_RMW_EN | + ARRIA10_ECC_HMC_OCP_ECCCTL2_AWB_EN)); + } else { + clrbits_le32(ARRIA10_ECC_HMC_OCP_MPR_ECCCTRL1, + (ARRIA10_ECC_HMC_OCP_ECCCTL_AWB_CNT_RST | + ARRIA10_ECC_HMC_OCP_ECCCTL_CNT_RST | + ARRIA10_ECC_HMC_OCP_ECCCTL_ECC_EN)); + clrbits_le32(ARRIA10_ECC_HMC_OCP_MPR_ECCCTRL2, + (ARRIA10_ECC_HMC_OCP_ECCCTL2_RMW_EN | + ARRIA10_ECC_HMC_OCP_ECCCTL2_AWB_EN)); + } +} + +static int arria10_sdram_firewall_setup(void) +{ + uint32_t mpu_en = 0; + + /* set to default state */ + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_EN); + writel(0x00000000, ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x00); + + writel(0xffff0000, ARRIA10_SDR_FW_MPU_FPGA_MPUREGION0ADDR); + + mpu_en |= ARRIA10_NOC_FW_DDR_MPU_MPUREG0EN; + + writel(mpu_en, ARRIA10_SDR_FW_MPU_FPGA_EN); + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_MPUREGION1ADDR); + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_MPUREGION2ADDR); + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_MPUREGION3ADDR); + writel(0xffff0000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM0REGION0ADDR); + + mpu_en |= ARRIA10_NOC_FW_DDR_MPU_MPUREG1EN; + writel(mpu_en, ARRIA10_SDR_FW_MPU_FPGA_EN); + + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM0REGION1ADDR); + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM0REGION2ADDR); + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM0REGION3ADDR); + writel(0xffff0000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM1REGION0ADDR); + + mpu_en |= ARRIA10_NOC_FW_DDR_MPU_MPUREG2EN; + writel(mpu_en, ARRIA10_SDR_FW_MPU_FPGA_EN); + + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM1REGION1ADDR); + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM1REGION2ADDR); + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM1REGION3ADDR); + writel(0xffff0000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM2REGION0ADDR); + + mpu_en |= ARRIA10_NOC_FW_DDR_MPU_MPUREG3EN; + writel(mpu_en, ARRIA10_SDR_FW_MPU_FPGA_EN); + + writel(0x00000000, 
ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM2REGION1ADDR); + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM2REGION2ADDR); + writel(0x00000000, ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM2REGION3ADDR); + + writel(0xffff0000, ARRIA10_NOC_FW_DDR_L3_HPSREGION0ADDR); + writel(ARRIA10_NOC_FW_DDR_L3_HPSREG0EN, ARRIA10_NOC_FW_DDR_L3_EN); + + return 0; +} + +int arria10_ddr_calibration_sequence(void) +{ + /* Check to see if SDRAM cal was success */ + if (arria10_sdram_startup()) { + puts_ll("DDRCAL: Failed\n"); + return -1; + } + + puts_ll("DDRCAL: Success\n"); + + /* initialize the MMR register */ + arria10_sdram_mmr_init(); + + if (arria10_sdram_firewall_setup()) + puts_ll("FW: Error Configuring Firewall\n"); + + return 0; +} diff --git a/arch/arm/mach-socfpga/cyclone5-bootsource.c b/arch/arm/mach-socfpga/cyclone5-bootsource.c index da4102c4f5..717a003425 100644 --- a/arch/arm/mach-socfpga/cyclone5-bootsource.c +++ b/arch/arm/mach-socfpga/cyclone5-bootsource.c @@ -18,6 +18,7 @@ #include #include #include +#include #define CYCLONE5_SYSMGR_BOOTINFO 0x14 @@ -54,4 +55,46 @@ static int cyclone5_boot_save_loc(void) return 0; } -core_initcall(cyclone5_boot_save_loc); + +static int arria10_boot_save_loc(void) +{ + enum bootsource src = BOOTSOURCE_UNKNOWN; + uint32_t val; + + val = readl(ARRIA10_SYSMGR_BOOTINFO); + + switch ((val & 0x7000) >> 12) { + case 0: + /* reserved */ + break; + case 1: + /* FPGA, currently not decoded */ + break; + case 2: + case 3: + src = BOOTSOURCE_NAND; + break; + case 4: + case 5: + src = BOOTSOURCE_MMC; + break; + case 6: + case 7: + src = BOOTSOURCE_SPI; + break; + } + + bootsource_set(src); + bootsource_set_instance(0); + + return 0; +} + +static int socfpga_boot_save_loc(void) +{ + if (IS_ENABLED(CONFIG_ARCH_SOCFPGA_ARRIA10)) + return arria10_boot_save_loc(); + else + return cyclone5_boot_save_loc(); +} +core_initcall(socfpga_boot_save_loc); diff --git a/arch/arm/mach-socfpga/generic.c b/arch/arm/mach-socfpga/generic.c deleted file mode 100644 index c920bd658f..0000000000 --- a/arch/arm/mach-socfpga/generic.c +++ /dev/null @@ -1,104 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define SYSMGR_SDMMCGRP_CTRL_REG (CYCLONE5_SYSMGR_ADDRESS + 0x108) -#define SYSMGR_SDMMC_CTRL_SMPLSEL(smplsel) (((smplsel) & 0x7) << 3) -#define SYSMGR_SDMMC_CTRL_DRVSEL(drvsel) ((drvsel) & 0x7) - -static int socfpga_detect_sdram(void) -{ - void __iomem *base = (void *)CYCLONE5_SDR_ADDRESS; - uint32_t dramaddrw, ctrlwidth, memsize; - int colbits, rowbits, bankbits; - int width_bytes; - - dramaddrw = readl(base + 0x5000 + 0x2c); - - colbits = dramaddrw & 0x1f; - rowbits = (dramaddrw >> 5) & 0x1f; - bankbits = (dramaddrw >> 10) & 0x7; - - ctrlwidth = readl(base + 0x5000 + 0x60); - - switch (ctrlwidth & 0x3) { - default: - case 0: - width_bytes = 1; - break; - case 1: - width_bytes = 2; - break; - case 2: - width_bytes = 4; - break; - } - - memsize = (1 << colbits) * (1 << rowbits) * (1 << bankbits) * width_bytes; - - pr_debug("%s: colbits: %d rowbits: %d bankbits: %d width: %d => memsize: 0x%08x\n", - __func__, colbits, rowbits, bankbits, width_bytes, memsize); - - arm_add_mem_device("ram0", 0x0, memsize); - - return 0; -} - -/* Some initialization for the EMAC */ -static void socfpga_init_emac(void) -{ - uint32_t rst, val; - - /* No need for this without network support, e.g. 
xloader build */ - if (!IS_ENABLED(CONFIG_NET)) - return; - - /* According to Cyclone V datasheet, 17-60 "EMAC HPS Interface - * Initialization", changing PHYSEL should be done with EMAC in reset - * via permodrst. */ - - /* Everything, except L4WD0/1, is out of reset via socfpga_lowlevel_init() */ - rst = readl(CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); - rst |= RSTMGR_PERMODRST_EMAC0 | RSTMGR_PERMODRST_EMAC1; - writel(rst, CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); - - /* Set emac0/1 PHY interface select to RGMII. We could read phy-mode - * from the device tree, if it was desired to support interfaces other - * than RGMII. */ - val = readl(CONFIG_SYSMGR_EMAC_CTRL); - val &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << SYSMGR_EMACGRP_CTRL_PHYSEL0_LSB); - val &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << SYSMGR_EMACGRP_CTRL_PHYSEL1_LSB); - val |= SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII << SYSMGR_EMACGRP_CTRL_PHYSEL0_LSB; - val |= SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII << SYSMGR_EMACGRP_CTRL_PHYSEL1_LSB; - writel(val, CONFIG_SYSMGR_EMAC_CTRL); - - /* Take emac0 and emac1 out of reset */ - rst &= ~(RSTMGR_PERMODRST_EMAC0 | RSTMGR_PERMODRST_EMAC1); - writel(rst, CYCLONE5_RSTMGR_ADDRESS + RESET_MGR_PER_MOD_RESET_OFS); -} - -static int socfpga_init(void) -{ - socfpga_init_emac(); - - writel(SYSMGR_SDMMC_CTRL_DRVSEL(3) | SYSMGR_SDMMC_CTRL_SMPLSEL(0), - SYSMGR_SDMMCGRP_CTRL_REG); - - nic301_slave_ns(); - - socfpga_detect_sdram(); - - return 0; -} -core_initcall(socfpga_init); diff --git a/arch/arm/mach-socfpga/include/mach/arria10-clock-manager.h b/arch/arm/mach-socfpga/include/mach/arria10-clock-manager.h new file mode 100644 index 0000000000..ee2b9b3c5e --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/arria10-clock-manager.h @@ -0,0 +1,249 @@ +/* + * Copyright (C) 2014 Altera Corporation + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#ifndef _ARRIA10_CLOCK_MANAGER_H_ +#define _ARRIA10_CLOCK_MANAGER_H_ + +struct arria10_clock_manager { + /* clkmgr */ + volatile uint32_t ctrl; + volatile uint32_t intr; + volatile uint32_t intrs; + volatile uint32_t intrr; + volatile uint32_t intren; + volatile uint32_t intrens; + volatile uint32_t intrenr; + volatile uint32_t stat; + volatile uint32_t testioctrl; + volatile uint32_t _pad_0x24_0x40[7]; + + /* mainpllgrp*/ + volatile uint32_t main_pll_vco0; + volatile uint32_t main_pll_vco1; + volatile uint32_t main_pll_en; + volatile uint32_t main_pll_ens; + volatile uint32_t main_pll_enr; + volatile uint32_t main_pll_bypass; + volatile uint32_t main_pll_bypasss; + volatile uint32_t main_pll_bypassr; + volatile uint32_t main_pll_mpuclk; + volatile uint32_t main_pll_nocclk; + volatile uint32_t main_pll_cntr2clk; + volatile uint32_t main_pll_cntr3clk; + volatile uint32_t main_pll_cntr4clk; + volatile uint32_t main_pll_cntr5clk; + volatile uint32_t main_pll_cntr6clk; + volatile uint32_t main_pll_cntr7clk; + volatile uint32_t main_pll_cntr8clk; + volatile uint32_t main_pll_cntr9clk; + volatile uint32_t main_pll__pad_0x48_0x5b[5]; + volatile uint32_t main_pll_cntr15clk; + volatile uint32_t main_pll_outrst; + volatile uint32_t main_pll_outrststat; + volatile uint32_t main_pll_nocdiv; + volatile uint32_t main_pll__pad_0x6c_0x80[5]; + + /* perpllgrp*/ + volatile uint32_t per_pll_vco0; + volatile uint32_t per_pll_vco1; + volatile uint32_t per_pll_en; + volatile uint32_t per_pll_ens; + volatile uint32_t per_pll_enr; + volatile uint32_t per_pll_bypass; + volatile uint32_t per_pll_bypasss; + volatile uint32_t per_pll_bypassr; + volatile uint32_t 
per_pll__pad_0x20_0x27[2]; + volatile uint32_t per_pll_cntr2clk; + volatile uint32_t per_pll_cntr3clk; + volatile uint32_t per_pll_cntr4clk; + volatile uint32_t per_pll_cntr5clk; + volatile uint32_t per_pll_cntr6clk; + volatile uint32_t per_pll_cntr7clk; + volatile uint32_t per_pll_cntr8clk; + volatile uint32_t per_pll_cntr9clk; + volatile uint32_t per_pll__pad_0x48_0x5f[6]; + volatile uint32_t per_pll_outrst; + volatile uint32_t per_pll_outrststat; + volatile uint32_t per_pll_emacctl; + volatile uint32_t per_pll_gpiodiv; + volatile uint32_t per_pll__pad_0x70_0x80[4]; +}; + +struct arria10_mainpll_cfg { + uint32_t vco0_psrc; + uint32_t vco1_denom; + uint32_t vco1_numer; + uint32_t mpuclk; + uint32_t mpuclk_cnt; + uint32_t mpuclk_src; + uint32_t nocclk; + uint32_t nocclk_cnt; + uint32_t nocclk_src; + uint32_t cntr2clk_cnt; + uint32_t cntr3clk_cnt; + uint32_t cntr4clk_cnt; + uint32_t cntr5clk_cnt; + uint32_t cntr6clk_cnt; + uint32_t cntr7clk_cnt; + uint32_t cntr7clk_src; + uint32_t cntr8clk_cnt; + uint32_t cntr9clk_cnt; + uint32_t cntr9clk_src; + uint32_t cntr15clk_cnt; + uint32_t nocdiv_l4mainclk; + uint32_t nocdiv_l4mpclk; + uint32_t nocdiv_l4spclk; + uint32_t nocdiv_csatclk; + uint32_t nocdiv_cstraceclk; + uint32_t nocdiv_cspdbgclk; +}; + +struct arria10_perpll_cfg { + uint32_t vco0_psrc; + uint32_t vco1_denom; + uint32_t vco1_numer; + uint32_t cntr2clk_cnt; + uint32_t cntr2clk_src; + uint32_t cntr3clk_cnt; + uint32_t cntr3clk_src; + uint32_t cntr4clk_cnt; + uint32_t cntr4clk_src; + uint32_t cntr5clk_cnt; + uint32_t cntr5clk_src; + uint32_t cntr6clk_cnt; + uint32_t cntr6clk_src; + uint32_t cntr7clk_cnt; + uint32_t cntr8clk_cnt; + uint32_t cntr8clk_src; + uint32_t cntr9clk_cnt; + uint32_t cntr9clk_src; + uint32_t emacctl_emac0sel; + uint32_t emacctl_emac1sel; + uint32_t emacctl_emac2sel; + uint32_t gpiodiv_gpiodbclk; +}; + +extern int arria10_cm_basic_init(struct arria10_mainpll_cfg *mainpll_cfg, + struct arria10_perpll_cfg *perpll_cfg); +extern unsigned int cm_get_mmc_controller_clk_hz(void); +extern void arria10_cm_use_intosc(void); +extern uint32_t cm_l4_main_clk_hz; +extern uint32_t cm_l4_sp_clk_hz; +extern uint32_t cm_l4_mp_clk_hz; +extern uint32_t cm_l4_sys_free_clk_hz; + +#define ARRIA10_CLKMGR_ALTERAGRP_MPU_CLK_OFFSET 0x140 +#define ARRIA10_CLKMGR_MAINPLL_NOC_CLK_OFFSET 0x144 + +/* value */ +#define ARRIA10_CLKMGR_MAINPLL_BYPASS_RESET 0x0000003f +#define ARRIA10_CLKMGR_MAINPLL_VCO0_RESET 0x00010053 +#define ARRIA10_CLKMGR_MAINPLL_VCO1_RESET 0x00010001 +#define ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_EOSC 0x0 +#define ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_E_INTOSC 0x1 +#define ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_F2S 0x2 +#define ARRIA10_CLKMGR_PERPLL_BYPASS_RESET 0x000000ff +#define ARRIA10_CLKMGR_PERPLL_VCO0_RESET 0x00010053 +#define ARRIA10_CLKMGR_PERPLL_VCO1_RESET 0x00010001 +#define ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_EOSC 0x0 +#define ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_E_INTOSC 0x1 +#define ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_F2S 0x2 +#define ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_MAIN 0x3 + +/* mask */ +#define ARRIA10_CLKMGR_MAINPLL_EN_S2FUSER0CLKEN_SET_MSK 0x00000040 +#define ARRIA10_CLKMGR_MAINPLL_EN_HMCPLLREFCLKEN_SET_MSK 0x00000080 +#define ARRIA10_CLKMGR_MAINPLL_VCO0_BGPWRDN_SET_MSK 0x00000001 +#define ARRIA10_CLKMGR_MAINPLL_VCO0_PWRDN_SET_MSK 0x00000002 +#define ARRIA10_CLKMGR_MAINPLL_VCO0_EN_SET_MSK 0x00000004 +#define ARRIA10_CLKMGR_MAINPLL_VCO0_OUTRSTALL_SET_MSK 0x00000008 +#define ARRIA10_CLKMGR_MAINPLL_VCO0_REGEXTSEL_SET_MSK 0x00000010 +#define ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_MSK 0x00000003 
+#define ARRIA10_CLKMGR_MAINPLL_VCO1_NUMER_MSK 0x00001fff +#define ARRIA10_CLKMGR_MAINPLL_VCO1_DENOM_MSK 0x0000003f +#define ARRIA10_CLKMGR_MAINPLL_CNTRCLK_MSK 0x000003ff +#define ARRIA10_CLKMGR_MAINPLL_MPUCLK_CNT_MSK 0x000003ff +#define ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_MAIN 0 +#define ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_PERI 1 +#define ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_OSC1 2 +#define ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_INTOSC 3 +#define ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_FPGA 4 +#define ARRIA10_CLKMGR_MAINPLL_NOCDIV_MSK 0x00000003 +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_CNT_MSK 0x000003ff +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_MSK 0x00000007 +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_MAIN 0 +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_PERI 1 +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_OSC1 2 +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_INTOSC 3 +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_FPGA 4 +#define ARRIA10_CLKMGR_CLKMGR_STAT_BUSY_SET_MSK 0x00000001 +#define ARRIA10_CLKMGR_CLKMGR_STAT_MAINPLLLOCKED_SET_MSK 0x00000100 +#define ARRIA10_CLKMGR_CLKMGR_STAT_PERPLLLOCKED_SET_MSK 0x00000200 +#define ARRIA10_CLKMGR_CLKMGR_STAT_BOOTCLKSRC_SET_MSK 0x00020000 +#define ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLFBSLIP_SET_MSK 0x00000800 +#define ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLFBSLIP_SET_MSK 0x00000400 +#define ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLRFSLIP_SET_MSK 0x00000200 +#define ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLRFSLIP_SET_MSK 0x00000100 +#define ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLLOST_SET_MSK 0x00000008 +#define ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLLOST_SET_MSK 0x00000004 +#define ARRIA10_CLKMGR_CLKMGR_INTR_MAINPLLACHIEVED_SET_MSK 0x00000001 +#define ARRIA10_CLKMGR_CLKMGR_INTR_PERPLLACHIEVED_SET_MSK 0x00000002 +#define ARRIA10_CLKMGR_CLKMGR_CTL_BOOTMOD_SET_MSK 0x00000001 +#define ARRIA10_CLKMGR_CLKMGR_CTL_BOOTCLK_INTOSC_SET_MSK 0x00000300 +#define ARRIA10_CLKMGR_PERPLL_VCO0_BGPWRDN_SET_MSK 0x00000001 +#define ARRIA10_CLKMGR_PERPLL_VCO0_PWRDN_SET_MSK 0x00000002 +#define ARRIA10_CLKMGR_PERPLL_VCO0_EN_SET_MSK 0x00000004 +#define ARRIA10_CLKMGR_PERPLL_VCO0_OUTRSTALL_SET_MSK 0x00000008 +#define ARRIA10_CLKMGR_PERPLL_VCO0_REGEXTSEL_SET_MSK 0x00000010 +#define ARRIA10_CLKMGR_PERPLL_EN_RESET 0x00000f7f +#define ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_MSK 0x00000003 +#define ARRIA10_CLKMGR_PERPLL_VCO1_NUMER_MSK 0x00001fff +#define ARRIA10_CLKMGR_PERPLL_VCO1_DENOM_MSK 0x0000003f +#define ARRIA10_CLKMGR_PERPLL_CNTRCLK_MSK 0x000003ff + +#define ARRIA10_CLKMGR_PERPLLGRP_EN_SDMMCCLK_MASK 0x00000020 +#define ARRIA10_CLKMGR_PERPLLGRP_SRC_MSK 0x00000007 +#define ARRIA10_CLKMGR_PERPLLGRP_SRC_MAIN 0 +#define ARRIA10_CLKMGR_PERPLLGRP_SRC_PERI 1 +#define ARRIA10_CLKMGR_PERPLLGRP_SRC_OSC1 2 +#define ARRIA10_CLKMGR_PERPLLGRP_SRC_INTOSC 3 +#define ARRIA10_CLKMGR_PERPLLGRP_SRC_FPGA 4 + +/* bit shifting macro */ +#define ARRIA10_CLKMGR_MAINPLL_VCO0_PSRC_LSB 8 +#define ARRIA10_CLKMGR_MAINPLL_VCO1_DENOM_LSB 16 +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_PERICNT_LSB 16 +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_MAINPLL_NOCDIV_L4MAINCLK_LSB 0 +#define ARRIA10_CLKMGR_MAINPLL_NOCDIV_L4MPCLK_LSB 8 +#define ARRIA10_CLKMGR_MAINPLL_NOCDIV_L4SPCLK_LSB 16 +#define ARRIA10_CLKMGR_MAINPLL_NOCDIV_CSATCLK_LSB 24 +#define ARRIA10_CLKMGR_MAINPLL_NOCDIV_CSTRACECLK_LSB 26 +#define ARRIA10_CLKMGR_MAINPLL_NOCDIV_CSPDBGCLK_LSB 28 +#define ARRIA10_CLKMGR_MAINPLL_MPUCLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_MAINPLL_MPUCLK_PERICNT_LSB 16 +#define ARRIA10_CLKMGR_MAINPLL_NOCCLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_MAINPLL_CNTR7CLK_SRC_LSB 16 +#define 
ARRIA10_CLKMGR_MAINPLL_CNTR9CLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_PERPLL_VCO1_DENOM_LSB 16 +#define ARRIA10_CLKMGR_PERPLL_VCO0_PSRC_LSB 8 +#define ARRIA10_CLKMGR_PERPLL_CNTR2CLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_PERPLL_CNTR3CLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_PERPLL_CNTR4CLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_PERPLL_CNTR5CLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_PERPLL_CNTR6CLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_PERPLL_CNTR8CLK_SRC_LSB 16 +#define ARRIA10_CLKMGR_PERPLL_EMACCTL_EMAC0SEL_LSB 26 +#define ARRIA10_CLKMGR_PERPLL_EMACCTL_EMAC1SEL_LSB 27 +#define ARRIA10_CLKMGR_PERPLL_EMACCTL_EMAC2SEL_LSB 28 + +/* PLL ramping work around */ +#define ARRIA10_CLKMGR_PLL_RAMP_MPUCLK_THRESHOLD_HZ 900000000 +#define ARRIA10_CLKMGR_PLL_RAMP_NOCCLK_THRESHOLD_HZ 300000000 +#define ARRIA10_CLKMGR_PLL_RAMP_MPUCLK_INCREMENT_HZ 100000000 +#define ARRIA10_CLKMGR_PLL_RAMP_NOCCLK_INCREMENT_HZ 33000000 + +#endif /* _ARRIA10_CLOCK_MANAGER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/arria10-pinmux.h b/arch/arm/mach-socfpga/include/mach/arria10-pinmux.h new file mode 100644 index 0000000000..979e4769db --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/arria10-pinmux.h @@ -0,0 +1,250 @@ +/* + * Copyright (C) 2017 Pengutronix, Steffen Trumtrar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _ARRIA10_PINMUX_H_ +#define _ARRIA10_PINMUX_H_ + +#include + +#define ARRIA10_PINMUX_SHARED_IO_Q1_1_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x00 +#define ARRIA10_PINMUX_SHARED_IO_Q1_2_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x04 +#define ARRIA10_PINMUX_SHARED_IO_Q1_3_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x08 +#define ARRIA10_PINMUX_SHARED_IO_Q1_4_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x0c +#define ARRIA10_PINMUX_SHARED_IO_Q1_5_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x10 +#define ARRIA10_PINMUX_SHARED_IO_Q1_6_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x14 +#define ARRIA10_PINMUX_SHARED_IO_Q1_7_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x18 +#define ARRIA10_PINMUX_SHARED_IO_Q1_8_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x1c +#define ARRIA10_PINMUX_SHARED_IO_Q1_9_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x20 +#define ARRIA10_PINMUX_SHARED_IO_Q1_10_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x24 +#define ARRIA10_PINMUX_SHARED_IO_Q1_11_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x28 +#define ARRIA10_PINMUX_SHARED_IO_Q1_12_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x2c +#define ARRIA10_PINMUX_SHARED_IO_Q2_1_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x30 +#define ARRIA10_PINMUX_SHARED_IO_Q2_2_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x34 +#define ARRIA10_PINMUX_SHARED_IO_Q2_3_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x38 +#define ARRIA10_PINMUX_SHARED_IO_Q2_4_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x3c +#define ARRIA10_PINMUX_SHARED_IO_Q2_5_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x40 +#define ARRIA10_PINMUX_SHARED_IO_Q2_6_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x44 +#define ARRIA10_PINMUX_SHARED_IO_Q2_7_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x48 +#define 
ARRIA10_PINMUX_SHARED_IO_Q2_8_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x4c +#define ARRIA10_PINMUX_SHARED_IO_Q2_9_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x50 +#define ARRIA10_PINMUX_SHARED_IO_Q2_10_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x54 +#define ARRIA10_PINMUX_SHARED_IO_Q2_11_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x58 +#define ARRIA10_PINMUX_SHARED_IO_Q2_12_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x5c +#define ARRIA10_PINMUX_SHARED_IO_Q3_1_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x60 +#define ARRIA10_PINMUX_SHARED_IO_Q3_2_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x64 +#define ARRIA10_PINMUX_SHARED_IO_Q3_3_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x68 +#define ARRIA10_PINMUX_SHARED_IO_Q3_4_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x6c +#define ARRIA10_PINMUX_SHARED_IO_Q3_5_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x70 +#define ARRIA10_PINMUX_SHARED_IO_Q3_6_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x74 +#define ARRIA10_PINMUX_SHARED_IO_Q3_7_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x78 +#define ARRIA10_PINMUX_SHARED_IO_Q3_8_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x7c +#define ARRIA10_PINMUX_SHARED_IO_Q3_9_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x80 +#define ARRIA10_PINMUX_SHARED_IO_Q3_10_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x84 +#define ARRIA10_PINMUX_SHARED_IO_Q3_11_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x88 +#define ARRIA10_PINMUX_SHARED_IO_Q3_12_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x8c +#define ARRIA10_PINMUX_SHARED_IO_Q4_1_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x90 +#define ARRIA10_PINMUX_SHARED_IO_Q4_2_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x94 +#define ARRIA10_PINMUX_SHARED_IO_Q4_3_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x98 +#define ARRIA10_PINMUX_SHARED_IO_Q4_4_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0x9c +#define ARRIA10_PINMUX_SHARED_IO_Q4_5_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0xa0 +#define ARRIA10_PINMUX_SHARED_IO_Q4_6_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0xa4 +#define ARRIA10_PINMUX_SHARED_IO_Q4_7_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0xa8 +#define ARRIA10_PINMUX_SHARED_IO_Q4_8_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0xac +#define ARRIA10_PINMUX_SHARED_IO_Q4_9_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0xb0 +#define ARRIA10_PINMUX_SHARED_IO_Q4_10_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0xb4 +#define ARRIA10_PINMUX_SHARED_IO_Q4_11_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0xb8 +#define ARRIA10_PINMUX_SHARED_IO_Q4_12_ADDR ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR + 0xbc + +#define ARRIA10_PINMUX_SHARED_IO_Q1_I2C 0 +#define ARRIA10_PINMUX_SHARED_IO_Q1_EMAC 1 +#define ARRIA10_PINMUX_SHARED_IO_Q1_SPIS 2 +#define ARRIA10_PINMUX_SHARED_IO_Q1_SPIM 3 +#define ARRIA10_PINMUX_SHARED_IO_Q1_SDMMC 4 +#define ARRIA10_PINMUX_SHARED_IO_Q1_USB 8 +#define ARRIA10_PINMUX_SHARED_IO_Q1_QSPI 12 +#define ARRIA10_PINMUX_SHARED_IO_Q1_UART 13 +#define ARRIA10_PINMUX_SHARED_IO_Q1_NAND 14 +#define ARRIA10_PINMUX_SHARED_IO_Q1_GPIO 15 + +#define ARRIA10_PINMUX_SHARED_IO_Q2_I2C 0 +#define ARRIA10_PINMUX_SHARED_IO_Q2_SPIS 2 +#define ARRIA10_PINMUX_SHARED_IO_Q2_SPIM 3 +#define ARRIA10_PINMUX_SHARED_IO_Q2_EMAC 4 +#define ARRIA10_PINMUX_SHARED_IO_Q2_USB 8 +#define ARRIA10_PINMUX_SHARED_IO_Q2_UART 13 +#define ARRIA10_PINMUX_SHARED_IO_Q2_NAND 14 +#define ARRIA10_PINMUX_SHARED_IO_Q2_GPIO 15 + +#define ARRIA10_PINMUX_SHARED_IO_Q3_I2C 0 +#define ARRIA10_PINMUX_SHARED_IO_Q3_EMAC0 1 +#define ARRIA10_PINMUX_SHARED_IO_Q3_SPIS 2 +#define ARRIA10_PINMUX_SHARED_IO_Q3_SPIM 3 +#define 
ARRIA10_PINMUX_SHARED_IO_Q3_EMAC1 8 +#define ARRIA10_PINMUX_SHARED_IO_Q3_UART 13 +#define ARRIA10_PINMUX_SHARED_IO_Q3_NAND 14 +#define ARRIA10_PINMUX_SHARED_IO_Q3_GPIO 15 + +#define ARRIA10_PINMUX_SHARED_IO_Q4_I2C 0 +#define ARRIA10_PINMUX_SHARED_IO_Q4_EMAC0 1 +#define ARRIA10_PINMUX_SHARED_IO_Q4_SPIS 2 +#define ARRIA10_PINMUX_SHARED_IO_Q4_SPIM 3 +#define ARRIA10_PINMUX_SHARED_IO_Q4_SDMMC 4 +#define ARRIA10_PINMUX_SHARED_IO_Q4_EMAC1 8 +#define ARRIA10_PINMUX_SHARED_IO_Q4_QSPI 12 +#define ARRIA10_PINMUX_SHARED_IO_Q4_UART 13 +#define ARRIA10_PINMUX_SHARED_IO_Q4_NAND 14 +#define ARRIA10_PINMUX_SHARED_IO_Q4_GPIO 15 + +#define ARRIA10_PINMUX_DEDICATED_IO_1_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x00 +#define ARRIA10_PINMUX_DEDICATED_IO_2_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x04 +#define ARRIA10_PINMUX_DEDICATED_IO_3_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x08 +#define ARRIA10_PINMUX_DEDICATED_IO_4_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x0c +#define ARRIA10_PINMUX_DEDICATED_IO_5_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x10 +#define ARRIA10_PINMUX_DEDICATED_IO_6_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x14 +#define ARRIA10_PINMUX_DEDICATED_IO_7_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x18 +#define ARRIA10_PINMUX_DEDICATED_IO_8_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x1c +#define ARRIA10_PINMUX_DEDICATED_IO_9_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x20 +#define ARRIA10_PINMUX_DEDICATED_IO_10_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x24 +#define ARRIA10_PINMUX_DEDICATED_IO_11_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x28 +#define ARRIA10_PINMUX_DEDICATED_IO_12_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x2c +#define ARRIA10_PINMUX_DEDICATED_IO_13_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x30 +#define ARRIA10_PINMUX_DEDICATED_IO_14_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x34 +#define ARRIA10_PINMUX_DEDICATED_IO_15_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x38 +#define ARRIA10_PINMUX_DEDICATED_IO_16_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x3c +#define ARRIA10_PINMUX_DEDICATED_IO_17_ADDR ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR + 0x40 + +#define ARRIA10_PINCFG_DEDICATED_IO_BANK_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x00 +#define ARRIA10_PINCFG_DEDICATED_IO_1_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x04 +#define ARRIA10_PINCFG_DEDICATED_IO_2_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x08 +#define ARRIA10_PINCFG_DEDICATED_IO_3_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x0c +#define ARRIA10_PINCFG_DEDICATED_IO_4_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x10 +#define ARRIA10_PINCFG_DEDICATED_IO_5_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x14 +#define ARRIA10_PINCFG_DEDICATED_IO_6_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x18 +#define ARRIA10_PINCFG_DEDICATED_IO_7_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x1c +#define ARRIA10_PINCFG_DEDICATED_IO_8_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x20 +#define ARRIA10_PINCFG_DEDICATED_IO_9_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x24 +#define ARRIA10_PINCFG_DEDICATED_IO_10_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x28 +#define ARRIA10_PINCFG_DEDICATED_IO_11_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x2c +#define ARRIA10_PINCFG_DEDICATED_IO_12_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x30 +#define ARRIA10_PINCFG_DEDICATED_IO_13_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x34 +#define ARRIA10_PINCFG_DEDICATED_IO_14_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x38 +#define 
ARRIA10_PINCFG_DEDICATED_IO_15_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x3c +#define ARRIA10_PINCFG_DEDICATED_IO_16_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x40 +#define ARRIA10_PINCFG_DEDICATED_IO_17_ADDR ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR + 0x44 + +enum arria10_pinmux_io_addr { + arria10_pinmux_shared_io_q1_1, + arria10_pinmux_shared_io_q1_2, + arria10_pinmux_shared_io_q1_3, + arria10_pinmux_shared_io_q1_4, + arria10_pinmux_shared_io_q1_5, + arria10_pinmux_shared_io_q1_6, + arria10_pinmux_shared_io_q1_7, + arria10_pinmux_shared_io_q1_8, + arria10_pinmux_shared_io_q1_9, + arria10_pinmux_shared_io_q1_10, + arria10_pinmux_shared_io_q1_11, + arria10_pinmux_shared_io_q1_12, + arria10_pinmux_shared_io_q2_1, + arria10_pinmux_shared_io_q2_2, + arria10_pinmux_shared_io_q2_3, + arria10_pinmux_shared_io_q2_4, + arria10_pinmux_shared_io_q2_5, + arria10_pinmux_shared_io_q2_6, + arria10_pinmux_shared_io_q2_7, + arria10_pinmux_shared_io_q2_8, + arria10_pinmux_shared_io_q2_9, + arria10_pinmux_shared_io_q2_10, + arria10_pinmux_shared_io_q2_11, + arria10_pinmux_shared_io_q2_12, + arria10_pinmux_shared_io_q3_1, + arria10_pinmux_shared_io_q3_2, + arria10_pinmux_shared_io_q3_3, + arria10_pinmux_shared_io_q3_4, + arria10_pinmux_shared_io_q3_5, + arria10_pinmux_shared_io_q3_6, + arria10_pinmux_shared_io_q3_7, + arria10_pinmux_shared_io_q3_8, + arria10_pinmux_shared_io_q3_9, + arria10_pinmux_shared_io_q3_10, + arria10_pinmux_shared_io_q3_11, + arria10_pinmux_shared_io_q3_12, + arria10_pinmux_shared_io_q4_1, + arria10_pinmux_shared_io_q4_2, + arria10_pinmux_shared_io_q4_3, + arria10_pinmux_shared_io_q4_4, + arria10_pinmux_shared_io_q4_5, + arria10_pinmux_shared_io_q4_6, + arria10_pinmux_shared_io_q4_7, + arria10_pinmux_shared_io_q4_8, + arria10_pinmux_shared_io_q4_9, + arria10_pinmux_shared_io_q4_10, + arria10_pinmux_shared_io_q4_11, + arria10_pinmux_shared_io_q4_12, + arria10_pinmux_dedicated_io_1, + arria10_pinmux_dedicated_io_2, + arria10_pinmux_dedicated_io_3, + arria10_pinmux_dedicated_io_4, + arria10_pinmux_dedicated_io_5, + arria10_pinmux_dedicated_io_6, + arria10_pinmux_dedicated_io_7, + arria10_pinmux_dedicated_io_8, + arria10_pinmux_dedicated_io_9, + arria10_pinmux_dedicated_io_10, + arria10_pinmux_dedicated_io_11, + arria10_pinmux_dedicated_io_12, + arria10_pinmux_dedicated_io_13, + arria10_pinmux_dedicated_io_14, + arria10_pinmux_dedicated_io_15, + arria10_pinmux_dedicated_io_16, + arria10_pinmux_dedicated_io_17, + arria10_pincfg_dedicated_io_bank, + arria10_pincfg_dedicated_io_1, + arria10_pincfg_dedicated_io_2, + arria10_pincfg_dedicated_io_3, + arria10_pincfg_dedicated_io_4, + arria10_pincfg_dedicated_io_5, + arria10_pincfg_dedicated_io_6, + arria10_pincfg_dedicated_io_7, + arria10_pincfg_dedicated_io_8, + arria10_pincfg_dedicated_io_9, + arria10_pincfg_dedicated_io_10, + arria10_pincfg_dedicated_io_11, + arria10_pincfg_dedicated_io_12, + arria10_pincfg_dedicated_io_13, + arria10_pincfg_dedicated_io_14, + arria10_pincfg_dedicated_io_15, + arria10_pincfg_dedicated_io_16, + arria10_pincfg_dedicated_io_17, + arria10_pinmux_rgmii0_usefpga, + arria10_pinmux_rgmii1_usefpga, + arria10_pinmux_rgmii2_usefpga, + arria10_pinmux_i2c0_usefpga, + arria10_pinmux_i2c1_usefpga, + arria10_pinmux_i2cemac0_usefpga, + arria10_pinmux_i2cemac1_usefpga, + arria10_pinmux_i2cemac2_usefpga, + arria10_pinmux_nand_usefpga, + arria10_pinmux_qspi_usefpga, + arria10_pinmux_sdmmc_usefpga, + arria10_pinmux_spim0_usefpga, + arria10_pinmux_spim1_usefpga, + arria10_pinmux_spis0_usefpga, + 
arria10_pinmux_spis1_usefpga, + arria10_pinmux_uart0_usefpga, + arria10_pinmux_uart1_usefpga, + arria10_pinmux_max +}; +#endif diff --git a/arch/arm/mach-socfpga/include/mach/arria10-regs.h b/arch/arm/mach-socfpga/include/mach/arria10-regs.h new file mode 100644 index 0000000000..5569574e15 --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/arria10-regs.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2014 Altera Corporation + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#ifndef _ARRIA10_HARDWARE_H_ +#define _ARRIA10_HARDWARE_H_ + +#define ARRIA10_EMAC0_ADDR (0xff800000) +#define ARRIA10_EMAC1_ADDR (0xff802000) +#define ARRIA10_EMAC2_ADDR (0xff804000) +#define ARRIA10_SDMMC_ADDR (0xff808000) +#define ARRIA10_QSPIREGS_ADDR (0xff809000) +#define ARRIA10_ECC_OCRAM_ADDR (0xff8c3000) +#define ARRIA10_QSPIDATA_ADDR (0xffa00000) +#define ARRIA10_UART0_ADDR (0xffc02000) +#define ARRIA10_UART1_ADDR (0xffc02100) +#define ARRIA10_I2C0_ADDR (0xffc02200) +#define ARRIA10_I2C1_ADDR (0xffc02300) +#define ARRIA10_GPIO0_ADDR (0xffc02900) +#define ARRIA10_GPIO1_ADDR (0xffc02a00) +#define ARRIA10_GPIO2_ADDR (0xffc02b00) +#define ARRIA10_HMC_MMR_IO48_ADDR (0xffcfa000) +#define ARRIA10_SDR_ADDR (0xffcfb000) +#define ARRIA10_FPGAMGRDATA_ADDR (0xffcfe400) +#define ARRIA10_OSC1TIMER0_ADDR (0xffd00000) +#define ARRIA10_L4WD0_ADDR (0xffd00200) +#define ARRIA10_FPGAMGRREGS_ADDR (0xffd03000) +#define ARRIA10_CLKMGR_ADDR (0xffd04000) +#define ARRIA10_RSTMGR_ADDR (0xffd05000) +#define ARRIA10_SYSMGR_ADDR (0xffd06000) +#define ARRIA10_PINMUX_SHARED_3V_IO_GRP_ADDR (0xffd07000) +#define ARRIA10_PINMUX_DEDICATED_IO_GRP_ADDR (0xffd07200) +#define ARRIA10_PINMUX_CFG_DEDICATED_IO_GRP_ADDR (0xffd07300) +#define ARRIA10_PINMUX_FPGA_INTERFACE_ADDR (0xffd07400) +#define ARRIA10_NOC_L4_PRIV_L4_PRIV_FILTER_ADDR (0xffd11000) +#define ARRIA10_SDR_SCHEDULER_ADDR (0xffd12400) +#define ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR (0xffd13000) +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR (0xffd13200) +#define ARRIA10_SDR_FW_MPU_FPGA_ADDR (0xffd13300) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR (0xffd13400) +#define ARRIA10_NOC_FW_SOC2FPGA_SOC2FPGA_SCR_ADDR (0xffd13500) +#define ARRIA10_DMANONSECURE_ADDR (0xffda0000) +#define ARRIA10_DMASECURE_ADDR (0xffda1000) +#define ARRIA10_MPUSCU_ADDR (0xffffc000) +#define ARRIA10_MPUL2_ADDR (0xfffff000) + +/* L2 cache controller */ +#define ARRIA10_MPUL2_ADRFLTR_START (ARRIA10_MPUL2_ADDR + 0xC00) + +/* NOC L4 Priv */ +#define ARRIA10_NOC_L4_PRIV_L4_PRIV_L4_PRIV (ARRIA10_NOC_L4_PRIV_L4_PRIV_FILTER_ADDR + 0x00) +#define ARRIA10_NOC_L4_PRIV_L4_PRIV_L4_PRIV_SET (ARRIA10_NOC_L4_PRIV_L4_PRIV_FILTER_ADDR + 0x04) +#define ARRIA10_NOC_L4_PRIV_L4_PRIV_L4_PRIV_CLR (ARRIA10_NOC_L4_PRIV_L4_PRIV_FILTER_ADDR + 0x08) + +/* NOC L4 Permissions */ +#define ARRIA10_NOC_FW_L4_PER_SCR_NAND_REG (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x00) +#define ARRIA10_NOC_FW_L4_PER_SCR_NAND_DATA (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x04) +#define ARRIA10_NOC_FW_L4_PER_SCR_QSPI_DATA (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x08) +#define ARRIA10_NOC_FW_L4_PER_SCR_USB0_REG (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x0c) +#define ARRIA10_NOC_FW_L4_PER_SCR_USB1_REG (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x10) +#define ARRIA10_NOC_FW_L4_PER_SCR_DMA_NONSECURE (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x14) +#define ARRIA10_NOC_FW_L4_PER_SCR_DMA_SECURE (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x18) +#define ARRIA10_NOC_FW_L4_PER_SCR_SPIM0 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x1c) +#define ARRIA10_NOC_FW_L4_PER_SCR_SPIM1 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x20) 
+#define ARRIA10_NOC_FW_L4_PER_SCR_SPIS0 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x24) +#define ARRIA10_NOC_FW_L4_PER_SCR_SPIS1 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x28) +#define ARRIA10_NOC_FW_L4_PER_SCR_EMAC0 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x2c) +#define ARRIA10_NOC_FW_L4_PER_SCR_EMAC1 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x30) +#define ARRIA10_NOC_FW_L4_PER_SCR_EMAC2 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x34) +#define ARRIA10_NOC_FW_L4_PER_SCR_EMAC3 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x38) +#define ARRIA10_NOC_FW_L4_PER_SCR_QSPI (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x3c) +#define ARRIA10_NOC_FW_L4_PER_SCR_SDMMC (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x40) +#define ARRIA10_NOC_FW_L4_PER_SCR_GPIO0 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x44) +#define ARRIA10_NOC_FW_L4_PER_SCR_GPIO1 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x48) +#define ARRIA10_NOC_FW_L4_PER_SCR_GPIO2 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x4c) +#define ARRIA10_NOC_FW_L4_PER_SCR_I2C0 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x50) +#define ARRIA10_NOC_FW_L4_PER_SCR_I2C1 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x54) +#define ARRIA10_NOC_FW_L4_PER_SCR_I2C2 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x58) +#define ARRIA10_NOC_FW_L4_PER_SCR_I2C3 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x5c) +#define ARRIA10_NOC_FW_L4_PER_SCR_I2C4 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x60) +#define ARRIA10_NOC_FW_L4_PER_SCR_SPTIMER0 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x64) +#define ARRIA10_NOC_FW_L4_PER_SCR_SPTIMER1 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x68) +#define ARRIA10_NOC_FW_L4_PER_SCR_UART0 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x6c) +#define ARRIA10_NOC_FW_L4_PER_SCR_UART1 (ARRIA10_NOC_L4_PER_L4_PER_SCR_ADDR + 0x70) + +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_EN (ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR + 0x00) +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_EN_SET (ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR + 0x04) +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_EN_CLR (ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR + 0x08) +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_REGION0 (ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR + 0x0c) +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_REGION1 (ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR + 0x10) +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_REGION2 (ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR + 0x14) +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_REGION3 (ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR + 0x18) +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_REGION4 (ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR + 0x1c) +#define ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_REGION5 (ARRIA10_NOC_FW_OCRAM_OCRAM_SCR_ADDR + 0x20) + +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_EN (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x00) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_EN_SET (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x04) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_EN_CLR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x08) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_REGION0 (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x0c) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_REGION1 (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x10) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_REGION2 (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x14) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_REGION3 (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x18) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_REGION4 (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x1c) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_REGION5 (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x20) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_REGION6 (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x24) +#define ARRIA10_NOC_FW_DDR_L3_DDR_SCR_REGION7 (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x28) +#define 
ARRIA10_NOC_FW_DDR_L3_DDR_SCR_GLOBAL (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x2c) + +#define ARRIA10_NOC_FW_SOC2FPGA_SOC2FPGA_SCR_LWSOC2FPGA (ARRIA10_NOC_FW_SOC2FPGA_SOC2FPGA_SCR_ADDR + 0x00) +#define ARRIA10_NOC_FW_SOC2FPGA_SOC2FPGA_SCR_SOC2FPGA (ARRIA10_NOC_FW_SOC2FPGA_SOC2FPGA_SCR_ADDR + 0x04) + +#endif diff --git a/arch/arm/mach-socfpga/include/mach/arria10-reset-manager.h b/arch/arm/mach-socfpga/include/mach/arria10-reset-manager.h new file mode 100644 index 0000000000..ebd2043426 --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/arria10-reset-manager.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2014-2016 Altera Corporation + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _ARRIA10_RESET_MANAGER_H_ +#define _ARRIA10_RESET_MANAGER_H_ + +#define ARRIA10_RSTMGR_STATUS 0x0 +#define ARRIA10_RSTMGR_RAMSTAT 0x4 +#define ARRIA10_RSTMGR_MISCSTAT 0x8 +#define ARRIA10_RSTMGR_CTRL 0xc +#define ARRIA10_RSTMGR_HDSKEN 0x10 +#define ARRIA10_RSTMGR_HDSKREQ 0x14 +#define ARRIA10_RSTMGR_HDSKACK 0x18 +#define ARRIA10_RSTMGR_COUNTS 0x1c +#define ARRIA10_RSTMGR_MPUMODRST 0x20 +#define ARRIA10_RSTMGR_PER0MODRST 0x24 +#define ARRIA10_RSTMGR_PER1MODRST 0x28 +#define ARRIA10_RSTMGR_BRGMODRST 0x2c +#define ARRIA10_RSTMGR_SYSMODRST 0x30 +#define ARRIA10_RSTMGR_COLDMODRST 0x34 +#define ARRIA10_RSTMGR_NRSTMODRST 0x38 +#define ARRIA10_RSTMGR_DBGMODRST 0x3c +#define ARRIA10_RSTMGR_MPUWARMMASK 0x40 +#define ARRIA10_RSTMGR_PER0WARMMASK 0x44 +#define ARRIA10_RSTMGR_PER1WARMMASK 0x48 +#define ARRIA10_RSTMGR_BRGWARMMASK 0x4c +#define ARRIA10_RSTMGR_SYSWARMMASK 0x50 +#define ARRIA10_RSTMGR_NRSTWARMMASK 0x54 +#define ARRIA10_RSTMGR_L3WARMMASK 0x58 +#define ARRIA10_RSTMGR_TSTSTA 0x5c +#define ARRIA10_RSTMGR_TSTSCRATCH 0x60 +#define ARRIA10_RSTMGR_HDSKTIMEOUT 0x64 +#define ARRIA10_RSTMGR_HMCINTR 0x68 +#define ARRIA10_RSTMGR_HMCINTREN 0x6c +#define ARRIA10_RSTMGR_HMCINTRENS 0x70 +#define ARRIA10_RSTMGR_HMCINTRENR 0x74 +#define ARRIA10_RSTMGR_HMCGPOUT 0x78 +#define ARRIA10_RSTMGR_HMCGPIN 0x7c + +#define ARRIA10_RSTMGR_CTL_SWWARMRSTREQ BIT(1) +#define ARRIA10_RSTMGR_PER0MODRST_EMAC0 BIT(0) +#define ARRIA10_RSTMGR_PER0MODRST_EMAC1 BIT(1) +#define ARRIA10_RSTMGR_PER0MODRST_EMAC2 BIT(2) +#define ARRIA10_RSTMGR_PER0MODRST_USB0 BIT(3) +#define ARRIA10_RSTMGR_PER0MODRST_USB1 BIT(4) +#define ARRIA10_RSTMGR_PER0MODRST_NAND BIT(5) +#define ARRIA10_RSTMGR_PER0MODRST_QSPI BIT(6) +#define ARRIA10_RSTMGR_PER0MODRST_SDMMC BIT(7) +#define ARRIA10_RSTMGR_PER0MODRST_EMAC0OCP BIT(8) +#define ARRIA10_RSTMGR_PER0MODRST_EMAC1OCP BIT(9) +#define ARRIA10_RSTMGR_PER0MODRST_EMAC2OCP BIT(10) +#define ARRIA10_RSTMGR_PER0MODRST_USB0OCP BIT(11) +#define ARRIA10_RSTMGR_PER0MODRST_USB1OCP BIT(12) +#define ARRIA10_RSTMGR_PER0MODRST_NANDOCP BIT(13) +#define ARRIA10_RSTMGR_PER0MODRST_QSPIOCP BIT(14) +#define ARRIA10_RSTMGR_PER0MODRST_SDMMCOCP BIT(15) +#define ARRIA10_RSTMGR_PER0MODRST_DMA BIT(16) +#define ARRIA10_RSTMGR_PER0MODRST_SPIM0 BIT(17) +#define ARRIA10_RSTMGR_PER0MODRST_SPIM1 BIT(18) +#define ARRIA10_RSTMGR_PER0MODRST_SPIS0 BIT(19) +#define ARRIA10_RSTMGR_PER0MODRST_SPIS1 BIT(20) +#define ARRIA10_RSTMGR_PER0MODRST_DMAOCP BIT(21) +#define ARRIA10_RSTMGR_PER0MODRST_EMACPTP BIT(22) +#define ARRIA10_RSTMGR_PER0MODRST_DMAIF0 BIT(24) +#define ARRIA10_RSTMGR_PER0MODRST_DMAIF1 BIT(25) +#define ARRIA10_RSTMGR_PER0MODRST_DMAIF2 BIT(26) +#define ARRIA10_RSTMGR_PER0MODRST_DMAIF3 BIT(27) +#define ARRIA10_RSTMGR_PER0MODRST_DMAIF4 BIT(28) +#define ARRIA10_RSTMGR_PER0MODRST_DMAIF5 BIT(29) +#define ARRIA10_RSTMGR_PER0MODRST_DMAIF6 BIT(30) +#define 
ARRIA10_RSTMGR_PER0MODRST_DMAIF7 BIT(31) + +#define ARRIA10_RSTMGR_PER1MODRST_WATCHDOG0 BIT(0) +#define ARRIA10_RSTMGR_PER1MODRST_WATCHDOG1 BIT(1) +#define ARRIA10_RSTMGR_PER1MODRST_L4SYSTIMER0 BIT(2) +#define ARRIA10_RSTMGR_PER1MODRST_L4SYSTIMER1 BIT(3) +#define ARRIA10_RSTMGR_PER1MODRST_SPTIMER0 BIT(4) +#define ARRIA10_RSTMGR_PER1MODRST_SPTIMER1 BIT(5) +#define ARRIA10_RSTMGR_PER1MODRST_I2C0 BIT(8) +#define ARRIA10_RSTMGR_PER1MODRST_I2C1 BIT(9) +#define ARRIA10_RSTMGR_PER1MODRST_I2C2 BIT(10) +#define ARRIA10_RSTMGR_PER1MODRST_I2C3 BIT(11) +#define ARRIA10_RSTMGR_PER1MODRST_I2C4 BIT(12) +#define ARRIA10_RSTMGR_PER1MODRST_UART0 BIT(16) +#define ARRIA10_RSTMGR_PER1MODRST_UART1 BIT(17) +#define ARRIA10_RSTMGR_PER1MODRST_GPIO0 BIT(24) +#define ARRIA10_RSTMGR_PER1MODRST_GPIO1 BIT(25) +#define ARRIA10_RSTMGR_PER1MODRST_GPIO2 BIT(26) + +#define ARRIA10_RSTMGR_BRGMODRST_HPS2FPGA BIT(0) +#define ARRIA10_RSTMGR_BRGMODRST_LWHPS2FPGA BIT(1) +#define ARRIA10_RSTMGR_BRGMODRST_FPGA2HPS BIT(2) +#define ARRIA10_RSTMGR_BRGMODRST_F2SSDRAM0 BIT(3) +#define ARRIA10_RSTMGR_BRGMODRST_F2SSDRAM1 BIT(4) +#define ARRIA10_RSTMGR_BRGMODRST_F2SSDRAM2 BIT(5) +#define ARRIA10_RSTMGR_BRGMODRST_DDRSCH BIT(6) + +#define ARRIA10_RSTMGR_OCP_MASK (ARRIA10_RSTMGR_PER0MODRST_EMAC0OCP | \ + ARRIA10_RSTMGR_PER0MODRST_EMAC1OCP | \ + ARRIA10_RSTMGR_PER0MODRST_EMAC2OCP | \ + ARRIA10_RSTMGR_PER0MODRST_NANDOCP | \ + ARRIA10_RSTMGR_PER0MODRST_QSPIOCP | \ + ARRIA10_RSTMGR_PER0MODRST_SDMMCOCP) + +void arria10_reset_peripherals(void); +void arria10_reset_deassert_dedicated_peripherals(void); +void arria10_reset_deassert_shared_peripherals(void); +void arria10_reset_deassert_fpga_peripherals(void); + +#endif + diff --git a/arch/arm/mach-socfpga/include/mach/arria10-sdram.h b/arch/arm/mach-socfpga/include/mach/arria10-sdram.h new file mode 100644 index 0000000000..07e4dd0130 --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/arria10-sdram.h @@ -0,0 +1,353 @@ +/* + * Copyright (C) 2014 Altera Corporation + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#include + +#ifndef _ARRIA10_SDRAM_H_ +#define _ARRIA10_SDRAM_H_ + +#define ARRIA10_ECC_HMC_OCP_IP_REV_ID (ARRIA10_SDR_ADDR + 0x00) +#define ARRIA10_ECC_HMC_OCP_DDRIOCTRL (ARRIA10_SDR_ADDR + 0x08) +#define ARRIA10_ECC_HMC_OCP_DDRCALSTAT (ARRIA10_SDR_ADDR + 0x0c) +#define ARRIA10_ECC_HMC_OCP_MPR_OBEAT1 (ARRIA10_SDR_ADDR + 0x10) +#define ARRIA10_ECC_HMC_OCP_MPR_1BEAT1 (ARRIA10_SDR_ADDR + 0x14) +#define ARRIA10_ECC_HMC_OCP_MPR_2BEAT1 (ARRIA10_SDR_ADDR + 0x18) +#define ARRIA10_ECC_HMC_OCP_MPR_3BEAT1 (ARRIA10_SDR_ADDR + 0x1c) +#define ARRIA10_ECC_HMC_OCP_MPR_4BEAT1 (ARRIA10_SDR_ADDR + 0x20) +#define ARRIA10_ECC_HMC_OCP_MPR_5BEAT1 (ARRIA10_SDR_ADDR + 0x24) +#define ARRIA10_ECC_HMC_OCP_MPR_6BEAT1 (ARRIA10_SDR_ADDR + 0x28) +#define ARRIA10_ECC_HMC_OCP_MPR_7BEAT1 (ARRIA10_SDR_ADDR + 0x2c) +#define ARRIA10_ECC_HMC_OCP_MPR_8BEAT1 (ARRIA10_SDR_ADDR + 0x30) +#define ARRIA10_ECC_HMC_OCP_MPR_OBEAT2 (ARRIA10_SDR_ADDR + 0x34) +#define ARRIA10_ECC_HMC_OCP_MPR_1BEAT2 (ARRIA10_SDR_ADDR + 0x38) +#define ARRIA10_ECC_HMC_OCP_MPR_2BEAT2 (ARRIA10_SDR_ADDR + 0x3c) +#define ARRIA10_ECC_HMC_OCP_MPR_3BEAT2 (ARRIA10_SDR_ADDR + 0x40) +#define ARRIA10_ECC_HMC_OCP_MPR_4BEAT2 (ARRIA10_SDR_ADDR + 0x44) +#define ARRIA10_ECC_HMC_OCP_MPR_5BEAT2 (ARRIA10_SDR_ADDR + 0x48) +#define ARRIA10_ECC_HMC_OCP_MPR_6BEAT2 (ARRIA10_SDR_ADDR + 0x4c) +#define ARRIA10_ECC_HMC_OCP_MPR_7BEAT2 (ARRIA10_SDR_ADDR + 0x50) +#define ARRIA10_ECC_HMC_OCP_MPR_8BEAT2 (ARRIA10_SDR_ADDR + 0x54) +#define ARRIA10_ECC_HMC_OCP_MPR_AUTO_PRECHARGE 
(ARRIA10_SDR_ADDR + 0x60) +#define ARRIA10_ECC_HMC_OCP_MPR_ECCCTRL1 (ARRIA10_SDR_ADDR + 0x100) +#define ARRIA10_ECC_HMC_OCP_MPR_ECCCTRL2 (ARRIA10_SDR_ADDR + 0x104) +#define ARRIA10_ECC_HMC_OCP_MPR_ERRINTEN (ARRIA10_SDR_ADDR + 0x110) +#define ARRIA10_ECC_HMC_OCP_MPR_ERRINTENS (ARRIA10_SDR_ADDR + 0x114) +#define ARRIA10_ECC_HMC_OCP_MPR_ERRINTENR (ARRIA10_SDR_ADDR + 0x118) +#define ARRIA10_ECC_HMC_OCP_MPR_INTMODE (ARRIA10_SDR_ADDR + 0x11c) +#define ARRIA10_ECC_HMC_OCP_MPR_INTSTAT (ARRIA10_SDR_ADDR + 0x120) +#define ARRIA10_ECC_HMC_OCP_MPR_DIAGINTTEST (ARRIA10_SDR_ADDR + 0x124) +#define ARRIA10_ECC_HMC_OCP_MPR_MODSTAT (ARRIA10_SDR_ADDR + 0x128) +#define ARRIA10_ECC_HMC_OCP_MPR_DERRADDRA (ARRIA10_SDR_ADDR + 0x12c) +#define ARRIA10_ECC_HMC_OCP_MPR_SERRADDRA (ARRIA10_SDR_ADDR + 0x130) +#define ARRIA10_ECC_HMC_OCP_MPR_AUTOWB_CORRADDR (ARRIA10_SDR_ADDR + 0x138) +#define ARRIA10_ECC_HMC_OCP_MPR_SERRCNTREG (ARRIA10_SDR_ADDR + 0x13c) +#define ARRIA10_ECC_HMC_OCP_MPR_AUTOWB_DROP_CNTREG (ARRIA10_SDR_ADDR + 0x140) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_REG2WRECCDATABUS (ARRIA10_SDR_ADDR + 0x144) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_RDECCDATA2REGBUS (ARRIA10_SDR_ADDR + 0x148) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_REG2RDECCDATABUS (ARRIA10_SDR_ADDR + 0x14c) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_DIAGON (ARRIA10_SDR_ADDR + 0x150) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_DECSTAT (ARRIA10_SDR_ADDR + 0x154) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_ERRGENADDR_0 (ARRIA10_SDR_ADDR + 0x160) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_ERRGENADDR_1 (ARRIA10_SDR_ADDR + 0x164) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_ERRGENADDR_2 (ARRIA10_SDR_ADDR + 0x168) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_ERRGENADDR_3 (ARRIA10_SDR_ADDR + 0x16c) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_REG2RDDATABUS_BEAT0 (ARRIA10_SDR_ADDR + 0x170) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_REG2RDDATABUS_BEAT1 (ARRIA10_SDR_ADDR + 0x174) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_REG2RDDATABUS_BEAT2 (ARRIA10_SDR_ADDR + 0x178) +#define ARRIA10_ECC_HMC_OCP_MPR_ECC_REG2RDDATABUS_BEAT3 (ARRIA10_SDR_ADDR + 0x17c) + +#define ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_ID_COREID (ARRIA10_SDR_SCHEDULER_ADDR + 0x00) +#define ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_ID_REVISIONID (ARRIA10_SDR_SCHEDULER_ADDR + 0x04) +#define ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_DDRCONF (ARRIA10_SDR_SCHEDULER_ADDR + 0x08) +#define ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_DDRTIMING (ARRIA10_SDR_SCHEDULER_ADDR + 0x0c) +#define ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_DDRMODE (ARRIA10_SDR_SCHEDULER_ADDR + 0x10) +#define ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_READLATENCY (ARRIA10_SDR_SCHEDULER_ADDR + 0x14) +#define ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_ACTIVATE (ARRIA10_SDR_SCHEDULER_ADDR + 0x38) +#define ARRIA10_NOC_DDR_T_MAIN_SCHEDULER_DEVTODEV (ARRIA10_SDR_SCHEDULER_ADDR + 0x3c) + +#define ARRIA10_IO48_HMC_MMR_DBGCFG0 (ARRIA10_HMC_MMR_IO48_ADDR + 0x00) +#define ARRIA10_IO48_HMC_MMR_DBGCFG1 (ARRIA10_HMC_MMR_IO48_ADDR + 0x04) +#define ARRIA10_IO48_HMC_MMR_DBGCFG2 (ARRIA10_HMC_MMR_IO48_ADDR + 0x08) +#define ARRIA10_IO48_HMC_MMR_DBGCFG3 (ARRIA10_HMC_MMR_IO48_ADDR + 0x0c) +#define ARRIA10_IO48_HMC_MMR_DBGCFG4 (ARRIA10_HMC_MMR_IO48_ADDR + 0x10) +#define ARRIA10_IO48_HMC_MMR_DBGCFG5 (ARRIA10_HMC_MMR_IO48_ADDR + 0x14) +#define ARRIA10_IO48_HMC_MMR_DBGCFG6 (ARRIA10_HMC_MMR_IO48_ADDR + 0x18) +#define ARRIA10_IO48_HMC_MMR_RESERVE0 (ARRIA10_HMC_MMR_IO48_ADDR + 0x1c) +#define ARRIA10_IO48_HMC_MMR_RESERVE1 (ARRIA10_HMC_MMR_IO48_ADDR + 0x20) +#define ARRIA10_IO48_HMC_MMR_RESERVE2 (ARRIA10_HMC_MMR_IO48_ADDR + 0x24) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG0 (ARRIA10_HMC_MMR_IO48_ADDR + 
0x28) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG1 (ARRIA10_HMC_MMR_IO48_ADDR + 0x2c) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG2 (ARRIA10_HMC_MMR_IO48_ADDR + 0x30) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG3 (ARRIA10_HMC_MMR_IO48_ADDR + 0x34) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG4 (ARRIA10_HMC_MMR_IO48_ADDR + 0x38) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG5 (ARRIA10_HMC_MMR_IO48_ADDR + 0x3c) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG6 (ARRIA10_HMC_MMR_IO48_ADDR + 0x40) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG7 (ARRIA10_HMC_MMR_IO48_ADDR + 0x44) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG8 (ARRIA10_HMC_MMR_IO48_ADDR + 0x48) +#define ARRIA10_IO48_HMC_MMR_CTRLCFG9 (ARRIA10_HMC_MMR_IO48_ADDR + 0x4c) +#define ARRIA10_IO48_HMC_MMR_DRAMTIMING0 (ARRIA10_HMC_MMR_IO48_ADDR + 0x50) +#define ARRIA10_IO48_HMC_MMR_DRAMODT0 (ARRIA10_HMC_MMR_IO48_ADDR + 0x54) +#define ARRIA10_IO48_HMC_MMR_DRAMODT1 (ARRIA10_HMC_MMR_IO48_ADDR + 0x58) +#define ARRIA10_IO48_HMC_MMR_SBCFG0 (ARRIA10_HMC_MMR_IO48_ADDR + 0x5c) +#define ARRIA10_IO48_HMC_MMR_SBCFG1 (ARRIA10_HMC_MMR_IO48_ADDR + 0x60) +#define ARRIA10_IO48_HMC_MMR_SBCFG2 (ARRIA10_HMC_MMR_IO48_ADDR + 0x64) +#define ARRIA10_IO48_HMC_MMR_SBCFG3 (ARRIA10_HMC_MMR_IO48_ADDR + 0x68) +#define ARRIA10_IO48_HMC_MMR_SBCFG4 (ARRIA10_HMC_MMR_IO48_ADDR + 0x6c) +#define ARRIA10_IO48_HMC_MMR_SBCFG5 (ARRIA10_HMC_MMR_IO48_ADDR + 0x70) +#define ARRIA10_IO48_HMC_MMR_SBCFG6 (ARRIA10_HMC_MMR_IO48_ADDR + 0x74) +#define ARRIA10_IO48_HMC_MMR_SBCFG7 (ARRIA10_HMC_MMR_IO48_ADDR + 0x78) +#define ARRIA10_IO48_HMC_MMR_CALTIMING0 (ARRIA10_HMC_MMR_IO48_ADDR + 0x7c) +#define ARRIA10_IO48_HMC_MMR_CALTIMING1 (ARRIA10_HMC_MMR_IO48_ADDR + 0x80) +#define ARRIA10_IO48_HMC_MMR_CALTIMING2 (ARRIA10_HMC_MMR_IO48_ADDR + 0x84) +#define ARRIA10_IO48_HMC_MMR_CALTIMING3 (ARRIA10_HMC_MMR_IO48_ADDR + 0x88) +#define ARRIA10_IO48_HMC_MMR_CALTIMING4 (ARRIA10_HMC_MMR_IO48_ADDR + 0x8c) +#define ARRIA10_IO48_HMC_MMR_CALTIMING5 (ARRIA10_HMC_MMR_IO48_ADDR + 0x90) +#define ARRIA10_IO48_HMC_MMR_CALTIMING6 (ARRIA10_HMC_MMR_IO48_ADDR + 0x94) +#define ARRIA10_IO48_HMC_MMR_CALTIMING7 (ARRIA10_HMC_MMR_IO48_ADDR + 0x98) +#define ARRIA10_IO48_HMC_MMR_CALTIMING8 (ARRIA10_HMC_MMR_IO48_ADDR + 0x9c) +#define ARRIA10_IO48_HMC_MMR_CALTIMING9 (ARRIA10_HMC_MMR_IO48_ADDR + 0xa0) +#define ARRIA10_IO48_HMC_MMR_CALTIMING10 (ARRIA10_HMC_MMR_IO48_ADDR + 0xa4) +#define ARRIA10_IO48_HMC_MMR_DRAMADDRW (ARRIA10_HMC_MMR_IO48_ADDR + 0xa8) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND0 (ARRIA10_HMC_MMR_IO48_ADDR + 0xac) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND1 (ARRIA10_HMC_MMR_IO48_ADDR + 0xb0) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND2 (ARRIA10_HMC_MMR_IO48_ADDR + 0xb4) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND3 (ARRIA10_HMC_MMR_IO48_ADDR + 0xb8) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND4 (ARRIA10_HMC_MMR_IO48_ADDR + 0xbc) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND5 (ARRIA10_HMC_MMR_IO48_ADDR + 0xc0) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND6 (ARRIA10_HMC_MMR_IO48_ADDR + 0xc4) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND7 (ARRIA10_HMC_MMR_IO48_ADDR + 0xc8) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND8 (ARRIA10_HMC_MMR_IO48_ADDR + 0xcc) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND9 (ARRIA10_HMC_MMR_IO48_ADDR + 0xd0) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND10 (ARRIA10_HMC_MMR_IO48_ADDR + 0xd4) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND11 (ARRIA10_HMC_MMR_IO48_ADDR + 0xd8) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND12 (ARRIA10_HMC_MMR_IO48_ADDR + 0xdc) +#define ARRIA10_IO48_HMC_MMR_SIDEBANB13 (ARRIA10_HMC_MMR_IO48_ADDR + 0xe0) +#define ARRIA10_IO48_HMC_MMR_SIDEBAND14 (ARRIA10_HMC_MMR_IO48_ADDR + 0xe4) +#define 
ARRIA10_IO48_HMC_MMR_SIDEBAND15 (ARRIA10_HMC_MMR_IO48_ADDR + 0xe8) +#define ARRIA10_IO48_HMC_MMR_DRAMSTS (ARRIA10_HMC_MMR_IO48_ADDR + 0xec) +#define ARRIA10_IO48_HMC_MMR_DBGDONE (ARRIA10_HMC_MMR_IO48_ADDR + 0xf0) +#define ARRIA10_IO48_HMC_MMR_DBGSIGNALS (ARRIA10_HMC_MMR_IO48_ADDR + 0xf4) +#define ARRIA10_IO48_HMC_MMR_DBGRESET (ARRIA10_HMC_MMR_IO48_ADDR + 0xf8) +#define ARRIA10_IO48_HMC_MMR_DBGMATCH (ARRIA10_HMC_MMR_IO48_ADDR + 0xfc) +#define ARRIA10_IO48_HMC_MMR_COUNTER0MASK (ARRIA10_HMC_MMR_IO48_ADDR + 0x100) +#define ARRIA10_IO48_HMC_MMR_COUNTER1MASK (ARRIA10_HMC_MMR_IO48_ADDR + 0x104) +#define ARRIA10_IO48_HMC_MMR_COUNTER0MATCH (ARRIA10_HMC_MMR_IO48_ADDR + 0x108) +#define ARRIA10_IO48_HMC_MMR_COUNTER1MATCH (ARRIA10_HMC_MMR_IO48_ADDR + 0x10c) +#define ARRIA10_IO48_HMC_MMR_NIOSRESERVE0 (ARRIA10_HMC_MMR_IO48_ADDR + 0x110) +#define ARRIA10_IO48_HMC_MMR_NIOSRESERVE1 (ARRIA10_HMC_MMR_IO48_ADDR + 0x114) +#define ARRIA10_IO48_HMC_MMR_NIOSRESERVE2 (ARRIA10_HMC_MMR_IO48_ADDR + 0x118) + +union dramaddrw_reg { + struct { + u32 cfg_col_addr_width:5; + u32 cfg_row_addr_width:5; + u32 cfg_bank_addr_width:4; + u32 cfg_bank_group_addr_width:2; + u32 cfg_cs_addr_width:3; + u32 reserved:13; + }; + u32 word; +}; + +union ctrlcfg0_reg { + struct { + u32 cfg_mem_type:4; + u32 cfg_dimm_type:3; + u32 cfg_ac_pos:2; + u32 cfg_ctrl_burst_len:5; + u32 reserved:18; /* Other fields unused */ + }; + u32 word; +}; + +union ctrlcfg1_reg { + struct { + u32 cfg_dbc3_burst_len:5; + u32 cfg_addr_order:2; + u32 cfg_ctrl_enable_ecc:1; + u32 reserved:24; /* Other fields unused */ + }; + u32 word; +}; + +union caltiming0_reg { + struct { + u32 cfg_act_to_rdwr:6; + u32 cfg_act_to_pch:6; + u32 cfg_act_to_act:6; + u32 cfg_act_to_act_db:6; + u32 reserved:8; /* Other fields unused */ + }; + u32 word; +}; + +union caltiming1_reg { + struct { + u32 cfg_rd_to_rd:6; + u32 cfg_rd_to_rd_dc:6; + u32 cfg_rd_to_rd_db:6; + u32 cfg_rd_to_wr:6; + u32 cfg_rd_to_wr_dc:6; + u32 reserved:2; + }; + u32 word; +}; + +union caltiming2_reg { + struct { + u32 cfg_rd_to_wr_db:6; + u32 cfg_rd_to_pch:6; + u32 cfg_rd_ap_to_valid:6; + u32 cfg_wr_to_wr:6; + u32 cfg_wr_to_wr_dc:6; + u32 reserved:2; + }; + u32 word; +}; + +union caltiming3_reg { + struct { + u32 cfg_wr_to_wr_db:6; + u32 cfg_wr_to_rd:6; + u32 cfg_wr_to_rd_dc:6; + u32 cfg_wr_to_rd_db:6; + u32 cfg_wr_to_pch:6; + u32 reserved:2; + }; + u32 word; +}; + +union caltiming4_reg { + struct { + u32 cfg_wr_ap_to_valid:6; + u32 cfg_pch_to_valid:6; + u32 cfg_pch_all_to_valid:6; + u32 cfg_arf_to_valid:8; + u32 cfg_pdn_to_valid:6; + }; + u32 word; +}; + +union caltiming9_reg { + struct { + u32 cfg_4_act_to_act:8; + u32 reserved:24; + }; + u32 word; +}; + +#define IRQ_ECC_SERR 34 +#define IRQ_ECC_DERR 32 + +#define ARRIA10_ECC_HMC_OCP_DDRIOCTRL_IO_SIZE 0x00000001 + +#define ARRIA10_ECC_HMC_OCP_INTSTAT_SERRPENA 0x00000001 +#define ARRIA10_ECC_HMC_OCP_INTSTAT_DERRPENA 0x00000002 +#define ARRIA10_ECC_HMC_OCP_ERRINTEN_SERRINTEN 0x00000001 +#define ARRIA10_ECC_HMC_OCP_ERRINTEN_DERRINTEN 0x00000002 +#define ARRIA10_ECC_HMC_OCP_INTMOD_INTONCMP 0x00010000 +#define ARRIA10_ECC_HMC_OCP_INTMOD_SERR 0x00000001 +#define ARRIA10_ECC_HMC_OCP_INTMOD_EXT_ADDRPARITY 0x00000100 +#define ARRIA10_ECC_HMC_OCP_ECCCTL_AWB_CNT_RST 0x00010000 +#define ARRIA10_ECC_HMC_OCP_ECCCTL_CNT_RST 0x00000100 +#define ARRIA10_ECC_HMC_OCP_ECCCTL_ECC_EN 0x00000000 +#define ARRIA10_ECC_HMC_OCP_ECCCTL2_RMW_EN 0x00000100 +#define ARRIA10_ECC_HMC_OCP_ECCCTL2_AWB_EN 0x00000001 +#define ARRIA10_ECC_HMC_OCP_ERRINTEN_SERR 0x00000001 +#define 
ARRIA10_ECC_HMC_OCP_ERRINTEN_DERR 0x00000002 +#define ARRIA10_ECC_HMC_OCP_ERRINTEN_HMI 0x00000004 +#define ARRIA10_ECC_HMC_OCP_INTSTAT_SERR 0x00000001 +#define ARRIA10_ECC_HMC_OCP_INTSTAT_DERR 0x00000002 +#define ARRIA10_ECC_HMC_OCP_INTSTAT_HMI 0x00000004 +#define ARRIA10_ECC_HMC_OCP_INTSTAT_ADDRMTCFLG 0x00010000 +#define ARRIA10_ECC_HMC_OCP_INTSTAT_ADDRPARFLG 0x00020000 +#define ARRIA10_ECC_HMC_OCP_INTSTAT_DERRBUSFLG 0x00040000 + +#define ARRIA10_ECC_HMC_OCP_SERRCNTREG_VALUE 8 + +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_ACTTOACT_LSB 22 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_RDTOMISS_LSB 0 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_WRTOMISS_LSB 0 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_BURSTLEN_LSB 2 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_RDTOWR_LSB 6 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_WRTORD_LSB 15 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DDRTIMING_BWRATIO_LSB 0 + +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DDRMOD_AUTOPRECHARGE_LSB 0 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DDRMOD_BWRATIOEXTENDED_LSB 0 + +#define ARRIA10_NOC_MPU_DDR_T_SCHED_ACTIVATE_RRD_LSB 4 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_ACTIVATE_FAW_LSB 13 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_ACTIVATE_FAWBANK_LSB 4 + +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DEVTODEV_BUSRDTORD_LSB 4 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DEVTODEV_BUSRDTOWR_LSB 6 +#define ARRIA10_NOC_MPU_DDR_T_SCHED_DEVTODEV_BUSWRTORD_LSB 6 + +#define ARRIA10_SDR_FW_MPU_FPGA_EN (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x00) +#define ARRIA10_SDR_FW_MPU_FPGA_EN_SET (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x04) +#define ARRIA10_SDR_FW_MPU_FPGA_EN_CLR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x08) +#define ARRIA10_SDR_FW_MPU_FPGA_MPUREGION0ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x10) +#define ARRIA10_SDR_FW_MPU_FPGA_MPUREGION1ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x14) +#define ARRIA10_SDR_FW_MPU_FPGA_MPUREGION2ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x18) +#define ARRIA10_SDR_FW_MPU_FPGA_MPUREGION3ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x1c) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM0REGION0ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x20) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM0REGION1ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x24) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM0REGION2ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x28) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM0REGION3ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x2c) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM1REGION0ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x30) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM1REGION1ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x34) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM1REGION2ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x38) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM1REGION3ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x3c) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM2REGION0ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x40) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM2REGION1ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x44) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM2REGION2ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x48) +#define ARRIA10_SDR_FW_MPU_FPGA_FPGA2SDRAM2REGION3ADDR (ARRIA10_SDR_FW_MPU_FPGA_ADDR + 0x4c) + +#define ARRIA10_NOC_FW_DDR_MPU_MPUREG0EN BIT(0) +#define ARRIA10_NOC_FW_DDR_MPU_MPUREG1EN BIT(1) +#define ARRIA10_NOC_FW_DDR_MPU_MPUREG2EN BIT(2) +#define ARRIA10_NOC_FW_DDR_MPU_MPUREG3EN BIT(3) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR0REG0EN BIT(4) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR0REG1EN BIT(5) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR0REG2EN BIT(6) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR0REG3EN BIT(7) +#define 
ARRIA10_NOC_FW_DDR_MPU_F2SDR1REG0EN BIT(8) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR1REG1EN BIT(9) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR1REG2EN BIT(10) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR1REG3EN BIT(11) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR2REG0EN BIT(12) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR2REG1EN BIT(13) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR2REG2EN BIT(14) +#define ARRIA10_NOC_FW_DDR_MPU_F2SDR2REG3EN BIT(15) + +#define ARRIA10_NOC_FW_DDR_L3_EN (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x00) +#define ARRIA10_NOC_FW_DDR_L3_EN_SET (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x04) +#define ARRIA10_NOC_FW_DDR_L3_EN_CLR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x08) +#define ARRIA10_NOC_FW_DDR_L3_HPSREGION0ADDR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x0c) +#define ARRIA10_NOC_FW_DDR_L3_HPSREGION1ADDR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x10) +#define ARRIA10_NOC_FW_DDR_L3_HPSREGION2ADDR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x14) +#define ARRIA10_NOC_FW_DDR_L3_HPSREGION3ADDR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x18) +#define ARRIA10_NOC_FW_DDR_L3_HPSREGION4ADDR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x1c) +#define ARRIA10_NOC_FW_DDR_L3_HPSREGION5ADDR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x20) +#define ARRIA10_NOC_FW_DDR_L3_HPSREGION6ADDR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x24) +#define ARRIA10_NOC_FW_DDR_L3_HPSREGION7ADDR (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x28) +#define ARRIA10_NOC_FW_DDR_L3_GLOBAL (ARRIA10_NOC_FW_DDR_L3_DDR_SCR_ADDR + 0x2c) + +#define ARRIA10_NOC_FW_DDR_L3_HPSREG0EN BIT(0) +#define ARRIA10_NOC_FW_DDR_L3_HPSREG1EN BIT(1) +#define ARRIA10_NOC_FW_DDR_L3_HPSREG2EN BIT(2) +#define ARRIA10_NOC_FW_DDR_L3_HPSREG3EN BIT(3) +#define ARRIA10_NOC_FW_DDR_L3_HPSREG4EN BIT(4) +#define ARRIA10_NOC_FW_DDR_L3_HPSREG5EN BIT(5) +#define ARRIA10_NOC_FW_DDR_L3_HPSREG6EN BIT(6) +#define ARRIA10_NOC_FW_DDR_L3_HPSREG7EN BIT(7) + +#define ARRIA10_IO48_DRAMTIME_MEM_READ_LATENCY 0x0000003f + +int arria10_ddr_calibration_sequence(void); + +#endif diff --git a/arch/arm/mach-socfpga/include/mach/arria10-system-manager.h b/arch/arm/mach-socfpga/include/mach/arria10-system-manager.h new file mode 100644 index 0000000000..f98cc36c76 --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/arria10-system-manager.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2014-2016 Altera Corporation + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _ARRIA10_SYSTEM_MANAGER_H_ +#define _ARRIA10_SYSTEM_MANAGER_H_ + +#include + +#define ARRIA10_SYSMGR_SILICONID1 (ARRIA10_SYSMGR_ADDR + 0x00) +#define ARRIA10_SYSMGR_SILICONID2 (ARRIA10_SYSMGR_ADDR + 0x04) +#define ARRIA10_SYSMGR_WDDBG (ARRIA10_SYSMGR_ADDR + 0x08) +#define ARRIA10_SYSMGR_BOOTINFO (ARRIA10_SYSMGR_ADDR + 0x0c) +#define ARRIA10_SYSMGR_MPU_CTRL_L2_ECC (ARRIA10_SYSMGR_ADDR + 0x10) +#define ARRIA10_SYSMGR_DMA (ARRIA10_SYSMGR_ADDR + 0x20) +#define ARRIA10_SYSMGR_DMA_PERIPH (ARRIA10_SYSMGR_ADDR + 0x24) +#define ARRIA10_SYSMGR_SDMMC (ARRIA10_SYSMGR_ADDR + 0x28) +#define ARRIA10_SYSMGR_SDMMC_L3MASTER (ARRIA10_SYSMGR_ADDR + 0x2c) +#define ARRIA10_SYSMGR_NAND_BOOTSTRAP (ARRIA10_SYSMGR_ADDR + 0x30) +#define ARRIA10_SYSMGR_NAND_L3MASTER (ARRIA10_SYSMGR_ADDR + 0x34) +#define ARRIA10_SYSMGR_USB0_L3MASTER (ARRIA10_SYSMGR_ADDR + 0x38) +#define ARRIA10_SYSMGR_USB1_L3MASTER (ARRIA10_SYSMGR_ADDR + 0x3c) +#define ARRIA10_SYSMGR_EMAC_GLOBAL (ARRIA10_SYSMGR_ADDR + 0x40) +#define ARRIA10_SYSMGR_EMAC0 (ARRIA10_SYSMGR_ADDR + 0x44) +#define ARRIA10_SYSMGR_EMAC1 (ARRIA10_SYSMGR_ADDR + 0x48) +#define ARRIA10_SYSMGR_EMAC2 (ARRIA10_SYSMGR_ADDR + 0x4c) +#define ARRIA10_SYSMGR_FPGAINTF_GLOBAL 
(ARRIA10_SYSMGR_ADDR + 0x60) +#define ARRIA10_SYSMGR_FPGAINTF_EN_0 (ARRIA10_SYSMGR_ADDR + 0x64) +#define ARRIA10_SYSMGR_FPGAINTF_EN_1 (ARRIA10_SYSMGR_ADDR + 0x68) +#define ARRIA10_SYSMGR_FPGAINTF_EN_2 (ARRIA10_SYSMGR_ADDR + 0x6c) +#define ARRIA10_SYSMGR_FPGAINTF_EN_3 (ARRIA10_SYSMGR_ADDR + 0x70) +#define ARRIA10_SYSMGR_NOC_ADDR_REMAP_VALUE (ARRIA10_SYSMGR_ADDR + 0x80) +#define ARRIA10_SYSMGR_NOC_ADDR_REMAP_SET (ARRIA10_SYSMGR_ADDR + 0x84) +#define ARRIA10_SYSMGR_NOC_ADDR_REMAP_CLEAR (ARRIA10_SYSMGR_ADDR + 0x88) +#define ARRIA10_SYSMGR_ECC_INTMASK_VALUE (ARRIA10_SYSMGR_ADDR + 0x90) +#define ARRIA10_SYSMGR_ECC_INTMASK_SET (ARRIA10_SYSMGR_ADDR + 0x94) +#define ARRIA10_SYSMGR_ECC_INTMASK_CLR (ARRIA10_SYSMGR_ADDR + 0x98) +#define ARRIA10_SYSMGR_ECC_INTSTATUS_SERR (ARRIA10_SYSMGR_ADDR + 0x9c) +#define ARRIA10_SYSMGR_ECC_INTSTATUS_DERR (ARRIA10_SYSMGR_ADDR + 0xa0) +#define ARRIA10_SYSMGR_MPU_STATUS_L2_ECC (ARRIA10_SYSMGR_ADDR + 0xa4) +#define ARRIA10_SYSMGR_MPU_CLEAR_L2_ECC (ARRIA10_SYSMGR_ADDR + 0xa8) +#define ARRIA10_SYSMGR_MPU_STATUS_L1_PARITY (ARRIA10_SYSMGR_ADDR + 0xac) +#define ARRIA10_SYSMGR_MPU_CLEAR_L1_PARITY (ARRIA10_SYSMGR_ADDR + 0xb0) +#define ARRIA10_SYSMGR_MPU_SET_L1_PARITY (ARRIA10_SYSMGR_ADDR + 0xb4) +#define ARRIA10_SYSMGR_NOC_TIMEOUT (ARRIA10_SYSMGR_ADDR + 0xc0) +#define ARRIA10_SYSMGR_NOC_IDLEREQ_SET (ARRIA10_SYSMGR_ADDR + 0xc4) +#define ARRIA10_SYSMGR_NOC_IDLEREQ_CLR (ARRIA10_SYSMGR_ADDR + 0xc8) +#define ARRIA10_SYSMGR_NOC_IDLEREQ_VALUE (ARRIA10_SYSMGR_ADDR + 0xcc) +#define ARRIA10_SYSMGR_NOC_IDLEACK (ARRIA10_SYSMGR_ADDR + 0xd0) +#define ARRIA10_SYSMGR_NOC_IDLESTATUS (ARRIA10_SYSMGR_ADDR + 0xd4) +#define ARRIA10_SYSMGR_FPGA2SOC_CTRL (ARRIA10_SYSMGR_ADDR + 0xd8) + +/* pin mux */ +#define ARRIA10_SYSMGR_PINMUXGRP (ARRIA10_SYSMGR_ADDR + 0x400) +#define ARRIA10_SYSMGR_PINMUXGRP_NANDUSEFPGA (ARRIA10_SYSMGR_PINMUXGRP + 0x2F0) +#define ARRIA10_SYSMGR_PINMUXGRP_EMAC1USEFPGA (ARRIA10_SYSMGR_PINMUXGRP + 0x2F8) +#define ARRIA10_SYSMGR_PINMUXGRP_SDMMCUSEFPGA (ARRIA10_SYSMGR_PINMUXGRP + 0x308) +#define ARRIA10_SYSMGR_PINMUXGRP_EMAC0USEFPGA (ARRIA10_SYSMGR_PINMUXGRP + 0x314) +#define ARRIA10_SYSMGR_PINMUXGRP_SPIM1USEFPGA (ARRIA10_SYSMGR_PINMUXGRP + 0x330) +#define ARRIA10_SYSMGR_PINMUXGRP_SPIM0USEFPGA (ARRIA10_SYSMGR_PINMUXGRP + 0x338) + +/* bit fields */ +#define ARRIA10_SYSMGR_ROMCODEGRP_CTRL_WARMRSTCFGPINMUX BIT(0) +#define ARRIA10_SYSMGR_ROMCODEGRP_CTRL_WARMRSTCFGIO BIT(1) +#define ARRIA10_SYSMGR_ECC_OCRAM_EN BIT(0) +#define ARRIA10_SYSMGR_ECC_OCRAM_SERR BIT(3) +#define ARRIA10_SYSMGR_ECC_OCRAM_DERR BIT(4) +#define ARRIA10_SYSMGR_FPGAINTF_USEFPGA BIT(1) +#define ARRIA10_SYSMGR_FPGAINTF_SPIM0 BIT(0) +#define ARRIA10_SYSMGR_FPGAINTF_SPIM1 BIT(1) +#define ARRIA10_SYSMGR_FPGAINTF_EMAC0 BIT(2) +#define ARRIA10_SYSMGR_FPGAINTF_EMAC1 BIT(3) +#define ARRIA10_SYSMGR_FPGAINTF_NAND BIT(4) +#define ARRIA10_SYSMGR_FPGAINTF_SDMMC BIT(5) + +#define ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 +#define ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 +#define ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 +#define ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_LSB 0 +#define ARRIA10_SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 + +#define ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC0 BIT(0) +#define ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC0_SW BIT(4) +#define ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC1 BIT(8) +#define ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC1_SW BIT(12) +#define ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC2 BIT(16) +#define ARRIA10_SYSMGR_FPGAINTF_EN3_EMAC2_SW BIT(20) + +#define ARRIA10_SYSMGR_SDMMC_SMPLSEL(smplsel) (((smplsel) & 0x7) << 
4) +#define ARRIA10_SYSMGR_SDMMC_DRVSEL(drvsel) ((drvsel) & 0x7) + +#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \ + ((drvsel << 0) & 0x7) | ((smplsel << 4) & 0x70) + +#endif diff --git a/arch/arm/mach-socfpga/include/mach/barebox-arm-head.h b/arch/arm/mach-socfpga/include/mach/barebox-arm-head.h new file mode 100644 index 0000000000..28fb1c92fc --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/barebox-arm-head.h @@ -0,0 +1,42 @@ +static inline void __barebox_arm_head(void) +{ + __asm__ __volatile__ ( +#ifdef CONFIG_THUMB2_BAREBOX + ".arm\n" + "adr r9, 1f + 1\n" + "bx r9\n" + ".thumb\n" + "1:\n" + "bl 2f\n" + ".rept 10\n" + "1: b 1b\n" + ".endr\n" +#else + "b 2f\n" + "1: b 1b\n" + "1: b 1b\n" + "1: b 1b\n" + "1: b 1b\n" + "1: b 1b\n" + "1: b 1b\n" + "1: b 1b\n" +#endif + ".asciz \"barebox\"\n" + ".word _text\n" /* text base. If copied there, + * barebox can skip relocation + */ + ".word _barebox_image_size\n" /* image size to copy */ + + ".rept 10\n" + ".word 0x55555555\n" + ".endr\n" + "2:\n" + ); +} +static inline void barebox_arm_head(void) +{ + __barebox_arm_head(); + __asm__ __volatile__ ( + "b barebox_arm_reset_vector\n" + ); +} diff --git a/arch/arm/mach-socfpga/include/mach/debug_ll.h b/arch/arm/mach-socfpga/include/mach/debug_ll.h index 4e906ea66e..f41258c504 100644 --- a/arch/arm/mach-socfpga/include/mach/debug_ll.h +++ b/arch/arm/mach-socfpga/include/mach/debug_ll.h @@ -53,6 +53,17 @@ static inline void INIT_LL(void) writel(FCRVAL, UART_BASE + FCR); } +#ifdef CONFIG_ARCH_SOCFPGA_ARRIA10 +static inline void PUTC_LL(char c) +{ + /* Wait until there is space in the FIFO */ + while ((readl(UART_BASE + LSR) & LSR_THRE) == 0); + /* Send the character */ + writel(c, UART_BASE + THR); + /* Wait to make sure it hits the line, in case we die too soon. */ + while ((readl(UART_BASE + LSR) & LSR_THRE) == 0); +} +#else static inline void PUTC_LL(char c) { /* Wait until there is space in the FIFO */ @@ -62,6 +73,7 @@ static inline void PUTC_LL(char c) /* Wait to make sure it hits the line, in case we die too soon. 
*/ while ((readb(UART_BASE + LSR) & LSR_THRE) == 0); } +#endif #else static inline unsigned int ns16550_calc_divisor(unsigned int clk, diff --git a/arch/arm/mach-socfpga/include/mach/generic.h b/arch/arm/mach-socfpga/include/mach/generic.h index 2a7e0ea499..9d6dd1f26c 100644 --- a/arch/arm/mach-socfpga/include/mach/generic.h +++ b/arch/arm/mach-socfpga/include/mach/generic.h @@ -1,13 +1,49 @@ #ifndef __MACH_SOCFPGA_GENERIC_H #define __MACH_SOCFPGA_GENERIC_H +#include + struct socfpga_cm_config; struct socfpga_io_config; +struct arria10_mainpll_cfg; +struct arria10_perpll_cfg; +struct arria10_pinmux_cfg; + +void arria10_init(struct arria10_mainpll_cfg *mainpll, + struct arria10_perpll_cfg *perpll, uint32_t *pinmux); + void socfpga_lowlevel_init(struct socfpga_cm_config *cm_config, struct socfpga_io_config *io_config); +#if defined(CONFIG_ARCH_SOCFPGA_CYCLONE5) +void socfpga_cyclone5_mmc_init(void); +void socfpga_cyclone5_uart_init(void); +void socfpga_cyclone5_timer_init(void); +void socfpga_cyclone5_qspi_init(void); +#else +static inline void socfpga_cyclone5_mmc_init(void) +{ + return; +} + +static inline void socfpga_cyclone5_uart_init(void) +{ + return; +} + +static inline void socfpga_cyclone5_timer_init(void) +{ + return; +} + +static inline void socfpga_cyclone5_qspi_init(void) +{ + return; +} +#endif + static inline void __udelay(unsigned us) { volatile unsigned int i; diff --git a/arch/arm/mach-socfpga/xload.c b/arch/arm/mach-socfpga/xload.c index 5d47bb9d3e..ee7d194427 100644 --- a/arch/arm/mach-socfpga/xload.c +++ b/arch/arm/mach-socfpga/xload.c @@ -1,8 +1,5 @@ -#include -#include #include #include -#include #include #include #include @@ -10,7 +7,6 @@ #include #include #include -#include #include #include @@ -30,84 +26,6 @@ static struct socfpga_barebox_part default_parts[] = { }; const struct socfpga_barebox_part *barebox_parts = default_parts; -enum socfpga_clks { - timer, mmc, qspi_clk, uart, clk_max -}; - -static struct clk *clks[clk_max]; - -static struct dw_mmc_platform_data mmc_pdata = { - .bus_width_caps = MMC_CAP_4_BIT_DATA, - .ciu_div = 3, -}; - -static void socfpga_mmc_init(void) -{ - clks[mmc] = clk_fixed("mmc", 400000000); - clkdev_add_physbase(clks[mmc], CYCLONE5_SDMMC_ADDRESS, NULL); - add_generic_device("dw_mmc", 0, NULL, CYCLONE5_SDMMC_ADDRESS, SZ_4K, - IORESOURCE_MEM, &mmc_pdata); -} - -#if defined(CONFIG_SPI_CADENCE_QUADSPI) -static struct cadence_qspi_platform_data qspi_pdata = { - .ext_decoder = 0, - .fifo_depth = 128, -}; - -static __maybe_unused void add_cadence_qspi_device(int id, resource_size_t ctrl, - resource_size_t data, void *pdata) -{ - struct resource *res; - - res = xzalloc(sizeof(struct resource) * 2); - res[0].start = ctrl; - res[0].end = ctrl + 0x100 - 1; - res[0].flags = IORESOURCE_MEM; - res[1].start = data; - res[1].end = data + 0x100 - 1; - res[1].flags = IORESOURCE_MEM; - - add_generic_device_res("cadence_qspi", id, res, 2, pdata); -} - -static __maybe_unused void socfpga_qspi_init(void) -{ - clks[qspi_clk] = clk_fixed("qspi_clk", 370000000); - clkdev_add_physbase(clks[qspi_clk], CYCLONE5_QSPI_CTRL_ADDRESS, NULL); - clkdev_add_physbase(clks[qspi_clk], CYCLONE5_QSPI_DATA_ADDRESS, NULL); - add_cadence_qspi_device(0, CYCLONE5_QSPI_CTRL_ADDRESS, - CYCLONE5_QSPI_DATA_ADDRESS, &qspi_pdata); -} -#else -static void socfpga_qspi_init(void) -{ - return; -} -#endif - -static struct NS16550_plat uart_pdata = { - .clock = 100000000, - .shift = 2, -}; - -static void socfpga_uart_init(void) -{ - clks[uart] = clk_fixed("uart", 100000000); - 
clkdev_add_physbase(clks[uart], CYCLONE5_UART0_ADDRESS, NULL); - clkdev_add_physbase(clks[uart], CYCLONE5_UART1_ADDRESS, NULL); - add_ns16550_device(0, 0xffc02000, 1024, IORESOURCE_MEM | - IORESOURCE_MEM_8BIT, &uart_pdata); -} - -static void socfpga_timer_init(void) -{ - clks[timer] = clk_fixed("timer", 200000000); - clkdev_add_physbase(clks[timer], CYCLONE5_SMP_TWD_ADDRESS, NULL); - add_generic_device("smp_twd", 0, NULL, CYCLONE5_SMP_TWD_ADDRESS, 0x100, - IORESOURCE_MEM, NULL); -} - static __noreturn int socfpga_xload(void) { enum bootsource bootsource = bootsource_get(); @@ -116,7 +34,7 @@ static __noreturn int socfpga_xload(void) switch (bootsource) { case BOOTSOURCE_MMC: - socfpga_mmc_init(); + socfpga_cyclone5_mmc_init(); for (part = barebox_parts; part->mmc_disk; part++) { buf = bootstrap_read_disk(barebox_parts->mmc_disk, "fat"); @@ -132,8 +50,7 @@ static __noreturn int socfpga_xload(void) } break; case BOOTSOURCE_SPI: - socfpga_qspi_init(); - + socfpga_cyclone5_qspi_init(); for (part = barebox_parts; part->nor_size; part++) { buf = bootstrap_read_devfs("mtd0", false, part->nor_offset, part->nor_size, SZ_1M); @@ -142,7 +59,6 @@ static __noreturn int socfpga_xload(void) part->nor_offset); continue; } - break; } @@ -167,8 +83,8 @@ static __noreturn int socfpga_xload(void) static int socfpga_devices_init(void) { barebox_set_model("SoCFPGA"); - socfpga_timer_init(); - socfpga_uart_init(); + socfpga_cyclone5_timer_init(); + socfpga_cyclone5_uart_init(); barebox_main = socfpga_xload; diff --git a/common/Kconfig b/common/Kconfig index 4c7a2d2679..459f0b18fd 100644 --- a/common/Kconfig +++ b/common/Kconfig @@ -1129,6 +1129,13 @@ config DEBUG_SOCFPGA_UART0 Say Y here if you want kernel low-level debugging support on SOCFPGA(Cyclone 5 and Arria 5) based platforms. +config DEBUG_SOCFPGA_UART1 + bool "Use SOCFPGA UART1 for low-level debug" + depends on ARCH_SOCFPGA + help + Say Y here if you want kernel low-level debugging support + on SOCFPGA(Arria 10) based platforms. + endchoice @@ -1175,11 +1182,13 @@ config DEBUG_ROCKCHIP_UART_PORT config DEBUG_SOCFPGA_UART_PHYS_ADDR hex "Physical base address of debug UART" if DEBUG_LL default 0xffc02000 if DEBUG_SOCFPGA_UART0 + default 0xffc02100 if DEBUG_SOCFPGA_UART1 depends on ARCH_SOCFPGA config DEBUG_SOCFPGA_UART_CLOCK int "SoCFPGA UART debug clock" if DEBUG_LL - default 100000000 + default 100000000 if ARCH_SOCFPGA_CYCLONE5 + default 50000000 if ARCH_SOCFPGA_ARRIA10 depends on ARCH_SOCFPGA help Choose UART root clock. 
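The UART base address and root clock selected above are what debug_ll.h feeds into its 16550 divisor computation. A minimal, self-contained sketch of that arithmetic follows; the 115200 baud figure is an assumed example rate and the helper name is illustrative, not the one used in the header:

/*
 * Illustrative 16550 divisor arithmetic for the Kconfig defaults above.
 * A 16550-compatible UART runs with 16x oversampling, so
 * divisor = clk / (16 * baud).
 */
static unsigned int debug_uart_divisor(unsigned int clk, unsigned int baud)
{
	return clk / (16 * baud);
}

/*
 * debug_uart_divisor(100000000, 115200) == 54   Cyclone5, UART0 at 0xffc02000
 * debug_uart_divisor(50000000,  115200) == 27   Arria10,  UART1 at 0xffc02100
 */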
diff --git a/images/Makefile.socfpga b/images/Makefile.socfpga index 21804d93df..a764b1a5fe 100644 --- a/images/Makefile.socfpga +++ b/images/Makefile.socfpga @@ -4,8 +4,11 @@ # %.socfpgaimg - convert into socfpga image # ---------------------------------------------------------------- +SOCFPGA_IMAGE_ARGS-$(CONFIG_ARCH_SOCFPGA_ARRIA10) += -v1 +SOCFPGA_IMAGE_ARGS-$(CONFIG_ARCH_SOCFPGA_CYCLONE5) += -v0 + quiet_cmd_socfpga_image = SOCFPGA-IMG $@ - cmd_socfpga_image = scripts/socfpga_mkimage -o $@ $< + cmd_socfpga_image = scripts/socfpga_mkimage -o $@ $(SOCFPGA_IMAGE_ARGS-y) $< $(obj)/%.socfpgaimg: $(obj)/% FORCE $(call if_changed,socfpga_image) diff --git a/scripts/socfpga_xml_to_config.sh b/scripts/socfpga_xml_to_config.sh new file mode 100755 index 0000000000..7e22ebb9e8 --- /dev/null +++ b/scripts/socfpga_xml_to_config.sh @@ -0,0 +1,117 @@ +#!/bin/bash + +## TODO: +## - read in mpuclk and nocclk, must be calculated by hand at the moment +## - read in cfg_dedicated_io_*, must be calculated by hand at the moment + +if [ "$#" -lt "2" ] +then + echo "USAGE: $0 " + exit 1 +fi + +dir=$1 +xml=$2 + +pll_config() { + local src + local tgt + src=$1 + tgt=$2 + + MAINPLL=`grep mainpll "$src" | \ + sed -e 's/^.*mainpllgrp\.//g' | \ + sed -e 's/\./_/g' | \ + sed -e "s/' value/ /g" | \ + sed -e "s/'/ /g" | \ + sed -e "s# />#,#g" | \ + sed -e "s/^/\t./g" | + sort` + + # FIXME: Find solution + MAINPLL_FIXME=".mpuclk = FIXME, + .nocclk = FIXME," + + PERPLL=`grep perpll "$src" | \ + sed -e 's/^.*perpllgrp\.//g' | \ + sed -e 's/\./_/g' | \ + sed -e "s/' value/ /g" | \ + sed -e "s/'/ /g" | \ + sed -e "s# />#,#g" | \ + sed -e "s/^/\t./g" | + sort` + + echo "#include " > $tgt + echo >> $tgt + echo "static struct arria10_mainpll_cfg mainpll_cfg = {" >> $tgt + echo "$MAINPLL" >> $tgt + echo "$MAINPLL_FIXME" >> $tgt + echo "};" >> $tgt + echo >> $tgt + echo "static struct arria10_perpll_cfg perpll_cfg = {" >> $tgt + echo "$PERPLL" >> $tgt + echo "};" >> $tgt + + dos2unix $tgt +} + +pinmux_config() { + local src + local tgt + src=$1 + tgt=$2 + + SHARED=`grep pinmux_shared "$src" | \ + sed -e 's/^.*pinmux_/[arria10_pinmux_/g' | \ + sed -e "s/\.sel' value='/] = /g" | \ + sed -e "s/' \/>/,/g"` + + DEDICATED=`grep pinmux_dedicated "$src" | \ + sed -e 's/^.*pinmux_/[arria10_pinmux_/g' | \ + sed -e "s/\.sel' value='/] = /g" | \ + sed -e "s/' \/>/,/g"` + + # FIXME: Either find solution how to parse these values too or replace + # script with something that goes more in the direction of a programming + # language + DEDICATED_FIXME="[arria10_pincfg_dedicated_io_bank] = FIXME, + [arria10_pincfg_dedicated_io_1] = FIXME, + [arria10_pincfg_dedicated_io_2] = FIXME, + [arria10_pincfg_dedicated_io_3] = FIXME, + [arria10_pincfg_dedicated_io_4] = FIXME, + [arria10_pincfg_dedicated_io_5] = FIXME, + [arria10_pincfg_dedicated_io_6] = FIXME, + [arria10_pincfg_dedicated_io_7] = FIXME, + [arria10_pincfg_dedicated_io_8] = FIXME, + [arria10_pincfg_dedicated_io_9] = FIXME, + [arria10_pincfg_dedicated_io_10] = FIXME, + [arria10_pincfg_dedicated_io_11] = FIXME, + [arria10_pincfg_dedicated_io_12] = FIXME, + [arria10_pincfg_dedicated_io_13] = FIXME, + [arria10_pincfg_dedicated_io_14] = FIXME, + [arria10_pincfg_dedicated_io_15] = FIXME, + [arria10_pincfg_dedicated_io_16] = FIXME, + [arria10_pincfg_dedicated_io_17] = FIXME" + + FPGA=`grep _fpga_interface_grp "$src" | \ + grep -v -e usb -e pll_clock_out | \ + sed -e 's/^.*pinmux_/[arria10_pinmux_/g' | \ + sed -e "s/\.sel' value='/] = /g" | \ + sed -e "s/' \/>/,/g"` + + echo "#include " > $tgt + echo 
>> $tgt + echo "static uint32_t pinmux[] = {" >> $tgt + echo "$SHARED" >> $tgt + echo "$DEDICATED" >> $tgt + echo "$DEDICATED_FIXME" >> $tgt + echo "$FPGA" >> $tgt + echo "};" >> $tgt + echo >> $tgt + + dos2unix $tgt +} + +pll_config $xml $dir/pll-config-arria10.c + +pinmux_config $xml $dir/pinmux-config-arria10.c -- cgit v1.2.3 From 2f7ca3ab16be2035e54bc905bc6052d98c61094a Mon Sep 17 00:00:00 2001 From: Steffen Trumtrar Date: Fri, 28 Apr 2017 16:41:42 +0200 Subject: clk: socfpga: add arria10 clk drivers Arria10 has a (slightly) different clock controller than the Cyclone5. Add new drivers for it. This driver only reads out the setup and builds the clocktree, it does not setup any clocks. Signed-off-by: Steffen Trumtrar Signed-off-by: Sascha Hauer --- drivers/clk/socfpga/Makefile | 2 + drivers/clk/socfpga/clk-gate-a10.c | 197 +++++++++++++++++++++++++++++++++++ drivers/clk/socfpga/clk-periph-a10.c | 130 +++++++++++++++++++++++ drivers/clk/socfpga/clk-pll-a10.c | 143 +++++++++++++++++++++++++ drivers/clk/socfpga/clk.c | 11 +- drivers/clk/socfpga/clk.h | 90 ++++++++++++++++ 6 files changed, 572 insertions(+), 1 deletion(-) create mode 100644 drivers/clk/socfpga/clk-gate-a10.c create mode 100644 drivers/clk/socfpga/clk-periph-a10.c create mode 100644 drivers/clk/socfpga/clk-pll-a10.c create mode 100644 drivers/clk/socfpga/clk.h diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile index fc216adb74..341e6433e5 100644 --- a/drivers/clk/socfpga/Makefile +++ b/drivers/clk/socfpga/Makefile @@ -1 +1,3 @@ obj-y += clk.o + +obj-$(CONFIG_ARCH_SOCFPGA_ARRIA10) += clk-gate-a10.o clk-periph-a10.o clk-pll-a10.o diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c new file mode 100644 index 0000000000..07f6026c2e --- /dev/null +++ b/drivers/clk/socfpga/clk-gate-a10.c @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2015 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clk.h" + +#define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, clk) + +/* SDMMC Group for System Manager defines */ +#define SYSMGR_SDMMCGRP_CTRL_OFFSET 0x28 + +static unsigned long socfpga_gate_clk_recalc_rate(struct clk *clk, + unsigned long parent_rate) +{ + struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(clk); + u32 div = 1, val; + + if (socfpgaclk->fixed_div) + div = socfpgaclk->fixed_div; + else if (socfpgaclk->div_reg) { + val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; + val &= GENMASK(socfpgaclk->width - 1, 0); + div = (1 << val); + } + + return parent_rate / div; +} + +static int socfpga_clk_prepare(struct clk *clk) +{ + struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(clk); + int i; + u32 hs_timing; + u32 clk_phase[2]; + + if (socfpgaclk->clk_phase[0] || socfpgaclk->clk_phase[1]) { + for (i = 0; i < ARRAY_SIZE(clk_phase); i++) { + switch (socfpgaclk->clk_phase[i]) { + case 0: + clk_phase[i] = 0; + break; + case 45: + clk_phase[i] = 1; + break; + case 90: + clk_phase[i] = 2; + break; + case 135: + clk_phase[i] = 3; + break; + case 180: + clk_phase[i] = 4; + break; + case 225: + clk_phase[i] = 5; + break; + case 270: + clk_phase[i] = 6; + break; + case 315: + clk_phase[i] = 7; + break; + default: + clk_phase[i] = 0; + break; + } + } + + hs_timing = SYSMGR_SDMMC_CTRL_SET(clk_phase[0], clk_phase[1]); + writel(hs_timing, ARRIA10_SYSMGR_SDMMC); + } + return 0; +} + +static int clk_socfpga_enable(struct clk *clk) +{ + struct socfpga_gate_clk *socfpga_clk = to_socfpga_gate_clk(clk); + u32 val; + + socfpga_clk_prepare(clk); + + val = readl(socfpga_clk->reg); + val |= 1 << socfpga_clk->bit_idx; + writel(val, socfpga_clk->reg); + + return 0; +} + +static void clk_socfpga_disable(struct clk *clk) +{ + struct socfpga_gate_clk *socfpga_clk = to_socfpga_gate_clk(clk); + u32 val; + + val = readl(socfpga_clk->reg); + val &= ~(1 << socfpga_clk->shift); + writel(val, socfpga_clk->reg); +} + +static struct clk_ops gateclk_ops = { + .recalc_rate = socfpga_gate_clk_recalc_rate, +}; + +static struct clk *__socfpga_gate_init(struct device_node *node, + const struct clk_ops *ops) +{ + u32 clk_gate[2]; + u32 div_reg[3]; + u32 clk_phase[2]; + u32 fixed_div; + struct socfpga_gate_clk *socfpga_clk; + const char *clk_name = node->name; + int rc; + int i; + + socfpga_clk = xzalloc(sizeof(*socfpga_clk)); + + rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); + if (rc) + clk_gate[0] = 0; + + if (clk_gate[0]) { + socfpga_clk->reg = clk_mgr_base_addr + clk_gate[0]; + socfpga_clk->bit_idx = clk_gate[1]; + + gateclk_ops.enable = clk_socfpga_enable; + gateclk_ops.disable = clk_socfpga_disable; + } + + rc = of_property_read_u32(node, "fixed-divider", &fixed_div); + if (rc) + socfpga_clk->fixed_div = 0; + else + socfpga_clk->fixed_div = fixed_div; + + rc = of_property_read_u32_array(node, "div-reg", div_reg, 3); + if (!rc) { + socfpga_clk->div_reg = clk_mgr_base_addr + div_reg[0]; + socfpga_clk->shift = div_reg[1]; + socfpga_clk->width = div_reg[2]; + } else { + socfpga_clk->div_reg = NULL; + } + + rc = of_property_read_u32_array(node, "clk-phase", clk_phase, 2); + if (!rc) { + socfpga_clk->clk_phase[0] = clk_phase[0]; + socfpga_clk->clk_phase[1] = clk_phase[1]; + } + + of_property_read_string(node, "clock-output-names", &clk_name); + + socfpga_clk->clk.name = xstrdup(clk_name); + socfpga_clk->clk.ops = ops; + + for (i = 0; i < SOCFPGA_MAX_PARENTS; i++) { + 
socfpga_clk->parent_names[i] = of_clk_get_parent_name(node, i); + if (!socfpga_clk->parent_names[i]) + break; + } + + socfpga_clk->clk.num_parents = i; + socfpga_clk->clk.parent_names = socfpga_clk->parent_names; + + rc = clk_register(&socfpga_clk->clk); + if (rc) { + free(socfpga_clk); + return ERR_PTR(rc); + } + + return &socfpga_clk->clk; +} + +struct clk *socfpga_a10_gate_init(struct device_node *node) +{ + return __socfpga_gate_init(node, &gateclk_ops); +} diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c new file mode 100644 index 0000000000..9dd7fc9c25 --- /dev/null +++ b/drivers/clk/socfpga/clk-periph-a10.c @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2015 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include + +#include "clk.h" + +#define CLK_MGR_FREE_SHIFT 16 +#define CLK_MGR_FREE_MASK 0x7 + +#define SOCFPGA_MPU_FREE_CLK "mpu_free_clk" +#define SOCFPGA_NOC_FREE_CLK "noc_free_clk" +#define SOCFPGA_SDMMC_FREE_CLK "sdmmc_free_clk" +#define to_socfpga_periph_clk(p) container_of(p, struct socfpga_periph_clk, clk) + +static unsigned long clk_periclk_recalc_rate(struct clk *clk, + unsigned long parent_rate) +{ + struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(clk); + u32 div; + + if (socfpgaclk->fixed_div) { + div = socfpgaclk->fixed_div; + } else if (socfpgaclk->div_reg) { + div = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; + div &= GENMASK(socfpgaclk->width - 1, 0); + div += 1; + } else { + div = ((readl(socfpgaclk->reg) & 0x7ff) + 1); + } + + return parent_rate / div; +} + +static int clk_periclk_get_parent(struct clk *clk) +{ + struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(clk); + u32 clk_src; + + clk_src = readl(socfpgaclk->reg); + if (streq(clk->name, SOCFPGA_MPU_FREE_CLK) || + streq(clk->name, SOCFPGA_NOC_FREE_CLK) || + streq(clk->name, SOCFPGA_SDMMC_FREE_CLK)) + return (clk_src >> CLK_MGR_FREE_SHIFT) & + CLK_MGR_FREE_MASK; + else + return 0; +} + +static const struct clk_ops periclk_ops = { + .recalc_rate = clk_periclk_recalc_rate, + .get_parent = clk_periclk_get_parent, +}; + +static struct clk *__socfpga_periph_init(struct device_node *node, + const struct clk_ops *ops) +{ + u32 reg; + struct socfpga_periph_clk *periph_clk; + const char *clk_name = node->name; + int rc; + u32 fixed_div; + u32 div_reg[3]; + int i; + + of_property_read_u32(node, "reg", ®); + + periph_clk = xzalloc(sizeof(*periph_clk)); + + periph_clk->reg = clk_mgr_base_addr + reg; + + rc = of_property_read_u32_array(node, "div-reg", div_reg, 3); + if (!rc) { + periph_clk->div_reg = clk_mgr_base_addr + div_reg[0]; + periph_clk->shift = div_reg[1]; + periph_clk->width = div_reg[2]; + } else { + periph_clk->div_reg = NULL; + } + + rc = of_property_read_u32(node, "fixed-divider", &fixed_div); + if (rc) + periph_clk->fixed_div = 0; + else + periph_clk->fixed_div = fixed_div; + + of_property_read_string(node, "clock-output-names", &clk_name); 
+ + for (i = 0; i < SOCFPGA_MAX_PARENTS; i++) { + periph_clk->parent_names[i] = of_clk_get_parent_name(node, i); + if (!periph_clk->parent_names[i]) + break; + } + + periph_clk->clk.num_parents = i; + periph_clk->clk.parent_names = periph_clk->parent_names; + + periph_clk->clk.name = xstrdup(clk_name); + periph_clk->clk.ops = ops; + + rc = clk_register(&periph_clk->clk); + if (rc) { + free(periph_clk); + return ERR_PTR(rc); + } + + return &periph_clk->clk; +} + +struct clk *socfpga_a10_periph_init(struct device_node *node) +{ + return __socfpga_periph_init(node, &periclk_ops); +} diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c new file mode 100644 index 0000000000..4dae3e537b --- /dev/null +++ b/drivers/clk/socfpga/clk-pll-a10.c @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2015 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ +#include +#include +#include +#include +#include +#include + +#include "clk.h" + +/* Clock Manager offsets */ +#define CLK_MGR_PLL_CLK_SRC_SHIFT 8 +#define CLK_MGR_PLL_CLK_SRC_MASK 0x3 + +/* Clock bypass bits */ +#define SOCFPGA_PLL_BG_PWRDWN 0 +#define SOCFPGA_PLL_PWR_DOWN 1 +#define SOCFPGA_PLL_EXT_ENA 2 +#define SOCFPGA_PLL_DIVF_MASK 0x00001FFF +#define SOCFPGA_PLL_DIVF_SHIFT 0 +#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000 +#define SOCFPGA_PLL_DIVQ_SHIFT 16 +#define SOCFGPA_MAX_PARENTS 5 + +#define SOCFPGA_MAIN_PLL_CLK "main_pll" +#define SOCFPGA_PERIP_PLL_CLK "periph_pll" + +#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, clk) + +static unsigned long clk_pll_recalc_rate(struct clk *clk, + unsigned long parent_rate) +{ + struct socfpga_pll *socfpgaclk = to_socfpga_clk(clk); + unsigned long divf, divq, reg; + unsigned long long vco_freq; + + /* read VCO1 reg for numerator and denominator */ + reg = readl(socfpgaclk->reg + 0x4); + divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT; + divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT; + vco_freq = (unsigned long long)parent_rate * (divf + 1); + do_div(vco_freq, (1 + divq)); + return (unsigned long)vco_freq; +} + +static int clk_pll_get_parent(struct clk *clk) +{ + struct socfpga_pll *socfpgaclk = to_socfpga_clk(clk); + u32 pll_src; + + pll_src = readl(socfpgaclk->reg); + + return (pll_src >> CLK_MGR_PLL_CLK_SRC_SHIFT) & + CLK_MGR_PLL_CLK_SRC_MASK; +} + +static int clk_socfpga_enable(struct clk *clk) +{ + struct socfpga_pll *socfpga_clk = to_socfpga_clk(clk); + u32 val; + + val = readl(socfpga_clk->reg); + val |= 1 << socfpga_clk->bit_idx; + writel(val, socfpga_clk->reg); + + return 0; +} + +static void clk_socfpga_disable(struct clk *clk) +{ + struct socfpga_pll *socfpga_clk = to_socfpga_clk(clk); + u32 val; + + val = readl(socfpga_clk->reg); + val &= ~(1 << socfpga_clk->bit_idx); + writel(val, socfpga_clk->reg); +} + +static struct clk_ops clk_pll_ops = { + .recalc_rate = clk_pll_recalc_rate, + .get_parent = clk_pll_get_parent, +}; + +static struct clk *__socfpga_pll_init(struct device_node 
*node, + const struct clk_ops *ops) +{ + u32 reg; + struct socfpga_pll *pll_clk; + const char *clk_name = node->name; + int rc; + int i; + + of_property_read_u32(node, "reg", ®); + + pll_clk = xzalloc(sizeof(*pll_clk)); + + pll_clk->reg = clk_mgr_base_addr + reg; + + of_property_read_string(node, "clock-output-names", &clk_name); + + pll_clk->clk.name = xstrdup(clk_name); + pll_clk->clk.ops = ops; + + for (i = 0; i < SOCFPGA_MAX_PARENTS; i++) { + pll_clk->parent_names[i] = of_clk_get_parent_name(node, i); + if (!pll_clk->parent_names[i]) + break; + } + + pll_clk->bit_idx = SOCFPGA_PLL_EXT_ENA; + pll_clk->clk.num_parents = i; + pll_clk->clk.parent_names = pll_clk->parent_names; + + clk_pll_ops.enable = clk_socfpga_enable; + clk_pll_ops.disable = clk_socfpga_disable; + + rc = clk_register(&pll_clk->clk); + if (rc) { + free(pll_clk); + return NULL; + } + + return &pll_clk->clk; +} + +struct clk *socfpga_a10_pll_init(struct device_node *node) +{ + return __socfpga_pll_init(node, &clk_pll_ops); +} diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c index 6af0632caf..ade608ffe3 100644 --- a/drivers/clk/socfpga/clk.c +++ b/drivers/clk/socfpga/clk.c @@ -12,6 +12,7 @@ */ #include +#include #include #include #include @@ -20,6 +21,8 @@ #include #include +#include "clk.h" + /* Clock Manager offsets */ #define CLKMGR_CTRL 0x0 #define CLKMGR_BYPASS 0x4 @@ -52,7 +55,7 @@ #define div_mask(width) ((1 << (width)) - 1) #define streq(a, b) (strcmp((a), (b)) == 0) -static void __iomem *clk_mgr_base_addr; +void __iomem *clk_mgr_base_addr; struct clk_pll { struct clk clk; @@ -385,6 +388,12 @@ static void socfpga_register_clocks(struct device_d *dev, struct device_node *no clk = socfpga_periph_clk(node); else if (of_device_is_compatible(node, "altr,socfpga-gate-clk")) clk = socfpga_gate_clk(node); + else if (of_device_is_compatible(node, "altr,socfpga-a10-pll-clock")) + clk = socfpga_a10_pll_init(node); + else if (of_device_is_compatible(node, "altr,socfpga-a10-perip-clk")) + clk = socfpga_a10_periph_init(node); + else if (of_device_is_compatible(node, "altr,socfpga-a10-gate-clk")) + clk = socfpga_a10_gate_init(node); else return; diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h new file mode 100644 index 0000000000..6d6c28344d --- /dev/null +++ b/drivers/clk/socfpga/clk.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2013, Steffen Trumtrar + * + * based on drivers/clk/tegra/clk.h + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#ifndef __SOCFPGA_CLK_H +#define __SOCFPGA_CLK_H + +#include + +/* Clock Manager offsets */ +#define CLKMGR_CTRL 0x0 +#define CLKMGR_BYPASS 0x4 +#define CLKMGR_DBCTRL 0x10 +#define CLKMGR_L4SRC 0x70 +#define CLKMGR_PERPLL_SRC 0xAC + +#define SOCFPGA_MAX_PARENTS 5 + +#define streq(a, b) (strcmp((a), (b)) == 0) + +extern void __iomem *clk_mgr_base_addr; + +void __init socfpga_pll_init(struct device_node *node); +void __init socfpga_periph_init(struct device_node *node); +void __init socfpga_gate_init(struct device_node *node); + +#ifdef CONFIG_ARCH_SOCFPGA_ARRIA10 +struct clk *socfpga_a10_pll_init(struct device_node *node); +struct clk *socfpga_a10_periph_init(struct device_node *node); +struct clk *socfpga_a10_gate_init(struct device_node *node); +#else +struct clk *socfpga_a10_pll_init(struct device_node *node) +{ + return ERR_PTR(-ENOSYS); +} +struct clk *socfpga_a10_periph_init(struct device_node *node) +{ + return ERR_PTR(-ENOSYS); +} +struct clk *socfpga_a10_gate_init(struct device_node *node) +{ + return ERR_PTR(-ENOSYS); +} +#endif + +struct socfpga_pll { + struct clk clk; + void __iomem *reg; + u32 bit_idx; + const char *parent_names[SOCFPGA_MAX_PARENTS]; +}; + +struct socfpga_gate_clk { + struct clk clk; + char *parent_name; + u32 fixed_div; + void __iomem *div_reg; + struct regmap *sys_mgr_base_addr; + u32 width; /* only valid if div_reg != 0 */ + u32 shift; /* only valid if div_reg != 0 */ + u32 bit_idx; + void __iomem *reg; + u32 clk_phase[2]; + const char *parent_names[SOCFPGA_MAX_PARENTS]; +}; + +struct socfpga_periph_clk { + struct clk clk; + void __iomem *reg; + char *parent_name; + u32 fixed_div; + void __iomem *div_reg; + u32 width; /* only valid if div_reg != 0 */ + u32 shift; /* only valid if div_reg != 0 */ + const char *parent_names[SOCFPGA_MAX_PARENTS]; +}; + +#endif /* SOCFPGA_CLK_H */ -- cgit v1.2.3 From 711683575c396759cc95f6c6c971b6ec436bae38 Mon Sep 17 00:00:00 2001 From: Steffen Trumtrar Date: Fri, 28 Apr 2017 16:41:43 +0200 Subject: ARM: socfpga: add support for reflex achilles board Signed-off-by: Steffen Trumtrar Signed-off-by: Sascha Hauer --- arch/arm/boards/Makefile | 1 + arch/arm/boards/reflex-achilles/Makefile | 2 + arch/arm/boards/reflex-achilles/lowlevel.c | 48 ++++++++ .../boards/reflex-achilles/pinmux-config-arria10.c | 102 +++++++++++++++++ .../boards/reflex-achilles/pll-config-arria10.c | 54 +++++++++ arch/arm/dts/Makefile | 1 + arch/arm/dts/socfpga_arria10_achilles.dts | 124 +++++++++++++++++++++ arch/arm/mach-socfpga/Kconfig | 5 + images/Makefile.socfpga | 4 + 9 files changed, 341 insertions(+) create mode 100644 arch/arm/boards/reflex-achilles/Makefile create mode 100644 arch/arm/boards/reflex-achilles/lowlevel.c create mode 100644 arch/arm/boards/reflex-achilles/pinmux-config-arria10.c create mode 100644 arch/arm/boards/reflex-achilles/pll-config-arria10.c create mode 100644 arch/arm/dts/socfpga_arria10_achilles.dts diff --git a/arch/arm/boards/Makefile b/arch/arm/boards/Makefile index 250ccb8889..bcd94a0569 100644 --- a/arch/arm/boards/Makefile +++ b/arch/arm/boards/Makefile @@ -110,6 +110,7 @@ obj-$(CONFIG_MACH_SAMA5D4EK) += sama5d4ek/ obj-$(CONFIG_MACH_SCB9328) += scb9328/ obj-$(CONFIG_MACH_SOCFPGA_ALTERA_SOCDK) += altera-socdk/ obj-$(CONFIG_MACH_SOCFPGA_EBV_SOCRATES) += ebv-socrates/ +obj-$(CONFIG_MACH_SOCFPGA_REFLEX_ACHILLES) += reflex-achilles/ obj-$(CONFIG_MACH_SOCFPGA_TERASIC_DE0_NANO_SOC) += terasic-de0-nano-soc/ obj-$(CONFIG_MACH_SOCFPGA_TERASIC_SOCKIT) += terasic-sockit/ obj-$(CONFIG_MACH_SOLIDRUN_CUBOX) += 
solidrun-cubox/ diff --git a/arch/arm/boards/reflex-achilles/Makefile b/arch/arm/boards/reflex-achilles/Makefile new file mode 100644 index 0000000000..6b42141153 --- /dev/null +++ b/arch/arm/boards/reflex-achilles/Makefile @@ -0,0 +1,2 @@ +obj-y += lowlevel.o +pbl-y += lowlevel.o diff --git a/arch/arm/boards/reflex-achilles/lowlevel.c b/arch/arm/boards/reflex-achilles/lowlevel.c new file mode 100644 index 0000000000..12994177cc --- /dev/null +++ b/arch/arm/boards/reflex-achilles/lowlevel.c @@ -0,0 +1,48 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pll-config-arria10.c" +#include "pinmux-config-arria10.c" +#include + +extern char __dtb_socfpga_arria10_achilles_start[]; + +static noinline void achilles_entry(void) +{ + void *fdt; + + arm_early_mmu_cache_invalidate(); + + relocate_to_current_adr(); + setup_c(); + + arria10_init(&mainpll_cfg, &perpll_cfg, pinmux); + + puts_ll("lowlevel init done\n"); + + arria10_ddr_calibration_sequence(); + + puts_ll("SDRAM setup done\n"); + + fdt = __dtb_socfpga_arria10_achilles_start - get_runtime_offset(); + + barebox_arm_entry(0x0, SZ_2G + SZ_1G, fdt); +} + +ENTRY_FUNCTION(start_socfpga_achilles, r0, r1, r2) +{ + arm_cpu_lowlevel_init(); + + arm_setup_stack(0xffe00000 + SZ_256K - SZ_32K - SZ_4K - 16); + + achilles_entry(); +} diff --git a/arch/arm/boards/reflex-achilles/pinmux-config-arria10.c b/arch/arm/boards/reflex-achilles/pinmux-config-arria10.c new file mode 100644 index 0000000000..246838a228 --- /dev/null +++ b/arch/arm/boards/reflex-achilles/pinmux-config-arria10.c @@ -0,0 +1,102 @@ +#include + +static uint32_t pinmux[] = { +[arria10_pinmux_shared_io_q4_12] = 8, +[arria10_pinmux_shared_io_q4_11] = 8, +[arria10_pinmux_shared_io_q4_10] = 8, +[arria10_pinmux_shared_io_q4_9] = 8, +[arria10_pinmux_shared_io_q4_8] = 8, +[arria10_pinmux_shared_io_q4_6] = 8, +[arria10_pinmux_shared_io_q4_7] = 8, +[arria10_pinmux_shared_io_q4_5] = 8, +[arria10_pinmux_shared_io_q4_4] = 8, +[arria10_pinmux_shared_io_q4_3] = 8, +[arria10_pinmux_shared_io_q4_2] = 8, +[arria10_pinmux_shared_io_q4_1] = 8, +[arria10_pinmux_shared_io_q3_12] = 8, +[arria10_pinmux_shared_io_q3_11] = 8, +[arria10_pinmux_shared_io_q3_10] = 8, +[arria10_pinmux_shared_io_q3_8] = 8, +[arria10_pinmux_shared_io_q3_9] = 8, +[arria10_pinmux_shared_io_q3_7] = 8, +[arria10_pinmux_shared_io_q3_6] = 8, +[arria10_pinmux_shared_io_q3_5] = 8, +[arria10_pinmux_shared_io_q3_4] = 8, +[arria10_pinmux_shared_io_q3_3] = 8, +[arria10_pinmux_shared_io_q3_2] = 8, +[arria10_pinmux_shared_io_q3_1] = 8, +[arria10_pinmux_shared_io_q2_12] = 8, +[arria10_pinmux_shared_io_q2_10] = 8, +[arria10_pinmux_shared_io_q2_11] = 8, +[arria10_pinmux_shared_io_q2_9] = 8, +[arria10_pinmux_shared_io_q2_8] = 8, +[arria10_pinmux_shared_io_q2_7] = 8, +[arria10_pinmux_shared_io_q2_6] = 8, +[arria10_pinmux_shared_io_q2_5] = 8, +[arria10_pinmux_shared_io_q2_4] = 8, +[arria10_pinmux_shared_io_q2_3] = 8, +[arria10_pinmux_shared_io_q2_2] = 8, +[arria10_pinmux_shared_io_q2_1] = 8, +[arria10_pinmux_shared_io_q1_12] = 10, +[arria10_pinmux_shared_io_q1_11] = 10, +[arria10_pinmux_shared_io_q1_10] = 1, +[arria10_pinmux_shared_io_q1_9] = 1, +[arria10_pinmux_shared_io_q1_8] = 1, +[arria10_pinmux_shared_io_q1_7] = 1, +[arria10_pinmux_shared_io_q1_6] = 0, +[arria10_pinmux_shared_io_q1_5] = 0, +[arria10_pinmux_shared_io_q1_4] = 13, +[arria10_pinmux_shared_io_q1_3] = 13, +[arria10_pinmux_shared_io_q1_2] = 13, +[arria10_pinmux_shared_io_q1_1] = 13, 
+[arria10_pinmux_dedicated_io_4] = 8, +[arria10_pinmux_dedicated_io_5] = 8, +[arria10_pinmux_dedicated_io_6] = 8, +[arria10_pinmux_dedicated_io_7] = 8, +[arria10_pinmux_dedicated_io_8] = 8, +[arria10_pinmux_dedicated_io_9] = 8, +[arria10_pinmux_dedicated_io_10] = 10, +[arria10_pinmux_dedicated_io_11] = 10, +[arria10_pinmux_dedicated_io_12] = 8, +[arria10_pinmux_dedicated_io_13] = 8, +[arria10_pinmux_dedicated_io_14] = 8, +[arria10_pinmux_dedicated_io_15] = 8, +[arria10_pinmux_dedicated_io_16] = 15, +[arria10_pinmux_dedicated_io_17] = 15, +[arria10_pincfg_dedicated_io_bank] = 0x101, +[arria10_pincfg_dedicated_io_1] = 0xb080a, +[arria10_pincfg_dedicated_io_2] = 0xb080a, +[arria10_pincfg_dedicated_io_3] = 0xb080a, +[arria10_pincfg_dedicated_io_4] = 0xa282a, +[arria10_pincfg_dedicated_io_5] = 0xa282a, +[arria10_pincfg_dedicated_io_6] = 0xa282a, +[arria10_pincfg_dedicated_io_7] = 0xa282a, +[arria10_pincfg_dedicated_io_8] = 0xa282a, +[arria10_pincfg_dedicated_io_9] = 0xa282a, +[arria10_pincfg_dedicated_io_10] = 0x90000, +[arria10_pincfg_dedicated_io_11] = 0x90000, +[arria10_pincfg_dedicated_io_12] = 0xa282a, +[arria10_pincfg_dedicated_io_13] = 0xa282a, +[arria10_pincfg_dedicated_io_14] = 0xa282a, +[arria10_pincfg_dedicated_io_15] = 0xa282a, +[arria10_pincfg_dedicated_io_16] = 0xa282a, +[arria10_pincfg_dedicated_io_17] = 0xa282a, +[arria10_pinmux_rgmii0_usefpga] = 0, +[arria10_pinmux_rgmii1_usefpga] = 0, +[arria10_pinmux_rgmii2_usefpga] = 0, +[arria10_pinmux_nand_usefpga] = 0, +[arria10_pinmux_qspi_usefpga] = 0, +[arria10_pinmux_sdmmc_usefpga] = 0, +[arria10_pinmux_spim0_usefpga] = 1, +[arria10_pinmux_spim1_usefpga] = 0, +[arria10_pinmux_spis0_usefpga] = 0, +[arria10_pinmux_spis1_usefpga] = 0, +[arria10_pinmux_uart0_usefpga] = 0, +[arria10_pinmux_uart1_usefpga] = 0, +[arria10_pinmux_i2c0_usefpga] = 0, +[arria10_pinmux_i2c1_usefpga] = 0, +[arria10_pinmux_i2cemac0_usefpga] = 0, +[arria10_pinmux_i2cemac1_usefpga] = 0, +[arria10_pinmux_i2cemac2_usefpga] = 0, +}; + diff --git a/arch/arm/boards/reflex-achilles/pll-config-arria10.c b/arch/arm/boards/reflex-achilles/pll-config-arria10.c new file mode 100644 index 0000000000..94d596606e --- /dev/null +++ b/arch/arm/boards/reflex-achilles/pll-config-arria10.c @@ -0,0 +1,54 @@ +#include + +static struct arria10_mainpll_cfg mainpll_cfg = { + .cntr15clk_cnt = 900, + .cntr2clk_cnt = 900, + .cntr3clk_cnt = 900, + .cntr4clk_cnt = 900, + .cntr5clk_cnt = 900, + .cntr6clk_cnt = 7, + .cntr7clk_cnt = 900, + .cntr7clk_src = 0, + .cntr8clk_cnt = 900, + .cntr9clk_cnt = 900, + .cntr9clk_src = 0, + .mpuclk_cnt = 0, + .mpuclk_src = 0, + .nocclk_cnt = 0, + .nocclk_src = 0, + .nocdiv_csatclk = 2, + .nocdiv_cspdbgclk = 0, + .nocdiv_cstraceclk = 0, + .nocdiv_l4mainclk = 2, + .nocdiv_l4mpclk = 2, + .nocdiv_l4spclk = 2, + .vco0_psrc = 0, + .vco1_denom = 1, + .vco1_numer = 127, + .mpuclk = 0x3840001, + .nocclk = 0x3840003, +}; + +static struct arria10_perpll_cfg perpll_cfg = { + .cntr2clk_cnt = 5, + .cntr2clk_src = 1, + .cntr3clk_cnt = 900, + .cntr3clk_src = 1, + .cntr4clk_cnt = 14, + .cntr4clk_src = 1, + .cntr5clk_cnt = 374, + .cntr5clk_src = 1, + .cntr6clk_cnt = 900, + .cntr6clk_src = 0, + .cntr7clk_cnt = 900, + .cntr8clk_cnt = 900, + .cntr8clk_src = 0, + .cntr9clk_cnt = 900, + .emacctl_emac0sel = 0, + .emacctl_emac1sel = 0, + .emacctl_emac2sel = 0, + .gpiodiv_gpiodbclk = 32000, + .vco0_psrc = 0, + .vco1_denom = 1, + .vco1_numer = 119, +}; diff --git a/arch/arm/dts/Makefile b/arch/arm/dts/Makefile index e8ad43bfd8..96e54d815f 100644 --- a/arch/arm/dts/Makefile +++ 
b/arch/arm/dts/Makefile @@ -65,6 +65,7 @@ pbl-dtb-$(CONFIG_MACH_SABRESD) += imx6q-sabresd.dtb.o pbl-dtb-$(CONFIG_MACH_FREESCALE_IMX6SX_SABRESDB) += imx6sx-sdb.dtb.o pbl-dtb-$(CONFIG_MACH_SOCFPGA_ALTERA_SOCDK) += socfpga_cyclone5_socdk.dtb.o pbl-dtb-$(CONFIG_MACH_SOCFPGA_EBV_SOCRATES) += socfpga_cyclone5_socrates.dtb.o +pbl-dtb-$(CONFIG_MACH_SOCFPGA_REFLEX_ACHILLES) += socfpga_arria10_achilles.dtb.o pbl-dtb-$(CONFIG_MACH_SOCFPGA_TERASIC_DE0_NANO_SOC) += socfpga_cyclone5_de0_nano_soc.dtb.o pbl-dtb-$(CONFIG_MACH_SOCFPGA_TERASIC_SOCKIT) += socfpga_cyclone5_sockit.dtb.o pbl-dtb-$(CONFIG_MACH_SOLIDRUN_CUBOX) += dove-cubox-bb.dtb.o diff --git a/arch/arm/dts/socfpga_arria10_achilles.dts b/arch/arm/dts/socfpga_arria10_achilles.dts new file mode 100644 index 0000000000..dd991318e2 --- /dev/null +++ b/arch/arm/dts/socfpga_arria10_achilles.dts @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2015 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +/dts-v1/; +#include + +/ { + model = "Reflex SOCFPGA Arria 10 Achilles"; + compatible = "reflex,achilles", "altr,socfpga-arria10", "altr,socfpga"; + + aliases { + serial0 = &uart0; + }; + + chosen { + linux,stdout-path = &uart0; + + environment@0 { + compatible = "barebox,environment"; + device-path = &mmc, "partname:1"; + file-path = "barebox.env"; + }; + }; + + memory { + name = "memory"; + device_type = "memory"; + reg = <0x0 0xc0000000>; + }; + + soc { + clkmgr@ffd04000 { + clocks { + osc1 { + clock-frequency = <25000000>; + }; + + cb_intosc_hs_div2_clk { + clock-frequency = <0>; + }; + cb_intosc_ls_clk { + clock-frequency = <60000000>; + }; + f2s_free_clk { + clock-frequency = <200000000>; + }; + }; + }; + }; +}; + +&gmac1 { + phy-mode = "rgmii"; + phy-addr = <0x00fffff0>; /* probe for phy addr */ + + /* + * These skews assume the user's FPGA design is adding 600ps of delay + * for TX_CLK on Arria 10. + * + * All skews are offset since hardware skew values for the ksz9031 + * range from a negative skew to a positive skew. + * See the micrel-ksz90x1.txt Documentation file for details. 
+ */ + txd0-skew-ps = <0>; /* -420ps */ + txd1-skew-ps = <0>; /* -420ps */ + txd2-skew-ps = <0>; /* -420ps */ + txd3-skew-ps = <0>; /* -420ps */ + rxd0-skew-ps = <420>; /* 0ps */ + rxd1-skew-ps = <420>; /* 0ps */ + rxd2-skew-ps = <420>; /* 0ps */ + rxd3-skew-ps = <420>; /* 0ps */ + txen-skew-ps = <0>; /* -420ps */ + txc-skew-ps = <1860>; /* 960ps */ + rxdv-skew-ps = <420>; /* 0ps */ + rxc-skew-ps = <1680>; /* 780ps */ + max-frame-size = <3800>; + status = "okay"; +}; + +&i2c0 { + status = "okay"; + + tempsensor: ti,tmp102@0x48 { + compatible = "ti,tmp102"; + reg = <0x48>; + }; + + rtc: nxp,pcf8563@0x51 { + compatible = "nxp,pcf8563"; + reg = <0x51>; + }; + + eeprom: at24@0x54 { + compatible = "at24"; + reg = <0x54>; + bytelen = <256>; + pagesize = <16>; + }; +}; + +&mmc { + supports-highspeed; + broken-cd; + bus-width = <1>; + status = "okay"; +}; + +&uart0 { + reg-io-width = <4>; + status = "okay"; +}; diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index 0a33e88644..caff566bdc 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -33,6 +33,11 @@ config MACH_SOCFPGA_EBV_SOCRATES select ARCH_SOCFPGA_CYCLONE5 bool "EBV Socrates" +config MACH_SOCFPGA_REFLEX_ACHILLES + select HAVE_DEFAULT_ENVIRONMENT_NEW + select ARCH_SOCFPGA_ARRIA10 + bool "Reflex Achilles" + config MACH_SOCFPGA_TERASIC_DE0_NANO_SOC select HAVE_DEFAULT_ENVIRONMENT_NEW select ARCH_SOCFPGA_CYCLONE5 diff --git a/images/Makefile.socfpga b/images/Makefile.socfpga index a764b1a5fe..60b98d1ef2 100644 --- a/images/Makefile.socfpga +++ b/images/Makefile.socfpga @@ -30,6 +30,10 @@ pblx-$(CONFIG_MACH_SOCFPGA_TERASIC_DE0_NANO_SOC) += start_socfpga_de0_nano_soc FILE_barebox-socfpga-de0_nano_soc.img = start_socfpga_de0_nano_soc.pblx socfpga-barebox-$(CONFIG_MACH_SOCFPGA_TERASIC_DE0_NANO_SOC) += barebox-socfpga-de0_nano_soc.img +pblx-$(CONFIG_MACH_SOCFPGA_REFLEX_ACHILLES) += start_socfpga_achilles +FILE_barebox-socfpga-achilles.img = start_socfpga_achilles.pblx.socfpgaimg +socfpga-barebox-$(CONFIG_MACH_SOCFPGA_REFLEX_ACHILLES) += barebox-socfpga-achilles.img + pblx-$(CONFIG_MACH_SOCFPGA_TERASIC_SOCKIT) += start_socfpga_sockit_xload FILE_barebox-socfpga-sockit-xload.img = start_socfpga_sockit_xload.pblx.socfpgaimg socfpga-xload-$(CONFIG_MACH_SOCFPGA_TERASIC_SOCKIT) += barebox-socfpga-sockit-xload.img -- cgit v1.2.3 From 243530107ad99daa9456e8843efef25326d2a613 Mon Sep 17 00:00:00 2001 From: Steffen Trumtrar Date: Fri, 28 Apr 2017 16:41:44 +0200 Subject: ARM: socfpga: add arria10 defconfig Signed-off-by: Steffen Trumtrar Signed-off-by: Sascha Hauer --- arch/arm/configs/socfpga-arria10_defconfig | 89 ++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 arch/arm/configs/socfpga-arria10_defconfig diff --git a/arch/arm/configs/socfpga-arria10_defconfig b/arch/arm/configs/socfpga-arria10_defconfig new file mode 100644 index 0000000000..e661895d6e --- /dev/null +++ b/arch/arm/configs/socfpga-arria10_defconfig @@ -0,0 +1,89 @@ +CONFIG_ARCH_SOCFPGA=y +CONFIG_MACH_SOCFPGA_REFLEX_ACHILLES=y +CONFIG_THUMB2_BAREBOX=y +CONFIG_ARM_OPTIMZED_STRING_FUNCTIONS=y +CONFIG_ARM_UNWIND=y +CONFIG_MMU=y +CONFIG_MALLOC_SIZE=0x0 +CONFIG_MALLOC_TLSF=y +CONFIG_KALLSYMS=y +CONFIG_RELOCATABLE=y +CONFIG_HUSH_FANCY_PROMPT=y +CONFIG_CMDLINE_EDITING=y +CONFIG_AUTO_COMPLETE=y +CONFIG_MENU=y +# CONFIG_TIMESTAMP is not set +CONFIG_BOOTM_SHOW_TYPE=y +CONFIG_BOOTM_VERBOSE=y +CONFIG_BOOTM_INITRD=y +CONFIG_BOOTM_OFTREE=y +CONFIG_PBL_CONSOLE=y +CONFIG_DEFAULT_COMPRESSION_LZO=y 
+CONFIG_DEFAULT_ENVIRONMENT_GENERIC_NEW=y +CONFIG_POLLER=y +CONFIG_STATE=y +CONFIG_DEBUG_INFO=y +CONFIG_LONGHELP=y +CONFIG_CMD_IOMEM=y +CONFIG_CMD_MEMINFO=y +CONFIG_CMD_ARM_MMUINFO=y +CONFIG_CMD_GO=y +CONFIG_CMD_RESET=y +CONFIG_CMD_PARTITION=y +CONFIG_CMD_EXPORT=y +CONFIG_CMD_PRINTENV=y +CONFIG_CMD_MAGICVAR=y +CONFIG_CMD_MAGICVAR_HELP=y +CONFIG_CMD_SAVEENV=y +CONFIG_CMD_FILETYPE=y +CONFIG_CMD_LN=y +CONFIG_CMD_MD5SUM=y +CONFIG_CMD_SHA256SUM=y +CONFIG_CMD_LET=y +CONFIG_CMD_MSLEEP=y +CONFIG_CMD_SLEEP=y +CONFIG_CMD_DHCP=y +CONFIG_CMD_PING=y +CONFIG_CMD_ECHO_E=y +CONFIG_CMD_EDIT=y +CONFIG_CMD_MENU=y +CONFIG_CMD_MENU_MANAGEMENT=y +CONFIG_CMD_READLINE=y +CONFIG_CMD_TIMEOUT=y +CONFIG_CMD_CRC=y +CONFIG_CMD_CRC_CMP=y +CONFIG_CMD_MEMTEST=y +CONFIG_CMD_MM=y +CONFIG_CMD_CLK=y +CONFIG_CMD_DETECT=y +CONFIG_CMD_FLASH=y +CONFIG_CMD_GPIO=y +CONFIG_CMD_BAREBOX_UPDATE=y +CONFIG_CMD_FIRMWARELOAD=y +CONFIG_CMD_OF_NODE=y +CONFIG_CMD_OF_PROPERTY=y +CONFIG_CMD_OFTREE=y +CONFIG_CMD_TIME=y +CONFIG_CMD_STATE=y +CONFIG_NET=y +CONFIG_NET_NETCONSOLE=y +CONFIG_NET_RESOLV=y +CONFIG_OF_BAREBOX_DRIVERS=y +CONFIG_OF_BAREBOX_ENV_IN_FS=y +CONFIG_DRIVER_SERIAL_NS16550=y +CONFIG_DRIVER_NET_DESIGNWARE=y +CONFIG_MICREL_PHY=y +# CONFIG_SPI is not set +CONFIG_MCI=y +CONFIG_MCI_STARTUP=y +CONFIG_MCI_MMC_BOOT_PARTITIONS=y +CONFIG_MCI_DW=y +# CONFIG_PINCTRL is not set +CONFIG_FS_TFTP=y +CONFIG_FS_NFS=y +CONFIG_FS_FAT=y +CONFIG_FS_FAT_WRITE=y +CONFIG_FS_FAT_LFN=y +CONFIG_ZLIB=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_DIGEST_SHA256_ARM=y -- cgit v1.2.3
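To close the series with a concrete illustration of the Arria10 gate-clock driver added above: during its prepare step the driver's only hardware write is the SDMMC timing register, where the two "clk-phase" values taken from the device tree are mapped to 45-degree step codes and packed with SYSMGR_SDMMC_CTRL_SET(). A condensed, standalone sketch of that mapping, equivalent to the driver's switch statement for the 45-degree multiples it handles (function names here are purely illustrative):

/*
 * Condensed model of the clk-phase handling in clk-gate-a10.c:
 * degrees -> 3-bit step code, then drvsel lands in bits [2:0] and
 * smplsel in bits [6:4] of the SDMMC control value.
 */
static unsigned int phase_to_code(unsigned int degrees)
{
	return (degrees / 45) & 0x7;	/* 0, 45, ..., 315 -> 0..7 */
}

static unsigned int sdmmc_ctrl_set(unsigned int smplsel, unsigned int drvsel)
{
	return ((drvsel << 0) & 0x7) | ((smplsel << 4) & 0x70);
}

/*
 * Example: smplsel = 90 degrees, drvsel = 135 degrees
 * -> sdmmc_ctrl_set(phase_to_code(90), phase_to_code(135)) == 0x23
 */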