author     Sascha Hauer <s.hauer@pengutronix.de>  2022-03-14 10:10:25 +0100
committer  Sascha Hauer <s.hauer@pengutronix.de>  2022-03-14 10:10:25 +0100
commit     26dc1bf751724540716a4a17a80f7605ebf61b3a (patch)
tree       abdbff429227f48dfd0855dac8ad6381ee8b9ac1 /drivers
parent     d8ef1573dfc3475d4f5dffa37cb4444b5d6f21eb (diff)
parent     79d2a2d4a4608fd0ca9f1eeb06e4aee0fb095f19 (diff)
Merge branch 'for-next/stm32'
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/driver.c                |   28
-rw-r--r--  drivers/clk/Kconfig                  |   11
-rw-r--r--  drivers/clk/Makefile                 |    1
-rw-r--r--  drivers/clk/clk-scmi.c               |  192
-rw-r--r--  drivers/clk/clk-stm32mp1.c           |  899
-rw-r--r--  drivers/clk/clk.c                    |   22
-rw-r--r--  drivers/firmware/Kconfig             |   10
-rw-r--r--  drivers/firmware/Makefile            |    1
-rw-r--r--  drivers/firmware/arm_scmi/Makefile   |   10
-rw-r--r--  drivers/firmware/arm_scmi/base.c     |  281
-rw-r--r--  drivers/firmware/arm_scmi/bus.c      |  226
-rw-r--r--  drivers/firmware/arm_scmi/clock.c    |  374
-rw-r--r--  drivers/firmware/arm_scmi/common.h   |  333
-rw-r--r--  drivers/firmware/arm_scmi/driver.c   | 1279
-rw-r--r--  drivers/firmware/arm_scmi/reset.c    |  229
-rw-r--r--  drivers/firmware/arm_scmi/shmem.c    |   89
-rw-r--r--  drivers/firmware/arm_scmi/smc.c      |  137
-rw-r--r--  drivers/firmware/arm_scmi/voltage.c  |  379
-rw-r--r--  drivers/pinctrl/pinctrl-stm32.c      |   14
-rw-r--r--  drivers/power/reset/Kconfig          |    7
-rw-r--r--  drivers/power/reset/Makefile         |    1
-rw-r--r--  drivers/power/reset/stm32-reboot.c (renamed from drivers/reset/reset-stm32.c) | 143
-rw-r--r--  drivers/regulator/Kconfig            |    9
-rw-r--r--  drivers/regulator/Makefile           |    1
-rw-r--r--  drivers/regulator/core.c             |    2
-rw-r--r--  drivers/regulator/scmi-regulator.c   |  391
-rw-r--r--  drivers/reset/Kconfig                |   17
-rw-r--r--  drivers/reset/Makefile               |    2
-rw-r--r--  drivers/reset/core.c                 |   21
-rw-r--r--  drivers/reset/reset-scmi.c           |  130
-rw-r--r--  drivers/reset/reset-simple.c         |    3
-rw-r--r--  drivers/serial/serial_stm32.c        |    2
32 files changed, 4839 insertions, 405 deletions
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 2347b5c71f..303ca061ce 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -619,3 +619,31 @@ int dev_err_probe(const struct device_d *dev, int err, const char *fmt, ...)
return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);
+
+/*
+ * device_find_child - device iterator for locating a particular device.
+ * @parent: parent struct device_d
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. When the callback returns non-zero, iteration stops and the
+ * matching device is returned to the caller.
+ */
+struct device_d *device_find_child(struct device_d *parent, void *data,
+ int (*match)(struct device_d *dev, void *data))
+{
+ struct device_d *child;
+
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(child, &parent->children, sibling) {
+ if (match(child, data))
+ return child;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(device_find_child);
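
For context, device_find_child() pairs with a caller-supplied match callback. A minimal usage sketch, assuming a lookup by device name — the callback and the "uart0" name below are illustrative, not part of this commit:

static int match_by_name(struct device_d *dev, void *data)
{
	/* non-zero selects this child and stops the iteration */
	return !strcmp(dev_name(dev), data);
}

static struct device_d *find_uart0(struct device_d *parent)
{
	/* returns the first child named "uart0", or NULL */
	return device_find_child(parent, "uart0", match_by_name);
}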
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e01ffe10f2..5b5acf4e06 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -20,10 +20,21 @@ config CLK_SOCFPGA
select COMMON_CLK_OF_PROVIDER
default y if ARCH_SOCFPGA && OFDEVICE
+
config COMMON_CLK_STM32F
bool "STM32F4 and STM32F7 clock driver" if COMPILE_TEST
depends on COMMON_CLK && ARCH_STM32
help
Support for stm32f4 and stm32f7 SoC families clocks
+config COMMON_CLK_SCMI
+ tristate "Clock driver controlled via SCMI interface"
+ depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
+ help
+ This driver provides support for clocks that are controlled
+ by firmware that implements the SCMI interface.
+
+ This driver uses SCMI Message Protocol to interact with the
+ firmware providing all the clock controls.
+
source "drivers/clk/sifive/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index c60b38ff60..ee503c1edb 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-$(CONFIG_SOC_STARFIVE) += starfive/
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_MACH_RPI_COMMON) += clk-rpi.o
+obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
new file mode 100644
index 0000000000..9170dba393
--- /dev/null
+++ b/drivers/clk/clk-scmi.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Power Interface (SCMI) Protocol based clock driver
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#include <common.h>
+#include <linux/clk.h>
+#include <driver.h>
+#include <linux/err.h>
+#include <of.h>
+#include <linux/scmi_protocol.h>
+#include <linux/overflow.h>
+#include <linux/math64.h>
+
+static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
+
+struct scmi_clk {
+ u32 id;
+ struct clk_hw hw;
+ const struct scmi_clock_info *info;
+ const struct scmi_protocol_handle *ph;
+};
+
+#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)
+
+static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ int ret;
+ u64 rate;
+ struct scmi_clk *clk = to_scmi_clk(hw);
+
+ ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
+ if (ret)
+ return 0;
+ return rate;
+}
+
+static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u64 fmin, fmax, ftmp;
+ struct scmi_clk *clk = to_scmi_clk(hw);
+
+ /*
+ * We can't figure out what rate it will be, so just return the
+ * rate back to the caller. scmi_clk_recalc_rate() will be called
+ * after the rate is set and we'll know what rate the clock is
+ * running at then.
+ */
+ if (clk->info->rate_discrete)
+ return rate;
+
+ fmin = clk->info->range.min_rate;
+ fmax = clk->info->range.max_rate;
+ if (rate <= fmin)
+ return fmin;
+ else if (rate >= fmax)
+ return fmax;
+
+ ftmp = rate - fmin;
+ ftmp += clk->info->range.step_size - 1; /* to round up */
+ do_div(ftmp, clk->info->range.step_size);
+
+ return ftmp * clk->info->range.step_size + fmin;
+}
+
+static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct scmi_clk *clk = to_scmi_clk(hw);
+
+ return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
+}
+
+static int scmi_clk_enable(struct clk_hw *hw)
+{
+ struct scmi_clk *clk = to_scmi_clk(hw);
+
+ return scmi_proto_clk_ops->enable(clk->ph, clk->id);
+}
+
+static void scmi_clk_disable(struct clk_hw *hw)
+{
+ struct scmi_clk *clk = to_scmi_clk(hw);
+
+ scmi_proto_clk_ops->disable(clk->ph, clk->id);
+}
+
+static const struct clk_ops scmi_clk_ops = {
+ .recalc_rate = scmi_clk_recalc_rate,
+ .round_rate = scmi_clk_round_rate,
+ .set_rate = scmi_clk_set_rate,
+ /*
+ * Unlike Linux, we can provide the enable/disable callbacks directly:
+ * barebox runs everything in a single polling context, so there is no
+ * atomic-context restriction on SCMI transfers here.
+ */
+ .enable = scmi_clk_enable,
+ .disable = scmi_clk_disable,
+};
+
+static int scmi_clk_ops_init(struct device_d *dev, struct scmi_clk *sclk)
+{
+ struct clk_init_data init = {
+ .flags = CLK_GET_RATE_NOCACHE,
+ .num_parents = 0,
+ .ops = &scmi_clk_ops,
+ .name = sclk->info->name,
+ };
+
+ sclk->hw.init = &init;
+ return clk_hw_register(dev, &sclk->hw);
+}
+
+static int scmi_clocks_probe(struct scmi_device *sdev)
+{
+ int idx, count, err;
+ struct clk **clks;
+ struct clk_onecell_data *clk_data;
+ struct device_d *dev = &sdev->dev;
+ struct device_node *np = dev->device_node;
+ const struct scmi_handle *handle = sdev->handle;
+ struct scmi_protocol_handle *ph;
+
+ if (!handle)
+ return -ENODEV;
+
+ scmi_proto_clk_ops =
+ handle->protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
+ if (IS_ERR(scmi_proto_clk_ops))
+ return PTR_ERR(scmi_proto_clk_ops);
+
+ count = scmi_proto_clk_ops->count_get(ph);
+ if (count < 0) {
+ dev_err(dev, "%pOFn: invalid clock output count\n", np);
+ return -EINVAL;
+ }
+
+ clk_data = kzalloc(sizeof (*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->clk_num = count;
+ clks = clk_data->clks = calloc(clk_data->clk_num, sizeof(struct clk *));
+
+ for (idx = 0; idx < count; idx++) {
+ struct scmi_clk *sclk;
+
+ sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
+ if (!sclk)
+ return -ENOMEM;
+
+ sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
+ if (!sclk->info) {
+ dev_dbg(dev, "invalid clock info for idx %d\n", idx);
+ continue;
+ }
+
+ sclk->id = idx;
+ sclk->ph = ph;
+
+ err = scmi_clk_ops_init(dev, sclk);
+ if (err) {
+ dev_err(dev, "failed to register clock %d\n", idx);
+ kfree(sclk);
+ clks[idx] = NULL;
+ } else {
+ dev_dbg(dev, "Registered clock:%s\n", sclk->info->name);
+ clks[idx] = &sclk->hw.clk;
+ }
+ }
+
+ return of_clk_add_provider(dev->device_node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_CLOCK, "clocks" },
+ { },
+};
+
+static struct scmi_driver scmi_clocks_driver = {
+ .name = "scmi-clocks",
+ .probe = scmi_clocks_probe,
+ .id_table = scmi_id_table,
+};
+core_scmi_driver(scmi_clocks_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI clock driver");
+MODULE_LICENSE("GPL v2");
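
Two behaviours of clk-scmi.c are worth spelling out. For discrete-rate clocks, round_rate() simply echoes the request; the real rate is read back through recalc_rate() after set_rate(). For continuous ranges, the request is snapped up onto the firmware's step grid. A worked trace of that branch with assumed numbers (min_rate = 100 MHz, step_size = 25 MHz, requested rate = 160 MHz):

	ftmp  = 160000000 - 100000000       ->  60000000
	ftmp += 25000000 - 1                ->  84999999   (biases the division up)
	do_div(ftmp, 25000000)              ->  ftmp = 3   (2.4 rounded up)
	ftmp * 25000000 + 100000000         ->  175000000

So a 160 MHz request comes back as 175 MHz, the next rate the firmware can actually program.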
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 8f1ba96e63..c4b03e9f6d 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -6,14 +6,22 @@
*/
#include <linux/clk.h>
+#include <clock.h>
#include <linux/err.h>
+#include <linux/overflow.h>
#include <io.h>
#include <of.h>
#include <of_address.h>
-#include <linux/math64.h>
+#include <driver.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <soc/stm32/reboot.h>
#include <dt-bindings/clock/stm32mp1-clks.h>
+static DEFINE_SPINLOCK(rlock);
+
#define RCC_OCENSETR 0x0C
#define RCC_HSICFGR 0x18
#define RCC_RDLSICR 0x144
@@ -116,7 +124,7 @@ static const char * const cpu_src[] = {
};
static const char * const axi_src[] = {
- "ck_hsi", "ck_hse", "pll2_p", "pll3_p"
+ "ck_hsi", "ck_hse", "pll2_p"
};
static const char * const per_src[] = {
@@ -220,19 +228,19 @@ static const char * const usart6_src[] = {
};
static const char * const fdcan_src[] = {
- "ck_hse", "pll3_q", "pll4_q"
+ "ck_hse", "pll3_q", "pll4_q", "pll4_r"
};
static const char * const sai_src[] = {
- "pll4_q", "pll3_q", "i2s_ckin", "ck_per"
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
};
static const char * const sai2_src[] = {
- "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb"
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r"
};
static const char * const adc12_src[] = {
- "pll4_q", "ck_per"
+ "pll4_r", "ck_per", "pll3_q"
};
static const char * const dsi_src[] = {
@@ -240,7 +248,7 @@ static const char * const dsi_src[] = {
};
static const char * const rtc_src[] = {
- "off", "ck_lse", "ck_lsi", "ck_hse_rtc"
+ "off", "ck_lse", "ck_lsi", "ck_hse"
};
static const char * const mco1_src[] = {
@@ -264,7 +272,7 @@ static const struct clk_div_table axi_div_table[] = {
static const struct clk_div_table mcu_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
{ 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 },
- { 8, 512 }, { 9, 512 }, { 10, 512}, { 11, 512 },
+ { 8, 256 }, { 9, 512 }, { 10, 512}, { 11, 512 },
{ 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 },
{ 0 },
};
@@ -312,7 +320,10 @@ struct clock_config {
int num_parents;
unsigned long flags;
void *cfg;
- struct clk * (*func)(void __iomem *base, const struct clock_config *cfg);
+ struct clk_hw * (*func)(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg);
};
#define NO_ID ~0
@@ -368,53 +379,69 @@ struct stm32_composite_cfg {
const struct stm32_mux_cfg *mux;
};
-static struct clk *
-_clk_hw_register_gate(void __iomem *base,
+static struct clk_hw *
+_clk_hw_register_gate(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct gate_cfg *gate_cfg = cfg->cfg;
- return clk_gate(cfg->name, cfg->parent_name, gate_cfg->reg_off + base,
- gate_cfg->bit_idx, cfg->flags, gate_cfg->gate_flags);
+ return clk_hw_register_gate(dev,
+ cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ gate_cfg->reg_off + base,
+ gate_cfg->bit_idx,
+ gate_cfg->gate_flags,
+ lock);
}
-static struct clk *
-_clk_hw_register_fixed_factor(void __iomem *base,
+static struct clk_hw *
+_clk_hw_register_fixed_factor(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct fixed_factor_cfg *ff_cfg = cfg->cfg;
- return clk_fixed_factor(cfg->name, cfg->parent_name, ff_cfg->mult,
- ff_cfg->div, cfg->flags);
+ return clk_hw_register_fixed_factor(dev, cfg->name, cfg->parent_name,
+ cfg->flags, ff_cfg->mult,
+ ff_cfg->div);
}
-static struct clk *
-_clk_hw_register_divider_table(void __iomem *base,
+static struct clk_hw *
+_clk_hw_register_divider_table(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
-
struct div_cfg *div_cfg = cfg->cfg;
- if (div_cfg->table)
- return clk_divider_table(cfg->name, cfg->parent_name, cfg->flags,
- div_cfg->reg_off + base, div_cfg->shift,
- div_cfg->width, div_cfg->table,
- div_cfg->div_flags);
- else
- return clk_divider(cfg->name, cfg->parent_name, cfg->flags,
- div_cfg->reg_off + base, div_cfg->shift,
- div_cfg->width, div_cfg->div_flags);
+ return clk_hw_register_divider_table(dev,
+ cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ div_cfg->reg_off + base,
+ div_cfg->shift,
+ div_cfg->width,
+ div_cfg->div_flags,
+ div_cfg->table,
+ lock);
}
-static struct clk *
-_clk_hw_register_mux(void __iomem *base,
+static struct clk_hw *
+_clk_hw_register_mux(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
struct mux_cfg *mux_cfg = cfg->cfg;
- return clk_mux(cfg->name,cfg->flags, mux_cfg->reg_off + base, mux_cfg->shift,
- mux_cfg->width, cfg->parent_names, cfg->num_parents,
- mux_cfg->mux_flags);
+ return clk_hw_register_mux(dev, cfg->name, cfg->parent_names,
+ cfg->num_parents, cfg->flags,
+ mux_cfg->reg_off + base, mux_cfg->shift,
+ mux_cfg->width, mux_cfg->mux_flags, lock);
}
/* MP1 Gate clock with set & clear registers */
@@ -430,9 +457,12 @@ static int mp1_gate_clk_enable(struct clk_hw *hw)
static void mp1_gate_clk_disable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags = 0;
if (clk_gate_ops.is_enabled(hw)) {
- writel(BIT(gate->shift), gate->reg + RCC_CLR);
+ spin_lock_irqsave(gate->lock, flags);
+ writel_relaxed(BIT(gate->shift), gate->reg + RCC_CLR);
+ spin_unlock_irqrestore(gate->lock, flags);
}
}
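
The MP1 gates have separate set and clear registers (the clear register sits at gate->reg + RCC_CLR), so disabling is a single bit write rather than a read-modify-write; the lock only serializes the is_enabled() check against that write. The matching enable path is not shown in this hunk; assuming it mirrors the disable side, it reduces to a write of the set register, roughly:

static int mp1_gate_clk_enable(struct clk_hw *hw)	/* sketch, not the commit's code */
{
	struct clk_gate *gate = to_clk_gate(hw);

	if (!clk_gate_ops.is_enabled(hw))
		/* set register: writing the bit enables it, no RMW needed */
		writel(BIT(gate->shift), gate->reg);

	return 0;
}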
@@ -442,8 +472,9 @@ static const struct clk_ops mp1_gate_clk_ops = {
.is_enabled = clk_gate_is_enabled,
};
-static struct clk_hw *_get_stm32_mux(void __iomem *base,
- const struct stm32_mux_cfg *cfg)
+static struct clk_hw *_get_stm32_mux(struct device_d *dev, void __iomem *base,
+ const struct stm32_mux_cfg *cfg,
+ spinlock_t *lock)
{
struct stm32_clk_mmux *mmux;
struct clk_mux *mux;
@@ -457,10 +488,13 @@ static struct clk_hw *_get_stm32_mux(void __iomem *base,
mmux->mux.reg = cfg->mux->reg_off + base;
mmux->mux.shift = cfg->mux->shift;
mmux->mux.width = cfg->mux->width;
+ mmux->mux.flags = cfg->mux->mux_flags;
+ mmux->mux.table = cfg->mux->table;
+ mmux->mux.lock = lock;
mmux->mmux = cfg->mmux;
mux_hw = &mmux->mux.hw;
cfg->mmux->hws[cfg->mmux->nbr_clk++] = mux_hw;
- mux = &mmux->mux;
+
} else {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -469,23 +503,23 @@ static struct clk_hw *_get_stm32_mux(void __iomem *base,
mux->reg = cfg->mux->reg_off + base;
mux->shift = cfg->mux->shift;
mux->width = cfg->mux->width;
+ mux->flags = cfg->mux->mux_flags;
+ mux->table = cfg->mux->table;
+ mux->lock = lock;
mux_hw = &mux->hw;
}
- if (cfg->ops)
- mux->hw.clk.ops = cfg->ops;
- else
- mux->hw.clk.ops = &clk_mux_ops;
-
return mux_hw;
}
-static struct clk_hw *_get_stm32_div(void __iomem *base,
- const struct stm32_div_cfg *cfg)
+static struct clk_hw *_get_stm32_div(struct device_d *dev, void __iomem *base,
+ const struct stm32_div_cfg *cfg,
+ spinlock_t *lock)
{
struct clk_divider *div;
div = kzalloc(sizeof(*div), GFP_KERNEL);
+
if (!div)
return ERR_PTR(-ENOMEM);
@@ -494,21 +528,18 @@ static struct clk_hw *_get_stm32_div(void __iomem *base,
div->width = cfg->div->width;
div->flags = cfg->div->div_flags;
div->table = cfg->div->table;
-
- if (cfg->ops)
- div->hw.clk.ops = cfg->ops;
- else
- div->hw.clk.ops = &clk_divider_ops;
+ div->lock = lock;
return &div->hw;
}
-static struct clk_gate *
-_get_stm32_gate(void __iomem *base,
- const struct stm32_gate_cfg *cfg)
+static struct clk_hw *_get_stm32_gate(struct device_d *dev, void __iomem *base,
+ const struct stm32_gate_cfg *cfg,
+ spinlock_t *lock)
{
struct stm32_clk_mgate *mgate;
struct clk_gate *gate;
+ struct clk_hw *gate_hw;
if (cfg->mgate) {
mgate = kzalloc(sizeof(*mgate), GFP_KERNEL);
@@ -518,11 +549,12 @@ _get_stm32_gate(void __iomem *base,
mgate->gate.reg = cfg->gate->reg_off + base;
mgate->gate.shift = cfg->gate->bit_idx;
mgate->gate.flags = cfg->gate->gate_flags;
+ mgate->gate.lock = lock;
mgate->mask = BIT(cfg->mgate->nbr_clk++);
mgate->mgate = cfg->mgate;
- gate = &mgate->gate;
+ gate_hw = &mgate->gate.hw;
} else {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
@@ -532,71 +564,103 @@ _get_stm32_gate(void __iomem *base,
gate->reg = cfg->gate->reg_off + base;
gate->shift = cfg->gate->bit_idx;
gate->flags = cfg->gate->gate_flags;
+ gate->lock = lock;
+
+ gate_hw = &gate->hw;
}
-
- if (cfg->ops)
- gate->hw.clk.ops = cfg->ops;
- else
- gate->hw.clk.ops = &clk_gate_ops;
- return gate;
+ return gate_hw;
}
-static struct clk *
-clk_stm32_register_gate_ops(const char *name,
+static struct clk_hw *
+clk_stm32_register_gate_ops(struct device_d *dev,
+ const char *name,
const char *parent_name,
unsigned long flags,
void __iomem *base,
- const struct stm32_gate_cfg *cfg)
+ const struct stm32_gate_cfg *cfg,
+ spinlock_t *lock)
{
- struct clk *clk;
- struct clk_gate *gate;
+ struct clk_init_data init = { NULL };
+ struct clk_hw *hw;
int ret;
- gate = _get_stm32_gate(base, cfg);
- if (IS_ERR(gate))
+ init.name = name;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ init.ops = &clk_gate_ops;
+
+ if (cfg->ops)
+ init.ops = cfg->ops;
+
+ hw = _get_stm32_gate(dev, base, cfg, lock);
+ if (IS_ERR(hw))
return ERR_PTR(-ENOMEM);
- gate->parent = parent_name;
- clk = &gate->hw.clk;
- clk->name = name;
- clk->parent_names = &gate->parent;
- clk->num_parents = 1;
- clk->flags = flags;
+ hw->init = &init;
- ret = bclk_register(clk);
+ ret = clk_hw_register(dev, hw);
if (ret)
- clk = ERR_PTR(ret);
+ hw = ERR_PTR(ret);
- return clk;
+ return hw;
}
-static struct clk *
-clk_stm32_register_composite(const char *name, const char * const *parent_names,
+static struct clk_hw *
+clk_stm32_register_composite(struct device_d *dev,
+ const char *name, const char * const *parent_names,
int num_parents, void __iomem *base,
const struct stm32_composite_cfg *cfg,
- unsigned long flags)
+ unsigned long flags, spinlock_t *lock)
{
+ const struct clk_ops *mux_ops, *div_ops, *gate_ops;
struct clk_hw *mux_hw, *div_hw, *gate_hw;
- struct clk_gate *gate;
mux_hw = NULL;
div_hw = NULL;
gate_hw = NULL;
+ mux_ops = NULL;
+ div_ops = NULL;
+ gate_ops = NULL;
+
+ if (cfg->mux) {
+ mux_hw = _get_stm32_mux(dev, base, cfg->mux, lock);
+
+ if (!IS_ERR(mux_hw)) {
+ mux_ops = &clk_mux_ops;
+
+ if (cfg->mux->ops)
+ mux_ops = cfg->mux->ops;
+ }
+ }
+
+ if (cfg->div) {
+ div_hw = _get_stm32_div(dev, base, cfg->div, lock);
- if (cfg->mux)
- mux_hw = _get_stm32_mux(base, cfg->mux);
+ if (!IS_ERR(div_hw)) {
+ div_ops = &clk_divider_ops;
- if (cfg->div)
- div_hw = _get_stm32_div(base, cfg->div);
+ if (cfg->div->ops)
+ div_ops = cfg->div->ops;
+ }
+ }
if (cfg->gate) {
- gate = _get_stm32_gate(base, cfg->gate);
- gate_hw = &gate->hw;
+ gate_hw = _get_stm32_gate(dev, base, cfg->gate, lock);
+
+ if (!IS_ERR(gate_hw)) {
+ gate_ops = &clk_gate_ops;
+
+ if (cfg->gate->ops)
+ gate_ops = cfg->gate->ops;
+ }
}
- return clk_register_composite(name, parent_names, num_parents,
- &mux_hw->clk, &div_hw->clk, &gate_hw->clk, flags);
+ return clk_hw_register_composite(dev, name, parent_names, num_parents,
+ mux_hw, mux_ops, div_hw, div_ops,
+ gate_hw, gate_ops, flags);
}
#define to_clk_mgate(_gate) container_of(_gate, struct stm32_clk_mgate, gate)
@@ -640,36 +704,25 @@ static int clk_mmux_get_parent(struct clk_hw *hw)
static int clk_mmux_set_parent(struct clk_hw *hw, u8 index)
{
- struct clk_mux *mux = to_clk_mux(hw);
- struct stm32_clk_mmux *clk_mmux = to_clk_mmux(mux);
- struct clk_hw *hwp;
- int ret, n;
-
- ret = clk_mux_ops.set_parent(hw, index);
- if (ret)
- return ret;
-
- hwp = clk_hw_get_parent(hw);
-
- for (n = 0; n < clk_mmux->mmux->nbr_clk; n++)
- clk_hw_set_parent(clk_mmux->mmux->hws[n], hw);
-
- return 0;
+ return clk_mux_ops.set_parent(hw, index);
}
static const struct clk_ops clk_mmux_ops = {
.get_parent = clk_mmux_get_parent,
.set_parent = clk_mmux_set_parent,
+ .round_rate = clk_mux_round_rate,
};
/* STM32 PLL */
struct stm32_pll_obj {
+ /* lock pll enable/disable registers */
+ spinlock_t *lock;
void __iomem *reg;
- const char *parent;
- struct clk clk;
+ struct clk_hw hw;
+ struct clk_mux mux;
};
-#define to_pll(clk) container_of(clk, struct stm32_pll_obj, clk)
+#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
#define PLL_ON BIT(0)
#define PLL_RDY BIT(1)
@@ -681,30 +734,34 @@ struct stm32_pll_obj {
#define FRAC_MASK 0x1FFF
#define FRAC_SHIFT 3
#define FRACLE BIT(16)
+#define PLL_MUX_SHIFT 0
+#define PLL_MUX_WIDTH 2
-static int __pll_is_enabled(struct clk *clk)
+static int __pll_is_enabled(struct clk_hw *hw)
{
- struct stm32_pll_obj *clk_elem = to_pll(clk);
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
- return readl(clk_elem->reg) & PLL_ON;
+ return readl_relaxed(clk_elem->reg) & PLL_ON;
}
#define TIMEOUT 5
static int pll_enable(struct clk_hw *hw)
{
- struct clk *clk = clk_hw_to_clk(hw);
- struct stm32_pll_obj *clk_elem = to_pll(clk);
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
u32 reg;
+ unsigned long flags = 0;
unsigned int timeout = TIMEOUT;
int bit_status = 0;
- if (__pll_is_enabled(clk))
+ spin_lock_irqsave(clk_elem->lock, flags);
+
+ if (__pll_is_enabled(hw))
goto unlock;
- reg = readl(clk_elem->reg);
+ reg = readl_relaxed(clk_elem->reg);
reg |= PLL_ON;
- writel(reg, clk_elem->reg);
+ writel_relaxed(reg, clk_elem->reg);
/* We can't use readl_poll_timeout() because we can be blocked if
* someone enables this clock before clocksource changes.
@@ -712,7 +769,7 @@ static int pll_enable(struct clk_hw *hw)
* interruptions and enable op does not allow to be interrupted.
*/
do {
- bit_status = !(readl(clk_elem->reg) & PLL_RDY);
+ bit_status = !(readl_relaxed(clk_elem->reg) & PLL_RDY);
if (bit_status)
udelay(120);
@@ -720,26 +777,32 @@ static int pll_enable(struct clk_hw *hw)
} while (bit_status && --timeout);
unlock:
+ spin_unlock_irqrestore(clk_elem->lock, flags);
+
return bit_status;
}
static void pll_disable(struct clk_hw *hw)
{
- struct clk *clk = clk_hw_to_clk(hw);
- struct stm32_pll_obj *clk_elem = to_pll(clk);
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
u32 reg;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(clk_elem->lock, flags);
- reg = readl(clk_elem->reg);
+ reg = readl_relaxed(clk_elem->reg);
reg &= ~PLL_ON;
- writel(reg, clk_elem->reg);
+ writel_relaxed(reg, clk_elem->reg);
+
+ spin_unlock_irqrestore(clk_elem->lock, flags);
}
-static u32 pll_frac_val(struct clk *clk)
+static u32 pll_frac_val(struct clk_hw *hw)
{
- struct stm32_pll_obj *clk_elem = to_pll(clk);
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
u32 reg, frac = 0;
- reg = readl(clk_elem->reg + FRAC_OFFSET);
+ reg = readl_relaxed(clk_elem->reg + FRAC_OFFSET);
if (reg & FRACLE)
frac = (reg >> FRAC_SHIFT) & FRAC_MASK;
@@ -749,13 +812,12 @@ static u32 pll_frac_val(struct clk *clk)
static unsigned long pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct clk *clk = clk_hw_to_clk(hw);
- struct stm32_pll_obj *clk_elem = to_pll(clk);
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
u32 reg;
u32 frac, divm, divn;
u64 rate, rate_frac = 0;
- reg = readl(clk_elem->reg + 4);
+ reg = readl_relaxed(clk_elem->reg + 4);
divm = ((reg >> DIVM_SHIFT) & DIVM_MASK) + 1;
divn = ((reg >> DIVN_SHIFT) & DIVN_MASK) + 1;
@@ -763,7 +825,7 @@ static unsigned long pll_recalc_rate(struct clk_hw *hw,
do_div(rate, divm);
- frac = pll_frac_val(clk);
+ frac = pll_frac_val(hw);
if (frac) {
rate_frac = (u64)parent_rate * (u64)frac;
do_div(rate_frac, (divm * 8192));
@@ -772,79 +834,89 @@ static unsigned long pll_recalc_rate(struct clk_hw *hw,
return rate + rate_frac;
}
-static int pll_is_enabled(struct clk_hw *hw)
+static int pll_get_parent(struct clk_hw *hw)
{
- struct clk *clk = clk_hw_to_clk(hw);
- int ret;
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ struct clk_hw *mux_hw = &clk_elem->mux.hw;
- ret = __pll_is_enabled(clk);
+ mux_hw->clk = hw->clk;
- return ret;
+ return clk_mux_ops.get_parent(mux_hw);
}
static const struct clk_ops pll_ops = {
.enable = pll_enable,
.disable = pll_disable,
.recalc_rate = pll_recalc_rate,
- .is_enabled = pll_is_enabled,
+ .is_enabled = __pll_is_enabled,
+ .get_parent = pll_get_parent,
};
-static struct clk *clk_register_pll(const char *name,
- const char *parent_name,
- void __iomem *reg,
- unsigned long flags)
+static struct clk_hw *clk_register_pll(struct device_d *dev, const char *name,
+ const char * const *parent_names,
+ int num_parents,
+ void __iomem *reg,
+ void __iomem *mux_reg,
+ unsigned long flags,
+ spinlock_t *lock)
{
struct stm32_pll_obj *element;
- struct clk *clk;
+ struct clk_init_data init;
+ struct clk_hw *hw;
int err;
element = kzalloc(sizeof(*element), GFP_KERNEL);
if (!element)
return ERR_PTR(-ENOMEM);
- element->parent = parent_name;
-
- clk = &element->clk;
+ init.name = name;
+ init.ops = &pll_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
- clk->name = name;
- clk->ops = &pll_ops;
- clk->flags = flags;
- clk->parent_names = &element->parent;
- clk->num_parents = 1;
+ element->mux.lock = lock;
+ element->mux.reg = mux_reg;
+ element->mux.shift = PLL_MUX_SHIFT;
+ element->mux.width = PLL_MUX_WIDTH;
+ element->mux.flags = CLK_MUX_READ_ONLY;
+ element->mux.reg = mux_reg;
+ element->hw.init = &init;
element->reg = reg;
+ element->lock = lock;
- err = bclk_register(clk);
+ hw = &element->hw;
+ err = clk_hw_register(dev, hw);
- if (err) {
- kfree(element);
+ if (err)
return ERR_PTR(err);
- }
- return clk;
+ return hw;
}
/* Kernel Timer */
struct timer_cker {
+ /* lock the kernel output divider register */
+ spinlock_t *lock;
void __iomem *apbdiv;
void __iomem *timpre;
- const char *parent;
- struct clk clk;
+ struct clk_hw hw;
};
-#define to_timer_cker(_hw) container_of(_hw, struct timer_cker, clk)
+#define to_timer_cker(_hw) container_of(_hw, struct timer_cker, hw)
#define APB_DIV_MASK 0x07
#define TIM_PRE_MASK 0x01
-static unsigned long __bestmult(struct clk *clk, unsigned long rate,
+static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct timer_cker *tim_ker = to_timer_cker(clk);
+ struct timer_cker *tim_ker = to_timer_cker(hw);
u32 prescaler;
unsigned int mult = 0;
- prescaler = readl(tim_ker->apbdiv) & APB_DIV_MASK;
+ prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
if (prescaler < 2)
return 1;
@@ -859,8 +931,7 @@ static unsigned long __bestmult(struct clk *clk, unsigned long rate,
static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- struct clk *clk = clk_hw_to_clk(hw);
- unsigned long factor = __bestmult(clk, rate, *parent_rate);
+ unsigned long factor = __bestmult(hw, rate, *parent_rate);
return *parent_rate * factor;
}
@@ -868,23 +939,26 @@ static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct clk *clk = clk_hw_to_clk(hw);
- struct timer_cker *tim_ker = to_timer_cker(clk);
- unsigned long factor = __bestmult(clk, rate, parent_rate);
+ struct timer_cker *tim_ker = to_timer_cker(hw);
+ unsigned long flags = 0;
+ unsigned long factor = __bestmult(hw, rate, parent_rate);
int ret = 0;
+ spin_lock_irqsave(tim_ker->lock, flags);
+
switch (factor) {
case 1:
break;
case 2:
- writel(0, tim_ker->timpre);
+ writel_relaxed(0, tim_ker->timpre);
break;
case 4:
- writel(1, tim_ker->timpre);
+ writel_relaxed(1, tim_ker->timpre);
break;
default:
ret = -EINVAL;
}
+ spin_unlock_irqrestore(tim_ker->lock, flags);
return ret;
}
@@ -892,14 +966,13 @@ static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct clk *clk = clk_hw_to_clk(hw);
- struct timer_cker *tim_ker = to_timer_cker(clk);
+ struct timer_cker *tim_ker = to_timer_cker(hw);
u32 prescaler, timpre;
u32 mul;
- prescaler = readl(tim_ker->apbdiv) & APB_DIV_MASK;
+ prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
- timpre = readl(tim_ker->timpre) & TIM_PRE_MASK;
+ timpre = readl_relaxed(tim_ker->timpre) & TIM_PRE_MASK;
if (!prescaler)
return parent_rate;
@@ -916,51 +989,59 @@ static const struct clk_ops timer_ker_ops = {
};
-static struct clk *clk_register_cktim(const char *name,
+static struct clk_hw *clk_register_cktim(struct device_d *dev, const char *name,
const char *parent_name,
unsigned long flags,
void __iomem *apbdiv,
- void __iomem *timpre)
+ void __iomem *timpre,
+ spinlock_t *lock)
{
struct timer_cker *tim_ker;
- struct clk *clk;
+ struct clk_init_data init;
+ struct clk_hw *hw;
int err;
tim_ker = kzalloc(sizeof(*tim_ker), GFP_KERNEL);
if (!tim_ker)
return ERR_PTR(-ENOMEM);
- clk = &tim_ker->clk;
- tim_ker->parent = parent_name;
- clk->name = name;
- clk->parent_names = &tim_ker->parent;
- clk->num_parents = 1;
- clk->ops = &timer_ker_ops;
- clk->flags = flags;
+ init.name = name;
+ init.ops = &timer_ker_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ tim_ker->hw.init = &init;
+ tim_ker->lock = lock;
tim_ker->apbdiv = apbdiv;
tim_ker->timpre = timpre;
- err = bclk_register(clk);
- if (err) {
- kfree(tim_ker);
+ hw = &tim_ker->hw;
+ err = clk_hw_register(dev, hw);
+
+ if (err)
return ERR_PTR(err);
- }
- return clk;
+ return hw;
}
struct stm32_pll_cfg {
u32 offset;
+ u32 muxoff;
};
-static struct clk *_clk_register_pll(void __iomem *base,
- const struct clock_config *cfg)
+static struct clk_hw *_clk_register_pll(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
{
struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg;
- return clk_register_pll(cfg->name, cfg->parent_name,
- base + stm_pll_cfg->offset, cfg->flags);
+ return clk_register_pll(dev, cfg->name, cfg->parent_names,
+ cfg->num_parents,
+ base + stm_pll_cfg->offset,
+ base + stm_pll_cfg->muxoff,
+ cfg->flags, lock);
}
struct stm32_cktim_cfg {
@@ -968,31 +1049,42 @@ struct stm32_cktim_cfg {
u32 offset_timpre;
};
-static struct clk *_clk_register_cktim(void __iomem *base,
- const struct clock_config *cfg)
+static struct clk_hw *_clk_register_cktim(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
{
struct stm32_cktim_cfg *cktim_cfg = cfg->cfg;
- return clk_register_cktim(cfg->name, cfg->parent_name, cfg->flags,
+ return clk_register_cktim(dev, cfg->name, cfg->parent_name, cfg->flags,
cktim_cfg->offset_apbdiv + base,
- cktim_cfg->offset_timpre + base);
+ cktim_cfg->offset_timpre + base, lock);
}
-static struct clk *
-_clk_stm32_register_gate(void __iomem *base,
+static struct clk_hw *
+_clk_stm32_register_gate(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
- return clk_stm32_register_gate_ops(cfg->name, cfg->parent_name,
- cfg->flags, base, cfg->cfg);
+ return clk_stm32_register_gate_ops(dev,
+ cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ base,
+ cfg->cfg,
+ lock);
}
-static struct clk *
-_clk_stm32_register_composite(void __iomem *base,
+static struct clk_hw *
+_clk_stm32_register_composite(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
- return clk_stm32_register_composite(cfg->name, cfg->parent_names,
+ return clk_stm32_register_composite(dev, cfg->name, cfg->parent_names,
cfg->num_parents, base, cfg->cfg,
- cfg->flags);
+ cfg->flags, lock);
}
#define GATE(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
@@ -1059,14 +1151,16 @@ _clk_stm32_register_composite(void __iomem *base,
.func = _clk_hw_register_mux,\
}
-#define PLL(_id, _name, _parent, _flags, _offset)\
+#define PLL(_id, _name, _parents, _flags, _offset_p, _offset_mux)\
{\
.id = _id,\
.name = _name,\
- .parent_name = _parent,\
- .flags = _flags,\
+ .parent_names = _parents,\
+ .num_parents = ARRAY_SIZE(_parents),\
+ .flags = CLK_IGNORE_UNUSED | (_flags),\
.cfg = &(struct stm32_pll_cfg) {\
- .offset = _offset,\
+ .offset = _offset_p,\
+ .muxoff = _offset_mux,\
},\
.func = _clk_register_pll,\
}
@@ -1099,13 +1193,14 @@ _clk_stm32_register_composite(void __iomem *base,
.func = _clk_stm32_register_gate,\
}
-#define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _ops)\
+#define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops)\
(&(struct stm32_gate_cfg) {\
&(struct gate_cfg) {\
.reg_off = _gate_offset,\
.bit_idx = _gate_bit_idx,\
.gate_flags = _gate_flags,\
},\
+ .mgate = _mgate,\
.ops = _ops,\
})
@@ -1114,11 +1209,11 @@ _clk_stm32_register_composite(void __iomem *base,
#define _GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
- NULL)\
+ NULL, NULL)\
#define _GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
- &mp1_gate_clk_ops)\
+ NULL, &mp1_gate_clk_ops)\
#define _MGATE_MP1(_mgate)\
.gate = &per_gate_cfg[_mgate]
@@ -1191,10 +1286,11 @@ _clk_stm32_register_composite(void __iomem *base,
MGATE_MP1(_id, _name, _parent, _flags, _mgate)
#define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
- COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE | _flags,\
- _MGATE_MP1(_mgate),\
- _MMUX(_mmux),\
- _NO_DIV)
+ COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\
+ CLK_SET_RATE_NO_REPARENT | _flags,\
+ _MGATE_MP1(_mgate),\
+ _MMUX(_mmux),\
+ _NO_DIV)
enum {
G_SAI1,
@@ -1306,6 +1402,7 @@ enum {
G_CRYP1,
G_HASH1,
G_BKPSRAM,
+ G_DDRPERFM,
G_LAST
};
@@ -1392,6 +1489,7 @@ static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
K_GATE(G_STGENRO, RCC_APB4ENSETR, 20, 0),
K_MGATE(G_USBPHY, RCC_APB4ENSETR, 16, 0),
K_GATE(G_IWDG2, RCC_APB4ENSETR, 15, 0),
+ K_GATE(G_DDRPERFM, RCC_APB4ENSETR, 8, 0),
K_MGATE(G_DSI, RCC_APB4ENSETR, 4, 0),
K_MGATE(G_LTDC, RCC_APB4ENSETR, 0, 0),
@@ -1533,7 +1631,7 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
K_MMUX(M_ETHCK, RCC_ETHCKSELR, 0, 2, 0),
K_MMUX(M_I2C46, RCC_I2C46CKSELR, 0, 3, 0),
- /* Kernel simple mux */
+ /* Kernel simple mux */
K_MUX(M_RNG2, RCC_RNG2CKSELR, 0, 2, 0),
K_MUX(M_SDMMC3, RCC_SDMMC3CKSELR, 0, 3, 0),
K_MUX(M_FMC, RCC_FMCCKSELR, 0, 2, 0),
@@ -1559,34 +1657,26 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
};
static const struct clock_config stm32mp1_clock_cfg[] = {
- /* Oscillator divider */
- DIV(NO_ID, "clk-hsi-div", "clk-hsi", 0, RCC_HSICFGR, 0, 2,
- CLK_DIVIDER_READ_ONLY),
-
/* External / Internal Oscillators */
GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
- GATE_MP1(CK_CSI, "ck_csi", "clk-csi", 0, RCC_OCENSETR, 4, 0),
- GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
+ /* ck_csi is used by IO compensation and should be critical */
+ GATE_MP1(CK_CSI, "ck_csi", "clk-csi", CLK_IS_CRITICAL,
+ RCC_OCENSETR, 4, 0),
+ COMPOSITE(CK_HSI, "ck_hsi", PARENT("clk-hsi"), 0,
+ _GATE_MP1(RCC_OCENSETR, 0, 0),
+ _NO_MUX,
+ _DIV(RCC_HSICFGR, 0, 2, CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_READ_ONLY, NULL)),
GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
FIXED_FACTOR(CK_HSE_DIV2, "clk-hse-div2", "ck_hse", 0, 1, 2),
- /* ref clock pll */
- MUX(NO_ID, "ref1", ref12_parents, CLK_OPS_PARENT_ENABLE, RCC_RCK12SELR,
- 0, 2, CLK_MUX_READ_ONLY),
-
- MUX(NO_ID, "ref3", ref3_parents, CLK_OPS_PARENT_ENABLE, RCC_RCK3SELR,
- 0, 2, CLK_MUX_READ_ONLY),
-
- MUX(NO_ID, "ref4", ref4_parents, CLK_OPS_PARENT_ENABLE, RCC_RCK4SELR,
- 0, 2, CLK_MUX_READ_ONLY),
-
/* PLLs */
- PLL(PLL1, "pll1", "ref1", CLK_IGNORE_UNUSED, RCC_PLL1CR),
- PLL(PLL2, "pll2", "ref1", CLK_IGNORE_UNUSED, RCC_PLL2CR),
- PLL(PLL3, "pll3", "ref3", CLK_IGNORE_UNUSED, RCC_PLL3CR),
- PLL(PLL4, "pll4", "ref4", CLK_IGNORE_UNUSED, RCC_PLL4CR),
+ PLL(PLL1, "pll1", ref12_parents, 0, RCC_PLL1CR, RCC_RCK12SELR),
+ PLL(PLL2, "pll2", ref12_parents, 0, RCC_PLL2CR, RCC_RCK12SELR),
+ PLL(PLL3, "pll3", ref3_parents, 0, RCC_PLL3CR, RCC_RCK3SELR),
+ PLL(PLL4, "pll4", ref4_parents, 0, RCC_PLL4CR, RCC_RCK4SELR),
/* ODF */
COMPOSITE(PLL1_P, "pll1_p", PARENT("pll1"), 0,
@@ -1801,6 +1891,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
PCLK(CRC1, "crc1", "ck_axi", 0, G_CRC1),
PCLK(USBH, "usbh", "ck_axi", 0, G_USBH),
PCLK(ETHSTP, "ethstp", "ck_axi", 0, G_ETHSTP),
+ PCLK(DDRPERFM, "ddrperfm", "pclk4", 0, G_DDRPERFM),
/* Kernel clocks */
KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12),
@@ -1857,18 +1948,17 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
- COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE,
+ COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_NO_REPARENT,
_NO_GATE,
_MMUX(M_ETHCK),
_DIV(RCC_ETHCKSELR, 4, 4, 0, NULL)),
/* RTC clock */
- DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7, 0),
-
COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE |
- CLK_SET_RATE_PARENT,
- _GATE(RCC_BDCR, 20, 0),
- _MUX(RCC_BDCR, 16, 2, 0),
+ CLK_SET_RATE_PARENT,
+ _NO_GATE,
+ _NO_MUX,
_NO_DIV),
/* MCO clocks */
@@ -1894,16 +1984,76 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
_DIV(RCC_DBGCFGR, 0, 3, 0, ck_trace_div_table)),
};
-struct stm32_clock_match_data {
+static const u32 stm32mp1_clock_secured[] = {
+ CK_HSE,
+ CK_HSI,
+ CK_CSI,
+ CK_LSI,
+ CK_LSE,
+ PLL1,
+ PLL2,
+ PLL1_P,
+ PLL2_P,
+ PLL2_Q,
+ PLL2_R,
+ CK_MPU,
+ CK_AXI,
+ SPI6,
+ I2C4,
+ I2C6,
+ USART1,
+ RTCAPB,
+ TZC1,
+ TZC2,
+ TZPC,
+ IWDG1,
+ BSEC,
+ STGEN,
+ GPIOZ,
+ CRYP1,
+ HASH1,
+ RNG1,
+ BKPSRAM,
+ RNG1_K,
+ STGEN_K,
+ SPI6_K,
+ I2C4_K,
+ I2C6_K,
+ USART1_K,
+ RTC,
+};
+
+static bool stm32_check_security(const struct clock_config *cfg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(stm32mp1_clock_secured); i++)
+ if (cfg->id == stm32mp1_clock_secured[i])
+ return true;
+ return false;
+}
+
+struct stm32_rcc_match_data {
const struct clock_config *cfg;
unsigned int num;
unsigned int maxbinding;
+ u32 clear_offset;
+ bool (*check_security)(const struct clock_config *cfg);
+};
+
+static struct stm32_rcc_match_data stm32mp1_data = {
+ .cfg = stm32mp1_clock_cfg,
+ .num = ARRAY_SIZE(stm32mp1_clock_cfg),
+ .maxbinding = STM32MP1_LAST_CLK,
+ .clear_offset = RCC_CLR,
};
-static struct stm32_clock_match_data stm32mp1_data = {
+static struct stm32_rcc_match_data stm32mp1_data_secure = {
.cfg = stm32mp1_clock_cfg,
.num = ARRAY_SIZE(stm32mp1_clock_cfg),
.maxbinding = STM32MP1_LAST_CLK,
+ .clear_offset = RCC_CLR,
+ .check_security = &stm32_check_security
};
static const struct of_device_id stm32mp1_match_data[] = {
@@ -1911,85 +2061,282 @@ static const struct of_device_id stm32mp1_match_data[] = {
.compatible = "st,stm32mp1-rcc",
.data = &stm32mp1_data,
},
+ {
+ .compatible = "st,stm32mp1-rcc-secure",
+ .data = &stm32mp1_data_secure,
+ },
{ }
};
-static int stm32_register_hw_clk(struct clk_onecell_data *clk_data,
- void __iomem *base,
+static int stm32_register_hw_clk(struct device_d *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
- struct clk *clk = ERR_PTR(-ENOENT);
+ struct clk_hw **hws;
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
+
+ hws = clk_data->hws;
if (cfg->func)
- clk = (*cfg->func)(base, cfg);
+ hw = (*cfg->func)(dev, clk_data, base, lock, cfg);
- if (IS_ERR(clk)) {
+ if (IS_ERR(hw)) {
pr_err("Unable to register %s\n", cfg->name);
- return PTR_ERR(clk);
+ return PTR_ERR(hw);
}
if (cfg->id != NO_ID)
- clk_data->clks[cfg->id] = clk;
+ hws[cfg->id] = hw;
return 0;
}
-static int stm32_rcc_init(struct device_node *np,
- void __iomem *base,
- const struct of_device_id *match_data)
+#define STM32_RESET_ID_MASK GENMASK(15, 0)
+
+struct stm32_reset_data {
+ /* reset lock */
+ spinlock_t lock;
+ struct reset_controller_dev rcdev;
+ void __iomem *membase;
+ u32 clear_offset;
+};
+
+static inline struct stm32_reset_data *
+to_stm32_reset_data(struct reset_controller_dev *rcdev)
{
- struct clk_onecell_data *clk_data;
- struct clk **clks;
- const struct of_device_id *match;
- const struct stm32_clock_match_data *data;
- int err, n, max_binding;
+ return container_of(rcdev, struct stm32_reset_data, rcdev);
+}
- match = of_match_node(match_data, np);
- if (!match) {
- pr_err("%s: match data not found\n", __func__);
- return -ENODEV;
+static int stm32_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
+
+ if (data->clear_offset) {
+ void __iomem *addr;
+
+ addr = data->membase + (bank * reg_width);
+ if (!assert)
+ addr += data->clear_offset;
+
+ writel(BIT(offset), addr);
+
+ } else {
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * reg_width));
+
+ if (assert)
+ reg |= BIT(offset);
+ else
+ reg &= ~BIT(offset);
+
+ writel(reg, data->membase + (bank * reg_width));
+
+ spin_unlock_irqrestore(&data->lock, flags);
}
- data = match->data;
+ return 0;
+}
+
+static int stm32_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return stm32_reset_update(rcdev, id, true);
+}
+
+static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return stm32_reset_update(rcdev, id, false);
+}
+
+static int stm32_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
+ u32 reg;
+
+ reg = readl(data->membase + (bank * reg_width));
- max_binding = data->maxbinding;
+ return !!(reg & BIT(offset));
+}
- clk_data = xzalloc(sizeof(*clk_data));
- clk_data->clks = xzalloc(sizeof(void *) * max_binding);
- clk_data->clk_num = max_binding;
+static const struct reset_control_ops stm32_reset_ops = {
+ .assert = stm32_reset_assert,
+ .deassert = stm32_reset_deassert,
+ .status = stm32_reset_status,
+};
- clks = clk_data->clks;
+static int stm32_rcc_reset_init(struct device_d *dev, void __iomem *base,
+ const struct of_device_id *match)
+{
+ const struct stm32_rcc_match_data *data = match->data;
+ struct stm32_reset_data *reset_data = NULL;
+
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
+ if (!reset_data)
+ return -ENOMEM;
+
+ reset_data->membase = base;
+ reset_data->rcdev.ops = &stm32_reset_ops;
+ reset_data->rcdev.of_node = dev_of_node(dev);
+ reset_data->rcdev.nr_resets = STM32_RESET_ID_MASK;
+ reset_data->clear_offset = data->clear_offset;
+
+ return reset_controller_register(&reset_data->rcdev);
+}
+
+static int stm32_rcc_clock_init(struct device_d *dev, void __iomem *base,
+ const struct of_device_id *match)
+{
+ const struct stm32_rcc_match_data *data = match->data;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **hws;
+ int err, n, max_binding;
+
+ max_binding = data->maxbinding;
+
+ clk_data = kzalloc(struct_size(clk_data, hws, max_binding),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = max_binding;
+
+ hws = clk_data->hws;
for (n = 0; n < max_binding; n++)
- clks[n] = ERR_PTR(-ENOENT);
+ hws[n] = ERR_PTR(-ENOENT);
for (n = 0; n < data->num; n++) {
- err = stm32_register_hw_clk(clk_data, base,
+ if (data->check_security && data->check_security(&data->cfg[n]))
+ continue;
+
+ err = stm32_register_hw_clk(dev, clk_data, base, &rlock,
&data->cfg[n]);
if (err) {
- pr_err("%s: can't register %s\n", __func__,
- data->cfg[n].name);
-
- kfree(clk_data);
+ dev_err(dev, "Can't register clk %s: %d\n",
+ data->cfg[n].name, err);
return err;
}
}
- return of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_hw_provider(dev_of_node(dev), of_clk_hw_onecell_get, clk_data);
}
-static void stm32mp1_rcc_init(struct device_node *np)
+static int stm32_rcc_init(struct device_d *dev, void __iomem *base,
+ const struct of_device_id *match_data)
+{
+ const struct of_device_id *match;
+ int err;
+
+ match = of_match_node(match_data, dev_of_node(dev));
+ if (!match) {
+ dev_err(dev, "match data not found\n");
+ return -ENODEV;
+ }
+
+ /* RCC Reset Configuration */
+ err = stm32_rcc_reset_init(dev, base, match);
+ if (err) {
+ pr_err("stm32mp1 reset failed to initialize\n");
+ return err;
+ }
+
+ /* RCC Clock Configuration */
+ err = stm32_rcc_clock_init(dev, base, match);
+ if (err) {
+ pr_err("stm32mp1 clock failed to initialize\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int stm32mp1_rcc_init(struct device_d *dev)
{
void __iomem *base;
+ int ret;
- base = of_iomap(np, 0);
+ base = of_iomap(dev_of_node(dev), 0);
if (!base) {
- pr_err("%pOFn: unable to map resource", np);
- return;
+ dev_err(dev, "unable to map resource\n");
+ return -ENOMEM;
}
- stm32_rcc_init(np, base, stm32mp1_match_data);
+ ret = stm32_rcc_init(dev, base, stm32mp1_match_data);
+ if (ret)
+ return ret;
+
+ stm32mp_system_restart_init(base);
+ return 0;
+}
+
+static int get_clock_deps(struct device_d *dev)
+{
+ static const char * const clock_deps_name[] = {
+ "hsi", "hse", "csi", "lsi", "lse",
+ };
+ size_t deps_size = sizeof(struct clk *) * ARRAY_SIZE(clock_deps_name);
+ struct clk **clk_deps;
+ int i;
+
+ clk_deps = kzalloc(deps_size, GFP_KERNEL);
+ if (!clk_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(clock_deps_name); i++) {
+ struct clk *clk = of_clk_get_by_name(dev_of_node(dev),
+ clock_deps_name[i]);
+
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EINVAL && PTR_ERR(clk) != -ENOENT)
+ return PTR_ERR(clk);
+ } else {
+ /* Device gets a reference count on the clock */
+ clk_deps[i] = clk_get(dev, __clk_get_name(clk));
+ clk_put(clk);
+ }
+ }
+
+ return 0;
+}
+
+static int stm32mp1_rcc_clocks_probe(struct device_d *dev)
+{
+ int ret = get_clock_deps(dev);
+
+ if (!ret)
+ ret = stm32mp1_rcc_init(dev);
+
+ return ret;
+}
+
+static void stm32mp1_rcc_clocks_remove(struct device_d *dev)
+{
+ struct device_node *child, *np = dev_of_node(dev);
+
+ for_each_available_child_of_node(np, child)
+ of_clk_del_provider(child);
}
-CLK_OF_DECLARE_DRIVER(stm32mp1_rcc, "st,stm32mp1-rcc", stm32mp1_rcc_init);
+static struct driver_d stm32mp1_rcc_clocks_driver = {
+ .name = "stm32mp1_rcc",
+ .of_compatible = stm32mp1_match_data,
+ .probe = stm32mp1_rcc_clocks_probe,
+ .remove = stm32mp1_rcc_clocks_remove,
+};
+
+core_platform_driver(stm32mp1_rcc_clocks_driver);
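
Two structural changes in this file are easy to miss in the noise. First, the RCC driver moves from CLK_OF_DECLARE_DRIVER() to an ordinary platform driver registered with core_platform_driver(), which lets it take references on its oscillator inputs via clk_get() and return a real error from probe. Second, it now also registers a reset controller that treats the reset id as a flat bit index over 32-bit banks. Illustrative index math for stm32_reset_update(), using a made-up reset line id:

	id     = 3428 (hypothetical)
	bank   = 3428 / 32 = 107  ->  register at membase + 107 * 4 = membase + 428
	offset = 3428 % 32 = 4    ->  BIT(4)

	assert:   writel(BIT(4), membase + 428)
	deassert: writel(BIT(4), membase + 428 + clear_offset)

With clear_offset set, as on MP1 (RCC_CLR), both operations are single writes; the spinlocked read-modify-write branch only exists for RCC variants without set/clear register pairs.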
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 227ab75ed6..8e317b4b05 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -578,6 +578,12 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
+struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
+{
+ return data;
+}
+EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
+
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
struct clk_onecell_data *clk_data = data;
@@ -592,6 +598,22 @@ struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
+struct clk_hw *
+of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct clk_hw_onecell_data *hw_data = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= hw_data->num) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return hw_data->hws[idx];
+}
+EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
+
+
static int __of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
void *data),
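
of_clk_hw_onecell_get() is the clk_hw counterpart of of_clk_src_onecell_get(): a provider hands over a clk_hw_onecell_data whose hws[] array is indexed by the consumer's clock-cells argument. A minimal provider sketch with hypothetical names (MY_NR_CLKS, my_rcc_probe); note that unfilled slots are pre-set to ERR_PTR(-ENOENT), exactly as the stm32mp1 driver does above:

#define MY_NR_CLKS	2	/* hypothetical binding maximum */

static int my_rcc_probe(struct device_d *dev)
{
	struct clk_hw_onecell_data *data;
	int i;

	data = kzalloc(struct_size(data, hws, MY_NR_CLKS), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->num = MY_NR_CLKS;

	/* unfilled slots must read as errors, not NULL */
	for (i = 0; i < MY_NR_CLKS; i++)
		data->hws[i] = ERR_PTR(-ENOENT);

	/* ... clk_hw_register() each real clock into data->hws[id] ... */

	return of_clk_add_hw_provider(dev_of_node(dev),
				      of_clk_hw_onecell_get, data);
}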
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index ffba7146f7..d3cca41a7e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -22,4 +22,14 @@ config FIRMWARE_ZYNQMP_FPGA
select FIRMWARE
help
Load a bitstream to the PL of Zynq Ultrascale+
+
+config ARM_SCMI_PROTOCOL
+ tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
+ depends on ARM || COMPILE_TEST
+ depends on ARM_SMCCC
+ help
+ ARM System Control and Management Interface (SCMI) protocol is a
+ set of operating system-independent software interfaces that are
+ used in system management.
+
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index d16469cf72..26d6f3275a 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_FIRMWARE_ALTERA_SERIAL) += altera_serial.o
obj-$(CONFIG_FIRMWARE_ALTERA_SOCFPGA) += socfpga.o socfpga_sdr.o
obj-$(CONFIG_FIRMWARE_ZYNQMP_FPGA) += zynqmp-fpga.o
+obj-y += arm_scmi/
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
new file mode 100644
index 0000000000..4b21e6609a
--- /dev/null
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+scmi-bus-y = bus.o
+scmi-driver-y = driver.o
+scmi-transport-y = shmem.o
+scmi-transport-$(CONFIG_ARM_SMCCC) += smc.o
+scmi-protocols-y = base.o reset.o clock.o voltage.o
+
+scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
+ $(scmi-transport-y)
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
new file mode 100644
index 0000000000..d4af40c40c
--- /dev/null
+++ b/drivers/firmware/arm_scmi/base.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Base Protocol
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI BASE - " fmt
+
+#include <common.h>
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+
+#define SCMI_BASE_NUM_SOURCES 1
+#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024
+
+struct scmi_msg_resp_base_attributes {
+ u8 num_protocols;
+ u8 num_agents;
+ __le16 reserved;
+};
+
+enum scmi_base_protocol_cmd {
+ BASE_DISCOVER_VENDOR = 0x3,
+ BASE_DISCOVER_SUB_VENDOR = 0x4,
+ BASE_DISCOVER_IMPLEMENT_VERSION = 0x5,
+ BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
+ BASE_DISCOVER_AGENT = 0x7,
+ BASE_NOTIFY_ERRORS = 0x8,
+ BASE_SET_DEVICE_PERMISSIONS = 0x9,
+ BASE_SET_PROTOCOL_PERMISSIONS = 0xa,
+ BASE_RESET_AGENT_CONFIGURATION = 0xb,
+};
+
+/**
+ * scmi_base_attributes_get() - gets the implementation details
+ * that are associated with the base protocol.
+ *
+ * @ph: SCMI protocol handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_attributes_get(const struct scmi_protocol_handle *ph)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_base_attributes *attr_info;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr_info), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ attr_info = t->rx.buf;
+ rev->num_protocols = attr_info->num_protocols;
+ rev->num_agents = attr_info->num_agents;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
+ *
+ * @ph: SCMI protocol handle
+ * @sub_vendor: specify true if sub-vendor ID is needed
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor)
+{
+ u8 cmd;
+ int ret, size;
+ char *vendor_id;
+ struct scmi_xfer *t;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
+
+
+ if (sub_vendor) {
+ cmd = BASE_DISCOVER_SUB_VENDOR;
+ vendor_id = rev->sub_vendor_id;
+ size = ARRAY_SIZE(rev->sub_vendor_id);
+ } else {
+ cmd = BASE_DISCOVER_VENDOR;
+ vendor_id = rev->vendor_id;
+ size = ARRAY_SIZE(rev->vendor_id);
+ }
+
+ ret = ph->xops->xfer_get_init(ph, cmd, 0, size, &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ memcpy(vendor_id, t->rx.buf, size);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_implementation_version_get() - gets a vendor-specific
+ * implementation 32-bit version. The format of the version number is
+ * vendor-specific
+ *
+ * @ph: SCMI protocol handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_implementation_version_get(const struct scmi_protocol_handle *ph)
+{
+ int ret;
+ __le32 *impl_ver;
+ struct scmi_xfer *t;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
+
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_IMPLEMENT_VERSION,
+ 0, sizeof(*impl_ver), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ impl_ver = t->rx.buf;
+ rev->impl_ver = le32_to_cpu(*impl_ver);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_implementation_list_get() - gets the list of protocols the
+ * OSPM is allowed to access
+ *
+ * @ph: SCMI protocol handle
+ * @protocols_imp: pointer to hold the list of protocol identifiers
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
+ u8 *protocols_imp)
+{
+ u8 *list;
+ int ret, loop;
+ struct scmi_xfer *t;
+ __le32 *num_skip, *num_ret;
+ u32 tot_num_ret = 0, loop_num_ret;
+ struct device_d *dev = ph->dev;
+
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS,
+ sizeof(*num_skip), 0, &t);
+ if (ret)
+ return ret;
+
+ num_skip = t->tx.buf;
+ num_ret = t->rx.buf;
+ list = t->rx.buf + sizeof(*num_ret);
+
+ do {
+ /* Set the number of protocols to be skipped/already read */
+ *num_skip = cpu_to_le32(tot_num_ret);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (ret)
+ break;
+
+ loop_num_ret = le32_to_cpu(*num_ret);
+ if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
+ dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
+ break;
+ }
+
+ for (loop = 0; loop < loop_num_ret; loop++)
+ protocols_imp[tot_num_ret + loop] = *(list + loop);
+
+ tot_num_ret += loop_num_ret;
+
+ ph->xops->reset_rx_to_maxsz(ph, t);
+ } while (loop_num_ret);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_discover_agent_get() - discover the name of an agent
+ *
+ * @ph: SCMI protocol handle
+ * @id: Agent identifier
+ * @name: Agent identifier ASCII string
+ *
+ * An agent id of 0 is reserved to identify the platform itself.
+ * Generally the operating system is represented as "OSPM"
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
+ int id, char *name)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
+ sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(id, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int id, ret;
+ u8 *prot_imp;
+ u32 version;
+ char name[SCMI_MAX_STR_SIZE];
+ struct device_d *dev = ph->dev;
+ struct scmi_revision_info *rev = scmi_revision_area_get(ph);
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ prot_imp = kcalloc(MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL);
+ if (!prot_imp)
+ return -ENOMEM;
+
+ rev->major_ver = PROTOCOL_REV_MAJOR(version),
+ rev->minor_ver = PROTOCOL_REV_MINOR(version);
+ ph->set_priv(ph, rev);
+
+ scmi_base_attributes_get(ph);
+ scmi_base_vendor_id_get(ph, false);
+ scmi_base_vendor_id_get(ph, true);
+ scmi_base_implementation_version_get(ph);
+ scmi_base_implementation_list_get(ph, prot_imp);
+
+ scmi_setup_protocol_implemented(ph, prot_imp);
+
+ dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n",
+ rev->major_ver, rev->minor_ver, rev->vendor_id,
+ rev->sub_vendor_id, rev->impl_ver);
+ dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
+ rev->num_agents);
+
+ for (id = 0; id < rev->num_agents; id++) {
+ scmi_base_discover_agent_get(ph, id, name);
+ dev_dbg(dev, "Agent %d: %s\n", id, name);
+ }
+
+ return 0;
+}
+
+static const struct scmi_protocol scmi_base = {
+ .id = SCMI_PROTOCOL_BASE,
+ .instance_init = &scmi_base_protocol_init,
+ .ops = NULL,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER(base, scmi_base)
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
new file mode 100644
index 0000000000..e8297be4d2
--- /dev/null
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol bus layer
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <common.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <driver.h>
+
+#include "common.h"
+
+static DEFINE_IDR(scmi_protocols);
+
+static const struct scmi_device_id *
+scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
+{
+ const struct scmi_device_id *id = scmi_drv->id_table;
+
+ if (!id)
+ return NULL;
+
+ for (; id->protocol_id; id++)
+ if (id->protocol_id == scmi_dev->protocol_id) {
+ if (!id->name)
+ return id;
+ else if (!strcmp(id->name, scmi_dev->name))
+ return id;
+ }
+
+ return NULL;
+}
+
+static int scmi_dev_match(struct device_d *dev, struct driver_d *drv)
+{
+ struct scmi_driver *scmi_drv = to_scmi_driver(drv);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+ const struct scmi_device_id *id;
+
+ id = scmi_dev_match_id(scmi_dev, scmi_drv);
+ if (id)
+ return 0;
+
+ return -1;
+}
+
+static int scmi_match_by_id_table(struct device_d *dev, void *data)
+{
+ struct scmi_device *sdev = to_scmi_dev(dev);
+ struct scmi_device_id *id_table = data;
+
+ return sdev->protocol_id == id_table->protocol_id &&
+ !strcmp(sdev->name, id_table->name);
+}
+
+struct scmi_device *scmi_child_dev_find(struct device_d *parent,
+ int prot_id, const char *name)
+{
+ struct scmi_device_id id_table;
+ struct device_d *dev;
+
+ id_table.protocol_id = prot_id;
+ id_table.name = name;
+
+ dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
+ if (!dev)
+ return NULL;
+
+ return to_scmi_dev(dev);
+}
+
+const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (!proto) {
+ pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
+ return NULL;
+ }
+
+ pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+
+ return proto;
+}
+
+static int scmi_dev_probe(struct device_d *dev)
+{
+ struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+ const struct scmi_device_id *id;
+
+ id = scmi_dev_match_id(scmi_dev, scmi_drv);
+ if (!id)
+ return -ENODEV;
+
+ if (!scmi_dev->handle)
+ return -EPROBE_DEFER;
+
+ return scmi_drv->probe(scmi_dev);
+}
+
+static void scmi_dev_remove(struct device_d *dev)
+{
+ struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ if (scmi_drv->remove)
+ scmi_drv->remove(scmi_dev);
+}
+
+static struct bus_type scmi_bus_type = {
+ .name = "scmi_protocol",
+ .match = scmi_dev_match,
+ .probe = scmi_dev_probe,
+ .remove = scmi_dev_remove,
+};
+
+int scmi_driver_register(struct scmi_driver *driver)
+{
+ int retval;
+
+ retval = scmi_protocol_device_request(driver->id_table);
+ if (retval)
+ return retval;
+
+ driver->driver.bus = &scmi_bus_type;
+ driver->driver.name = driver->name;
+
+ retval = register_driver(&driver->driver);
+ if (!retval)
+ pr_debug("registered new scmi driver %s\n", driver->name);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(scmi_driver_register);
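+
+/*
+ * A minimal sketch of a client driver sitting on top of the registration
+ * flow above; the "scmi-foo" names are hypothetical and the block is kept
+ * under #if 0 as an illustration only. The id_table names the
+ * protocol/name pairs the driver can serve; scmi_driver_register() records
+ * the request and the core creates and binds a matching scmi_device for
+ * each active SCMI instance.
+ */
+#if 0
+static int scmi_foo_probe(struct scmi_device *sdev)
+{
+	/* sdev->handle is guaranteed valid here, see scmi_dev_probe() */
+	return 0;
+}
+
+static const struct scmi_device_id scmi_foo_id_table[] = {
+	{ SCMI_PROTOCOL_CLOCK, "clocks" },
+	{ },
+};
+
+static struct scmi_driver scmi_foo_driver = {
+	.name = "scmi-foo",
+	.probe = scmi_foo_probe,
+	.id_table = scmi_foo_id_table,
+};
+
+static int scmi_foo_init(void)
+{
+	return scmi_driver_register(&scmi_foo_driver);
+}
+device_initcall(scmi_foo_init);
+#endif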
+
+struct scmi_device *
+scmi_device_alloc(struct device_node *np, struct device_d *parent, int protocol,
+ const char *name)
+{
+ struct scmi_device *scmi_dev;
+
+ scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
+ if (!scmi_dev)
+ return NULL;
+
+ scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL);
+ if (!scmi_dev->name) {
+ kfree(scmi_dev);
+ return NULL;
+ }
+
+ scmi_dev->dev.id = DEVICE_ID_DYNAMIC;
+ scmi_dev->protocol_id = protocol;
+ scmi_dev->dev.parent = parent;
+ scmi_dev->dev.device_node = np;
+ scmi_dev->dev.bus = &scmi_bus_type;
+ dev_set_name(&scmi_dev->dev, "scmi_dev");
+
+ return scmi_dev;
+}
+
+void scmi_device_destroy(struct scmi_device *scmi_dev)
+{
+ kfree_const(scmi_dev->name);
+ unregister_device(&scmi_dev->dev);
+}
+
+void scmi_set_handle(struct scmi_device *scmi_dev)
+{
+ scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+}
+
+int scmi_protocol_register(const struct scmi_protocol *proto)
+{
+ int ret;
+
+ if (!proto) {
+ pr_err("invalid protocol\n");
+ return -EINVAL;
+ }
+
+ if (!proto->instance_init) {
+ pr_err("missing init for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ ret = idr_alloc_one(&scmi_protocols, (void *)proto, proto->id);
+ if (ret != proto->id) {
+ pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ proto->id, ret);
+ return ret;
+ }
+
+ pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
+
+void scmi_protocol_unregister(const struct scmi_protocol *proto)
+{
+ idr_remove(&scmi_protocols, proto->id);
+
+ pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+
+int __init scmi_bus_init(void)
+{
+ int retval;
+
+ retval = bus_register(&scmi_bus_type);
+ if (retval)
+ pr_err("scmi protocol bus register failed (%d)\n", retval);
+
+ return retval;
+}
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
new file mode 100644
index 0000000000..8f9017206c
--- /dev/null
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Clock Protocol
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#include <common.h>
+#include <qsort.h>
+
+#include "common.h"
+
+enum scmi_clock_protocol_cmd {
+ CLOCK_ATTRIBUTES = 0x3,
+ CLOCK_DESCRIBE_RATES = 0x4,
+ CLOCK_RATE_SET = 0x5,
+ CLOCK_RATE_GET = 0x6,
+ CLOCK_CONFIG_SET = 0x7,
+};
+
+struct scmi_msg_resp_clock_protocol_attributes {
+ __le16 num_clocks;
+ u8 max_async_req;
+ u8 reserved;
+};
+
+struct scmi_msg_resp_clock_attributes {
+ __le32 attributes;
+#define CLOCK_ENABLE BIT(0)
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_clock_set_config {
+ __le32 id;
+ __le32 attributes;
+};
+
+struct scmi_msg_clock_describe_rates {
+ __le32 id;
+ __le32 rate_index;
+};
+
+struct scmi_msg_resp_clock_describe_rates {
+ __le32 num_rates_flags;
+#define NUM_RETURNED(x) ((x) & 0xfff)
+#define RATE_DISCRETE(x) !((x) & BIT(12))
+#define NUM_REMAINING(x) ((x) >> 16)
+ struct {
+ __le32 value_low;
+ __le32 value_high;
+ } rate[0];
+#define RATE_TO_U64(X) \
+({ \
+ typeof(X) x = (X); \
+ le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
+})
+};
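+
+/*
+ * num_rates_flags layout, per the accessors above: bits [11:0] hold the
+ * number of rates returned in this reply, bit 12 clear flags a discrete
+ * rate list (set means a min/max/step triplet), and bits [31:16] hold the
+ * number of rates still remaining to be fetched.
+ */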
+
+struct scmi_clock_set_rate {
+ __le32 flags;
+#define CLOCK_SET_ASYNC BIT(0)
+#define CLOCK_SET_IGNORE_RESP BIT(1)
+#define CLOCK_SET_ROUND_UP BIT(2)
+#define CLOCK_SET_ROUND_AUTO BIT(3)
+ __le32 id;
+ __le32 value_low;
+ __le32 value_high;
+};
+
+struct clock_info {
+ u32 version;
+ int num_clocks;
+ int max_async_req;
+ unsigned cur_async_req;
+ struct scmi_clock_info *clk;
+};
+
+static int
+scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct clock_info *ci)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_clock_protocol_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ ci->num_clocks = le16_to_cpu(attr->num_clocks);
+ ci->max_async_req = attr->max_async_req;
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 clk_id, struct scmi_clock_info *clk)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_clock_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
+ sizeof(clk_id), sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
+ else
+ clk->name[0] = '\0';
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int rate_cmp_func(const void *_r1, const void *_r2)
+{
+ const u64 *r1 = _r1, *r2 = _r2;
+
+ if (*r1 < *r2)
+ return -1;
+ else if (*r1 == *r2)
+ return 0;
+ else
+ return 1;
+}
+
+static int
+scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
+ struct scmi_clock_info *clk)
+{
+ u64 *rate = NULL;
+ int ret, cnt;
+ bool rate_discrete = false;
+ u32 tot_rate_cnt = 0, rates_flag;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *t;
+ struct scmi_msg_clock_describe_rates *clk_desc;
+ struct scmi_msg_resp_clock_describe_rates *rlist;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
+ sizeof(*clk_desc), 0, &t);
+ if (ret)
+ return ret;
+
+ clk_desc = t->tx.buf;
+ rlist = t->rx.buf;
+
+ do {
+ clk_desc->id = cpu_to_le32(clk_id);
+ /* Set the number of rates to be skipped/already read */
+ clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (ret)
+ goto err;
+
+ rates_flag = le32_to_cpu(rlist->num_rates_flags);
+ num_remaining = NUM_REMAINING(rates_flag);
+ rate_discrete = RATE_DISCRETE(rates_flag);
+ num_returned = NUM_RETURNED(rates_flag);
+
+ if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
+ dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
+ break;
+ }
+
+ if (!rate_discrete) {
+ clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
+ clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
+ clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
+ dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
+ clk->range.min_rate, clk->range.max_rate,
+ clk->range.step_size);
+ break;
+ }
+
+ rate = &clk->list.rates[tot_rate_cnt];
+ for (cnt = 0; cnt < num_returned; cnt++, rate++) {
+ *rate = RATE_TO_U64(rlist->rate[cnt]);
+ dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
+ }
+
+ tot_rate_cnt += num_returned;
+
+ ph->xops->reset_rx_to_maxsz(ph, t);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (num_returned && num_remaining);
+
+ if (rate_discrete && rate) {
+ clk->list.num_rates = tot_rate_cnt;
+ qsort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func);
+ }
+
+ clk->rate_discrete = rate_discrete;
+
+err:
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 *value)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
+ sizeof(__le32), sizeof(u64), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 rate)
+{
+ int ret;
+ u32 flags = 0;
+ struct scmi_xfer *t;
+ struct scmi_clock_set_rate *cfg;
+ struct clock_info *ci = ph->get_priv(ph);
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ if (ci->max_async_req &&
+ ci->cur_async_req++ < ci->max_async_req)
+ flags |= CLOCK_SET_ASYNC;
+
+ cfg = t->tx.buf;
+ cfg->flags = cpu_to_le32(flags);
+ cfg->id = cpu_to_le32(clk_id);
+ cfg->value_low = cpu_to_le32(rate & 0xffffffff);
+ cfg->value_high = cpu_to_le32(rate >> 32);
+
+ if (flags & CLOCK_SET_ASYNC)
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ else
+ ret = ph->xops->do_xfer(ph, t);
+
+ if (ci->max_async_req)
+ ci->cur_async_req--;
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u32 config)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_clock_set_config *cfg;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
+ sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(clk_id);
+ cfg->attributes = cpu_to_le32(config);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
+}
+
+static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, 0);
+}
+
+static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
+{
+ struct clock_info *ci = ph->get_priv(ph);
+
+ return ci->num_clocks;
+}
+
+static const struct scmi_clock_info *
+scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
+{
+ struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk = ci->clk + clk_id;
+
+ if (!clk->name[0])
+ return NULL;
+
+ return clk;
+}
+
+static const struct scmi_clk_proto_ops clk_proto_ops = {
+ .count_get = scmi_clock_count_get,
+ .info_get = scmi_clock_info_get,
+ .rate_get = scmi_clock_rate_get,
+ .rate_set = scmi_clock_rate_set,
+ .enable = scmi_clock_enable,
+ .disable = scmi_clock_disable,
+};
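+
+/*
+ * Usage sketch for a consumer that obtained these ops (as a const
+ * struct scmi_clk_proto_ops *ops) together with a protocol handle ph
+ * from the core, with clk_id below ops->count_get(ph):
+ *
+ *	u64 rate;
+ *
+ *	if (!ops->rate_get(ph, clk_id, &rate))
+ *		ops->rate_set(ph, clk_id, rate);
+ */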
+
+static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ u32 version;
+ int clkid, ret;
+ struct clock_info *cinfo;
+
+ ph->xops->version_get(ph, &version);
+
+ dev_dbg(ph->dev, "Clock Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ cinfo = kzalloc(sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
+
+ scmi_clock_protocol_attributes_get(ph, cinfo);
+
+ cinfo->clk = kcalloc(cinfo->num_clocks, sizeof(*cinfo->clk), GFP_KERNEL);
+ if (!cinfo->clk)
+ return -ENOMEM;
+
+ for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
+ struct scmi_clock_info *clk = cinfo->clk + clkid;
+
+ ret = scmi_clock_attributes_get(ph, clkid, clk);
+ if (!ret)
+ scmi_clock_describe_rates_get(ph, clkid, clk);
+ }
+
+ cinfo->version = version;
+ return ph->set_priv(ph, cinfo);
+}
+
+static const struct scmi_protocol scmi_clock = {
+ .id = SCMI_PROTOCOL_CLOCK,
+ .instance_init = &scmi_clock_protocol_init,
+ .ops = &clk_proto_ops,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER(clock, scmi_clock)
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
new file mode 100644
index 0000000000..5004a71dc9
--- /dev/null
+++ b/drivers/firmware/arm_scmi/common.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * driver common header file containing some definitions, structures
+ * and function prototypes used in all the different SCMI protocols.
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+#ifndef _SCMI_COMMON_H
+#define _SCMI_COMMON_H
+
+#include <linux/bitfield.h>
+#include <driver.h>
+#include <errno.h>
+#include <linux/kernel.h>
+#include <linux/scmi_protocol.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+
+#include <asm/unaligned.h>
+
+#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
+#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
+#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
+#define PROTOCOL_REV_MINOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
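+/* e.g. a reported protocol version of 0x20000 decodes as v2.0 (major 2, minor 0) */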
+#define MAX_PROTOCOLS_IMP 16
+#define MAX_OPPS 16
+
+enum scmi_common_cmd {
+ PROTOCOL_VERSION = 0x0,
+ PROTOCOL_ATTRIBUTES = 0x1,
+ PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
+};
+
+/**
+ * struct scmi_msg_resp_prot_version - Response for a message
+ *
+ * @minor_version: Minor version of the ABI that firmware supports
+ * @major_version: Major version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type SCMI_MSG_VERSION
+ */
+struct scmi_msg_resp_prot_version {
+ __le16 minor_version;
+ __le16 major_version;
+};
+
+#define MSG_ID_MASK GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
+#define MSG_TYPE_MASK GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND 0
+#define MSG_TYPE_DELAYED_RESP 2
+#define MSG_TYPE_NOTIFICATION 3
+#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
+#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
+#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
+#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
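+/* The token field spans bits [27:18], i.e. 10 bits, so MSG_TOKEN_MAX is 1024 */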
+
+/**
+ * struct scmi_msg_hdr - Message(Tx/Rx) header
+ *
+ * @id: The identifier of the message being sent
+ * @protocol_id: The identifier of the protocol used to send @id message
+ * @seq: The token to identify the message. When a message returns, the
+ * platform returns the whole message header unmodified including the
+ * token
+ * @status: Status of the transfer once it's complete
+ * @poll_completion: Indicate if the transfer needs to be polled for
+ * completion or interrupt mode is used
+ */
+struct scmi_msg_hdr {
+ u8 id;
+ u8 protocol_id;
+ u16 seq;
+ u32 status;
+ bool poll_completion;
+};
+
+/**
+ * pack_scmi_header() - packs and returns 32-bit header
+ *
+ * @hdr: pointer to header containing all the information on message id,
+ * protocol id and sequence id.
+ *
+ * Return: 32-bit packed message header to be sent to the platform.
+ */
+static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
+{
+ return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+ FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
+ FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
+}
+
+/**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+ hdr->id = MSG_XTRACT_ID(msg_hdr);
+ hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+}
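+
+/*
+ * As a worked example, packing a CLOCK_RATE_GET request (message id 0x6)
+ * for the clock protocol (id 0x14) with sequence token 1 yields:
+ *
+ *	FIELD_PREP(MSG_ID_MASK, 0x6)           = 0x00006
+ *	FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x14) = 0x05000
+ *	FIELD_PREP(MSG_TOKEN_ID_MASK, 1)       = 0x40000
+ *	packed header                          = 0x45006
+ */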
+
+/**
+ * struct scmi_msg - Message(Tx/Rx) structure
+ *
+ * @buf: Buffer pointer
+ * @len: Length of data in the Buffer
+ */
+struct scmi_msg {
+ void *buf;
+ size_t len;
+};
+
+/**
+ * struct scmi_xfer - Structure representing a message flow
+ *
+ * @transfer_id: Unique ID for debug & profiling purpose
+ * @hdr: Transmit message header
+ * @tx: Transmit message
+ * @rx: Receive message, the buffer should be pre-allocated to store
+ * message. If request-ACK protocol is used, we can reuse the same
+ * buffer for the rx path as we use for the tx path.
+ * @done: command message transmit completion event
+ * @async_done: pointer to delayed response message received event completion
+ */
+struct scmi_xfer {
+ int transfer_id;
+ struct scmi_msg_hdr hdr;
+ struct scmi_msg tx;
+ struct scmi_msg rx;
+ bool done;
+ bool *async_done;
+};
+
+struct scmi_xfer_ops;
+
+/**
+ * struct scmi_protocol_handle - Reference to an initialized protocol instance
+ *
+ * @dev: A reference to the associated SCMI instance device (handle->dev).
+ * @xops: A reference to a struct holding refs to the core xfer operations that
+ * can be used by the protocol implementation to generate SCMI messages.
+ * @set_priv: A method to set protocol private data for this instance.
+ * @get_priv: A method to get protocol private data previously set.
+ *
+ * This structure represents a protocol initialized against specific SCMI
+ * instance and it will be used as follows:
+ * - as a parameter fed from the core to the protocol initialization code so
+ * that it can access the core xfer operations to build and generate SCMI
+ * messages exclusively for the specific underlying protocol instance.
+ * - as an opaque handle fed by an SCMI driver user when it tries to access
+ * this protocol through its own protocol operations.
+ * In this case this handle will be returned as an opaque object together
+ * with the related protocol operations when the SCMI driver tries to access
+ * the protocol.
+ */
+struct scmi_protocol_handle {
+ struct device_d *dev;
+ const struct scmi_xfer_ops *xops;
+ int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
+ void *(*get_priv)(const struct scmi_protocol_handle *ph);
+};
+
+/**
+ * struct scmi_xfer_ops - References to the core SCMI xfer operations.
+ * @version_get: Get the version of this protocol.
+ * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
+ * @reset_rx_to_maxsz: Reset rx size to max transport size.
+ * @do_xfer: Do the SCMI transfer.
+ * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
+ * @xfer_put: Free the xfer slot.
+ *
+ * Note that all these operations expect a protocol handle as first parameter;
+ * they then internally use it to infer the underlying protocol number: this
+ * way it is not possible for a protocol implementation to forge messages for
+ * another protocol.
+ */
+struct scmi_xfer_ops {
+ int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
+ int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
+ size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p);
+ void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ void (*xfer_put)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+};
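+
+/*
+ * The canonical command sequence built on top of these ops, as used by
+ * every protocol implementation in this series, looks like:
+ *
+ *	ret = ph->xops->xfer_get_init(ph, MSG_ID, tx_size, rx_size, &t);
+ *	if (ret)
+ *		return ret;
+ *	put_unaligned_le32(arg, t->tx.buf);	<- marshal the request
+ *	ret = ph->xops->do_xfer(ph, t);		<- send and await reply
+ *	if (!ret)
+ *		val = get_unaligned_le32(t->rx.buf); <- unmarshal the reply
+ *	ph->xops->xfer_put(ph, t);		<- release the xfer slot
+ */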
+
+struct scmi_revision_info *
+scmi_revision_area_get(const struct scmi_protocol_handle *ph);
+int scmi_handle_put(const struct scmi_handle *handle);
+struct scmi_handle *scmi_handle_get(struct device_d *dev);
+void scmi_set_handle(struct scmi_device *scmi_dev);
+void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
+ u8 *prot_imp);
+
+typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
+
+/**
+ * struct scmi_protocol - Protocol descriptor
+ * @id: Protocol ID.
+ * @instance_init: Mandatory protocol initialization function.
+ * @instance_deinit: Optional protocol de-initialization function.
+ * @ops: Optional reference to the operations provided by the protocol and
+ * exposed in scmi_protocol.h.
+ * @events: An optional reference to the events supported by this protocol.
+ */
+struct scmi_protocol {
+ const u8 id;
+ const scmi_prot_init_ph_fn_t instance_init;
+ const scmi_prot_init_ph_fn_t instance_deinit;
+ const void *ops;
+ const struct scmi_protocol_events *events;
+};
+
+int __init scmi_bus_init(void);
+void __exit scmi_bus_exit(void);
+
+#define DECLARE_SCMI_REGISTER(func) \
+ int __init scmi_##func##_register(void);
+DECLARE_SCMI_REGISTER(base);
+DECLARE_SCMI_REGISTER(reset);
+DECLARE_SCMI_REGISTER(clock);
+DECLARE_SCMI_REGISTER(voltage);
+
+#define DEFINE_SCMI_PROTOCOL_REGISTER(name, proto) \
+static const struct scmi_protocol *__this_proto = &(proto); \
+ \
+int __init scmi_##name##_register(void) \
+{ \
+ return scmi_protocol_register(__this_proto); \
+}
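+
+/*
+ * For instance, DEFINE_SCMI_PROTOCOL_REGISTER(clock, scmi_clock) expands
+ * to scmi_clock_register(), which hands &scmi_clock to
+ * scmi_protocol_register().
+ */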
+
+const struct scmi_protocol *scmi_protocol_get(int protocol_id);
+
+int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
+void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
+
+/* SCMI Transport */
+/**
+ * struct scmi_chan_info - Structure representing a SCMI channel information
+ *
+ * @dev: Reference to device in the SCMI hierarchy corresponding to this
+ * channel
+ * @handle: Pointer to SCMI entity handle
+ * @transport_info: Transport layer related information
+ */
+struct scmi_chan_info {
+ struct device_d *dev;
+ struct scmi_handle *handle;
+ void *transport_info;
+};
+
+/**
+ * struct scmi_transport_ops - Structure representing a SCMI transport ops
+ *
+ * @chan_available: Callback to check if channel is available or not
+ * @chan_setup: Callback to allocate and setup a channel
+ * @chan_free: Callback to free a channel
+ * @send_message: Callback to send a message
+ * @mark_txdone: Callback to mark tx as done
+ * @fetch_response: Callback to fetch response
+ * @clear_channel: Callback to clear a channel
+ * @poll_done: Callback to poll transfer status
+ */
+struct scmi_transport_ops {
+ bool (*chan_available)(struct device_d *dev, int idx);
+ int (*chan_setup)(struct scmi_chan_info *cinfo, struct device_d *dev,
+ bool tx);
+ int (*chan_free)(int id, void *p, void *data);
+ int (*send_message)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
+ void (*fetch_response)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ void (*clear_channel)(struct scmi_chan_info *cinfo);
+ bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
+};
+
+int scmi_protocol_device_request(const struct scmi_device_id *id_table);
+void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table);
+struct scmi_device *scmi_child_dev_find(struct device_d *parent,
+ int prot_id, const char *name);
+
+/**
+ * struct scmi_desc - Description of SoC integration
+ *
+ * @ops: Pointer to the transport specific ops structure
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
+ * @max_msg: Maximum number of messages that can be pending
+ * simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct scmi_desc {
+ const struct scmi_transport_ops *ops;
+ int max_rx_timeout_ms;
+ int max_msg;
+ int max_msg_size;
+};
+
+#ifdef CONFIG_ARM_SMCCC
+extern const struct scmi_desc scmi_smc_desc;
+#endif
+
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
+
+/* shmem related declarations */
+struct scmi_shared_mem;
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+
+#endif /* _SCMI_COMMON_H */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
new file mode 100644
index 0000000000..ef3d76b3f4
--- /dev/null
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -0,0 +1,1279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol driver
+ *
+ * SCMI Message Protocol is used between the System Control Processor(SCP)
+ * and the Application Processors(AP). The Message Handling Unit(MHU)
+ * provides a mechanism for inter-processor communication between SCP's
+ * Cortex M3 and AP.
+ *
+ * SCP offers control and management of the core/cluster power states,
+ * various power domain DVFS including the core/cluster, certain system
+ * clocks configuration, thermal sensors and many others.
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI DRIVER - " fmt
+
+#include <common.h>
+#include <linux/bitmap.h>
+#include <driver.h>
+#include <linux/export.h>
+#include <io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <of_address.h>
+#include <of_device.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/processor.h>
+
+#include "common.h"
+
+enum scmi_error_codes {
+ SCMI_SUCCESS = 0, /* Success */
+ SCMI_ERR_SUPPORT = -1, /* Not supported */
+ SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
+ SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
+ SCMI_ERR_ENTRY = -4, /* Not found */
+ SCMI_ERR_RANGE = -5, /* Value out of range */
+ SCMI_ERR_BUSY = -6, /* Device busy */
+ SCMI_ERR_COMMS = -7, /* Communication Error */
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+ SCMI_ERR_MAX
+};
+
+/* List of all SCMI devices active in system */
+static LIST_HEAD(scmi_list);
+/* Track the unique id for the transfers for debug & profiling purpose */
+static unsigned transfer_last_id;
+
+static DEFINE_IDR(scmi_requested_devices);
+
+struct scmi_requested_dev {
+ const struct scmi_device_id *id_table;
+ struct list_head node;
+};
+
+/**
+ * struct scmi_xfers_info - Structure to manage transfer information
+ *
+ * @xfer_block: Preallocated Message array
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ * Index of this bitmap table is also used for message
+ * sequence identifier.
+ */
+struct scmi_xfers_info {
+ struct scmi_xfer *xfer_block;
+ unsigned long *xfer_alloc_table;
+};
+
+/**
+ * struct scmi_protocol_instance - Describe an initialized protocol instance.
+ * @handle: Reference to the SCMI handle associated to this protocol instance.
+ * @proto: A reference to the protocol descriptor.
+ * @users: A refcount to track effective users of this protocol.
+ * @priv: Reference for optional protocol private data.
+ * @ph: An embedded protocol handle that will be passed down to protocol
+ * initialization code to identify this instance.
+ *
+ * Each protocol is initialized independently, once for each SCMI platform
+ * instance, which is defined by DT and implemented by the SCMI server fw.
+ */
+struct scmi_protocol_instance {
+ const struct scmi_handle *handle;
+ const struct scmi_protocol *proto;
+ int users;
+ void *priv;
+ struct scmi_protocol_handle ph;
+};
+
+#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
+
+/**
+ * struct scmi_info - Structure representing a SCMI instance
+ *
+ * @dev: Device pointer
+ * @desc: SoC description for this instance
+ * @version: SCMI revision information containing protocol version,
+ * implementation version and (sub-)vendor identification.
+ * @handle: Instance of SCMI handle to send to clients
+ * @tx_minfo: Universal Transmit Message management info
+ * @rx_minfo: Universal Receive Message management info
+ * @tx_idr: IDR object to map protocol id to Tx channel info pointer
+ * @rx_idr: IDR object to map protocol id to Rx channel info pointer
+ * @protocols: IDR for protocols' instance descriptors initialized for
+ * this SCMI instance: populated on protocol's first attempted
+ * usage.
+ * @protocols_imp: List of protocols implemented, currently maximum of
+ * MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ * @active_protocols: IDR storing device_nodes for protocols actually defined
+ * in the DT and confirmed as implemented by fw.
+ * @node: List head
+ * @users: Number of users of this instance
+ */
+struct scmi_info {
+ struct device_d *dev;
+ const struct scmi_desc *desc;
+ struct scmi_revision_info version;
+ struct scmi_handle handle;
+ struct scmi_xfers_info tx_minfo;
+ struct scmi_xfers_info rx_minfo;
+ struct idr tx_idr;
+ struct idr rx_idr;
+ struct idr protocols;
+ u8 *protocols_imp;
+ struct idr active_protocols;
+ struct list_head node;
+ int users;
+};
+
+#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+
+static const int scmi_linux_errmap[] = {
+ /* better than switch case as long as return value is continuous */
+ 0, /* SCMI_SUCCESS */
+ -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
+	-EINVAL,		 /* SCMI_ERR_PARAMS */
+ -EACCES, /* SCMI_ERR_ACCESS */
+ -ENOENT, /* SCMI_ERR_ENTRY */
+ -ERANGE, /* SCMI_ERR_RANGE */
+ -EBUSY, /* SCMI_ERR_BUSY */
+ -ECOMM, /* SCMI_ERR_COMMS */
+ -EIO, /* SCMI_ERR_GENERIC */
+ -EREMOTEIO, /* SCMI_ERR_HARDWARE */
+ -EPROTO, /* SCMI_ERR_PROTOCOL */
+};
+
+static inline int scmi_to_linux_errno(int errno)
+{
+	int err_idx = -errno;
+
+	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+		return scmi_linux_errmap[err_idx];
+	return -EIO;
+}
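+
+/*
+ * e.g. an SCMI status of SCMI_ERR_ENTRY (-4) maps to -ENOENT, while any
+ * out-of-range status falls back to -EIO.
+ */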
+
+/**
+ * scmi_dump_header_dbg() - Helper to dump a message header.
+ *
+ * @dev: Device pointer corresponding to the SCMI entity
+ * @hdr: pointer to header.
+ */
+static inline void scmi_dump_header_dbg(struct device_d *dev,
+ struct scmi_msg_hdr *hdr)
+{
+ dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
+ hdr->id, hdr->seq, hdr->protocol_id);
+}
+
+/**
+ * scmi_xfer_get() - Allocate one message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Helper function which is used by various message functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCMI entity.
+ *
+ * Return: A valid xfer on success, else an ERR_PTR-encoded error.
+ */
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
+ struct scmi_xfers_info *minfo)
+{
+ u16 xfer_id;
+ struct scmi_xfer *xfer;
+ unsigned long bit_pos;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+ info->desc->max_msg);
+ if (bit_pos == info->desc->max_msg)
+ return ERR_PTR(-ENOMEM);
+ set_bit(bit_pos, minfo->xfer_alloc_table);
+
+ xfer_id = bit_pos;
+
+ xfer = &minfo->xfer_block[xfer_id];
+ xfer->hdr.seq = xfer_id;
+ xfer->done = false;
+ xfer->transfer_id = ++transfer_last_id;
+
+ return xfer;
+}
+
+/**
+ * __scmi_xfer_put() - Release a message
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: message that was reserved by scmi_xfer_get
+ */
+static void
+__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
+{
+ clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+}
+
+static void scmi_handle_response(struct scmi_chan_info *cinfo,
+ u16 xfer_id, u8 msg_type)
+{
+ struct scmi_xfer *xfer;
+ struct device_d *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
+
+ /* Are we even expecting this? */
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+ dev_err(dev, "message for %d is not expected!\n", xfer_id);
+ info->desc->ops->clear_channel(cinfo);
+ return;
+ }
+
+ xfer = &minfo->xfer_block[xfer_id];
+ /*
+ * Even if a response was indeed expected on this slot at this point,
+ * a buggy platform could wrongly reply feeding us an unexpected
+ * delayed response we're not prepared to handle: bail-out safely
+ * blaming firmware.
+ */
+ if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
+ dev_err(dev,
+ "Delayed Response for %d not expected! Buggy F/W ?\n",
+ xfer_id);
+ info->desc->ops->clear_channel(cinfo);
+ /* It was unexpected, so nobody will clear the xfer if not us */
+ __scmi_xfer_put(minfo, xfer);
+ return;
+ }
+
+ scmi_dump_header_dbg(dev, &xfer->hdr);
+
+ info->desc->ops->fetch_response(cinfo, xfer);
+
+ if (msg_type == MSG_TYPE_DELAYED_RESP) {
+ info->desc->ops->clear_channel(cinfo);
+ *xfer->async_done = true;
+ } else {
+ xfer->done = true;
+ }
+}
+
+/**
+ * scmi_rx_callback() - callback for receiving messages
+ *
+ * @cinfo: SCMI channel info
+ * @msg_hdr: Message header
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+ u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+
+ switch (msg_type) {
+ case MSG_TYPE_COMMAND:
+ case MSG_TYPE_DELAYED_RESP:
+ scmi_handle_response(cinfo, xfer_id, msg_type);
+ break;
+ default:
+ WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+ break;
+ }
+}
+
+/**
+ * xfer_put() - Release a transmit message
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @xfer: message that was reserved by scmi_xfer_get
+ */
+static void xfer_put(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
+ __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+/**
+ * do_xfer() - Do one transfer
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response, if transmit error,
+ * return corresponding error, else if all goes well,
+ * return 0.
+ */
+static int do_xfer(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
+{
+ int ret;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+ struct device_d *dev = info->dev;
+ struct scmi_chan_info *cinfo;
+ u64 start;
+
+ /*
+	 * Re-instate the protocol id here from the protocol handle, so that it
+	 * cannot be overridden by mistake (or malice) by protocol code mangling
+	 * the scmi_xfer structure.
+ */
+ xfer->hdr.protocol_id = pi->proto->id;
+
+ cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
+ if (unlikely(!cinfo))
+ return -EINVAL;
+
+ ret = info->desc->ops->send_message(cinfo, xfer);
+ if (ret < 0) {
+ dev_dbg(dev, "Failed to send message %d\n", ret);
+ return ret;
+ }
+
+ /* And we wait for the response. */
+ start = get_time_ns();
+ while (!xfer->done) {
+ if (is_timeout(start, info->desc->max_rx_timeout_ms * (u64)NSEC_PER_MSEC)) {
+ dev_err(dev, "timed out in resp(caller: %pS)\n", (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ if (!ret && xfer->hdr.status)
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+
+ if (info->desc->ops->mark_txdone)
+ info->desc->ops->mark_txdone(cinfo, ret);
+
+ return ret;
+}
+
+static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
+ xfer->rx.len = info->desc->max_msg_size;
+}
+
+#define SCMI_MAX_RESPONSE_TIMEOUT_NS (2 * NSEC_PER_SEC)
+
+/**
+ * do_xfer_with_response() - Do one transfer and wait until the delayed
+ * response is received
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
+ * return corresponding error, else if all goes well, return 0.
+ */
+static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
+{
+ int ret;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ bool async_response = false;
+ u64 start;
+
+ xfer->hdr.protocol_id = pi->proto->id;
+
+ xfer->async_done = &async_response;
+
+ ret = do_xfer(ph, xfer);
+ if (ret)
+ goto out;
+
+ start = get_time_ns();
+ while (!*xfer->async_done) {
+ if (is_timeout(start, SCMI_MAX_RESPONSE_TIMEOUT_NS)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+out:
+ xfer->async_done = NULL;
+ return ret;
+}
+
+/**
+ * xfer_get_init() - Allocate and initialise one message for transmit
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @msg_id: Message identifier
+ * @tx_size: transmit message size
+ * @rx_size: receive message size
+ * @p: pointer to the allocated and initialised message
+ *
+ * This function allocates the message using @scmi_xfer_get and
+ * initialises the header.
+ *
+ * Return: 0 if all went fine with @p pointing to message, else
+ * corresponding error.
+ */
+static int xfer_get_init(const struct scmi_protocol_handle *ph,
+ u8 msg_id, size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p)
+{
+ int ret;
+ struct scmi_xfer *xfer;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
+ struct device_d *dev = info->dev;
+
+ /* Ensure we have sane transfer sizes */
+ if (rx_size > info->desc->max_msg_size ||
+ tx_size > info->desc->max_msg_size)
+ return -ERANGE;
+
+ xfer = scmi_xfer_get(pi->handle, minfo);
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "failed to get free message slot(%d)\n", ret);
+ return ret;
+ }
+
+ xfer->tx.len = tx_size;
+ xfer->rx.len = rx_size ? : info->desc->max_msg_size;
+ xfer->hdr.id = msg_id;
+ xfer->hdr.protocol_id = pi->proto->id;
+ xfer->hdr.poll_completion = false;
+
+ *p = xfer;
+
+ return 0;
+}
+
+/**
+ * version_get() - command to get the revision of the SCMI entity
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @version: Holds returned version of protocol.
+ *
+ * Updates the SCMI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
+{
+ int ret;
+ __le32 *rev_info;
+ struct scmi_xfer *t;
+
+ ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
+ if (ret)
+ return ret;
+
+ ret = do_xfer(ph, t);
+ if (!ret) {
+ rev_info = t->rx.buf;
+ *version = le32_to_cpu(*rev_info);
+ }
+
+ xfer_put(ph, t);
+ return ret;
+}
+
+/**
+ * scmi_set_protocol_priv - Set protocol specific data at init time
+ *
+ * @ph: A reference to the protocol handle.
+ * @priv: The private data to set.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
+ void *priv)
+{
+ struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ pi->priv = priv;
+
+ return 0;
+}
+
+/**
+ * scmi_get_protocol_priv - Get protocol specific data set at init time
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * Return: Protocol private data if any was set.
+ */
+static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ return pi->priv;
+}
+
+static const struct scmi_xfer_ops xfer_ops = {
+ .version_get = version_get,
+ .xfer_get_init = xfer_get_init,
+ .reset_rx_to_maxsz = reset_rx_to_maxsz,
+ .do_xfer = do_xfer,
+ .do_xfer_with_response = do_xfer_with_response,
+ .xfer_put = xfer_put,
+};
+
+/**
+ * scmi_revision_area_get - Retrieve version memory area.
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * A helper to grab the version memory area reference during SCMI Base protocol
+ * initialization.
+ *
+ * Return: A reference to the version memory area associated to the SCMI
+ * instance underlying this protocol handle.
+ */
+struct scmi_revision_info *
+scmi_revision_area_get(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ return pi->handle->version;
+}
+
+/**
+ * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
+ * instance descriptor.
+ * @info: The reference to the related SCMI instance.
+ * @proto: The protocol descriptor.
+ *
+ * Allocate a new protocol instance descriptor, using the provided @proto
+ * description, against the specified SCMI instance @info, and initialize it;
+ *
+ * Return: A reference to a freshly allocated and initialized protocol instance
+ * or ERR_PTR on failure. On failure the @proto reference is at first
+ */
+static struct scmi_protocol_instance *
+scmi_alloc_init_protocol_instance(struct scmi_info *info,
+ const struct scmi_protocol *proto)
+{
+ int ret = -ENOMEM;
+ struct scmi_protocol_instance *pi;
+ const struct scmi_handle *handle = &info->handle;
+
+ pi = kzalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ goto clean;
+
+ pi->proto = proto;
+ pi->handle = handle;
+ pi->ph.dev = handle->dev;
+ pi->ph.xops = &xfer_ops;
+ pi->ph.set_priv = scmi_set_protocol_priv;
+ pi->ph.get_priv = scmi_get_protocol_priv;
+ pi->users++;
+ /* proto->init is assured NON NULL by scmi_protocol_register */
+ ret = pi->proto->instance_init(&pi->ph);
+ if (ret)
+ goto clean;
+
+ ret = idr_alloc_one(&info->protocols, pi, proto->id);
+ if (ret != proto->id)
+ goto clean;
+
+ dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
+
+ return pi;
+
+clean:
+ return ERR_PTR(ret);
+}
+
+/**
+ * scmi_get_protocol_instance - Protocol initialization helper.
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * In case the required protocol has never been requested before for this
+ * instance, allocate and initialize all the needed structures.
+ *
+ * Return: A reference to an initialized protocol instance or error on failure:
+ * in particular returns -EPROBE_DEFER when the desired protocol could
+ * NOT be found.
+ */
+static struct scmi_protocol_instance * __must_check
+scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ pi = idr_find(&info->protocols, protocol_id);
+
+ if (pi) {
+ pi->users++;
+ } else {
+ const struct scmi_protocol *proto;
+
+ /* Fails if protocol not registered on bus */
+ proto = scmi_protocol_get(protocol_id);
+ if (proto)
+ pi = scmi_alloc_init_protocol_instance(info, proto);
+ else
+ pi = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return pi;
+}
+
+/**
+ * scmi_protocol_acquire - Protocol acquire
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * Register a new user for the requested protocol on the specified SCMI
+ * platform instance, possibly triggering its initialization on first user.
+ *
+ * Return: 0 if protocol was acquired successfully.
+ */
+int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
+{
+ return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
+}
+
+void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
+ u8 *prot_imp)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
+ info->protocols_imp = prot_imp;
+}
+
+static bool
+scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
+{
+ int i;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ if (!info->protocols_imp)
+ return false;
+
+ for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
+ if (info->protocols_imp[i] == prot_id)
+ return true;
+ return false;
+}
+
+/**
+ * scmi_dev_protocol_get - get protocol operations and handle
+ * @sdev: A reference to the scmi_device requesting the protocol.
+ * @protocol_id: The protocol being requested.
+ * @ph: A pointer reference used to pass back the associated protocol handle.
+ *
+ * Get hold of a protocol accounting for its usage, eventually triggering its
+ * initialization, and returning the protocol specific operations and related
+ * protocol handle which will be used as first argument in most of the
+ * protocol operations methods.
+ *
+ * Return: A reference to the requested protocol operations or error.
+ * Must be checked for errors by caller.
+ */
+static const void __must_check *
+scmi_dev_protocol_get(struct scmi_device *sdev, u8 protocol_id,
+ struct scmi_protocol_handle **ph)
+{
+ struct scmi_protocol_instance *pi;
+ struct scmi_handle *handle = sdev->handle;
+
+ if (!ph)
+ return ERR_PTR(-EINVAL);
+
+ pi = scmi_get_protocol_instance(handle, protocol_id);
+ if (IS_ERR(pi))
+ return pi;
+
+ *ph = &pi->ph;
+
+ return pi->proto->ops;
+}
+
+static inline
+struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
+{
+ info->users++;
+ return &info->handle;
+}
+
+/**
+ * scmi_handle_get() - Get the SCMI handle for a device
+ *
+ * @dev: pointer to device for which we want SCMI handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of SCMI protocol library.
+ * scmi_handle_put must be balanced with successful scmi_handle_get
+ *
+ * Return: pointer to handle if successful, NULL on error
+ */
+struct scmi_handle *scmi_handle_get(struct device_d *dev)
+{
+ struct list_head *p;
+ struct scmi_info *info;
+ struct scmi_handle *handle = NULL;
+
+ list_for_each(p, &scmi_list) {
+ info = list_entry(p, struct scmi_info, node);
+ if (dev->parent == info->dev) {
+ handle = scmi_handle_get_from_info_unlocked(info);
+ break;
+ }
+ }
+
+ return handle;
+}
+
+/**
+ * scmi_handle_put() - Release the handle acquired by scmi_handle_get
+ *
+ * @handle: handle acquired by scmi_handle_get
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of SCMI protocol library.
+ * scmi_handle_put must be balanced with successful scmi_handle_get
+ *
+ * Return: 0 if successfully released; -EINVAL if NULL was passed.
+ */
+int scmi_handle_put(const struct scmi_handle *handle)
+{
+ struct scmi_info *info;
+
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_scmi_info(handle);
+ if (!WARN_ON(!info->users))
+ info->users--;
+
+ return 0;
+}
+
+static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+ struct scmi_xfers_info *info)
+{
+ int i;
+ struct scmi_xfer *xfer;
+ struct device_d *dev = sinfo->dev;
+ const struct scmi_desc *desc = sinfo->desc;
+
+ /* Pre-allocated messages, no more than what hdr.seq can support */
+ if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
+ dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+ desc->max_msg, MSG_TOKEN_MAX);
+ return -EINVAL;
+ }
+
+ info->xfer_block = kcalloc(desc->max_msg, sizeof(*info->xfer_block), GFP_KERNEL);
+ if (!info->xfer_block)
+ return -ENOMEM;
+
+ info->xfer_alloc_table = kcalloc(BITS_TO_LONGS(desc->max_msg),
+ sizeof(long), GFP_KERNEL);
+ if (!info->xfer_alloc_table)
+ return -ENOMEM;
+
+ /* Pre-initialize the buffer pointer to pre-allocated buffers */
+ for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
+ xfer->rx.buf = kcalloc(sizeof(u8), desc->max_msg_size,
+ GFP_KERNEL);
+ if (!xfer->rx.buf)
+ return -ENOMEM;
+
+ xfer->tx.buf = xfer->rx.buf;
+ xfer->done = false;
+ }
+
+ return 0;
+}
+
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
+{
+ int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+
+ if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+ ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+
+ return ret;
+}
+
+static int scmi_chan_setup(struct scmi_info *info, struct device_d *dev,
+ int prot_id, bool tx)
+{
+ int ret, idx;
+ struct scmi_chan_info *cinfo;
+ struct idr *idr;
+
+ /* Transmit channel is first entry i.e. index 0 */
+ idx = tx ? 0 : 1;
+ idr = tx ? &info->tx_idr : &info->rx_idr;
+
+ /* check if already allocated, used for multiple device per protocol */
+ cinfo = idr_find(idr, prot_id);
+ if (cinfo)
+ return 0;
+
+ if (!info->desc->ops->chan_available(dev, idx)) {
+ cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
+ if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
+ return -EINVAL;
+ goto idr_alloc;
+ }
+
+ cinfo = kzalloc(sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
+
+ cinfo->dev = dev;
+
+ ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
+ if (ret)
+ return ret;
+
+idr_alloc:
+ ret = idr_alloc_one(idr, cinfo, prot_id);
+ if (ret != prot_id) {
+ dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+ return ret;
+ }
+
+ cinfo->handle = &info->handle;
+ return 0;
+}
+
+static inline int
+scmi_txrx_setup(struct scmi_info *info, struct device_d *dev, int prot_id)
+{
+ int ret = scmi_chan_setup(info, dev, prot_id, true);
+
+ if (!ret) /* Rx is optional, hence no error check */
+ scmi_chan_setup(info, dev, prot_id, false);
+
+ return ret;
+}
+
+/**
+ * scmi_get_protocol_device - Helper to get/create an SCMI device.
+ *
+ * @np: A device node representing a valid active protocols for the referred
+ * SCMI instance.
+ * @info: The referred SCMI instance for which we are getting/creating this
+ * device.
+ * @prot_id: The protocol ID.
+ * @name: The device name.
+ *
+ * Referring to the specific SCMI instance identified by @info, this helper
+ * takes care to return a properly initialized device matching the requested
+ * @prot_id and @name: if the device does not exist yet, it is created as a
+ * child of the specified SCMI instance @info and its transport properly
+ * initialized as usual.
+ */
+static inline struct scmi_device *
+scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ struct scmi_device *sdev;
+
+ /* Already created for this parent SCMI instance ? */
+ sdev = scmi_child_dev_find(info->dev, prot_id, name);
+ if (sdev)
+ return sdev;
+
+ pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
+
+ sdev = scmi_device_alloc(np, info->dev, prot_id, name);
+ if (!sdev) {
+ dev_err(info->dev, "failed to create %d protocol device\n",
+ prot_id);
+ return NULL;
+ }
+
+ if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
+ dev_err(&sdev->dev, "failed to setup transport\n");
+ scmi_device_destroy(sdev);
+ return NULL;
+ }
+
+ return sdev;
+}
+
+static inline void
+scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ struct scmi_device *sdev;
+
+ sdev = scmi_get_protocol_device(np, info, prot_id, name);
+ if (!sdev)
+ return;
+
+ /* setup handle now as the transport is ready */
+ scmi_set_handle(sdev);
+
+ /* Register if not done yet */
+ if (sdev->dev.id == DEVICE_ID_DYNAMIC)
+ register_device(&sdev->dev);
+}
+
+/**
+ * scmi_create_protocol_devices - Create devices for all pending requests for
+ * this SCMI instance.
+ *
+ * @np: The device node describing the protocol
+ * @info: The SCMI instance descriptor
+ * @prot_id: The protocol ID
+ *
+ * All devices previously requested for this instance (if any) are found and
+ * created by scanning the proper @&scmi_requested_devices entry.
+ */
+static void scmi_create_protocol_devices(struct device_node *np,
+ struct scmi_info *info, int prot_id)
+{
+ struct list_head *phead;
+
+ phead = idr_find(&scmi_requested_devices, prot_id);
+ if (phead) {
+ struct scmi_requested_dev *rdev;
+
+ list_for_each_entry(rdev, phead, node)
+ scmi_create_protocol_device(np, info, prot_id,
+ rdev->id_table->name);
+ }
+}
+
+/**
+ * scmi_protocol_device_request - Helper to request a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be created.
+ *
+ * This helper let an SCMI driver request specific devices identified by the
+ * @id_table to be created for each active SCMI instance.
+ *
+ * The requested device name MUST NOT already exist for any protocol;
+ * at first the freshly requested @id_table is annotated in the IDR table
+ * @scmi_requested_devices, then a matching device is created for each
+ * already active SCMI instance (if any).
+ *
+ * This way the requested device is created straight away for all the already
+ * initialized (probed) SCMI instances (handles), and it also remains annotated
+ * as pending creation in case the requesting SCMI driver was loaded before
+ * some SCMI instance and its related transport were available: when such a
+ * late instance is probed, its probe will take care to scan the list of
+ * pending requested devices and create those on its own (see
+ * @scmi_create_protocol_devices and its enclosing loop).
+ *
+ * Return: 0 on Success
+ */
+int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+{
+ int ret = 0;
+ struct list_head *phead = NULL;
+ struct scmi_requested_dev *rdev;
+ struct scmi_info *info;
+ struct idr *idr;
+
+ pr_debug("Requesting SCMI device (%s) for protocol 0x%x\n",
+ id_table->name, id_table->protocol_id);
+
+ /*
+	 * Search for the matching protocol rdev list and then search for any
+	 * existing device with the same name; fail if a duplicate is found.
+ */
+ __idr_for_each_entry(&scmi_requested_devices, idr) {
+ struct list_head *head = idr->ptr;
+ if (!phead) {
+ /* A list found registered in the IDR is never empty */
+ rdev = list_first_entry(head, struct scmi_requested_dev,
+ node);
+ if (rdev->id_table->protocol_id ==
+ id_table->protocol_id)
+ phead = head;
+ }
+ list_for_each_entry(rdev, head, node) {
+ if (!strcmp(rdev->id_table->name, id_table->name)) {
+ pr_err("Ignoring duplicate request [%d] %s\n",
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * No duplicate found for requested id_table, so let's create a new
+ * requested device entry for this new valid request.
+ */
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rdev->id_table = id_table;
+
+ /*
+ * Append the new requested device table descriptor to the head of the
+ * related protocol list, eventually creating such head if not already
+ * there.
+ */
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead) {
+ kfree(rdev);
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(phead);
+
+ ret = idr_alloc_one(&scmi_requested_devices, (void *)phead,
+ id_table->protocol_id);
+ if (ret != id_table->protocol_id) {
+ pr_err("Failed to save SCMI device - ret:%d\n", ret);
+ kfree(rdev);
+ kfree(phead);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = 0;
+ }
+ list_add(&rdev->node, phead);
+
+ /*
+ * Now effectively create and initialize the requested device for every
+ * already initialized SCMI instance which has registered the requested
+ * protocol as a valid active one: i.e. defined in DT and supported by
+ * current platform FW.
+ */
+ list_for_each_entry(info, &scmi_list, node) {
+ struct device_node *child;
+
+ child = idr_find(&info->active_protocols,
+ id_table->protocol_id);
+ if (child) {
+ struct scmi_device *sdev;
+
+ sdev = scmi_get_protocol_device(child, info,
+ id_table->protocol_id,
+ id_table->name);
+ /* Set handle if not already set: device existed */
+ if (sdev && !sdev->handle)
+ sdev->handle =
+ scmi_handle_get_from_info_unlocked(info);
+ } else {
+ dev_err(info->dev,
+ "Failed. SCMI protocol %d not active.\n",
+ id_table->protocol_id);
+ }
+ }
+
+out:
+ return ret;
+}
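+
+/*
+ * A minimal usage sketch, mirroring the scmi-regulator driver added
+ * later in this series: an SCMI driver only declares a protocol/name
+ * pair, and registering it on the SCMI bus is what funnels the id_table
+ * into scmi_protocol_device_request(), so that a matching scmi_device
+ * shows up on every active SCMI instance exposing that protocol:
+ *
+ *	static const struct scmi_device_id example_id_table[] = {
+ *		{ SCMI_PROTOCOL_VOLTAGE, "regulator" },
+ *		{ },
+ *	};
+ *
+ *	static struct scmi_driver example_drv = {
+ *		.name		= "example",
+ *		.probe		= example_probe,
+ *		.id_table	= example_id_table,
+ *	};
+ *	core_scmi_driver(example_drv);
+ */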
+
+/**
+ * scmi_protocol_device_unrequest - Helper to unrequest a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be unrequested.
+ *
+ * A helper to let an SCMI driver release its request about devices; note
+ * that devices are created and initialized once the first SCMI driver
+ * requests them, but they are destroyed only on SCMI core
+ * unloading/unbinding.
+ *
+ * The current SCMI transport layer uses such devices as internal
+ * references, and as such they could be shared between multiple drivers
+ * on the same transport, so they cannot be safely destroyed until the
+ * whole SCMI stack is removed (short of adding the further burden of
+ * refcounting).
+ */
+void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+{
+ struct list_head *phead;
+
+ pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ struct scmi_requested_dev *victim, *tmp;
+
+ list_for_each_entry_safe(victim, tmp, phead, node) {
+ if (!strcmp(victim->id_table->name, id_table->name)) {
+ list_del(&victim->node);
+ kfree(victim);
+ break;
+ }
+ }
+
+ if (list_empty(phead)) {
+ idr_remove(&scmi_requested_devices,
+ id_table->protocol_id);
+ kfree(phead);
+ }
+ }
+}
+
+static void version_info(struct device_d *dev)
+{
+ struct scmi_info *info = dev->priv;
+
+ printf("SCMI information:\n"
+ " version: %u.%u\n"
+ " firmware version: 0x%x\n"
+ " vendor: %s (sub: %s)\n",
+	       info->version.major_ver,
+	       info->version.minor_ver,
+ info->version.impl_ver,
+ info->version.vendor_id,
+ info->version.sub_vendor_id);
+}
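+
+/*
+ * version_info() is hooked up as dev->info in scmi_probe() below, so the
+ * barebox devinfo command can dump the revision data negotiated via the
+ * Base protocol; the values below are purely illustrative and depend on
+ * the platform firmware:
+ *
+ *	SCMI information:
+ *	 version: 2.0
+ *	 firmware version: 0x1
+ *	 vendor: VENDOR (sub: SUBVENDOR)
+ */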
+
+static int scmi_probe(struct device_d *dev)
+{
+ int ret;
+ struct scmi_handle *handle;
+ const struct scmi_desc *desc;
+ struct scmi_info *info;
+ struct device_node *child, *np = dev->device_node;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->desc = desc;
+ INIT_LIST_HEAD(&info->node);
+ idr_init(&info->protocols);
+ idr_init(&info->active_protocols);
+
+ dev->priv = info;
+ idr_init(&info->tx_idr);
+ idr_init(&info->rx_idr);
+
+ handle = &info->handle;
+ handle->dev = info->dev;
+ handle->version = &info->version;
+ handle->protocol_get = scmi_dev_protocol_get;
+
+ ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+ if (ret)
+ return ret;
+
+ ret = scmi_xfer_info_init(info);
+ if (ret)
+ return ret;
+
+	/*
+	 * Trigger SCMI Base protocol initialization.
+	 * It's mandatory and won't ever be released/deinitialized until
+	 * the SCMI stack is shut down/unloaded as a whole.
+	 */
+ ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
+ if (ret) {
+ dev_err(dev, "unable to communicate with SCMI\n");
+ return ret;
+ }
+
+ list_add_tail(&info->node, &scmi_list);
+
+ for_each_available_child_of_node(np, child) {
+ u32 prot_id;
+
+ if (of_property_read_u32(child, "reg", &prot_id))
+ continue;
+
+ if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+ dev_err(dev, "Out of range protocol %d\n", prot_id);
+
+ if (!scmi_is_protocol_implemented(handle, prot_id)) {
+ dev_err(dev, "SCMI protocol %d not implemented\n",
+ prot_id);
+ continue;
+ }
+
+ /*
+ * Save this valid DT protocol descriptor amongst
+		 * @active_protocols for this SCMI instance.
+ */
+ ret = idr_alloc_one(&info->active_protocols, child, prot_id);
+ if (ret != prot_id) {
+ dev_err(dev, "SCMI protocol %d already activated. Skip\n",
+ prot_id);
+ continue;
+ }
+
+ scmi_create_protocol_devices(child, info, prot_id);
+ }
+
+ dev->info = version_info;
+
+ return 0;
+}
+
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
+{
+ idr_remove(idr, id);
+}
+
+/* Each compatible listed below must have descriptor associated with it */
+static const struct of_device_id scmi_of_match[] = {
+#ifdef CONFIG_ARM_SMCCC
+ { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
+#endif
+ { /* Sentinel */ },
+};
+
+static struct driver_d scmi_driver = {
+ .name = "arm-scmi",
+ .of_compatible = scmi_of_match,
+ .probe = scmi_probe,
+};
+
+/*
+ * pure_initcall runs before core_initcall, so the SCMI bus and the
+ * protocol implementations below are guaranteed to be registered before
+ * the platform driver in scmi_platform_driver_init() can probe.
+ */
+static int __init scmi_bus_driver_init(void)
+{
+ scmi_bus_init();
+
+ scmi_base_register();
+
+ scmi_reset_register();
+ scmi_clock_register();
+ scmi_voltage_register();
+
+ return 0;
+}
+pure_initcall(scmi_bus_driver_init);
+
+static int __init scmi_platform_driver_init(void)
+{
+ return platform_driver_register(&scmi_driver);
+}
+core_initcall(scmi_platform_driver_init);
+
+MODULE_ALIAS("platform:arm-scmi");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
new file mode 100644
index 0000000000..94baab99e1
--- /dev/null
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Reset Protocol
+ *
+ * Copyright (C) 2019-2021 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI RESET - " fmt
+
+#include <common.h>
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+
+enum scmi_reset_protocol_cmd {
+ RESET_DOMAIN_ATTRIBUTES = 0x3,
+ RESET = 0x4,
+ RESET_NOTIFY = 0x5,
+};
+
+#define NUM_RESET_DOMAIN_MASK 0xffff
+
+struct scmi_msg_resp_reset_domain_attributes {
+ __le32 attributes;
+#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31))
+#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30))
+ __le32 latency;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_reset_domain_reset {
+ __le32 domain_id;
+ __le32 flags;
+#define AUTONOMOUS_RESET BIT(0)
+#define EXPLICIT_RESET_ASSERT BIT(1)
+#define ASYNCHRONOUS_RESET BIT(2)
+ __le32 reset_state;
+#define ARCH_COLD_RESET 0
+};
+
+struct reset_dom_info {
+ bool async_reset;
+ u32 latency_us;
+ char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_reset_info {
+ u32 version;
+ int num_domains;
+ struct reset_dom_info *dom_info;
+};
+
+static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_reset_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ u32 attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(attr), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ attr = get_unaligned_le32(t->rx.buf);
+ pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 domain, struct reset_dom_info *dom_info)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_reset_domain_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, RESET_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ u32 attributes = le32_to_cpu(attr->attributes);
+
+ dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
+ dom_info->latency_us = le32_to_cpu(attr->latency);
+ if (dom_info->latency_us == U32_MAX)
+ dom_info->latency_us = 0;
+ strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+
+ return pi->num_domains;
+}
+
+static char *scmi_reset_name_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
+{
+	struct scmi_reset_info *pi = ph->get_priv(ph);
+	struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->name;
+}
+
+static int scmi_reset_latency_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
+{
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+ struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->latency_us;
+}
+
+static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
+ u32 flags, u32 state)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_domain_reset *dom;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+ struct reset_dom_info *rdom = pi->dom_info + domain;
+
+ if (rdom->async_reset)
+ flags |= ASYNCHRONOUS_RESET;
+
+ ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
+ if (ret)
+ return ret;
+
+ dom = t->tx.buf;
+ dom->domain_id = cpu_to_le32(domain);
+ dom->flags = cpu_to_le32(flags);
+ dom->reset_state = cpu_to_le32(state);
+
+ if (rdom->async_reset)
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ else
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_reset_domain_reset(const struct scmi_protocol_handle *ph,
+ u32 domain)
+{
+ return scmi_domain_reset(ph, domain, AUTONOMOUS_RESET,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_assert(const struct scmi_protocol_handle *ph, u32 domain)
+{
+ return scmi_domain_reset(ph, domain, EXPLICIT_RESET_ASSERT,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_deassert(const struct scmi_protocol_handle *ph, u32 domain)
+{
+ return scmi_domain_reset(ph, domain, 0, ARCH_COLD_RESET);
+}
+
+static const struct scmi_reset_proto_ops reset_proto_ops = {
+ .num_domains_get = scmi_reset_num_domains_get,
+ .name_get = scmi_reset_name_get,
+ .latency_get = scmi_reset_latency_get,
+ .reset = scmi_reset_domain_reset,
+ .assert = scmi_reset_domain_assert,
+ .deassert = scmi_reset_domain_deassert,
+};
+
+static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain;
+ u32 version;
+ struct scmi_reset_info *pinfo;
+
+ ph->xops->version_get(ph, &version);
+
+ dev_dbg(ph->dev, "Reset Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ scmi_reset_attributes_get(ph, pinfo);
+
+ pinfo->dom_info = kcalloc(pinfo->num_domains,
+ sizeof(*pinfo->dom_info), GFP_KERNEL);
+ if (!pinfo->dom_info)
+ return -ENOMEM;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ struct reset_dom_info *dom = pinfo->dom_info + domain;
+
+ scmi_reset_domain_attributes_get(ph, domain, dom);
+ }
+
+ pinfo->version = version;
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_reset = {
+ .id = SCMI_PROTOCOL_RESET,
+ .instance_init = &scmi_reset_protocol_init,
+ .ops = &reset_proto_ops,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER(reset, scmi_reset)
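+
+/*
+ * A short sketch of how these ops are consumed; this is what the
+ * reset-scmi controller driver added later in this series does. The ops
+ * are looked up through the SCMI handle and every call takes the
+ * protocol handle plus a domain index:
+ *
+ *	reset_ops = handle->protocol_get(sdev, SCMI_PROTOCOL_RESET, &ph);
+ *	if (!IS_ERR(reset_ops))
+ *		ret = reset_ops->reset(ph, 0);	// cold reset of domain 0
+ */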
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
new file mode 100644
index 0000000000..2dde2b6e09
--- /dev/null
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * For transports using a shared memory structure.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <common.h>
+#include <io.h>
+#include <linux/types.h>
+#include <linux/processor.h>
+
+#include "common.h"
+
+/*
+ * SCMI specification requires all parameters, message headers, return
+ * arguments or any protocol data to be expressed in little endian
+ * format only.
+ */
+struct scmi_shared_mem {
+ __le32 reserved;
+ __le32 channel_status;
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
+ __le32 reserved1[2];
+ __le32 flags;
+#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
+ __le32 length;
+ __le32 msg_header;
+ u8 msg_payload[];
+};
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+	/*
+	 * Ideally the channel must be free by now, unless the OS timed
+	 * out the last request and the platform continued to process it.
+	 * In that case wait until the platform releases the shared
+	 * memory, otherwise we may end up overwriting its response with a
+	 * new message payload or vice versa.
+	 */
+ spin_until_cond(ioread32(&shmem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+ /* Mark channel busy + clear error */
+ iowrite32(0x0, &shmem->channel_status);
+ iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+ &shmem->flags);
+ iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
+ iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
+ if (xfer->tx.buf)
+ memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
+}
+
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
+{
+ return ioread32(&shmem->msg_header);
+}
+
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ xfer->hdr.status = ioread32(shmem->msg_payload);
+	/* Skip the length of the header and status in the shmem area, i.e. 8 bytes */
+ xfer->rx.len = min_t(size_t, xfer->rx.len,
+ ioread32(&shmem->length) - 8);
+
+	/* Take a copy into the rx buffer */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+}
+
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
+{
+ iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
+}
+
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ u16 xfer_id;
+
+ xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
+
+ if (xfer->hdr.seq != xfer_id)
+ return false;
+
+ return ioread32(&shmem->channel_status) &
+ (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
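+
+/*
+ * The helpers above implement the polled shared-memory handshake used by
+ * the SMC transport in this series; a transport is expected to call them
+ * in roughly this order (sketch, error handling omitted):
+ *
+ *	shmem_tx_prepare(shmem, xfer);		// claim channel, write message
+ *	// ring the platform doorbell, e.g. via an SMC/HVC call
+ *	while (!shmem_poll_done(shmem, xfer))
+ *		;				// wait for CHANNEL_FREE/ERROR
+ *	shmem_fetch_response(shmem, xfer);	// copy status and payload out
+ */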
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
new file mode 100644
index 0000000000..67f19a7b43
--- /dev/null
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message SMC/HVC
+ * Transport driver
+ *
+ * Copyright 2020 NXP
+ */
+
+#include <common.h>
+#include <linux/arm-smccc.h>
+#include <driver.h>
+#include <linux/err.h>
+#include <of.h>
+#include <of_address.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_smc - Structure representing an SCMI SMC/HVC transport
+ *
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ * @func_id: smc/hvc call function id
+ */
+struct scmi_smc {
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+ u32 func_id;
+};
+
+static bool smc_chan_available(struct device_d *dev, int idx)
+{
+ return of_parse_phandle(dev->device_node, "shmem", 0) != NULL;
+}
+
+static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device_d *dev,
+ bool tx)
+{
+ struct device_d *cdev = cinfo->dev;
+ struct scmi_smc *scmi_info;
+ resource_size_t size;
+ struct resource res;
+ struct device_node *np;
+ u32 func_id;
+ int ret;
+
+ if (!tx)
+ return -ENODEV;
+
+ scmi_info = kzalloc(sizeof(*scmi_info), GFP_KERNEL);
+ if (!scmi_info)
+ return -ENOMEM;
+
+ np = of_parse_phandle(cdev->device_node, "shmem", 0);
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI Tx shared memory\n");
+ return ret;
+ }
+
+ size = resource_size(&res);
+ scmi_info->shmem = IOMEM(res.start);
+
+ ret = of_property_read_u32(dev->device_node, "arm,smc-id", &func_id);
+ if (ret < 0)
+ return ret;
+
+ scmi_info->func_id = func_id;
+ scmi_info->cinfo = cinfo;
+ cinfo->transport_info = scmi_info;
+
+ return 0;
+}
+
+static int smc_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ cinfo->transport_info = NULL;
+ scmi_info->cinfo = NULL;
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static int smc_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+ struct arm_smccc_res res;
+
+ shmem_tx_prepare(scmi_info->shmem, xfer);
+
+ arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
+
+	/* Only SMCCC_RET_NOT_SUPPORTED is a valid error code */
+ if (res.a0)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void smc_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ shmem_fetch_response(scmi_info->shmem, xfer);
+}
+
+static bool
+smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ return shmem_poll_done(scmi_info->shmem, xfer);
+}
+
+static const struct scmi_transport_ops scmi_smc_ops = {
+ .chan_available = smc_chan_available,
+ .chan_setup = smc_chan_setup,
+ .chan_free = smc_chan_free,
+ .send_message = smc_send_message,
+ .fetch_response = smc_fetch_response,
+ .poll_done = smc_poll_done,
+};
+
+const struct scmi_desc scmi_smc_desc = {
+ .ops = &scmi_smc_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 20,
+ .max_msg_size = 128,
+};
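+
+/*
+ * A minimal device tree sketch matching this transport; the compatible
+ * and property names are the ones consumed by scmi_of_match and
+ * smc_chan_setup(), while the SMC function id, shmem node and protocol
+ * set are placeholders:
+ *
+ *	firmware {
+ *		scmi {
+ *			compatible = "arm,scmi-smc";
+ *			arm,smc-id = <0x82002000>;
+ *			shmem = <&scmi_shmem>;
+ *			#address-cells = <1>;
+ *			#size-cells = <0>;
+ *
+ *			scmi_reset: protocol@16 {
+ *				reg = <0x16>;	// SCMI_PROTOCOL_RESET
+ *				#reset-cells = <1>;
+ *			};
+ *		};
+ *	};
+ */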
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
new file mode 100644
index 0000000000..a3d78db28f
--- /dev/null
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Voltage Protocol
+ *
+ * Copyright (C) 2020-2021 ARM Ltd.
+ */
+
+#include <common.h>
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+
+#define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0)
+#define REMAINING_LEVELS_MASK GENMASK(31, 16)
+#define RETURNED_LEVELS_MASK GENMASK(11, 0)
+
+enum scmi_voltage_protocol_cmd {
+ VOLTAGE_DOMAIN_ATTRIBUTES = 0x3,
+ VOLTAGE_DESCRIBE_LEVELS = 0x4,
+ VOLTAGE_CONFIG_SET = 0x5,
+ VOLTAGE_CONFIG_GET = 0x6,
+ VOLTAGE_LEVEL_SET = 0x7,
+ VOLTAGE_LEVEL_GET = 0x8,
+};
+
+#define NUM_VOLTAGE_DOMAINS(x) ((u16)(FIELD_GET(VOLTAGE_DOMS_NUM_MASK, (x))))
+
+struct scmi_msg_resp_domain_attributes {
+ __le32 attr;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_cmd_describe_levels {
+ __le32 domain_id;
+ __le32 level_index;
+};
+
+struct scmi_msg_resp_describe_levels {
+ __le32 flags;
+#define NUM_REMAINING_LEVELS(f) ((u16)(FIELD_GET(REMAINING_LEVELS_MASK, (f))))
+#define NUM_RETURNED_LEVELS(f) ((u16)(FIELD_GET(RETURNED_LEVELS_MASK, (f))))
+#define SUPPORTS_SEGMENTED_LEVELS(f) ((f) & BIT(12))
+ __le32 voltage[];
+};
+
+struct scmi_msg_cmd_config_set {
+ __le32 domain_id;
+ __le32 config;
+};
+
+struct scmi_msg_cmd_level_set {
+ __le32 domain_id;
+ __le32 flags;
+ __le32 voltage_level;
+};
+
+struct voltage_info {
+ unsigned int version;
+ unsigned int num_domains;
+ struct scmi_voltage_info *domains;
+};
+
+static int scmi_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct voltage_info *vinfo)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(__le32), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ vinfo->num_domains =
+ NUM_VOLTAGE_DOMAINS(get_unaligned_le32(t->rx.buf));
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_init_voltage_levels(struct device_d *dev,
+ struct scmi_voltage_info *v,
+ u32 num_returned, u32 num_remaining,
+ bool segmented)
+{
+ u32 num_levels;
+
+ num_levels = num_returned + num_remaining;
+	/*
+	 * Segmented levels are described by a single {low, high, step}
+	 * triplet returned all in one go.
+	 */
+ if (!num_levels ||
+ (segmented && (num_remaining || num_returned != 3))) {
+ dev_err(dev,
+ "Invalid level descriptor(%d/%d/%d) for voltage dom %d\n",
+ num_levels, num_returned, num_remaining, v->id);
+ return -EINVAL;
+ }
+
+ v->levels_uv = kcalloc(num_levels, sizeof(u32), GFP_KERNEL);
+ if (!v->levels_uv)
+ return -ENOMEM;
+
+ v->num_levels = num_levels;
+ v->segmented = segmented;
+
+ return 0;
+}
+
+static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
+ struct voltage_info *vinfo)
+{
+ int ret, dom;
+ struct scmi_xfer *td, *tl;
+ struct device_d *dev = ph->dev;
+ struct scmi_msg_resp_domain_attributes *resp_dom;
+ struct scmi_msg_resp_describe_levels *resp_levels;
+
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES,
+ sizeof(__le32), sizeof(*resp_dom), &td);
+ if (ret)
+ return ret;
+ resp_dom = td->rx.buf;
+
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_DESCRIBE_LEVELS,
+ sizeof(__le64), 0, &tl);
+ if (ret)
+ goto outd;
+ resp_levels = tl->rx.buf;
+
+ for (dom = 0; dom < vinfo->num_domains; dom++) {
+ u32 desc_index = 0;
+ u16 num_returned = 0, num_remaining = 0;
+ struct scmi_msg_cmd_describe_levels *cmd;
+ struct scmi_voltage_info *v;
+
+		/* Retrieve the domain attributes first... */
+ put_unaligned_le32(dom, td->tx.buf);
+ ret = ph->xops->do_xfer(ph, td);
+ /* Skip domain on comms error */
+ if (ret)
+ continue;
+
+ v = vinfo->domains + dom;
+ v->id = dom;
+ v->attributes = le32_to_cpu(resp_dom->attr);
+ strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE);
+
+ cmd = tl->tx.buf;
+ /* ...then retrieve domain levels descriptions */
+ do {
+ u32 flags;
+ int cnt;
+
+ cmd->domain_id = cpu_to_le32(v->id);
+ cmd->level_index = desc_index;
+ ret = ph->xops->do_xfer(ph, tl);
+ if (ret)
+ break;
+
+ flags = le32_to_cpu(resp_levels->flags);
+ num_returned = NUM_RETURNED_LEVELS(flags);
+ num_remaining = NUM_REMAINING_LEVELS(flags);
+
+ /* Allocate space for num_levels if not already done */
+ if (!v->num_levels) {
+ ret = scmi_init_voltage_levels(dev, v,
+ num_returned,
+ num_remaining,
+ SUPPORTS_SEGMENTED_LEVELS(flags));
+ if (ret)
+ break;
+ }
+
+ if (desc_index + num_returned > v->num_levels) {
+ dev_err(ph->dev,
+ "No. of voltage levels can't exceed %d\n",
+ v->num_levels);
+ ret = -EINVAL;
+ break;
+ }
+
+ for (cnt = 0; cnt < num_returned; cnt++) {
+ s32 val;
+
+ val =
+ (s32)le32_to_cpu(resp_levels->voltage[cnt]);
+ v->levels_uv[desc_index + cnt] = val;
+ if (val < 0)
+ v->negative_volts_allowed = true;
+ }
+
+ desc_index += num_returned;
+
+ ph->xops->reset_rx_to_maxsz(ph, tl);
+ /* check both to avoid infinite loop due to buggy fw */
+ } while (num_returned && num_remaining);
+
+ if (ret) {
+ v->num_levels = 0;
+ kfree(v->levels_uv);
+ }
+
+ ph->xops->reset_rx_to_maxsz(ph, td);
+ }
+
+ ph->xops->xfer_put(ph, tl);
+outd:
+ ph->xops->xfer_put(ph, td);
+
+ return ret;
+}
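+
+/*
+ * Worked example for the describe-levels loop above: assume a domain
+ * with 5 discrete levels and a transport whose rx buffer only fits 3
+ * level entries. The first VOLTAGE_DESCRIBE_LEVELS call (level_index 0)
+ * comes back with num_returned = 3 / num_remaining = 2, the second
+ * (level_index 3) with num_returned = 2 / num_remaining = 0, at which
+ * point the loop terminates.
+ */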
+
+static int __scmi_voltage_get_u32(const struct scmi_protocol_handle *ph,
+ u8 cmd_id, u32 domain_id, u32 *value)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct voltage_info *vinfo = ph->get_priv(ph);
+
+ if (domain_id >= vinfo->num_domains)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *value = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_voltage_config_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 config)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct voltage_info *vinfo = ph->get_priv(ph);
+ struct scmi_msg_cmd_config_set *cmd;
+
+ if (domain_id >= vinfo->num_domains)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_CONFIG_SET,
+ sizeof(*cmd), 0, &t);
+ if (ret)
+ return ret;
+
+ cmd = t->tx.buf;
+ cmd->domain_id = cpu_to_le32(domain_id);
+ cmd->config = cpu_to_le32(config & GENMASK(3, 0));
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *config)
+{
+ return __scmi_voltage_get_u32(ph, VOLTAGE_CONFIG_GET,
+ domain_id, config);
+}
+
+static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 flags, s32 volt_uV)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct voltage_info *vinfo = ph->get_priv(ph);
+ struct scmi_msg_cmd_level_set *cmd;
+
+ if (domain_id >= vinfo->num_domains)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_LEVEL_SET,
+ sizeof(*cmd), 0, &t);
+ if (ret)
+ return ret;
+
+ cmd = t->tx.buf;
+ cmd->domain_id = cpu_to_le32(domain_id);
+ cmd->flags = cpu_to_le32(flags);
+ cmd->voltage_level = cpu_to_le32(volt_uV);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_voltage_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, s32 *volt_uV)
+{
+ return __scmi_voltage_get_u32(ph, VOLTAGE_LEVEL_GET,
+ domain_id, (u32 *)volt_uV);
+}
+
+static const struct scmi_voltage_info * __must_check
+scmi_voltage_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
+{
+ struct voltage_info *vinfo = ph->get_priv(ph);
+
+ if (domain_id >= vinfo->num_domains ||
+ !vinfo->domains[domain_id].num_levels)
+ return NULL;
+
+ return vinfo->domains + domain_id;
+}
+
+static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
+{
+ struct voltage_info *vinfo = ph->get_priv(ph);
+
+ return vinfo->num_domains;
+}
+
+static struct scmi_voltage_proto_ops voltage_proto_ops = {
+ .num_domains_get = scmi_voltage_domains_num_get,
+ .info_get = scmi_voltage_info_get,
+ .config_set = scmi_voltage_config_set,
+ .config_get = scmi_voltage_config_get,
+ .level_set = scmi_voltage_level_set,
+ .level_get = scmi_voltage_level_get,
+};
+
+static int scmi_voltage_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int ret;
+ u32 version;
+ struct voltage_info *vinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Voltage Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ vinfo = kzalloc(sizeof(*vinfo), GFP_KERNEL);
+ if (!vinfo)
+ return -ENOMEM;
+ vinfo->version = version;
+
+ ret = scmi_protocol_attributes_get(ph, vinfo);
+ if (ret)
+ return ret;
+
+ if (vinfo->num_domains) {
+ vinfo->domains = kcalloc(vinfo->num_domains,
+ sizeof(*vinfo->domains),
+ GFP_KERNEL);
+ if (!vinfo->domains)
+ return -ENOMEM;
+ ret = scmi_voltage_descriptors_get(ph, vinfo);
+ if (ret)
+ return ret;
+ } else {
+ dev_warn(ph->dev, "No Voltage domains found.\n");
+ }
+
+ return ph->set_priv(ph, vinfo);
+}
+
+static const struct scmi_protocol scmi_voltage = {
+ .id = SCMI_PROTOCOL_VOLTAGE,
+ .instance_init = &scmi_voltage_protocol_init,
+ .ops = &voltage_proto_ops,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER(voltage, scmi_voltage)
diff --git a/drivers/pinctrl/pinctrl-stm32.c b/drivers/pinctrl/pinctrl-stm32.c
index ceaa4254c4..cee10636ce 100644
--- a/drivers/pinctrl/pinctrl-stm32.c
+++ b/drivers/pinctrl/pinctrl-stm32.c
@@ -303,7 +303,7 @@ static int stm32_gpiochip_add(struct stm32_gpio_bank *bank,
enum { PINCTRL_PHANDLE, GPIOCTRL_OFFSET, PINCTRL_OFFSET, PINCOUNT, GPIO_RANGE_NCELLS };
const __be32 *gpio_ranges;
u32 ngpios;
- int id, ret, size;
+ int ret, size;
dev = of_platform_device_create(np, parent);
if (!dev)
@@ -347,17 +347,7 @@ static int stm32_gpiochip_add(struct stm32_gpio_bank *bank,
bank->base = IOMEM(iores->start);
- if (dev->id >= 0) {
- id = dev->id;
- } else {
- id = of_alias_get_id(np, "gpio");
- if (id < 0) {
- dev_err(dev, "Failed to get GPIO alias\n");
- return id;
- }
- }
-
- bank->chip.base = id * STM32_GPIO_PINS_PER_BANK;
+ bank->chip.base = be32_to_cpu(gpio_ranges[PINCTRL_OFFSET]);
bank->chip.ops = &stm32_gpio_ops;
bank->chip.dev = dev;
bank->clk = clk_get(dev, NULL);
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 564b77ecba..f931c117f4 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -62,3 +62,10 @@ config POWER_RESET_HTIF_POWEROFF
help
Adds poweroff support via the syscall device on systems
supporting the UC Berkely Host/Target Interface (HTIF).
+
+config RESET_STM32
+	bool "STM32 Restart Driver"
+ depends on ARCH_STM32MP || COMPILE_TEST
+ help
+ This enables support for restarting and reset source
+ computation on the STM32MP1.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index c581382be8..347da50d76 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_POWER_RESET_SYSCON_POWEROFF) += syscon-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HTIF_POWEROFF) += htif-poweroff.o
+obj-$(CONFIG_RESET_STM32) += stm32-reboot.o
diff --git a/drivers/reset/reset-stm32.c b/drivers/power/reset/stm32-reboot.c
index 186b2a8bc6..809531e713 100644
--- a/drivers/reset/reset-stm32.c
+++ b/drivers/power/reset/stm32-reboot.c
@@ -6,12 +6,11 @@
*/
#include <common.h>
-#include <init.h>
#include <linux/err.h>
-#include <linux/reset-controller.h>
#include <restart.h>
#include <reset_source.h>
#include <asm/io.h>
+#include <soc/stm32/reboot.h>
#define RCC_CL 0x4
@@ -42,43 +41,50 @@ struct stm32_reset_reason {
struct stm32_reset {
void __iomem *base;
- struct reset_controller_dev rcdev;
struct restart_handler restart;
- const struct stm32_reset_ops *ops;
};
-struct stm32_reset_ops {
- void (*reset)(void __iomem *reg, unsigned offset, bool assert);
- void __noreturn (*sys_reset)(struct restart_handler *rst);
- const struct stm32_reset_reason *reset_reasons;
-};
-
-static struct stm32_reset *to_stm32_reset(struct reset_controller_dev *rcdev)
+static u32 stm32_reset_status(struct stm32_reset *priv, unsigned long bank)
{
- return container_of(rcdev, struct stm32_reset, rcdev);
+ return readl(priv->base + bank);
}
-static void stm32mp_reset(void __iomem *reg, unsigned offset, bool assert)
+static void stm32_reset(struct stm32_reset *priv, unsigned long id, bool assert)
{
+ int bank = (id / 32) * 4;
+ int offset = id % 32;
+ void __iomem *reg = priv->base + bank;
+
if (!assert)
reg += RCC_CL;
writel(BIT(offset), reg);
}
-static u32 stm32_reset_status(struct stm32_reset *priv, unsigned long bank)
+static void __noreturn stm32mp_rcc_restart_handler(struct restart_handler *rst)
{
- return readl(priv->base + bank);
-}
+ struct stm32_reset *priv = container_of(rst, struct stm32_reset, restart);
-static void stm32_reset(struct stm32_reset *priv, unsigned long id, bool assert)
-{
- int bank = (id / 32) * 4;
- int offset = id % 32;
+ stm32_reset(priv, RCC_MP_GRSTCSETR * BITS_PER_BYTE, true);
- priv->ops->reset(priv->base + bank, offset, assert);
+ mdelay(1000);
+ hang();
}
+static const struct stm32_reset_reason stm32mp_reset_reasons[] = {
+ { STM32MP_RCC_RSTF_POR, RESET_POR, 0 },
+ { STM32MP_RCC_RSTF_BOR, RESET_BROWNOUT, 0 },
+ { STM32MP_RCC_RSTF_STDBY, RESET_WKE, 0 },
+ { STM32MP_RCC_RSTF_CSTDBY, RESET_WKE, 1 },
+ { STM32MP_RCC_RSTF_MPSYS, RESET_RST, 2 },
+ { STM32MP_RCC_RSTF_MPUP0, RESET_RST, 0 },
+ { STM32MP_RCC_RSTF_MPUP1, RESET_RST, 1 },
+ { STM32MP_RCC_RSTF_IWDG1, RESET_WDG, 0 },
+ { STM32MP_RCC_RSTF_IWDG2, RESET_WDG, 1 },
+ { STM32MP_RCC_RSTF_PAD, RESET_EXT, 1 },
+ { /* sentinel */ }
+};
+
static void stm32_set_reset_reason(struct stm32_reset *priv,
const struct stm32_reset_reason *reasons)
{
@@ -102,100 +108,19 @@ static void stm32_set_reset_reason(struct stm32_reset *priv,
reset_source_to_string(type), reg);
}
-static int stm32_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- stm32_reset(to_stm32_reset(rcdev), id, true);
- return 0;
-}
-
-static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- stm32_reset(to_stm32_reset(rcdev), id, false);
- return 0;
-}
-
-static const struct reset_control_ops stm32_reset_ops = {
- .assert = stm32_reset_assert,
- .deassert = stm32_reset_deassert,
-};
-
-static int stm32_reset_probe(struct device_d *dev)
+void stm32mp_system_restart_init(void __iomem *base)
{
struct stm32_reset *priv;
- struct resource *iores;
- int ret;
priv = xzalloc(sizeof(*priv));
- ret = dev_get_drvdata(dev, (const void **)&priv->ops);
- if (ret)
- return ret;
-
- iores = dev_request_mem_resource(dev, 0);
- if (IS_ERR(iores))
- return PTR_ERR(iores);
-
- priv->base = IOMEM(iores->start);
- priv->rcdev.nr_resets = (iores->end - iores->start) * BITS_PER_BYTE;
- priv->rcdev.ops = &stm32_reset_ops;
- priv->rcdev.of_node = dev->device_node;
-
- if (priv->ops->sys_reset) {
- priv->restart.name = "stm32-rcc";
- priv->restart.restart = priv->ops->sys_reset;
- priv->restart.priority = 200;
-
- ret = restart_handler_register(&priv->restart);
- if (ret)
- dev_warn(dev, "Cannot register restart handler\n");
- }
- if (priv->ops->reset_reasons)
- stm32_set_reset_reason(priv, priv->ops->reset_reasons);
-
- return reset_controller_register(&priv->rcdev);
-}
+ priv->base = base;
-static void __noreturn stm32mp_rcc_restart_handler(struct restart_handler *rst)
-{
- struct stm32_reset *priv = container_of(rst, struct stm32_reset, restart);
+ priv->restart.name = "stm32-rcc";
+ priv->restart.restart = stm32mp_rcc_restart_handler;
+ priv->restart.priority = 200;
- stm32_reset(priv, RCC_MP_GRSTCSETR * BITS_PER_BYTE, true);
+ restart_handler_register(&priv->restart);
- mdelay(1000);
- hang();
+ stm32_set_reset_reason(priv, stm32mp_reset_reasons);
}
-
-static const struct stm32_reset_reason stm32mp_reset_reasons[] = {
- { STM32MP_RCC_RSTF_POR, RESET_POR, 0 },
- { STM32MP_RCC_RSTF_BOR, RESET_BROWNOUT, 0 },
- { STM32MP_RCC_RSTF_STDBY, RESET_WKE, 0 },
- { STM32MP_RCC_RSTF_CSTDBY, RESET_WKE, 1 },
- { STM32MP_RCC_RSTF_MPSYS, RESET_RST, 2 },
- { STM32MP_RCC_RSTF_MPUP0, RESET_RST, 0 },
- { STM32MP_RCC_RSTF_MPUP1, RESET_RST, 1 },
- { STM32MP_RCC_RSTF_IWDG1, RESET_WDG, 0 },
- { STM32MP_RCC_RSTF_IWDG2, RESET_WDG, 1 },
- { STM32MP_RCC_RSTF_PAD, RESET_EXT, 1 },
- { /* sentinel */ }
-};
-
-static const struct stm32_reset_ops stm32mp1_reset_ops = {
- .reset = stm32mp_reset,
- .sys_reset = stm32mp_rcc_restart_handler,
- .reset_reasons = stm32mp_reset_reasons,
-};
-
-static const struct of_device_id stm32_rcc_reset_dt_ids[] = {
- { .compatible = "st,stm32mp1-rcc", .data = &stm32mp1_reset_ops },
- { /* sentinel */ },
-};
-
-static struct driver_d stm32_rcc_reset_driver = {
- .name = "stm32mp_rcc_reset",
- .probe = stm32_reset_probe,
- .of_compatible = DRV_OF_COMPAT(stm32_rcc_reset_dt_ids),
-};
-
-postcore_platform_driver(stm32_rcc_reset_driver);
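+
+/*
+ * A sketch of the intended caller: the SoC startup code (e.g. the
+ * stm32mp1 clock driver) passes in the already mapped RCC base, which
+ * sits at 0x50000000 on STM32MP1:
+ *
+ *	stm32mp_system_restart_init(IOMEM(0x50000000));
+ */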
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c468e45915..38a47b7bc2 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -55,4 +55,13 @@ config REGULATOR_ANATOP
regulators. It is recommended that this option be
enabled on i.MX6 platform.
+config REGULATOR_ARM_SCMI
+ tristate "SCMI based regulator driver"
+ depends on ARM_SCMI_PROTOCOL && OFDEVICE
+ help
+	  This adds regulator driver support for ARM platforms using the
+	  SCMI protocol for device voltage management.
+	  This driver uses the SCMI Message Protocol driver to interact
+	  with the firmware providing the device voltage functionality.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 7b5d527cb1..5eaa657ad1 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
+obj-$(CONFIG_REGULATOR_ARM_SCMI) += scmi-regulator.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a29fe90d8d..af8a0cb4fc 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -166,6 +166,8 @@ int of_regulator_register(struct regulator_dev *rd, struct device_node *node)
rd->always_on = of_property_read_bool(node, "regulator-always-on");
name = of_get_property(node, "regulator-name", NULL);
+ if (!name)
+ name = node->name;
ri = __regulator_register(rd, name);
if (IS_ERR(ri))
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
new file mode 100644
index 0000000000..3e6fd3ec60
--- /dev/null
+++ b/drivers/regulator/scmi-regulator.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// System Control and Management Interface (SCMI) based regulator driver
+//
+// Copyright (C) 2020-2021 ARM Ltd.
+//
+// Implements a regulator driver on top of the SCMI Voltage Protocol.
+//
+// The ARM SCMI Protocol aims in general to hide as much as possible all the
+// underlying operational details while providing an abstracted interface for
+// its users to operate upon: as a consequence the resulting operational
+// capabilities and configurability of this regulator device are much more
+// limited than the ones usually available on a standard physical regulator.
+//
+// The supported SCMI regulator ops are restricted to the bare minimum:
+//
+// - 'status_ops': enable/disable/is_enabled
+// - 'voltage_ops': get_voltage_sel/set_voltage_sel
+// list_voltage/map_voltage
+//
+// Each SCMI regulator instance is associated, through the means of a proper DT
+// entry description, to a specific SCMI Voltage Domain.
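+//
+// A minimal DT sketch of such an association; node and label names are
+// illustrative, while the 'reg' value selects the SCMI Voltage Domain,
+// matching what process_scmi_regulator_of_node() parses below:
+//
+//	scmi_voltage: protocol@17 {
+//		reg = <0x17>;		// SCMI_PROTOCOL_VOLTAGE
+//
+//		regulators {
+//			vdd_example: regulator@0 {
+//				reg = <0>;
+//				regulator-name = "vdd_example";
+//			};
+//		};
+//	};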
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <common.h>
+#include <of.h>
+#include <regulator.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+static const struct scmi_voltage_proto_ops *voltage_ops;
+
+struct scmi_reg_desc {
+ struct regulator_desc desc;
+ const char *name;
+ const char *supply_name;
+};
+
+struct scmi_regulator {
+ u32 id;
+ struct scmi_device *sdev;
+ struct scmi_protocol_handle *ph;
+ struct regulator_dev rdev;
+ struct device_node *device_node;
+ struct scmi_reg_desc sdesc;
+};
+
+struct scmi_regulator_info {
+ int num_doms;
+ struct scmi_regulator **sregv;
+};
+
+static inline struct scmi_regulator *to_scmi_regulator(struct regulator_dev *rdev)
+{
+ return container_of(rdev, struct scmi_regulator, rdev);
+}
+
+static int scmi_reg_enable(struct regulator_dev *rdev)
+{
+ struct scmi_regulator *sreg = to_scmi_regulator(rdev);
+
+ return voltage_ops->config_set(sreg->ph, sreg->id,
+ SCMI_VOLTAGE_ARCH_STATE_ON);
+}
+
+static int scmi_reg_disable(struct regulator_dev *rdev)
+{
+ struct scmi_regulator *sreg = to_scmi_regulator(rdev);
+
+ return voltage_ops->config_set(sreg->ph, sreg->id,
+ SCMI_VOLTAGE_ARCH_STATE_OFF);
+}
+
+static int scmi_reg_is_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ u32 config;
+ struct scmi_regulator *sreg = to_scmi_regulator(rdev);
+ struct scmi_reg_desc *sdesc = &sreg->sdesc;
+
+ ret = voltage_ops->config_get(sreg->ph, sreg->id, &config);
+ if (ret) {
+ dev_err(&sreg->sdev->dev,
+ "Error %d reading regulator %s status.\n",
+ ret, sdesc->name);
+ return ret;
+ }
+
+ return config & SCMI_VOLTAGE_ARCH_STATE_ON;
+}
+
+static int scmi_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ int ret;
+ s32 volt_uV;
+ struct scmi_regulator *sreg = to_scmi_regulator(rdev);
+
+ ret = voltage_ops->level_get(sreg->ph, sreg->id, &volt_uV);
+ if (ret)
+ return ret;
+
+ return sreg->sdesc.desc.ops->map_voltage(rdev, volt_uV, volt_uV);
+}
+
+static int scmi_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ s32 volt_uV;
+ struct scmi_regulator *sreg = to_scmi_regulator(rdev);
+
+ volt_uV = sreg->sdesc.desc.ops->list_voltage(rdev, selector);
+ if (volt_uV <= 0)
+ return -EINVAL;
+
+ return voltage_ops->level_set(sreg->ph, sreg->id, 0x0, volt_uV);
+}
+
+static const struct regulator_ops scmi_reg_fixed_ops = {
+ .enable = scmi_reg_enable,
+ .disable = scmi_reg_disable,
+ .is_enabled = scmi_reg_is_enabled,
+};
+
+static const struct regulator_ops scmi_reg_linear_ops = {
+ .enable = scmi_reg_enable,
+ .disable = scmi_reg_disable,
+ .is_enabled = scmi_reg_is_enabled,
+ .get_voltage_sel = scmi_reg_get_voltage_sel,
+ .set_voltage_sel = scmi_reg_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+};
+
+static const struct regulator_ops scmi_reg_discrete_ops = {
+ .enable = scmi_reg_enable,
+ .disable = scmi_reg_disable,
+ .is_enabled = scmi_reg_is_enabled,
+ .get_voltage_sel = scmi_reg_get_voltage_sel,
+ .set_voltage_sel = scmi_reg_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
+static int
+scmi_config_linear_regulator_mappings(struct scmi_regulator *sreg,
+ const struct scmi_voltage_info *vinfo)
+{
+ struct regulator_desc *desc = &sreg->sdesc.desc;
+ s32 delta_uV;
+
+ /*
+ * Note that SCMI voltage domains describable by linear ranges
+ * (segments) {low, high, step} are guaranteed to come in one single
+ * triplet by the SCMI Voltage Domain protocol support itself.
+ */
+
+ delta_uV = (vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_HIGH] -
+ vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_LOW]);
+
+	/* Rule out buggy negative-interval answers from the fw */
+ if (delta_uV < 0) {
+ dev_err(&sreg->sdev->dev,
+ "Invalid volt-range %d-%duV for domain %d\n",
+ vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_LOW],
+ vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_HIGH],
+ sreg->id);
+ return -EINVAL;
+ }
+
+ if (!delta_uV) {
+ /* Just one fixed voltage exposed by SCMI */
+ desc->fixed_uV =
+ vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_LOW];
+ desc->n_voltages = 1;
+ desc->ops = &scmi_reg_fixed_ops;
+ } else {
+ /* One simple linear mapping. */
+ desc->min_uV =
+ vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_LOW];
+ desc->uV_step =
+ vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_STEP];
+ desc->linear_min_sel = 0;
+ desc->n_voltages = (delta_uV / desc->uV_step) + 1;
+ desc->ops = &scmi_reg_linear_ops;
+ }
+
+ return 0;
+}
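+
+/*
+ * Worked example for the linear mapping above: assuming the firmware
+ * reports the triplet {low, high, step} = {700000, 1200000, 50000} uV,
+ * delta_uV is 500000, so n_voltages = 500000 / 50000 + 1 = 11 selectors,
+ * with selector 0 -> 700000 uV and selector 10 -> 1200000 uV.
+ */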
+
+static int
+scmi_config_discrete_regulator_mappings(struct scmi_regulator *sreg,
+ const struct scmi_voltage_info *vinfo)
+{
+ struct regulator_desc *desc = &sreg->sdesc.desc;
+
+ /* Discrete non linear levels are mapped to volt_table */
+ desc->n_voltages = vinfo->num_levels;
+
+ if (desc->n_voltages > 1) {
+ desc->volt_table = (const unsigned int *)vinfo->levels_uv;
+ desc->ops = &scmi_reg_discrete_ops;
+ } else {
+ desc->fixed_uV = vinfo->levels_uv[0];
+ desc->ops = &scmi_reg_fixed_ops;
+ }
+
+ return 0;
+}
+
+static int scmi_regulator_common_init(struct scmi_regulator *sreg)
+{
+ int ret;
+ struct device_d *dev = &sreg->sdev->dev;
+ const struct scmi_voltage_info *vinfo;
+ struct scmi_reg_desc *sdesc = &sreg->sdesc;
+
+ vinfo = voltage_ops->info_get(sreg->ph, sreg->id);
+ if (!vinfo) {
+ dev_warn(dev, "Failure to get voltage domain %d\n",
+ sreg->id);
+ return -ENODEV;
+ }
+
+	/*
+	 * The regulator framework does not fully support negative
+	 * voltages, so we discard any voltage domain reported as
+	 * supporting negative voltages; as a consequence each levels_uv
+	 * entry is guaranteed to be non-negative from here on.
+	 */
+ if (vinfo->negative_volts_allowed) {
+ dev_warn(dev, "Negative voltages NOT supported...skip %s\n",
+ sreg->device_node->full_name);
+ return -EOPNOTSUPP;
+ }
+
+ sdesc->name = basprintf("%s", vinfo->name);
+ if (!sdesc->name)
+ return -ENOMEM;
+
+ if (vinfo->segmented)
+ ret = scmi_config_linear_regulator_mappings(sreg, vinfo);
+ else
+ ret = scmi_config_discrete_regulator_mappings(sreg, vinfo);
+
+ return ret;
+}
+
+static int process_scmi_regulator_of_node(struct scmi_device *sdev,
+ struct scmi_protocol_handle *ph,
+ struct device_node *np,
+ struct scmi_regulator_info *rinfo)
+{
+	u32 dom;
+	int ret;
+
+ ret = of_property_read_u32(np, "reg", &dom);
+ if (ret)
+ return ret;
+
+ if (dom >= rinfo->num_doms)
+ return -ENODEV;
+
+ if (rinfo->sregv[dom]) {
+ dev_err(&sdev->dev,
+ "SCMI Voltage Domain %d already in use. Skipping: %s\n",
+ dom, np->full_name);
+ return -EINVAL;
+ }
+
+ rinfo->sregv[dom] = kzalloc(sizeof(struct scmi_regulator), GFP_KERNEL);
+ if (!rinfo->sregv[dom])
+ return -ENOMEM;
+
+ rinfo->sregv[dom]->id = dom;
+ rinfo->sregv[dom]->sdev = sdev;
+ rinfo->sregv[dom]->ph = ph;
+
+	/* Keep a reference to the matching DT node */
+ rinfo->sregv[dom]->device_node = np;
+
+ dev_dbg(&sdev->dev,
+ "Found SCMI Regulator entry -- OF node [%d] -> %s\n",
+ dom, np->full_name);
+
+ return 0;
+}
+
+static int scmi_regulator_probe(struct scmi_device *sdev)
+{
+ int d, ret, num_doms;
+ struct device_node *np, *child;
+ const struct scmi_handle *handle = sdev->handle;
+ struct scmi_regulator_info *rinfo;
+ struct scmi_protocol_handle *ph;
+ struct scmi_reg_desc *sdesc;
+
+ if (!handle)
+ return -ENODEV;
+
+ voltage_ops = handle->protocol_get(sdev, SCMI_PROTOCOL_VOLTAGE, &ph);
+ if (IS_ERR(voltage_ops))
+ return PTR_ERR(voltage_ops);
+
+ num_doms = voltage_ops->num_domains_get(ph);
+ if (num_doms <= 0) {
+ if (!num_doms) {
+ dev_err(&sdev->dev,
+ "number of voltage domains invalid\n");
+ num_doms = -EINVAL;
+ } else {
+ dev_err(&sdev->dev,
+ "failed to get voltage domains - err:%d\n",
+ num_doms);
+ }
+
+ return num_doms;
+ }
+
+ rinfo = kzalloc(sizeof(*rinfo), GFP_KERNEL);
+ if (!rinfo)
+ return -ENOMEM;
+
+ /* Allocate pointers array for all possible domains */
+ rinfo->sregv = kcalloc(num_doms, sizeof(void *), GFP_KERNEL);
+ if (!rinfo->sregv)
+ return -ENOMEM;
+
+ rinfo->num_doms = num_doms;
+
+ /*
+ * Start collecting into rinfo->sregv possibly good SCMI Regulators as
+ * described by a well-formed DT entry and associated with an existing
+ * plausible SCMI Voltage Domain number, all belonging to this SCMI
+ * platform instance node (handle->dev->of_node).
+ */
+ np = of_find_node_by_name(handle->dev->device_node, "regulators");
+ for_each_child_of_node(np, child) {
+ ret = process_scmi_regulator_of_node(sdev, ph, child, rinfo);
+ /* abort on any mem issue */
+ if (ret == -ENOMEM)
+ return ret;
+ }
+
+ /*
+ * Register a regulator for each valid regulator-DT-entry that we
+ * can successfully reach via SCMI and has a valid associated voltage
+ * domain.
+ */
+ for (d = 0; d < num_doms; d++) {
+ struct scmi_regulator *sreg = rinfo->sregv[d];
+
+ /* Skip empty slots */
+ if (!sreg)
+ continue;
+
+ sdesc = &sreg->sdesc;
+
+ ret = scmi_regulator_common_init(sreg);
+ /* Skip invalid voltage domains */
+ if (ret)
+ continue;
+
+		/* Register against the per-domain DT node, not the container */
+		ret = of_regulator_register(&sreg->rdev, sreg->device_node);
+ if (ret)
+ continue;
+
+ dev_info(&sdev->dev,
+ "Regulator %s registered for domain [%d]\n",
+ sreg->sdesc.name, sreg->id);
+ }
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_regulator_id_table[] = {
+ { SCMI_PROTOCOL_VOLTAGE, "regulator" },
+ { },
+};
+
+static struct scmi_driver scmi_drv = {
+ .name = "scmi-regulator",
+ .probe = scmi_regulator_probe,
+ .id_table = scmi_regulator_id_table,
+};
+core_scmi_driver(scmi_drv);
+
+MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index b12159094d..913b309eac 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -39,16 +39,21 @@ config RESET_IMX7
help
This enables the reset controller driver for i.MX7 SoCs.
-config RESET_STM32
- bool "STM32 Reset Driver"
- depends on ARCH_STM32MP || COMPILE_TEST
- help
- This enables the reset controller driver for STM32MP1.
-
config RESET_STARFIVE
bool "StarFive Controller Driver" if COMPILE_TEST
default SOC_STARFIVE
help
This enables the reset controller driver for the StarFive JH7100.
+config RESET_SCMI
+ tristate "Reset driver controlled via ARM SCMI interface"
+ depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
+ default ARM_SCMI_PROTOCOL
+ help
+	  This driver provides support for reset signals/domains that are
+	  controlled by firmware implementing the SCMI interface.
+
+	  This driver uses the SCMI Message Protocol to interact with the
+	  firmware controlling all the reset signals.
+
endif
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index b4270411fd..b1668433d7 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_RESET_CONTROLLER) += core.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
-obj-$(CONFIG_RESET_STM32) += reset-stm32.o
obj-$(CONFIG_RESET_STARFIVE) += reset-starfive-vic.o
+obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 17deb3fa8f..4355c3415e 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -82,6 +82,27 @@ void reset_controller_unregister(struct reset_controller_dev *rcdev)
EXPORT_SYMBOL_GPL(reset_controller_unregister);
/**
+ * reset_control_status - check reset line status
+ * @rstc: reset controller
+ *
+ * Returns a negative errno if not supported, a positive value if the
+ * reset line is asserted, or zero if the reset line is not asserted or
+ * if the desc is NULL (optional reset).
+ */
+int reset_control_status(struct reset_control *rstc)
+{
+ if (!rstc)
+ return 0;
+
+ if (WARN_ON(IS_ERR(rstc)))
+ return -EINVAL;
+
+ if (rstc->rcdev->ops->status)
+ return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(reset_control_status);
+
+/**
* reset_control_reset - reset the controlled device
* @rstc: reset controller
*/
diff --git a/drivers/reset/reset-scmi.c b/drivers/reset/reset-scmi.c
new file mode 100644
index 0000000000..c33bbc5c8a
--- /dev/null
+++ b/drivers/reset/reset-scmi.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM System Control and Management Interface (ARM SCMI) reset driver
+ *
+ * Copyright (C) 2019-2021 ARM Ltd.
+ */
+
+#include <common.h>
+#include <of.h>
+#include <driver.h>
+#include <linux/reset-controller.h>
+#include <linux/scmi_protocol.h>
+
+static const struct scmi_reset_proto_ops *reset_ops;
+
+/**
+ * struct scmi_reset_data - reset controller information structure
+ * @rcdev: reset controller entity
+ * @ph: ARM SCMI protocol handle used for communication with system controller
+ */
+struct scmi_reset_data {
+ struct reset_controller_dev rcdev;
+ const struct scmi_protocol_handle *ph;
+};
+
+#define to_scmi_reset_data(p) container_of((p), struct scmi_reset_data, rcdev)
+#define to_scmi_handle(p) (to_scmi_reset_data(p)->ph)
+
+/**
+ * scmi_reset_assert() - assert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of the reset to be asserted
+ *
+ * This function implements the reset driver op to assert a device's reset
+ * using the ARM SCMI protocol.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int
+scmi_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ const struct scmi_protocol_handle *ph = to_scmi_handle(rcdev);
+
+ return reset_ops->assert(ph, id);
+}
+
+/**
+ * scmi_reset_deassert() - deassert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of the reset to be deasserted
+ *
+ * This function implements the reset driver op to deassert a device's reset
+ * using the ARM SCMI protocol.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int
+scmi_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ const struct scmi_protocol_handle *ph = to_scmi_handle(rcdev);
+
+ return reset_ops->deassert(ph, id);
+}
+
+/**
+ * scmi_reset_reset() - reset the device
+ * @rcdev: reset controller entity
+ * @id: ID of the reset signal to be reset (assert + deassert)
+ *
+ * This function implements the reset driver op to trigger a device's
+ * reset signal using the ARM SCMI protocol.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int
+scmi_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ const struct scmi_protocol_handle *ph = to_scmi_handle(rcdev);
+
+ return reset_ops->reset(ph, id);
+}
+
+static const struct reset_control_ops scmi_reset_ops = {
+ .assert = scmi_reset_assert,
+ .deassert = scmi_reset_deassert,
+ .reset = scmi_reset_reset,
+};
+
+static int scmi_reset_probe(struct scmi_device *sdev)
+{
+ struct scmi_reset_data *data;
+ struct device_d *dev = &sdev->dev;
+ struct device_node *np = dev->device_node;
+ const struct scmi_handle *handle = sdev->handle;
+ struct scmi_protocol_handle *ph;
+
+ if (!handle)
+ return -ENODEV;
+
+ reset_ops = handle->protocol_get(sdev, SCMI_PROTOCOL_RESET, &ph);
+ if (IS_ERR(reset_ops))
+ return PTR_ERR(reset_ops);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->rcdev.ops = &scmi_reset_ops;
+ data->rcdev.of_node = np;
+ data->rcdev.nr_resets = reset_ops->num_domains_get(ph);
+ data->ph = ph;
+
+ return reset_controller_register(&data->rcdev);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_RESET, "reset" },
+ { },
+};
+
+static struct scmi_driver scmi_reset_driver = {
+ .name = "scmi-reset",
+ .probe = scmi_reset_probe,
+ .id_table = scmi_id_table,
+};
+core_scmi_driver(scmi_reset_driver);
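+
+/*
+ * Once this controller is registered, consumers go through the generic
+ * barebox reset API; a sketch, assuming a peripheral node carrying
+ * 'resets = <&scmi_reset N>':
+ *
+ *	struct reset_control *rstc = reset_control_get(dev, NULL);
+ *	if (!IS_ERR(rstc))
+ *		reset_control_reset(rstc);	// SCMI autonomous reset
+ */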
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI reset controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 082956d94d..9db00f64f4 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -75,7 +75,7 @@ static int reset_simple_reset(struct reset_controller_dev *rcdev,
return reset_simple_deassert(rcdev, id);
}
-static int __maybe_unused reset_simple_status(struct reset_controller_dev *rcdev,
+static int reset_simple_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct reset_simple_data *data = to_reset_simple_data(rcdev);
@@ -93,6 +93,7 @@ const struct reset_control_ops reset_simple_ops = {
.assert = reset_simple_assert,
.deassert = reset_simple_deassert,
.reset = reset_simple_reset,
+ .status = reset_simple_status,
};
EXPORT_SYMBOL_GPL(reset_simple_ops);
diff --git a/drivers/serial/serial_stm32.c b/drivers/serial/serial_stm32.c
index 271897f174..f1d4c6de90 100644
--- a/drivers/serial/serial_stm32.c
+++ b/drivers/serial/serial_stm32.c
@@ -41,6 +41,8 @@ static int stm32_serial_setbaudrate(struct console_device *cdev, int baudrate)
unsigned long clock_rate;
clock_rate = clk_get_rate(stm32->clk);
+ if (!clock_rate)
+ return -EINVAL;
int_div = DIV_ROUND_CLOSEST(clock_rate, baudrate);